Treat CEE_BREAK the same as Debugger:Break (), i.e. route it through sdb.
[mono-project/dkf.git] / mono / mini / method-to-ir.c
blobb11a4cc4a305c3f455f0537aa4247463796181f3
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/utils/mono-memory-model.h>
55 #include <mono/metadata/mono-basic-block.h>
57 #include "mini.h"
58 #include "trace.h"
60 #include "ir-emit.h"
62 #include "jit-icalls.h"
63 #include "jit.h"
64 #include "debugger-agent.h"
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of the current method and fall back to an ordinary call.
 * Only meaningful while compiling an inlined body (cfg->method != method). */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of method-to-IR conversion if a pending exception was recorded on CFG. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException on CFG and abort compilation.
 * Expects 'method' and 'cil_method' to be in scope at the expansion site. */
#define METHOD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
		g_free (method_fname); \
		g_free (cil_method_fname); \
		goto exception_exit; \
	} while (0)

/* Record a FieldAccessException on CFG and abort compilation.
 * Expects 'method' and 'field' to be in scope at the expansion site. */
#define FIELD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *field_fname = mono_field_full_name (field); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
		g_free (method_fname); \
		g_free (field_fname); \
		goto exception_exit; \
	} while (0)

/* When compiling with generic sharing, mark the method as unshareable for
 * OPCODE and abort; a no-op outside a generic sharing context. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->generic_sharing_context) { \
			if (cfg->verbose_level > 2) \
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit; \
		} \
	} while (0)

/* Record an OutOfMemoryException on CFG and abort compilation. */
#define OUT_OF_MEMORY_FAILURE do { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
		goto exception_exit; \
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
109 static int ldind_to_load_membase (int opcode);
110 static int stind_to_store_membase (int opcode);
112 int mono_op_to_op_imm (int opcode);
113 int mono_op_to_op_imm_noemul (int opcode);
115 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
116 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
117 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
119 /* helper methods signatures */
120 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
121 static MonoMethodSignature *helper_sig_domain_get = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
123 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
124 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
126 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
129 * Instruction metadata
131 #ifdef MINI_OP
132 #undef MINI_OP
133 #endif
134 #ifdef MINI_OP3
135 #undef MINI_OP3
136 #endif
137 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
138 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
139 #define NONE ' '
140 #define IREG 'i'
141 #define FREG 'f'
142 #define VREG 'v'
143 #define XREG 'x'
144 #if SIZEOF_REGISTER == 8
145 #define LREG IREG
146 #else
147 #define LREG 'l'
148 #endif
149 /* keep in sync with the enum in mini.h */
150 const char
151 ins_info[] = {
152 #include "mini-ops.h"
154 #undef MINI_OP
155 #undef MINI_OP3
157 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
158 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
160 * This should contain the index of the last sreg + 1. This is not the same
161 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
163 const gint8 ins_sreg_counts[] = {
164 #include "mini-ops.h"
166 #undef MINI_OP
167 #undef MINI_OP3
169 #define MONO_INIT_VARINFO(vi,id) do { \
170 (vi)->range.first_use.pos.bid = 0xffff; \
171 (vi)->reg = -1; \
172 (vi)->idx = (id); \
173 } while (0)
175 void
176 mono_inst_set_src_registers (MonoInst *ins, int *regs)
178 ins->sreg1 = regs [0];
179 ins->sreg2 = regs [1];
180 ins->sreg3 = regs [2];
183 guint32
184 mono_alloc_ireg (MonoCompile *cfg)
186 return alloc_ireg (cfg);
189 guint32
190 mono_alloc_freg (MonoCompile *cfg)
192 return alloc_freg (cfg);
195 guint32
196 mono_alloc_preg (MonoCompile *cfg)
198 return alloc_preg (cfg);
201 guint32
202 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
204 return alloc_dreg (cfg, stack_type);
208 * mono_alloc_ireg_ref:
210 * Allocate an IREG, and mark it as holding a GC ref.
212 guint32
213 mono_alloc_ireg_ref (MonoCompile *cfg)
215 return alloc_ireg_ref (cfg);
219 * mono_alloc_ireg_mp:
221 * Allocate an IREG, and mark it as holding a managed pointer.
223 guint32
224 mono_alloc_ireg_mp (MonoCompile *cfg)
226 return alloc_ireg_mp (cfg);
230 * mono_alloc_ireg_copy:
232 * Allocate an IREG with the same GC type as VREG.
234 guint32
235 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
237 if (vreg_is_ref (cfg, vreg))
238 return alloc_ireg_ref (cfg);
239 else if (vreg_is_mp (cfg, vreg))
240 return alloc_ireg_mp (cfg);
241 else
242 return alloc_ireg (cfg);
245 guint
246 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
248 if (type->byref)
249 return OP_MOVE;
251 handle_enum:
252 switch (type->type) {
253 case MONO_TYPE_I1:
254 case MONO_TYPE_U1:
255 case MONO_TYPE_BOOLEAN:
256 return OP_MOVE;
257 case MONO_TYPE_I2:
258 case MONO_TYPE_U2:
259 case MONO_TYPE_CHAR:
260 return OP_MOVE;
261 case MONO_TYPE_I4:
262 case MONO_TYPE_U4:
263 return OP_MOVE;
264 case MONO_TYPE_I:
265 case MONO_TYPE_U:
266 case MONO_TYPE_PTR:
267 case MONO_TYPE_FNPTR:
268 return OP_MOVE;
269 case MONO_TYPE_CLASS:
270 case MONO_TYPE_STRING:
271 case MONO_TYPE_OBJECT:
272 case MONO_TYPE_SZARRAY:
273 case MONO_TYPE_ARRAY:
274 return OP_MOVE;
275 case MONO_TYPE_I8:
276 case MONO_TYPE_U8:
277 #if SIZEOF_REGISTER == 8
278 return OP_MOVE;
279 #else
280 return OP_LMOVE;
281 #endif
282 case MONO_TYPE_R4:
283 return OP_FMOVE;
284 case MONO_TYPE_R8:
285 return OP_FMOVE;
286 case MONO_TYPE_VALUETYPE:
287 if (type->data.klass->enumtype) {
288 type = mono_class_enum_basetype (type->data.klass);
289 goto handle_enum;
291 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
292 return OP_XMOVE;
293 return OP_VMOVE;
294 case MONO_TYPE_TYPEDBYREF:
295 return OP_VMOVE;
296 case MONO_TYPE_GENERICINST:
297 type = &type->data.generic_class->container_class->byval_arg;
298 goto handle_enum;
299 case MONO_TYPE_VAR:
300 case MONO_TYPE_MVAR:
301 g_assert (cfg->generic_sharing_context);
302 return OP_MOVE;
303 default:
304 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
306 return -1;
309 void
310 mono_print_bb (MonoBasicBlock *bb, const char *msg)
312 int i;
313 MonoInst *tree;
315 printf ("\n%s %d: [IN: ", msg, bb->block_num);
316 for (i = 0; i < bb->in_count; ++i)
317 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
318 printf (", OUT: ");
319 for (i = 0; i < bb->out_count; ++i)
320 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
321 printf (" ]\n");
322 for (tree = bb->code; tree; tree = tree->next)
323 mono_print_ins_index (-1, tree);
326 void
327 mono_create_helper_signatures (void)
329 helper_sig_domain_get = mono_create_icall_signature ("ptr");
330 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
332 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
333 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
334 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
335 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
339 * Can't put this at the beginning, since other files reference stuff from this
340 * file.
342 #ifndef DISABLE_JIT
344 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
346 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Look up (or lazily create and register) the basic block starting at IP. */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) {	\
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock));	\
			(tblock)->cil_code = (ip);	\
			ADD_BBLOCK (cfg, (tblock));	\
		} \
	} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 LEA: dest = sr1 + (sr2 << shift) + imm. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
#if SIZEOF_REGISTER == 8
/* On 64 bit targets, sign-extend an I4 operand when mixed with a PTR operand. */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
/* Pop two values from the eval stack, emit the binary op, push the result. */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

/* Pop one value from the eval stack, emit the unary op, push the result. */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)
/* Pop two values, emit a compare + conditional branch, and wire up the
 * true/false target basic blocks (NEXT_BLOCK, when non-NULL, is the
 * fall-through block). */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {	\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
440 /* *
441 * link_bblock: Links two basic blocks
443 * links two basic blocks in the control flow graph, the 'from'
444 * argument is the starting block and the 'to' argument is the block
445 * the control flow ends to after 'from'.
447 static void
448 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
450 MonoBasicBlock **newa;
451 int i, found;
453 #if 0
454 if (from->cil_code) {
455 if (to->cil_code)
456 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
457 else
458 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
459 } else {
460 if (to->cil_code)
461 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
462 else
463 printf ("edge from entry to exit\n");
465 #endif
467 found = FALSE;
468 for (i = 0; i < from->out_count; ++i) {
469 if (to == from->out_bb [i]) {
470 found = TRUE;
471 break;
474 if (!found) {
475 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
476 for (i = 0; i < from->out_count; ++i) {
477 newa [i] = from->out_bb [i];
479 newa [i] = to;
480 from->out_count++;
481 from->out_bb = newa;
484 found = FALSE;
485 for (i = 0; i < to->in_count; ++i) {
486 if (from == to->in_bb [i]) {
487 found = TRUE;
488 break;
491 if (!found) {
492 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
493 for (i = 0; i < to->in_count; ++i) {
494 newa [i] = to->in_bb [i];
496 newa [i] = from;
497 to->in_count++;
498 to->in_bb = newa;
502 void
503 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
505 link_bblock (cfg, from, to);
509 * mono_find_block_region:
511 * We mark each basic block with a region ID. We use that to avoid BB
512 * optimizations when blocks are in different regions.
514 * Returns:
515 * A region token that encodes where this region is, and information
516 * about the clause owner for this block.
518 * The region encodes the try/catch/filter clause that owns this block
519 * as well as the type. -1 is a special value that represents a block
520 * that is in none of try/catch/filter.
522 static int
523 mono_find_block_region (MonoCompile *cfg, int offset)
525 MonoMethodHeader *header = cfg->header;
526 MonoExceptionClause *clause;
527 int i;
529 for (i = 0; i < header->num_clauses; ++i) {
530 clause = &header->clauses [i];
531 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
532 (offset < (clause->handler_offset)))
533 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
535 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
536 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
537 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
538 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
539 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
540 else
541 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
544 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
545 return ((i + 1) << 8) | clause->flags;
548 return -1;
551 static GList*
552 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
554 MonoMethodHeader *header = cfg->header;
555 MonoExceptionClause *clause;
556 int i;
557 GList *res = NULL;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
562 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
563 if (clause->flags == type)
564 res = g_list_append (res, clause);
567 return res;
570 static void
571 mono_create_spvar_for_region (MonoCompile *cfg, int region)
573 MonoInst *var;
575 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
576 if (var)
577 return;
579 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
580 /* prevent it from being register allocated */
581 var->flags |= MONO_INST_INDIRECT;
583 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
586 MonoInst *
587 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
589 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
592 static MonoInst*
593 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
595 MonoInst *var;
597 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
598 if (var)
599 return var;
601 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
602 /* prevent it from being register allocated */
603 var->flags |= MONO_INST_INDIRECT;
605 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
607 return var;
611 * Returns the type used in the eval stack when @type is loaded.
612 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
614 void
615 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
617 MonoClass *klass;
619 inst->klass = klass = mono_class_from_mono_type (type);
620 if (type->byref) {
621 inst->type = STACK_MP;
622 return;
625 handle_enum:
626 switch (type->type) {
627 case MONO_TYPE_VOID:
628 inst->type = STACK_INV;
629 return;
630 case MONO_TYPE_I1:
631 case MONO_TYPE_U1:
632 case MONO_TYPE_BOOLEAN:
633 case MONO_TYPE_I2:
634 case MONO_TYPE_U2:
635 case MONO_TYPE_CHAR:
636 case MONO_TYPE_I4:
637 case MONO_TYPE_U4:
638 inst->type = STACK_I4;
639 return;
640 case MONO_TYPE_I:
641 case MONO_TYPE_U:
642 case MONO_TYPE_PTR:
643 case MONO_TYPE_FNPTR:
644 inst->type = STACK_PTR;
645 return;
646 case MONO_TYPE_CLASS:
647 case MONO_TYPE_STRING:
648 case MONO_TYPE_OBJECT:
649 case MONO_TYPE_SZARRAY:
650 case MONO_TYPE_ARRAY:
651 inst->type = STACK_OBJ;
652 return;
653 case MONO_TYPE_I8:
654 case MONO_TYPE_U8:
655 inst->type = STACK_I8;
656 return;
657 case MONO_TYPE_R4:
658 case MONO_TYPE_R8:
659 inst->type = STACK_R8;
660 return;
661 case MONO_TYPE_VALUETYPE:
662 if (type->data.klass->enumtype) {
663 type = mono_class_enum_basetype (type->data.klass);
664 goto handle_enum;
665 } else {
666 inst->klass = klass;
667 inst->type = STACK_VTYPE;
668 return;
670 case MONO_TYPE_TYPEDBYREF:
671 inst->klass = mono_defaults.typed_reference_class;
672 inst->type = STACK_VTYPE;
673 return;
674 case MONO_TYPE_GENERICINST:
675 type = &type->data.generic_class->container_class->byval_arg;
676 goto handle_enum;
677 case MONO_TYPE_VAR :
678 case MONO_TYPE_MVAR :
679 /* FIXME: all the arguments must be references for now,
680 * later look inside cfg and see if the arg num is
681 * really a reference
683 g_assert (cfg->generic_sharing_context);
684 inst->type = STACK_OBJ;
685 return;
686 default:
687 g_error ("unknown type 0x%02x in eval stack type", type->type);
692 * The following tables are used to quickly validate the IL code in type_from_op ().
694 static const char
695 bin_num_table [STACK_MAX] [STACK_MAX] = {
696 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
706 static const char
707 neg_table [] = {
708 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
711 /* reduce the size of this table */
712 static const char
713 bin_int_table [STACK_MAX] [STACK_MAX] = {
714 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
724 static const char
725 bin_comp_table [STACK_MAX] [STACK_MAX] = {
726 /* Inv i L p F & O vt */
727 {0},
728 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
729 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
730 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
731 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
732 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
733 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
734 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
737 /* reduce the size of this table */
738 static const char
739 shift_table [STACK_MAX] [STACK_MAX] = {
740 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
751 * Tables to map from the non-specific opcode to the matching
752 * type-specific opcode.
754 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
755 static const guint16
756 binops_op_map [STACK_MAX] = {
757 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
760 /* handles from CEE_NEG to CEE_CONV_U8 */
761 static const guint16
762 unops_op_map [STACK_MAX] = {
763 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
766 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
767 static const guint16
768 ovfops_op_map [STACK_MAX] = {
769 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
772 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
773 static const guint16
774 ovf2ops_op_map [STACK_MAX] = {
775 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
778 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
779 static const guint16
780 ovf3ops_op_map [STACK_MAX] = {
781 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
784 /* handles from CEE_BEQ to CEE_BLT_UN */
785 static const guint16
786 beqops_op_map [STACK_MAX] = {
787 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
790 /* handles from CEE_CEQ to CEE_CLT_UN */
791 static const guint16
792 ceqops_op_map [STACK_MAX] = {
793 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
797 * Sets ins->type (the type on the eval stack) according to the
798 * type of the opcode and the arguments to it.
799 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
801 * FIXME: this function sets ins->type unconditionally in some cases, but
802 * it should set it to invalid for some types (a conv.x on an object)
804 static void
805 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
807 switch (ins->opcode) {
808 /* binops */
809 case CEE_ADD:
810 case CEE_SUB:
811 case CEE_MUL:
812 case CEE_DIV:
813 case CEE_REM:
814 /* FIXME: check unverifiable args for STACK_MP */
815 ins->type = bin_num_table [src1->type] [src2->type];
816 ins->opcode += binops_op_map [ins->type];
817 break;
818 case CEE_DIV_UN:
819 case CEE_REM_UN:
820 case CEE_AND:
821 case CEE_OR:
822 case CEE_XOR:
823 ins->type = bin_int_table [src1->type] [src2->type];
824 ins->opcode += binops_op_map [ins->type];
825 break;
826 case CEE_SHL:
827 case CEE_SHR:
828 case CEE_SHR_UN:
829 ins->type = shift_table [src1->type] [src2->type];
830 ins->opcode += binops_op_map [ins->type];
831 break;
832 case OP_COMPARE:
833 case OP_LCOMPARE:
834 case OP_ICOMPARE:
835 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
836 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
837 ins->opcode = OP_LCOMPARE;
838 else if (src1->type == STACK_R8)
839 ins->opcode = OP_FCOMPARE;
840 else
841 ins->opcode = OP_ICOMPARE;
842 break;
843 case OP_ICOMPARE_IMM:
844 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
845 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
846 ins->opcode = OP_LCOMPARE_IMM;
847 break;
848 case CEE_BEQ:
849 case CEE_BGE:
850 case CEE_BGT:
851 case CEE_BLE:
852 case CEE_BLT:
853 case CEE_BNE_UN:
854 case CEE_BGE_UN:
855 case CEE_BGT_UN:
856 case CEE_BLE_UN:
857 case CEE_BLT_UN:
858 ins->opcode += beqops_op_map [src1->type];
859 break;
860 case OP_CEQ:
861 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
862 ins->opcode += ceqops_op_map [src1->type];
863 break;
864 case OP_CGT:
865 case OP_CGT_UN:
866 case OP_CLT:
867 case OP_CLT_UN:
868 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
869 ins->opcode += ceqops_op_map [src1->type];
870 break;
871 /* unops */
872 case CEE_NEG:
873 ins->type = neg_table [src1->type];
874 ins->opcode += unops_op_map [ins->type];
875 break;
876 case CEE_NOT:
877 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
878 ins->type = src1->type;
879 else
880 ins->type = STACK_INV;
881 ins->opcode += unops_op_map [ins->type];
882 break;
883 case CEE_CONV_I1:
884 case CEE_CONV_I2:
885 case CEE_CONV_I4:
886 case CEE_CONV_U4:
887 ins->type = STACK_I4;
888 ins->opcode += unops_op_map [src1->type];
889 break;
890 case CEE_CONV_R_UN:
891 ins->type = STACK_R8;
892 switch (src1->type) {
893 case STACK_I4:
894 case STACK_PTR:
895 ins->opcode = OP_ICONV_TO_R_UN;
896 break;
897 case STACK_I8:
898 ins->opcode = OP_LCONV_TO_R_UN;
899 break;
901 break;
902 case CEE_CONV_OVF_I1:
903 case CEE_CONV_OVF_U1:
904 case CEE_CONV_OVF_I2:
905 case CEE_CONV_OVF_U2:
906 case CEE_CONV_OVF_I4:
907 case CEE_CONV_OVF_U4:
908 ins->type = STACK_I4;
909 ins->opcode += ovf3ops_op_map [src1->type];
910 break;
911 case CEE_CONV_OVF_I_UN:
912 case CEE_CONV_OVF_U_UN:
913 ins->type = STACK_PTR;
914 ins->opcode += ovf2ops_op_map [src1->type];
915 break;
916 case CEE_CONV_OVF_I1_UN:
917 case CEE_CONV_OVF_I2_UN:
918 case CEE_CONV_OVF_I4_UN:
919 case CEE_CONV_OVF_U1_UN:
920 case CEE_CONV_OVF_U2_UN:
921 case CEE_CONV_OVF_U4_UN:
922 ins->type = STACK_I4;
923 ins->opcode += ovf2ops_op_map [src1->type];
924 break;
925 case CEE_CONV_U:
926 ins->type = STACK_PTR;
927 switch (src1->type) {
928 case STACK_I4:
929 ins->opcode = OP_ICONV_TO_U;
930 break;
931 case STACK_PTR:
932 case STACK_MP:
933 #if SIZEOF_VOID_P == 8
934 ins->opcode = OP_LCONV_TO_U;
935 #else
936 ins->opcode = OP_MOVE;
937 #endif
938 break;
939 case STACK_I8:
940 ins->opcode = OP_LCONV_TO_U;
941 break;
942 case STACK_R8:
943 ins->opcode = OP_FCONV_TO_U;
944 break;
946 break;
947 case CEE_CONV_I8:
948 case CEE_CONV_U8:
949 ins->type = STACK_I8;
950 ins->opcode += unops_op_map [src1->type];
951 break;
952 case CEE_CONV_OVF_I8:
953 case CEE_CONV_OVF_U8:
954 ins->type = STACK_I8;
955 ins->opcode += ovf3ops_op_map [src1->type];
956 break;
957 case CEE_CONV_OVF_U8_UN:
958 case CEE_CONV_OVF_I8_UN:
959 ins->type = STACK_I8;
960 ins->opcode += ovf2ops_op_map [src1->type];
961 break;
962 case CEE_CONV_R4:
963 case CEE_CONV_R8:
964 ins->type = STACK_R8;
965 ins->opcode += unops_op_map [src1->type];
966 break;
967 case OP_CKFINITE:
968 ins->type = STACK_R8;
969 break;
970 case CEE_CONV_U2:
971 case CEE_CONV_U1:
972 ins->type = STACK_I4;
973 ins->opcode += ovfops_op_map [src1->type];
974 break;
975 case CEE_CONV_I:
976 case CEE_CONV_OVF_I:
977 case CEE_CONV_OVF_U:
978 ins->type = STACK_PTR;
979 ins->opcode += ovfops_op_map [src1->type];
980 break;
981 case CEE_ADD_OVF:
982 case CEE_ADD_OVF_UN:
983 case CEE_MUL_OVF:
984 case CEE_MUL_OVF_UN:
985 case CEE_SUB_OVF:
986 case CEE_SUB_OVF_UN:
987 ins->type = bin_num_table [src1->type] [src2->type];
988 ins->opcode += ovfops_op_map [src1->type];
989 if (ins->type == STACK_R8)
990 ins->type = STACK_INV;
991 break;
992 case OP_LOAD_MEMBASE:
993 ins->type = STACK_PTR;
994 break;
995 case OP_LOADI1_MEMBASE:
996 case OP_LOADU1_MEMBASE:
997 case OP_LOADI2_MEMBASE:
998 case OP_LOADU2_MEMBASE:
999 case OP_LOADI4_MEMBASE:
1000 case OP_LOADU4_MEMBASE:
1001 ins->type = STACK_PTR;
1002 break;
1003 case OP_LOADI8_MEMBASE:
1004 ins->type = STACK_I8;
1005 break;
1006 case OP_LOADR4_MEMBASE:
1007 case OP_LOADR8_MEMBASE:
1008 ins->type = STACK_R8;
1009 break;
1010 default:
1011 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1012 break;
1015 if (ins->type == STACK_MP)
1016 ins->klass = mono_defaults.object_class;
1019 static const char
1020 ldind_type [] = {
1021 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0
/* Dead debug code, intentionally compiled out. */

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/* Returns 1 when the eval-stack values in ARGS are compatible with SIG. */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1088 * When we need a pointer to the current domain many times in a method, we
1089 * call mono_domain_get() once and we store the result in a local variable.
1090 * This function returns the variable that represents the MonoDomain*.
1092 inline static MonoInst *
1093 mono_get_domainvar (MonoCompile *cfg)
1095 if (!cfg->domainvar)
1096 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1097 return cfg->domainvar;
/*
 * mono_get_got_var:
 *
 *   The got_var contains the address of the Global Offset Table when AOT
 * compiling.  Returns the (lazily created) variable, or NULL when the
 * architecture does not need a GOT var or when not compiling AOT.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
		return NULL;
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	}
	return cfg->got_var;
#else
	return NULL;
#endif
}
1119 static MonoInst *
1120 mono_get_vtable_var (MonoCompile *cfg)
1122 g_assert (cfg->generic_sharing_context);
1124 if (!cfg->rgctx_var) {
1125 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1126 /* force the var to be stack allocated */
1127 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1130 return cfg->rgctx_var;
1133 static MonoType*
1134 type_from_stack_type (MonoInst *ins) {
1135 switch (ins->type) {
1136 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1137 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1138 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1139 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1140 case STACK_MP:
1141 return &ins->klass->this_arg;
1142 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1143 case STACK_VTYPE: return &ins->klass->byval_arg;
1144 default:
1145 g_error ("stack type %d to monotype not handled\n", ins->type);
1147 return NULL;
1150 static G_GNUC_UNUSED int
1151 type_to_stack_type (MonoType *t)
1153 t = mono_type_get_underlying_type (t);
1154 switch (t->type) {
1155 case MONO_TYPE_I1:
1156 case MONO_TYPE_U1:
1157 case MONO_TYPE_BOOLEAN:
1158 case MONO_TYPE_I2:
1159 case MONO_TYPE_U2:
1160 case MONO_TYPE_CHAR:
1161 case MONO_TYPE_I4:
1162 case MONO_TYPE_U4:
1163 return STACK_I4;
1164 case MONO_TYPE_I:
1165 case MONO_TYPE_U:
1166 case MONO_TYPE_PTR:
1167 case MONO_TYPE_FNPTR:
1168 return STACK_PTR;
1169 case MONO_TYPE_CLASS:
1170 case MONO_TYPE_STRING:
1171 case MONO_TYPE_OBJECT:
1172 case MONO_TYPE_SZARRAY:
1173 case MONO_TYPE_ARRAY:
1174 return STACK_OBJ;
1175 case MONO_TYPE_I8:
1176 case MONO_TYPE_U8:
1177 return STACK_I8;
1178 case MONO_TYPE_R4:
1179 case MONO_TYPE_R8:
1180 return STACK_R8;
1181 case MONO_TYPE_VALUETYPE:
1182 case MONO_TYPE_TYPEDBYREF:
1183 return STACK_VTYPE;
1184 case MONO_TYPE_GENERICINST:
1185 if (mono_type_generic_inst_is_valuetype (t))
1186 return STACK_VTYPE;
1187 else
1188 return STACK_OBJ;
1189 break;
1190 default:
1191 g_assert_not_reached ();
1194 return -1;
1197 static MonoClass*
1198 array_access_to_klass (int opcode)
1200 switch (opcode) {
1201 case CEE_LDELEM_U1:
1202 return mono_defaults.byte_class;
1203 case CEE_LDELEM_U2:
1204 return mono_defaults.uint16_class;
1205 case CEE_LDELEM_I:
1206 case CEE_STELEM_I:
1207 return mono_defaults.int_class;
1208 case CEE_LDELEM_I1:
1209 case CEE_STELEM_I1:
1210 return mono_defaults.sbyte_class;
1211 case CEE_LDELEM_I2:
1212 case CEE_STELEM_I2:
1213 return mono_defaults.int16_class;
1214 case CEE_LDELEM_I4:
1215 case CEE_STELEM_I4:
1216 return mono_defaults.int32_class;
1217 case CEE_LDELEM_U4:
1218 return mono_defaults.uint32_class;
1219 case CEE_LDELEM_I8:
1220 case CEE_STELEM_I8:
1221 return mono_defaults.int64_class;
1222 case CEE_LDELEM_R4:
1223 case CEE_STELEM_R4:
1224 return mono_defaults.single_class;
1225 case CEE_LDELEM_R8:
1226 case CEE_STELEM_R8:
1227 return mono_defaults.double_class;
1228 case CEE_LDELEM_REF:
1229 case CEE_STELEM_REF:
1230 return mono_defaults.object_class;
1231 default:
1232 g_assert_not_reached ();
1234 return NULL;
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable which can hold the value INS, an item left on
 * the evaluation stack at stack slot SLOT when crossing a basic block
 * boundary.  We try to share variables when possible: one cached variable
 * per (slot, stack type) pair, stored in cfg->intvars.
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* cache index for the (slot, stack type) pair */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		/* these types are shareable: reuse a cached var if one exists */
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		/* e.g. valuetypes: a fresh variable every time */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
/*
 * mono_save_token_info:
 *
 *   Record the image+token which KEY was loaded from, so the AOT compiler
 * can later emit a token reference for it.
 * Don't use this if a generic_context is set, since that means AOT can't
 * look up the method using just the image+token.
 * table == 0 means this is a reference made from a wrapper.
 */
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		/* Reuse the in_stack of an already-processed successor, if any */
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors which have no in_stack yet;
	 * a depth mismatch at a join point makes the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store the stack items into the chosen temporaries and replace the
	 * stack entries with loads of those temporaries. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1412 /* Emit code which loads interface_offsets [klass->interface_id]
1413 * The array is stored in memory before vtable.
1415 static void
1416 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1418 if (cfg->compile_aot) {
1419 int ioffset_reg = alloc_preg (cfg);
1420 int iid_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1423 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1424 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1426 else {
1427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which loads into INTF_BIT_REG a nonzero value if bit
 * klass->interface_id is set in the interface bitmap found at
 * BASE_REG + OFFSET (a MonoClass or MonoVTable interface_bitmap field).
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmaps can't be indexed directly: call the
	 * mono_class_interface_match icall with (bitmap, iid). */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* The IID is only known at load time: compute byte index (iid >> 3)
		 * and bit mask (1 << (iid & 7)) in emitted code. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* Byte index and bit mask are compile-time constants */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
1495 * Emit code which checks whenever the interface id of @klass is smaller than
1496 * than the value given by max_iid_reg.
1498 static void
1499 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1500 MonoBasicBlock *false_target)
1502 if (cfg->compile_aot) {
1503 int iid_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1507 else
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1509 if (false_target)
1510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1511 else
1512 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as mini_emit_max_iid_check (), but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);
		
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/* Same as mini_emit_max_iid_check (), but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);
		
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit a non-interface subtype test of the class in KLASS_REG against
 * KLASS (or, if KLASS_INS is non-NULL, against the class in that register):
 * branch to TRUE_TARGET if entry (klass->idepth - 1) of the supertypes
 * array equals the target class; branch to FALSE_TARGET if the idepth
 * check rules the cast out; otherwise fall through.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	/* The first MONO_DEFAULT_SUPERTABLE_SIZE supertable entries always
	 * exist, so the idepth check is only needed for deeper hierarchies. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		/* class pointer is only known at load time */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/* Same as mini_emit_isninst_cast_inst () with the target class known at
 * compile time (no class register). */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}
1569 static void
1570 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1572 int intf_reg = alloc_preg (cfg);
1574 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1575 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1577 if (true_target)
1578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1579 else
1580 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1584 * Variant of the above that takes a register to the class, not the vtable.
1586 static void
1587 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1589 int intf_bit_reg = alloc_preg (cfg);
1591 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1592 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1594 if (true_target)
1595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1596 else
1597 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1600 static inline void
1601 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1603 if (klass_inst) {
1604 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1605 } else if (cfg->compile_aot) {
1606 int const_reg = alloc_preg (cfg);
1607 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1608 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1609 } else {
1610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1612 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Emit code which throws InvalidCastException unless the class in
 * KLASS_REG is exactly KLASS. */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}
1621 static inline void
1622 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1624 if (cfg->compile_aot) {
1625 int const_reg = alloc_preg (cfg);
1626 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1627 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1628 } else {
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1634 static void
1635 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the object in OBJ_REG (whose MonoClass is
 * already loaded in KLASS_REG) against KLASS, throwing
 * InvalidCastException on failure.  KLASS_INST, when non-NULL, holds the
 * target class in a register (generic sharing); it is only supported for
 * non-array targets.  OBJECT_IS_NULL is the block to branch to when an
 * element check can conclude the cast trivially succeeds.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		/* Array target: check rank, then check the element class */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		/* Non-array target: supertable lookup, same scheme as
		 * mini_emit_isninst_cast_inst () but throwing on failure */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
/* Same as mini_emit_castclass_inst () with the target class known at
 * compile time (no class register). */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
1696 static void
1697 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1699 int val_reg;
1701 g_assert (val == 0);
1703 if (align == 0)
1704 align = 4;
1706 if ((size <= 4) && (size <= align)) {
1707 switch (size) {
1708 case 1:
1709 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1710 return;
1711 case 2:
1712 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1713 return;
1714 case 4:
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1716 return;
1717 #if SIZEOF_REGISTER == 8
1718 case 8:
1719 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1720 return;
1721 #endif
1725 val_reg = alloc_preg (cfg);
1727 if (SIZEOF_REGISTER == 8)
1728 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1729 else
1730 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1732 if (align < 4) {
1733 /* This could be optimized further if neccesary */
1734 while (size >= 1) {
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1736 offset += 1;
1737 size -= 1;
1739 return;
1742 #if !NO_UNALIGNED_ACCESS
1743 if (SIZEOF_REGISTER == 8) {
1744 if (offset % 8) {
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1746 offset += 4;
1747 size -= 4;
1749 while (size >= 8) {
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1751 offset += 8;
1752 size -= 8;
1755 #endif
1757 while (size >= 4) {
1758 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1759 offset += 4;
1760 size -= 4;
1762 while (size >= 2) {
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1764 offset += 2;
1765 size -= 2;
1767 while (size >= 1) {
1768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1769 offset += 1;
1770 size -= 1;
/*
 * mini_emit_memcpy:
 *
 *   Emit an inline copy of SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET.  ALIGN is the common alignment of source and
 * destination; 0 means the default of 4.  Copies with align < 4 are done
 * bytewise; otherwise the widest loads/stores the target allows are used.
 */
void 
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		/* size is 0 here, so the loops below are no-ops */
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* remaining tail: 4-, 2-, then 1-byte copies */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1836 static int
1837 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1839 if (type->byref)
1840 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1842 handle_enum:
1843 type = mini_get_basic_type_from_generic (gsctx, type);
1844 switch (type->type) {
1845 case MONO_TYPE_VOID:
1846 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1847 case MONO_TYPE_I1:
1848 case MONO_TYPE_U1:
1849 case MONO_TYPE_BOOLEAN:
1850 case MONO_TYPE_I2:
1851 case MONO_TYPE_U2:
1852 case MONO_TYPE_CHAR:
1853 case MONO_TYPE_I4:
1854 case MONO_TYPE_U4:
1855 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1856 case MONO_TYPE_I:
1857 case MONO_TYPE_U:
1858 case MONO_TYPE_PTR:
1859 case MONO_TYPE_FNPTR:
1860 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1861 case MONO_TYPE_CLASS:
1862 case MONO_TYPE_STRING:
1863 case MONO_TYPE_OBJECT:
1864 case MONO_TYPE_SZARRAY:
1865 case MONO_TYPE_ARRAY:
1866 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1867 case MONO_TYPE_I8:
1868 case MONO_TYPE_U8:
1869 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1870 case MONO_TYPE_R4:
1871 case MONO_TYPE_R8:
1872 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1873 case MONO_TYPE_VALUETYPE:
1874 if (type->data.klass->enumtype) {
1875 type = mono_class_enum_basetype (type->data.klass);
1876 goto handle_enum;
1877 } else
1878 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1879 case MONO_TYPE_TYPEDBYREF:
1880 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1881 case MONO_TYPE_GENERICINST:
1882 type = &type->data.generic_class->container_class->byval_arg;
1883 goto handle_enum;
1884 default:
1885 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1887 return -1;
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* nothing can be stored into a void target */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:    
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
			return 1;
		/* valuetypes must match exactly */
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
2010 * Prepare arguments for passing to a function call.
2011 * Return a non-zero value if the arguments can't be passed to the given
2012 * signature.
2013 * The type checks are not yet complete and some conversions may need
2014 * casts on 32 or 64 bit architectures.
2016 * FIXME: implement this using target_type_is_incompatible ()
2018 static int
2019 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2021 MonoType *simple_type;
2022 int i;
2024 if (sig->hasthis) {
2025 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2026 return 1;
2027 args++;
2029 for (i = 0; i < sig->param_count; ++i) {
2030 if (sig->params [i]->byref) {
2031 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2032 return 1;
2033 continue;
2035 simple_type = sig->params [i];
2036 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2037 handle_enum:
2038 switch (simple_type->type) {
2039 case MONO_TYPE_VOID:
2040 return 1;
2041 continue;
2042 case MONO_TYPE_I1:
2043 case MONO_TYPE_U1:
2044 case MONO_TYPE_BOOLEAN:
2045 case MONO_TYPE_I2:
2046 case MONO_TYPE_U2:
2047 case MONO_TYPE_CHAR:
2048 case MONO_TYPE_I4:
2049 case MONO_TYPE_U4:
2050 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2051 return 1;
2052 continue;
2053 case MONO_TYPE_I:
2054 case MONO_TYPE_U:
2055 case MONO_TYPE_PTR:
2056 case MONO_TYPE_FNPTR:
2057 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2058 return 1;
2059 continue;
2060 case MONO_TYPE_CLASS:
2061 case MONO_TYPE_STRING:
2062 case MONO_TYPE_OBJECT:
2063 case MONO_TYPE_SZARRAY:
2064 case MONO_TYPE_ARRAY:
2065 if (args [i]->type != STACK_OBJ)
2066 return 1;
2067 continue;
2068 case MONO_TYPE_I8:
2069 case MONO_TYPE_U8:
2070 if (args [i]->type != STACK_I8)
2071 return 1;
2072 continue;
2073 case MONO_TYPE_R4:
2074 case MONO_TYPE_R8:
2075 if (args [i]->type != STACK_R8)
2076 return 1;
2077 continue;
2078 case MONO_TYPE_VALUETYPE:
2079 if (simple_type->data.klass->enumtype) {
2080 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2081 goto handle_enum;
2083 if (args [i]->type != STACK_VTYPE)
2084 return 1;
2085 continue;
2086 case MONO_TYPE_TYPEDBYREF:
2087 if (args [i]->type != STACK_VTYPE)
2088 return 1;
2089 continue;
2090 case MONO_TYPE_GENERICINST:
2091 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2092 goto handle_enum;
2094 default:
2095 g_error ("unknown type 0x%02x in check_call_signature",
2096 simple_type->type);
2099 return 0;
2102 static int
2103 callvirt_to_call (int opcode)
2105 switch (opcode) {
2106 case OP_CALLVIRT:
2107 return OP_CALL;
2108 case OP_VOIDCALLVIRT:
2109 return OP_VOIDCALL;
2110 case OP_FCALLVIRT:
2111 return OP_FCALL;
2112 case OP_VCALLVIRT:
2113 return OP_VCALL;
2114 case OP_LCALLVIRT:
2115 return OP_LCALL;
2116 default:
2117 g_assert_not_reached ();
2120 return -1;
2123 static int
2124 callvirt_to_call_membase (int opcode)
2126 switch (opcode) {
2127 case OP_CALLVIRT:
2128 return OP_CALL_MEMBASE;
2129 case OP_VOIDCALLVIRT:
2130 return OP_VOIDCALL_MEMBASE;
2131 case OP_FCALLVIRT:
2132 return OP_FCALL_MEMBASE;
2133 case OP_LCALLVIRT:
2134 return OP_LCALL_MEMBASE;
2135 case OP_VCALLVIRT:
2136 return OP_VCALL_MEMBASE;
2137 default:
2138 g_assert_not_reached ();
2141 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Make the IMT/method argument available to CALL. If IMT_ARG is non-NULL
 * its value is used, otherwise the method itself is materialized as a
 * constant (AOT constant when compiling AOT).
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int arg_reg;

	if (COMPILE_LLVM (cfg)) {
		arg_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, arg_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, arg_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = arg_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#ifdef ENABLE_LLVM
		call->imt_arg_reg = arg_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, arg_reg, MONO_ARCH_IMT_REG, FALSE);
#else
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, arg_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	arg_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, arg_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, arg_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = arg_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, arg_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* No dedicated IMT register: let the backend decide how to pass it. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2199 static MonoJumpInfo *
2200 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2202 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2204 ji->ip.i = ip;
2205 ji->type = type;
2206 ji->data.target = target;
2208 return ji;
2211 inline static MonoCallInst *
2212 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2213 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2215 MonoCallInst *call;
2216 #ifdef MONO_ARCH_SOFT_FLOAT
2217 int i;
2218 #endif
2220 if (tail)
2221 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2222 else
2223 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2225 call->args = args;
2226 call->signature = sig;
2227 call->rgctx_reg = rgctx;
2229 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2231 if (tail) {
2232 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2233 call->vret_var = cfg->vret_addr;
2234 //g_assert_not_reached ();
2236 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2237 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2238 MonoInst *loada;
2240 temp->backend.is_pinvoke = sig->pinvoke;
2243 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2244 * address of return value to increase optimization opportunities.
2245 * Before vtype decomposition, the dreg of the call ins itself represents the
2246 * fact the call modifies the return value. After decomposition, the call will
2247 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2248 * will be transformed into an LDADDR.
2250 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2251 loada->dreg = alloc_preg (cfg);
2252 loada->inst_p0 = temp;
2253 /* We reference the call too since call->dreg could change during optimization */
2254 loada->inst_p1 = call;
2255 MONO_ADD_INS (cfg->cbb, loada);
2257 call->inst.dreg = temp->dreg;
2259 call->vret_var = loada;
2260 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2261 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2263 #ifdef MONO_ARCH_SOFT_FLOAT
2264 if (COMPILE_SOFT_FLOAT (cfg)) {
2266 * If the call has a float argument, we would need to do an r8->r4 conversion using
2267 * an icall, but that cannot be done during the call sequence since it would clobber
2268 * the call registers + the stack. So we do it before emitting the call.
2270 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2271 MonoType *t;
2272 MonoInst *in = call->args [i];
2274 if (i >= sig->hasthis)
2275 t = sig->params [i - sig->hasthis];
2276 else
2277 t = &mono_defaults.int_class->byval_arg;
2278 t = mono_type_get_underlying_type (t);
2280 if (!t->byref && t->type == MONO_TYPE_R4) {
2281 MonoInst *iargs [1];
2282 MonoInst *conv;
2284 iargs [0] = in;
2285 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2287 /* The result will be in an int vreg */
2288 call->args [i] = conv;
2292 #endif
2294 #ifdef ENABLE_LLVM
2295 if (COMPILE_LLVM (cfg))
2296 mono_llvm_emit_call (cfg, call);
2297 else
2298 mono_arch_emit_call (cfg, call);
2299 #else
2300 mono_arch_emit_call (cfg, call);
2301 #endif
2303 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2304 cfg->flags |= MONO_CFG_HAS_CALLS;
2306 return call;
2309 static void
2310 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2312 #ifdef MONO_ARCH_RGCTX_REG
2313 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2314 cfg->uses_rgctx_reg = TRUE;
2315 call->rgctx_reg = TRUE;
2316 #ifdef ENABLE_LLVM
2317 call->rgctx_arg_reg = rgctx_reg;
2318 #endif
2319 #else
2320 NOT_IMPLEMENTED;
2321 #endif
2324 inline static MonoInst*
2325 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2327 MonoCallInst *call;
2328 int rgctx_reg = -1;
2330 if (rgctx_arg) {
2331 rgctx_reg = mono_alloc_preg (cfg);
2332 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2335 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2337 call->inst.sreg1 = addr->dreg;
2339 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2341 if (rgctx_arg)
2342 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2344 return (MonoInst*)call;
2347 static MonoInst*
2348 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2349 static MonoInst*
2350 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2352 static MonoInst*
2353 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2354 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2356 gboolean might_be_remote;
2357 gboolean virtual = this != NULL;
2358 gboolean enable_for_aot = TRUE;
2359 int context_used;
2360 MonoCallInst *call;
2361 int rgctx_reg = 0;
2363 if (rgctx_arg) {
2364 rgctx_reg = mono_alloc_preg (cfg);
2365 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2368 if (method->string_ctor) {
2369 /* Create the real signature */
2370 /* FIXME: Cache these */
2371 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2372 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2374 sig = ctor_sig;
2377 context_used = mono_method_check_context_used (method);
2379 might_be_remote = this && sig->hasthis &&
2380 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2381 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2383 if (might_be_remote && context_used) {
2384 MonoInst *addr;
2386 g_assert (cfg->generic_sharing_context);
2388 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2390 return mono_emit_calli (cfg, sig, args, addr, NULL);
2393 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2395 if (might_be_remote)
2396 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2397 else
2398 call->method = method;
2399 call->inst.flags |= MONO_INST_HAS_METHOD;
2400 call->inst.inst_left = this;
2402 if (virtual) {
2403 int vtable_reg, slot_reg, this_reg;
2405 this_reg = this->dreg;
2407 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2408 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2409 MonoInst *dummy_use;
2411 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2413 /* Make a call to delegate->invoke_impl */
2414 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2415 call->inst.inst_basereg = this_reg;
2416 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2417 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2419 /* We must emit a dummy use here because the delegate trampoline will
2420 replace the 'this' argument with the delegate target making this activation
2421 no longer a root for the delegate.
2422 This is an issue for delegates that target collectible code such as dynamic
2423 methods of GC'able assemblies.
2425 For a test case look into #667921.
2427 FIXME: a dummy use is not the best way to do it as the local register allocator
2428 will put it on a caller save register and spil it around the call.
2429 Ideally, we would either put it on a callee save register or only do the store part.
2431 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2433 return (MonoInst*)call;
2435 #endif
2437 if ((!cfg->compile_aot || enable_for_aot) &&
2438 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2439 (MONO_METHOD_IS_FINAL (method) &&
2440 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2441 !(method->klass->marshalbyref && context_used)) {
2443 * the method is not virtual, we just need to ensure this is not null
2444 * and then we can call the method directly.
2446 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2448 * The check above ensures method is not gshared, this is needed since
2449 * gshared methods can't have wrappers.
2451 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2454 if (!method->string_ctor)
2455 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2457 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2458 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2460 * the method is virtual, but we can statically dispatch since either
2461 * it's class or the method itself are sealed.
2462 * But first we need to ensure it's not a null reference.
2464 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2466 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2467 } else {
2468 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2470 vtable_reg = alloc_preg (cfg);
2471 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2472 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2473 slot_reg = -1;
2474 #ifdef MONO_ARCH_HAVE_IMT
2475 if (mono_use_imt) {
2476 guint32 imt_slot = mono_method_get_imt_slot (method);
2477 emit_imt_argument (cfg, call, imt_arg);
2478 slot_reg = vtable_reg;
2479 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2481 #endif
2482 if (slot_reg == -1) {
2483 slot_reg = alloc_preg (cfg);
2484 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2485 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2487 } else {
2488 slot_reg = vtable_reg;
2489 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2490 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2491 #ifdef MONO_ARCH_HAVE_IMT
2492 if (imt_arg) {
2493 g_assert (mono_method_signature (method)->generic_param_count);
2494 emit_imt_argument (cfg, call, imt_arg);
2496 #endif
2499 call->inst.sreg1 = slot_reg;
2500 call->virtual = TRUE;
2504 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2506 if (rgctx_arg)
2507 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2509 return (MonoInst*)call;
2512 MonoInst*
2513 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2515 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2518 MonoInst*
2519 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2520 MonoInst **args)
2522 MonoCallInst *call;
2524 g_assert (sig);
2526 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2527 call->fptr = func;
2529 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2531 return (MonoInst*)call;
2534 MonoInst*
2535 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2537 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2539 g_assert (info);
2541 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2545 * mono_emit_abs_call:
2547 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2549 inline static MonoInst*
2550 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2551 MonoMethodSignature *sig, MonoInst **args)
2553 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2554 MonoInst *ins;
2557 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2558 * handle it.
2560 if (cfg->abs_patches == NULL)
2561 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2562 g_hash_table_insert (cfg->abs_patches, ji, ji);
2563 ins = mono_emit_native_call (cfg, ji, sig, args);
2564 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2565 return ins;
2568 static MonoInst*
2569 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2571 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2572 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2573 int widen_op = -1;
2576 * Native code might return non register sized integers
2577 * without initializing the upper bits.
2579 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2580 case OP_LOADI1_MEMBASE:
2581 widen_op = OP_ICONV_TO_I1;
2582 break;
2583 case OP_LOADU1_MEMBASE:
2584 widen_op = OP_ICONV_TO_U1;
2585 break;
2586 case OP_LOADI2_MEMBASE:
2587 widen_op = OP_ICONV_TO_I2;
2588 break;
2589 case OP_LOADU2_MEMBASE:
2590 widen_op = OP_ICONV_TO_U2;
2591 break;
2592 default:
2593 break;
2596 if (widen_op != -1) {
2597 int dreg = alloc_preg (cfg);
2598 MonoInst *widen;
2600 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2601 widen->type = ins->type;
2602 ins = widen;
2607 return ins;
2610 static MonoMethod*
2611 get_memcpy_method (void)
2613 static MonoMethod *memcpy_method = NULL;
2614 if (!memcpy_method) {
2615 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2616 if (!memcpy_method)
2617 g_error ("Old corlib found. Install a new one");
2619 return memcpy_method;
2622 static void
2623 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2625 MonoClassField *field;
2626 gpointer iter = NULL;
2628 while ((field = mono_class_get_fields (klass, &iter))) {
2629 int foffset;
2631 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2632 continue;
2633 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2634 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2635 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2636 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2637 } else {
2638 MonoClass *field_class = mono_class_from_mono_type (field->type);
2639 if (field_class->has_references)
2640 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
2645 static void
2646 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2648 int card_table_shift_bits;
2649 gpointer card_table_mask;
2650 guint8 *card_table;
2651 MonoInst *dummy_use;
2652 int nursery_shift_bits;
2653 size_t nursery_size;
2654 gboolean has_card_table_wb = FALSE;
2656 if (!cfg->gen_write_barriers)
2657 return;
2659 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2661 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2663 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2664 has_card_table_wb = TRUE;
2665 #endif
2667 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2668 MonoInst *wbarrier;
2670 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2671 wbarrier->sreg1 = ptr->dreg;
2672 if (value)
2673 wbarrier->sreg2 = value->dreg;
2674 else
2675 wbarrier->sreg2 = value_reg;
2676 MONO_ADD_INS (cfg->cbb, wbarrier);
2677 } else if (card_table) {
2678 int offset_reg = alloc_preg (cfg);
2679 int card_reg = alloc_preg (cfg);
2680 MonoInst *ins;
2682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2683 if (card_table_mask)
2684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2686 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2687 * IMM's larger than 32bits.
2689 if (cfg->compile_aot) {
2690 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2691 } else {
2692 MONO_INST_NEW (cfg, ins, OP_PCONST);
2693 ins->inst_p0 = card_table;
2694 ins->dreg = card_reg;
2695 MONO_ADD_INS (cfg->cbb, ins);
2698 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2699 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2700 } else {
2701 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2702 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2705 if (value) {
2706 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2707 } else {
2708 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2709 dummy_use->sreg1 = value_reg;
2710 MONO_ADD_INS (cfg->cbb, dummy_use);
2714 static gboolean
2715 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2717 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2718 unsigned need_wb = 0;
2720 if (align == 0)
2721 align = 4;
2723 /*types with references can't have alignment smaller than sizeof(void*) */
2724 if (align < SIZEOF_VOID_P)
2725 return FALSE;
2727 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2728 if (size > 32 * SIZEOF_VOID_P)
2729 return FALSE;
2731 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2733 /* We don't unroll more than 5 stores to avoid code bloat. */
2734 if (size > 5 * SIZEOF_VOID_P) {
2735 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2736 size += (SIZEOF_VOID_P - 1);
2737 size &= ~(SIZEOF_VOID_P - 1);
2739 EMIT_NEW_ICONST (cfg, iargs [2], size);
2740 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2741 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2742 return TRUE;
2745 destreg = iargs [0]->dreg;
2746 srcreg = iargs [1]->dreg;
2747 offset = 0;
2749 dest_ptr_reg = alloc_preg (cfg);
2750 tmp_reg = alloc_preg (cfg);
2752 /*tmp = dreg*/
2753 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2755 while (size >= SIZEOF_VOID_P) {
2756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2759 if (need_wb & 0x1)
2760 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2762 offset += SIZEOF_VOID_P;
2763 size -= SIZEOF_VOID_P;
2764 need_wb >>= 1;
2766 /*tmp += sizeof (void*)*/
2767 if (size >= SIZEOF_VOID_P) {
2768 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2769 MONO_ADD_INS (cfg->cbb, iargs [0]);
2773 /* Those cannot be references since size < sizeof (void*) */
2774 while (size >= 4) {
2775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2776 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2777 offset += 4;
2778 size -= 4;
2781 while (size >= 2) {
2782 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2783 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2784 offset += 2;
2785 size -= 2;
2788 while (size >= 1) {
2789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2791 offset += 1;
2792 size -= 1;
2795 return TRUE;
2799 * Emit code to copy a valuetype of type @klass whose address is stored in
2800 * @src->dreg to memory whose address is stored at @dest->dreg.
2802 void
2803 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2805 MonoInst *iargs [4];
2806 int n;
2807 guint32 align = 0;
2808 MonoMethod *memcpy_method;
2810 g_assert (klass);
2812 * This check breaks with spilled vars... need to handle it during verification anyway.
2813 * g_assert (klass && klass == src->klass && klass == dest->klass);
2816 if (native)
2817 n = mono_class_native_size (klass, &align);
2818 else
2819 n = mono_class_value_size (klass, &align);
2821 /* if native is true there should be no references in the struct */
2822 if (cfg->gen_write_barriers && klass->has_references && !native) {
2823 /* Avoid barriers when storing to the stack */
2824 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2825 (dest->opcode == OP_LDADDR))) {
2826 int context_used = 0;
2828 iargs [0] = dest;
2829 iargs [1] = src;
2831 if (cfg->generic_sharing_context)
2832 context_used = mono_class_check_context_used (klass);
2834 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2835 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2836 return;
2837 } else if (context_used) {
2838 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2839 } else {
2840 if (cfg->compile_aot) {
2841 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2842 } else {
2843 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2844 mono_class_compute_gc_descriptor (klass);
2848 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2849 return;
2853 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2854 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2855 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2856 } else {
2857 iargs [0] = dest;
2858 iargs [1] = src;
2859 EMIT_NEW_ICONST (cfg, iargs [2], n);
2861 memcpy_method = get_memcpy_method ();
2862 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2866 static MonoMethod*
2867 get_memset_method (void)
2869 static MonoMethod *memset_method = NULL;
2870 if (!memset_method) {
2871 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2872 if (!memset_method)
2873 g_error ("Old corlib found. Install a new one");
2875 return memset_method;
2878 void
2879 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2881 MonoInst *iargs [3];
2882 int n;
2883 guint32 align;
2884 MonoMethod *memset_method;
2886 /* FIXME: Optimize this for the case when dest is an LDADDR */
2888 mono_class_init (klass);
2889 n = mono_class_value_size (klass, &align);
2891 if (n <= sizeof (gpointer) * 5) {
2892 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2894 else {
2895 memset_method = get_memset_method ();
2896 iargs [0] = dest;
2897 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2898 EMIT_NEW_ICONST (cfg, iargs [2], n);
2899 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2903 static MonoInst*
2904 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2906 MonoInst *this = NULL;
2908 g_assert (cfg->generic_sharing_context);
2910 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2911 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2912 !method->klass->valuetype)
2913 EMIT_NEW_ARGLOAD (cfg, this, 0);
2915 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2916 MonoInst *mrgctx_loc, *mrgctx_var;
2918 g_assert (!this);
2919 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2921 mrgctx_loc = mono_get_vtable_var (cfg);
2922 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2924 return mrgctx_var;
2925 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2926 MonoInst *vtable_loc, *vtable_var;
2928 g_assert (!this);
2930 vtable_loc = mono_get_vtable_var (cfg);
2931 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2933 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2934 MonoInst *mrgctx_var = vtable_var;
2935 int vtable_reg;
2937 vtable_reg = alloc_preg (cfg);
2938 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2939 vtable_var->type = STACK_PTR;
2942 return vtable_var;
2943 } else {
2944 MonoInst *ins;
2945 int vtable_reg;
2947 vtable_reg = alloc_preg (cfg);
2948 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2949 return ins;
2953 static MonoJumpInfoRgctxEntry *
2954 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2956 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2957 res->method = method;
2958 res->in_mrgctx = in_mrgctx;
2959 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2960 res->data->type = patch_type;
2961 res->data->data.target = patch_data;
2962 res->info_type = info_type;
2964 return res;
2967 static inline MonoInst*
2968 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2970 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2973 static MonoInst*
2974 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2975 MonoClass *klass, int rgctx_type)
2977 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2978 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2980 return emit_rgctx_fetch (cfg, rgctx, entry);
2984 * emit_get_rgctx_method:
2986 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2987 * normal constants, else emit a load from the rgctx.
2989 static MonoInst*
2990 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2991 MonoMethod *cmethod, int rgctx_type)
2993 if (!context_used) {
2994 MonoInst *ins;
2996 switch (rgctx_type) {
2997 case MONO_RGCTX_INFO_METHOD:
2998 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2999 return ins;
3000 case MONO_RGCTX_INFO_METHOD_RGCTX:
3001 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3002 return ins;
3003 default:
3004 g_assert_not_reached ();
3006 } else {
3007 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3008 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3010 return emit_rgctx_fetch (cfg, rgctx, entry);
3014 static MonoInst*
3015 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3016 MonoClassField *field, int rgctx_type)
3018 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3019 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3021 return emit_rgctx_fetch (cfg, rgctx, entry);
3025 * On return the caller must check @klass for load errors.
3027 static void
3028 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3030 MonoInst *vtable_arg;
3031 MonoCallInst *call;
3032 int context_used = 0;
3034 if (cfg->generic_sharing_context)
3035 context_used = mono_class_check_context_used (klass);
3037 if (context_used) {
3038 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3039 klass, MONO_RGCTX_INFO_VTABLE);
3040 } else {
3041 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3043 if (!vtable)
3044 return;
3045 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3048 if (COMPILE_LLVM (cfg))
3049 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3050 else
3051 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3052 #ifdef MONO_ARCH_VTABLE_REG
3053 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3054 cfg->uses_vtable_reg = TRUE;
3055 #else
3056 NOT_IMPLEMENTED;
3057 #endif
3060 static void
3061 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3063 if (mini_get_debug_options ()->better_cast_details) {
3064 int to_klass_reg = alloc_preg (cfg);
3065 int vtable_reg = alloc_preg (cfg);
3066 int klass_reg = alloc_preg (cfg);
3067 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3069 if (!tls_get) {
3070 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3071 exit (1);
3074 MONO_ADD_INS (cfg->cbb, tls_get);
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3078 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3079 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3084 static void
3085 reset_cast_details (MonoCompile *cfg)
3087 /* Reset the variables holding the cast details */
3088 if (mini_get_debug_options ()->better_cast_details) {
3089 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3091 MONO_ADD_INS (cfg->cbb, tls_get);
3092 /* It is enough to reset the from field */
3093 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3098 * On return the caller must check @array_class for load errors
3100 static void
3101 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3103 int vtable_reg = alloc_preg (cfg);
3104 int context_used = 0;
3106 if (cfg->generic_sharing_context)
3107 context_used = mono_class_check_context_used (array_class);
3109 save_cast_details (cfg, array_class, obj->dreg);
3111 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3113 if (cfg->opt & MONO_OPT_SHARED) {
3114 int class_reg = alloc_preg (cfg);
3115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3116 if (cfg->compile_aot) {
3117 int klass_reg = alloc_preg (cfg);
3118 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3119 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3120 } else {
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3123 } else if (context_used) {
3124 MonoInst *vtable_ins;
3126 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3127 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3128 } else {
3129 if (cfg->compile_aot) {
3130 int vt_reg;
3131 MonoVTable *vtable;
3133 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3134 return;
3135 vt_reg = alloc_preg (cfg);
3136 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3137 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3138 } else {
3139 MonoVTable *vtable;
3140 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3141 return;
3142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3146 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3148 reset_cast_details (cfg);
3152 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3153 * generic code is generated.
3155 static MonoInst*
3156 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3158 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3160 if (context_used) {
3161 MonoInst *rgctx, *addr;
3163 /* FIXME: What if the class is shared? We might not
3164 have to get the address of the method from the
3165 RGCTX. */
3166 addr = emit_get_rgctx_method (cfg, context_used, method,
3167 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3169 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3171 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3172 } else {
3173 return mono_emit_method_call (cfg, method, &val, NULL);
3177 static MonoInst*
3178 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3180 MonoInst *add;
3181 int obj_reg;
3182 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3183 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3184 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3185 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3187 obj_reg = sp [0]->dreg;
3188 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3189 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3191 /* FIXME: generics */
3192 g_assert (klass->rank == 0);
3194 // Check rank == 0
3195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3196 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3201 if (context_used) {
3202 MonoInst *element_class;
3204 /* This assertion is from the unboxcast insn */
3205 g_assert (klass->rank == 0);
3207 element_class = emit_get_rgctx_klass (cfg, context_used,
3208 klass->element_class, MONO_RGCTX_INFO_KLASS);
3210 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3211 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3212 } else {
3213 save_cast_details (cfg, klass->element_class, obj_reg);
3214 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3215 reset_cast_details (cfg);
3218 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3219 MONO_ADD_INS (cfg->cbb, add);
3220 add->type = STACK_MP;
3221 add->klass = klass;
3223 return add;
3227 * Returns NULL and set the cfg exception on error.
3229 static MonoInst*
3230 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3232 MonoInst *iargs [2];
3233 void *alloc_ftn;
3235 if (context_used) {
3236 MonoInst *data;
3237 int rgctx_info;
3238 MonoInst *iargs [2];
3241 FIXME: we cannot get managed_alloc here because we can't get
3242 the class's vtable (because it's not a closed class)
3244 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3245 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3248 if (cfg->opt & MONO_OPT_SHARED)
3249 rgctx_info = MONO_RGCTX_INFO_KLASS;
3250 else
3251 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3252 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3254 if (cfg->opt & MONO_OPT_SHARED) {
3255 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3256 iargs [1] = data;
3257 alloc_ftn = mono_object_new;
3258 } else {
3259 iargs [0] = data;
3260 alloc_ftn = mono_object_new_specific;
3263 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3266 if (cfg->opt & MONO_OPT_SHARED) {
3267 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3268 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3270 alloc_ftn = mono_object_new;
3271 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3272 /* This happens often in argument checking code, eg. throw new FooException... */
3273 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3274 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3275 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3276 } else {
3277 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3278 MonoMethod *managed_alloc = NULL;
3279 gboolean pass_lw;
3281 if (!vtable) {
3282 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3283 cfg->exception_ptr = klass;
3284 return NULL;
3287 #ifndef MONO_CROSS_COMPILE
3288 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3289 #endif
3291 if (managed_alloc) {
3292 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3293 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3295 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3296 if (pass_lw) {
3297 guint32 lw = vtable->klass->instance_size;
3298 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3299 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3300 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3302 else {
3303 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3307 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3311 * Returns NULL and set the cfg exception on error.
3313 static MonoInst*
3314 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3316 MonoInst *alloc, *ins;
3318 if (mono_class_is_nullable (klass)) {
3319 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3321 if (context_used) {
3322 /* FIXME: What if the class is shared? We might not
3323 have to get the method address from the RGCTX. */
3324 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3325 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3326 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3328 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3329 } else {
3330 return mono_emit_method_call (cfg, method, &val, NULL);
3334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3335 if (!alloc)
3336 return NULL;
3338 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3340 return alloc;
3344 static gboolean
3345 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3347 int i;
3348 MonoGenericContainer *container;
3349 MonoGenericInst *ginst;
3351 if (klass->generic_class) {
3352 container = klass->generic_class->container_class->generic_container;
3353 ginst = klass->generic_class->context.class_inst;
3354 } else if (klass->generic_container && context_used) {
3355 container = klass->generic_container;
3356 ginst = container->context.class_inst;
3357 } else {
3358 return FALSE;
3361 for (i = 0; i < container->type_argc; ++i) {
3362 MonoType *type;
3363 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3364 continue;
3365 type = ginst->type_argv [i];
3366 if (mini_type_is_reference (cfg, type))
3367 return TRUE;
3369 return FALSE;
/*
 * is_complex_isinst:
 *
 *   TRUE when an isinst/castclass against KLASS needs the icall slow path
 * in shared generic code.  Currently forced to TRUE for every class (the
 * leading TRUE short-circuits the real test) because the fast path breaks
 * class library tests.
 */
// FIXME: This doesn't work yet (class libs tests fail?)
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3376 * Returns NULL and set the cfg exception on error.
3378 static MonoInst*
3379 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3381 MonoBasicBlock *is_null_bb;
3382 int obj_reg = src->dreg;
3383 int vtable_reg = alloc_preg (cfg);
3384 MonoInst *klass_inst = NULL;
3386 if (context_used) {
3387 MonoInst *args [3];
3389 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3390 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3391 MonoInst *cache_ins;
3393 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3395 /* obj */
3396 args [0] = src;
3398 /* klass - it's the second element of the cache entry*/
3399 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3401 /* cache */
3402 args [2] = cache_ins;
3404 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3407 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3409 if (is_complex_isinst (klass)) {
3410 /* Complex case, handle by an icall */
3412 /* obj */
3413 args [0] = src;
3415 /* klass */
3416 args [1] = klass_inst;
3418 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3419 } else {
3420 /* Simple case, handled by the code below */
3424 NEW_BBLOCK (cfg, is_null_bb);
3426 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3429 save_cast_details (cfg, klass, obj_reg);
3431 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3433 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3434 } else {
3435 int klass_reg = alloc_preg (cfg);
3437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3439 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3440 /* the remoting code is broken, access the class for now */
3441 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3442 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3443 if (!vt) {
3444 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3445 cfg->exception_ptr = klass;
3446 return NULL;
3448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3449 } else {
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3453 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3454 } else {
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3456 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3460 MONO_START_BB (cfg, is_null_bb);
3462 reset_cast_details (cfg);
3464 return src;
3468 * Returns NULL and set the cfg exception on error.
3470 static MonoInst*
3471 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3473 MonoInst *ins;
3474 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3475 int obj_reg = src->dreg;
3476 int vtable_reg = alloc_preg (cfg);
3477 int res_reg = alloc_ireg_ref (cfg);
3478 MonoInst *klass_inst = NULL;
3480 if (context_used) {
3481 MonoInst *args [3];
3483 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3484 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3485 MonoInst *cache_ins;
3487 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3489 /* obj */
3490 args [0] = src;
3492 /* klass - it's the second element of the cache entry*/
3493 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3495 /* cache */
3496 args [2] = cache_ins;
3498 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3501 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3503 if (is_complex_isinst (klass)) {
3504 /* Complex case, handle by an icall */
3506 /* obj */
3507 args [0] = src;
3509 /* klass */
3510 args [1] = klass_inst;
3512 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3513 } else {
3514 /* Simple case, the code below can handle it */
3518 NEW_BBLOCK (cfg, is_null_bb);
3519 NEW_BBLOCK (cfg, false_bb);
3520 NEW_BBLOCK (cfg, end_bb);
3522 /* Do the assignment at the beginning, so the other assignment can be if converted */
3523 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3524 ins->type = STACK_OBJ;
3525 ins->klass = klass;
3527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3528 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3532 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3533 g_assert (!context_used);
3534 /* the is_null_bb target simply copies the input register to the output */
3535 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3536 } else {
3537 int klass_reg = alloc_preg (cfg);
3539 if (klass->rank) {
3540 int rank_reg = alloc_preg (cfg);
3541 int eclass_reg = alloc_preg (cfg);
3543 g_assert (!context_used);
3544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3549 if (klass->cast_class == mono_defaults.object_class) {
3550 int parent_reg = alloc_preg (cfg);
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3552 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3553 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3555 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3556 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3557 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3558 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3559 } else if (klass->cast_class == mono_defaults.enum_class) {
3560 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3562 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3563 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3564 } else {
3565 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3566 /* Check that the object is a vector too */
3567 int bounds_reg = alloc_preg (cfg);
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3573 /* the is_null_bb target simply copies the input register to the output */
3574 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3576 } else if (mono_class_is_nullable (klass)) {
3577 g_assert (!context_used);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3579 /* the is_null_bb target simply copies the input register to the output */
3580 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3581 } else {
3582 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3583 g_assert (!context_used);
3584 /* the remoting code is broken, access the class for now */
3585 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3586 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3587 if (!vt) {
3588 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3589 cfg->exception_ptr = klass;
3590 return NULL;
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3593 } else {
3594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3598 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3599 } else {
3600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3601 /* the is_null_bb target simply copies the input register to the output */
3602 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3607 MONO_START_BB (cfg, false_bb);
3609 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3612 MONO_START_BB (cfg, is_null_bb);
3614 MONO_START_BB (cfg, end_bb);
3616 return ins;
3619 static MonoInst*
3620 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3622 /* This opcode takes as input an object reference and a class, and returns:
3623 0) if the object is an instance of the class,
3624 1) if the object is not instance of the class,
3625 2) if the object is a proxy whose type cannot be determined */
3627 MonoInst *ins;
3628 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3629 int obj_reg = src->dreg;
3630 int dreg = alloc_ireg (cfg);
3631 int tmp_reg;
3632 int klass_reg = alloc_preg (cfg);
3634 NEW_BBLOCK (cfg, true_bb);
3635 NEW_BBLOCK (cfg, false_bb);
3636 NEW_BBLOCK (cfg, false2_bb);
3637 NEW_BBLOCK (cfg, end_bb);
3638 NEW_BBLOCK (cfg, no_proxy_bb);
3640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3643 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3644 NEW_BBLOCK (cfg, interface_fail_bb);
3646 tmp_reg = alloc_preg (cfg);
3647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3648 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3649 MONO_START_BB (cfg, interface_fail_bb);
3650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3652 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3654 tmp_reg = alloc_preg (cfg);
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3657 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3658 } else {
3659 tmp_reg = alloc_preg (cfg);
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3663 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3664 tmp_reg = alloc_preg (cfg);
3665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3668 tmp_reg = alloc_preg (cfg);
3669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3673 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3676 MONO_START_BB (cfg, no_proxy_bb);
3678 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3681 MONO_START_BB (cfg, false_bb);
3683 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3686 MONO_START_BB (cfg, false2_bb);
3688 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3691 MONO_START_BB (cfg, true_bb);
3693 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3695 MONO_START_BB (cfg, end_bb);
3697 /* FIXME: */
3698 MONO_INST_NEW (cfg, ins, OP_ICONST);
3699 ins->dreg = dreg;
3700 ins->type = STACK_I4;
3702 return ins;
3705 static MonoInst*
3706 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3708 /* This opcode takes as input an object reference and a class, and returns:
3709 0) if the object is an instance of the class,
3710 1) if the object is a proxy whose type cannot be determined
3711 an InvalidCastException exception is thrown otherwhise*/
3713 MonoInst *ins;
3714 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3715 int obj_reg = src->dreg;
3716 int dreg = alloc_ireg (cfg);
3717 int tmp_reg = alloc_preg (cfg);
3718 int klass_reg = alloc_preg (cfg);
3720 NEW_BBLOCK (cfg, end_bb);
3721 NEW_BBLOCK (cfg, ok_result_bb);
3723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3726 save_cast_details (cfg, klass, obj_reg);
3728 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3729 NEW_BBLOCK (cfg, interface_fail_bb);
3731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3732 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3733 MONO_START_BB (cfg, interface_fail_bb);
3734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3736 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3738 tmp_reg = alloc_preg (cfg);
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3741 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3743 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3746 } else {
3747 NEW_BBLOCK (cfg, no_proxy_bb);
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3751 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3753 tmp_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3757 tmp_reg = alloc_preg (cfg);
3758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3762 NEW_BBLOCK (cfg, fail_1_bb);
3764 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3766 MONO_START_BB (cfg, fail_1_bb);
3768 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3771 MONO_START_BB (cfg, no_proxy_bb);
3773 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3776 MONO_START_BB (cfg, ok_result_bb);
3778 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3780 MONO_START_BB (cfg, end_bb);
3782 /* FIXME: */
3783 MONO_INST_NEW (cfg, ins, OP_ICONST);
3784 ins->dreg = dreg;
3785 ins->type = STACK_I4;
3787 return ins;
3791 * Returns NULL and set the cfg exception on error.
3793 static G_GNUC_UNUSED MonoInst*
3794 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3796 MonoInst *ptr;
3797 int dreg;
3798 gpointer *trampoline;
3799 MonoInst *obj, *method_ins, *tramp_ins;
3800 MonoDomain *domain;
3801 guint8 **code_slot;
3803 obj = handle_alloc (cfg, klass, FALSE, 0);
3804 if (!obj)
3805 return NULL;
3807 /* Inline the contents of mono_delegate_ctor */
3809 /* Set target field */
3810 /* Optimize away setting of NULL target */
3811 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3813 if (cfg->gen_write_barriers) {
3814 dreg = alloc_preg (cfg);
3815 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3816 emit_write_barrier (cfg, ptr, target, 0);
3820 /* Set method field */
3821 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3823 if (cfg->gen_write_barriers) {
3824 dreg = alloc_preg (cfg);
3825 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3826 emit_write_barrier (cfg, ptr, method_ins, 0);
3829 * To avoid looking up the compiled code belonging to the target method
3830 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3831 * store it, and we fill it after the method has been compiled.
3833 if (!cfg->compile_aot && !method->dynamic) {
3834 MonoInst *code_slot_ins;
3836 if (context_used) {
3837 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3838 } else {
3839 domain = mono_domain_get ();
3840 mono_domain_lock (domain);
3841 if (!domain_jit_info (domain)->method_code_hash)
3842 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3843 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3844 if (!code_slot) {
3845 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3846 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3848 mono_domain_unlock (domain);
3850 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3852 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3855 /* Set invoke_impl field */
3856 if (cfg->compile_aot) {
3857 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3858 } else {
3859 trampoline = mono_create_delegate_trampoline (klass);
3860 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3864 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3866 return obj;
3869 static MonoInst*
3870 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3872 MonoJitICallInfo *info;
3874 /* Need to register the icall so it gets an icall wrapper */
3875 info = mono_get_array_new_va_icall (rank);
3877 cfg->flags |= MONO_CFG_HAS_VARARGS;
3879 /* mono_array_new_va () needs a vararg calling convention */
3880 cfg->disable_llvm = TRUE;
3882 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3883 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3886 static void
3887 mono_emit_load_got_addr (MonoCompile *cfg)
3889 MonoInst *getaddr, *dummy_use;
3891 if (!cfg->got_var || cfg->got_var_allocated)
3892 return;
3894 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3895 getaddr->dreg = cfg->got_var->dreg;
3897 /* Add it to the start of the first bblock */
3898 if (cfg->bb_entry->code) {
3899 getaddr->next = cfg->bb_entry->code;
3900 cfg->bb_entry->code = getaddr;
3902 else
3903 MONO_ADD_INS (cfg->bb_entry, getaddr);
3905 cfg->got_var_allocated = TRUE;
3908 * Add a dummy use to keep the got_var alive, since real uses might
3909 * only be generated by the back ends.
3910 * Add it to end_bblock, so the variable's lifetime covers the whole
3911 * method.
3912 * It would be better to make the usage of the got var explicit in all
3913 * cases when the backend needs it (i.e. calls, throw etc.), so this
3914 * wouldn't be needed.
3916 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3917 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3920 static int inline_limit;
3921 static gboolean inline_limit_inited;
3923 static gboolean
3924 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3926 MonoMethodHeaderSummary header;
3927 MonoVTable *vtable;
3928 #ifdef MONO_ARCH_SOFT_FLOAT
3929 MonoMethodSignature *sig = mono_method_signature (method);
3930 int i;
3931 #endif
3933 if (cfg->generic_sharing_context)
3934 return FALSE;
3936 if (cfg->inline_depth > 10)
3937 return FALSE;
3939 #ifdef MONO_ARCH_HAVE_LMF_OPS
3940 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3941 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3942 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3943 return TRUE;
3944 #endif
3947 if (!mono_method_get_header_summary (method, &header))
3948 return FALSE;
3950 /*runtime, icall and pinvoke are checked by summary call*/
3951 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3952 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3953 (method->klass->marshalbyref) ||
3954 header.has_clauses)
3955 return FALSE;
3957 /* also consider num_locals? */
3958 /* Do the size check early to avoid creating vtables */
3959 if (!inline_limit_inited) {
3960 if (getenv ("MONO_INLINELIMIT"))
3961 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3962 else
3963 inline_limit = INLINE_LENGTH_LIMIT;
3964 inline_limit_inited = TRUE;
3966 if (header.code_size >= inline_limit)
3967 return FALSE;
3970 * if we can initialize the class of the method right away, we do,
3971 * otherwise we don't allow inlining if the class needs initialization,
3972 * since it would mean inserting a call to mono_runtime_class_init()
3973 * inside the inlined code
3975 if (!(cfg->opt & MONO_OPT_SHARED)) {
3976 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3977 if (cfg->run_cctors && method->klass->has_cctor) {
3978 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3979 if (!method->klass->runtime_info)
3980 /* No vtable created yet */
3981 return FALSE;
3982 vtable = mono_class_vtable (cfg->domain, method->klass);
3983 if (!vtable)
3984 return FALSE;
3985 /* This makes so that inline cannot trigger */
3986 /* .cctors: too many apps depend on them */
3987 /* running with a specific order... */
3988 if (! vtable->initialized)
3989 return FALSE;
3990 mono_runtime_class_init (vtable);
3992 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3993 if (!method->klass->runtime_info)
3994 /* No vtable created yet */
3995 return FALSE;
3996 vtable = mono_class_vtable (cfg->domain, method->klass);
3997 if (!vtable)
3998 return FALSE;
3999 if (!vtable->initialized)
4000 return FALSE;
4002 } else {
4004 * If we're compiling for shared code
4005 * the cctor will need to be run at aot method load time, for example,
4006 * or at the end of the compilation of the inlining method.
4008 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4009 return FALSE;
4013 * CAS - do not inline methods with declarative security
4014 * Note: this has to be before any possible return TRUE;
4016 if (mono_method_has_declsec (method))
4017 return FALSE;
4019 #ifdef MONO_ARCH_SOFT_FLOAT
4020 /* FIXME: */
4021 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4022 return FALSE;
4023 for (i = 0; i < sig->param_count; ++i)
4024 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4025 return FALSE;
4026 #endif
4028 return TRUE;
4031 static gboolean
4032 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4034 if (vtable->initialized && !cfg->compile_aot)
4035 return FALSE;
4037 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4038 return FALSE;
4040 if (!mono_class_needs_cctor_run (vtable->klass, method))
4041 return FALSE;
4043 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4044 /* The initialization is already done before the method is called */
4045 return FALSE;
4047 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the single-dimension
 * array ARR with element class KLASS.  When BCHECK is set, a bounds check
 * against MonoArray.max_length is emitted first.  Returns the address
 * instruction (type STACK_MP, klass = element class).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* Truncate a 64-bit index to the 32-bit pointer width. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes can use a single LEA with a scaled index. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
	add_reg = alloc_ireg_mp (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * rank-2 array ARR with element class KLASS.  Both indexes are checked
 * against the per-dimension lower bound and length stored in the array's
 * bounds descriptor, throwing IndexOutOfRangeException on violation.
 * Returns the address instruction (type STACK_MP).
 * Only compiled on targets with real multiply support (depends on OP_PMUL).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* realidx1 = index1 - lower_bound[0]; unsigned compare vs length[0] catches both under/overflow. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Same check for the second dimension (bounds [1]). */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * length[1] + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
4168 static MonoInst*
4169 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4171 int rank;
4172 MonoInst *addr;
4173 MonoMethod *addr_method;
4174 int element_size;
4176 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4178 if (rank == 1)
4179 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4181 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4182 /* emit_ldelema_2 depends on OP_LMUL */
4183 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4184 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4186 #endif
4188 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4189 addr_method = mono_marshal_get_array_address (rank, element_size);
4190 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4192 return addr;
4195 static MonoBreakPolicy
4196 always_insert_breakpoint (MonoMethod *method)
4198 return MONO_BREAK_POLICY_ALWAYS;
4201 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4204 * mono_set_break_policy:
4205 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4208 * (both break IL instructions and Debugger.Break () method calls), for example
4209 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4210 * untrusted or semi-trusted code.
4212 * @policy_callback will be called every time a break point instruction needs to
4213 * be inserted with the method argument being the method that calls Debugger.Break()
4214 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4215 * if it wants the breakpoint to not be effective in the given method.
4216 * #MONO_BREAK_POLICY_ALWAYS is the default.
4218 void
4219 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4221 if (policy_callback)
4222 break_policy_func = policy_callback;
4223 else
4224 break_policy_func = always_insert_breakpoint;
4227 static gboolean
4228 should_insert_brekpoint (MonoMethod *method) {
4229 switch (break_policy_func (method)) {
4230 case MONO_BREAK_POLICY_ALWAYS:
4231 return TRUE;
4232 case MONO_BREAK_POLICY_NEVER:
4233 return FALSE;
4234 case MONO_BREAK_POLICY_ON_DBG:
4235 return mono_debug_using_mono_debugger ();
4236 default:
4237 g_warning ("Incorrect value returned from break policy callback");
4238 return FALSE;
4242 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4243 static MonoInst*
4244 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4246 MonoInst *addr, *store, *load;
4247 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4249 /* the bounds check is already done by the callers */
4250 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4251 if (is_set) {
4252 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4253 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4254 } else {
4255 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4256 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4258 return store;
4261 static MonoInst*
4262 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4264 MonoInst *ins = NULL;
4265 #ifdef MONO_ARCH_SIMD_INTRINSICS
4266 if (cfg->opt & MONO_OPT_SIMD) {
4267 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4268 if (ins)
4269 return ins;
4271 #endif
4273 return ins;
4276 static MonoInst*
4277 emit_memory_barrier (MonoCompile *cfg, int kind)
4279 MonoInst *ins = NULL;
4280 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4281 MONO_ADD_INS (cfg->cbb, ins);
4282 ins->backend.memory_barrier_kind = kind;
4284 return ins;
/*
 * mini_emit_inst_for_method:
 *
 *   Try to emit an inline IR sequence (an intrinsic) for a call to CMETHOD
 * with signature FSIG and argument instructions ARGS, instead of emitting a
 * real call.  Handles a fixed set of well-known corlib methods (String,
 * Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment).  Returns the instruction producing the call's result, or
 * NULL when no intrinsic applies; as a last resort defers to SIMD and
 * arch-specific intrinsics.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	/* Resolved lazily once; cached for the lifetime of the process. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			/* One LEA: chars are 2 bytes, so scale index by 2. */
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		/* Only valid for non-moving GC: the hash is derived from the address. */
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor () is empty; replace the call with a NOP. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			/* bounds == NULL distinguishes szarrays from general arrays. */
			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			return emit_memory_barrier (cfg, FullBarrier);
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {

		/* FIXME this should be integrated to the check below once we support the trampoline version */
#if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
			MonoMethod *fast_method = NULL;

			/* Avoid infinite recursion */
			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
				return NULL;

			fast_method = mono_monitor_get_fast_path (cmethod);
			if (!fast_method)
				return NULL;

			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
		}
#endif

#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				/* The trampoline takes the object in a fixed register, not on the stack. */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		/* Increment/Decrement lower to atomic-add of +1/-1. */
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			/* Storing a reference needs a write barrier for generational GCs. */
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		/* Route Debugger.Break () through the configured break policy / sdb. */
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			if (should_insert_brekpoint (cfg->method)) {
				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
			} else {
				MONO_INST_NEW (cfg, ins, OP_NOP);
				MONO_ADD_INS (cfg->cbb, ins);
			}
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	/* Give the architecture backend a final chance. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
4737 * This entry point could be used later for arbitrary method
4738 * redirection.
4740 inline static MonoInst*
4741 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4742 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4744 if (method->klass == mono_defaults.string_class) {
4745 /* managed string allocation support */
4746 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4747 MonoInst *iargs [2];
4748 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4749 MonoMethod *managed_alloc = NULL;
4751 g_assert (vtable); /*Should not fail since it System.String*/
4752 #ifndef MONO_CROSS_COMPILE
4753 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4754 #endif
4755 if (!managed_alloc)
4756 return NULL;
4757 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4758 iargs [1] = args [0];
4759 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4762 return NULL;
/*
 * mono_save_args:
 *
 *   When inlining a method, create an OP_LOCAL variable for each of its
 * arguments (signature SIG; values on the evaluation stack SP, including
 * the implicit 'this' when sig->hasthis) and store the caller-supplied
 * values into them via cfg->args, so the inlined body can reference its
 * arguments like ordinary locals.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For 'this' the declared type is not in params []; derive it from the stack value. */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
4791 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4792 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of methods whose full name starts with
 * the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  With the variable unset or empty, every method is allowed.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	char *full_name;
	gboolean matches;
	static char *limit = NULL;

	/* Read the environment variable once; "" means no restriction. */
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	matches = (strncmp (full_name, limit, strlen (limit)) == 0);
	g_free (full_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug aid: only allow inlining into callers whose full name starts with
 * the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable.  With the variable unset or empty, every caller is allowed.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	char *full_name;
	gboolean matches;
	static char *limit = NULL;

	/* Read the environment variable once; "" means no restriction. */
	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	matches = (strncmp (full_name, limit, strlen (limit)) == 0);
	g_free (full_name);

	return matches;
}
#endif
4854 static int
4855 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4856 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4858 MonoInst *ins, *rvar = NULL;
4859 MonoMethodHeader *cheader;
4860 MonoBasicBlock *ebblock, *sbblock;
4861 int i, costs;
4862 MonoMethod *prev_inlined_method;
4863 MonoInst **prev_locals, **prev_args;
4864 MonoType **prev_arg_types;
4865 guint prev_real_offset;
4866 GHashTable *prev_cbb_hash;
4867 MonoBasicBlock **prev_cil_offset_to_bb;
4868 MonoBasicBlock *prev_cbb;
4869 unsigned char* prev_cil_start;
4870 guint32 prev_cil_offset_to_bb_len;
4871 MonoMethod *prev_current_method;
4872 MonoGenericContext *prev_generic_context;
4873 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4875 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4877 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4878 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4879 return 0;
4880 #endif
4881 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4882 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4883 return 0;
4884 #endif
4886 if (cfg->verbose_level > 2)
4887 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4889 if (!cmethod->inline_info) {
4890 cfg->stat_inlineable_methods++;
4891 cmethod->inline_info = 1;
4894 /* allocate local variables */
4895 cheader = mono_method_get_header (cmethod);
4897 if (cheader == NULL || mono_loader_get_last_error ()) {
4898 MonoLoaderError *error = mono_loader_get_last_error ();
4900 if (cheader)
4901 mono_metadata_free_mh (cheader);
4902 if (inline_always && error)
4903 mono_cfg_set_exception (cfg, error->exception_type);
4905 mono_loader_clear_error ();
4906 return 0;
4909 /*Must verify before creating locals as it can cause the JIT to assert.*/
4910 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4911 mono_metadata_free_mh (cheader);
4912 return 0;
4915 /* allocate space to store the return value */
4916 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4917 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4920 prev_locals = cfg->locals;
4921 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4922 for (i = 0; i < cheader->num_locals; ++i)
4923 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4925 /* allocate start and end blocks */
4926 /* This is needed so if the inline is aborted, we can clean up */
4927 NEW_BBLOCK (cfg, sbblock);
4928 sbblock->real_offset = real_offset;
4930 NEW_BBLOCK (cfg, ebblock);
4931 ebblock->block_num = cfg->num_bblocks++;
4932 ebblock->real_offset = real_offset;
4934 prev_args = cfg->args;
4935 prev_arg_types = cfg->arg_types;
4936 prev_inlined_method = cfg->inlined_method;
4937 cfg->inlined_method = cmethod;
4938 cfg->ret_var_set = FALSE;
4939 cfg->inline_depth ++;
4940 prev_real_offset = cfg->real_offset;
4941 prev_cbb_hash = cfg->cbb_hash;
4942 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4943 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4944 prev_cil_start = cfg->cil_start;
4945 prev_cbb = cfg->cbb;
4946 prev_current_method = cfg->current_method;
4947 prev_generic_context = cfg->generic_context;
4948 prev_ret_var_set = cfg->ret_var_set;
4950 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4951 virtual = TRUE;
4953 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4955 ret_var_set = cfg->ret_var_set;
4957 cfg->inlined_method = prev_inlined_method;
4958 cfg->real_offset = prev_real_offset;
4959 cfg->cbb_hash = prev_cbb_hash;
4960 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4961 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4962 cfg->cil_start = prev_cil_start;
4963 cfg->locals = prev_locals;
4964 cfg->args = prev_args;
4965 cfg->arg_types = prev_arg_types;
4966 cfg->current_method = prev_current_method;
4967 cfg->generic_context = prev_generic_context;
4968 cfg->ret_var_set = prev_ret_var_set;
4969 cfg->inline_depth --;
4971 if ((costs >= 0 && costs < 60) || inline_always) {
4972 if (cfg->verbose_level > 2)
4973 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4975 cfg->stat_inlined_methods++;
4977 /* always add some code to avoid block split failures */
4978 MONO_INST_NEW (cfg, ins, OP_NOP);
4979 MONO_ADD_INS (prev_cbb, ins);
4981 prev_cbb->next_bb = sbblock;
4982 link_bblock (cfg, prev_cbb, sbblock);
4985 * Get rid of the begin and end bblocks if possible to aid local
4986 * optimizations.
4988 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4990 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4991 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4993 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4994 MonoBasicBlock *prev = ebblock->in_bb [0];
4995 mono_merge_basic_blocks (cfg, prev, ebblock);
4996 cfg->cbb = prev;
4997 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4998 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4999 cfg->cbb = prev_cbb;
5001 } else {
5002 cfg->cbb = ebblock;
5005 if (rvar) {
5007 * If the inlined method contains only a throw, then the ret var is not
5008 * set, so set it to a dummy value.
5010 if (!ret_var_set) {
5011 static double r8_0 = 0.0;
5013 switch (rvar->type) {
5014 case STACK_I4:
5015 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5016 break;
5017 case STACK_I8:
5018 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5019 break;
5020 case STACK_PTR:
5021 case STACK_MP:
5022 case STACK_OBJ:
5023 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5024 break;
5025 case STACK_R8:
5026 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5027 ins->type = STACK_R8;
5028 ins->inst_p0 = (void*)&r8_0;
5029 ins->dreg = rvar->dreg;
5030 MONO_ADD_INS (cfg->cbb, ins);
5031 break;
5032 case STACK_VTYPE:
5033 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
5034 break;
5035 default:
5036 g_assert_not_reached ();
5040 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5041 *sp++ = ins;
5043 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5044 return costs + 1;
5045 } else {
5046 if (cfg->verbose_level > 2)
5047 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5048 cfg->exception_type = MONO_EXCEPTION_NONE;
5049 mono_loader_clear_error ();
5051 /* This gets rid of the newly added bblocks */
5052 cfg->cbb = prev_cbb;
5054 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5055 return 0;
5059 * Some of these comments may well be out-of-date.
5060 * Design decisions: we do a single pass over the IL code (and we do bblock
5061 * splitting/merging in the few cases when it's required: a back jump to an IL
5062 * address that was not already seen as bblock starting point).
5063 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5064 * Complex operations are decomposed in simpler ones right away. We need to let the
5065 * arch-specific code peek and poke inside this process somehow (except when the
5066 * optimizations can take advantage of the full semantic info of coarse opcodes).
5067 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5068 * MonoInst->opcode initially is the IL opcode or some simplification of that
5069 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5070 * opcode with value bigger than OP_LAST.
5071 * At this point the IR can be handed over to an interpreter, a dumb code generator
5072 * or to the optimizing code generator that will translate it to SSA form.
5074 * Profiling directed optimizations.
5075 * We may compile by default with few or no optimizations and instrument the code
5076 * or the user may indicate what methods to optimize the most either in a config file
5077 * or through repeated runs where the compiler applies offline the optimizations to
5078 * each method and then decides if it was worth it.
/*
 * IL-verification helpers used throughout mono_method_to_ir ().  Each macro
 * bails out through the UNVERIFIED / LOAD_ERROR labels of the enclosing
 * function when the IL being compiled is malformed, so they may only be
 * used where those labels are in scope.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
5093 static gboolean
5094 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5096 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5098 return b == NULL || b == bb;
5101 static int
5102 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5104 unsigned char *ip = start;
5105 unsigned char *target;
5106 int i;
5107 guint cli_addr;
5108 MonoBasicBlock *bblock;
5109 const MonoOpcode *opcode;
5111 while (ip < end) {
5112 cli_addr = ip - start;
5113 i = mono_opcode_value ((const guint8 **)&ip, end);
5114 if (i < 0)
5115 UNVERIFIED;
5116 opcode = &mono_opcodes [i];
5117 switch (opcode->argument) {
5118 case MonoInlineNone:
5119 ip++;
5120 break;
5121 case MonoInlineString:
5122 case MonoInlineType:
5123 case MonoInlineField:
5124 case MonoInlineMethod:
5125 case MonoInlineTok:
5126 case MonoInlineSig:
5127 case MonoShortInlineR:
5128 case MonoInlineI:
5129 ip += 5;
5130 break;
5131 case MonoInlineVar:
5132 ip += 3;
5133 break;
5134 case MonoShortInlineVar:
5135 case MonoShortInlineI:
5136 ip += 2;
5137 break;
5138 case MonoShortInlineBrTarget:
5139 target = start + cli_addr + 2 + (signed char)ip [1];
5140 GET_BBLOCK (cfg, bblock, target);
5141 ip += 2;
5142 if (ip < end)
5143 GET_BBLOCK (cfg, bblock, ip);
5144 break;
5145 case MonoInlineBrTarget:
5146 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5147 GET_BBLOCK (cfg, bblock, target);
5148 ip += 5;
5149 if (ip < end)
5150 GET_BBLOCK (cfg, bblock, ip);
5151 break;
5152 case MonoInlineSwitch: {
5153 guint32 n = read32 (ip + 1);
5154 guint32 j;
5155 ip += 5;
5156 cli_addr += 5 + 4 * n;
5157 target = start + cli_addr;
5158 GET_BBLOCK (cfg, bblock, target);
5160 for (j = 0; j < n; ++j) {
5161 target = start + cli_addr + (gint32)read32 (ip);
5162 GET_BBLOCK (cfg, bblock, target);
5163 ip += 4;
5165 break;
5167 case MonoInlineR:
5168 case MonoInlineI8:
5169 ip += 9;
5170 break;
5171 default:
5172 g_assert_not_reached ();
5175 if (i == CEE_THROW) {
5176 unsigned char *bb_start = ip - 1;
5178 /* Find the start of the bblock containing the throw */
5179 bblock = NULL;
5180 while ((bb_start >= start) && !bblock) {
5181 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5182 bb_start --;
5184 if (bblock)
5185 bblock->out_of_line = 1;
5188 return 0;
5189 unverified:
5190 *pos = ip;
5191 return 1;
5194 static inline MonoMethod *
5195 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5197 MonoMethod *method;
5199 if (m->wrapper_type != MONO_WRAPPER_NONE)
5200 return mono_method_get_wrapper_data (m, token);
5202 method = mono_get_method_full (m->klass->image, token, klass, context);
5204 return method;
5207 static inline MonoMethod *
5208 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5210 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5212 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5213 return NULL;
5215 return method;
5218 static inline MonoClass*
5219 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5221 MonoClass *klass;
5223 if (method->wrapper_type != MONO_WRAPPER_NONE)
5224 klass = mono_method_get_wrapper_data (method, token);
5225 else
5226 klass = mono_class_get_full (method->klass->image, token, context);
5227 if (klass)
5228 mono_class_init (klass);
5229 return klass;
5233 * Returns TRUE if the JIT should abort inlining because "callee"
5234 * is influenced by security attributes.
5236 static
5237 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5239 guint32 result;
5241 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5242 return TRUE;
5245 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5246 if (result == MONO_JIT_SECURITY_OK)
5247 return FALSE;
5249 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5250 /* Generate code to throw a SecurityException before the actual call/link */
5251 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5252 MonoInst *args [2];
5254 NEW_ICONST (cfg, args [0], 4);
5255 NEW_METHODCONST (cfg, args [1], caller);
5256 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5257 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5258 /* don't hide previous results */
5259 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5260 cfg->exception_data = result;
5261 return TRUE;
5264 return FALSE;
5267 static MonoMethod*
5268 throw_exception (void)
5270 static MonoMethod *method = NULL;
5272 if (!method) {
5273 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5274 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5276 g_assert (method);
5277 return method;
5280 static void
5281 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5283 MonoMethod *thrower = throw_exception ();
5284 MonoInst *args [1];
5286 EMIT_NEW_PCONST (cfg, args [0], ex);
5287 mono_emit_method_call (cfg, thrower, args, NULL);
5291 * Return the original method is a wrapper is specified. We can only access
5292 * the custom attributes from the original method.
5294 static MonoMethod*
5295 get_original_method (MonoMethod *method)
5297 if (method->wrapper_type == MONO_WRAPPER_NONE)
5298 return method;
5300 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5301 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5302 return NULL;
5304 /* in other cases we need to find the original method */
5305 return mono_marshal_method_from_wrapper (method);
5308 static void
5309 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5310 MonoBasicBlock *bblock, unsigned char *ip)
5312 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5313 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5314 if (ex)
5315 emit_throw_exception (cfg, ex);
5318 static void
5319 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5320 MonoBasicBlock *bblock, unsigned char *ip)
5322 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5323 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5324 if (ex)
5325 emit_throw_exception (cfg, ex);
5329 * Check that the IL instructions at ip are the array initialization
5330 * sequence and return the pointer to the data and the size.
5332 static const char*
5333 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5336 * newarr[System.Int32]
5337 * dup
5338 * ldtoken field valuetype ...
5339 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5341 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5342 guint32 token = read32 (ip + 7);
5343 guint32 field_token = read32 (ip + 2);
5344 guint32 field_index = field_token & 0xffffff;
5345 guint32 rva;
5346 const char *data_ptr;
5347 int size = 0;
5348 MonoMethod *cmethod;
5349 MonoClass *dummy_class;
5350 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5351 int dummy_align;
5353 if (!field)
5354 return NULL;
5356 *out_field_token = field_token;
5358 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
5359 if (!cmethod)
5360 return NULL;
5361 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5362 return NULL;
5363 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5364 case MONO_TYPE_BOOLEAN:
5365 case MONO_TYPE_I1:
5366 case MONO_TYPE_U1:
5367 size = 1; break;
5368 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5369 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5370 case MONO_TYPE_CHAR:
5371 case MONO_TYPE_I2:
5372 case MONO_TYPE_U2:
5373 size = 2; break;
5374 case MONO_TYPE_I4:
5375 case MONO_TYPE_U4:
5376 case MONO_TYPE_R4:
5377 size = 4; break;
5378 case MONO_TYPE_R8:
5379 #ifdef ARM_FPU_FPA
5380 return NULL; /* stupid ARM FP swapped format */
5381 #endif
5382 case MONO_TYPE_I8:
5383 case MONO_TYPE_U8:
5384 size = 8; break;
5385 #endif
5386 default:
5387 return NULL;
5389 size *= len;
5390 if (size > mono_type_size (field->type, &dummy_align))
5391 return NULL;
5392 *out_size = size;
5393 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5394 if (!method->klass->image->dynamic) {
5395 field_index = read32 (ip + 2) & 0xffffff;
5396 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5397 data_ptr = mono_image_rva_map (method->klass->image, rva);
5398 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5399 /* for aot code we do the lookup on load */
5400 if (aot && data_ptr)
5401 return GUINT_TO_POINTER (rva);
5402 } else {
5403 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5404 g_assert (!aot);
5405 data_ptr = mono_field_get_data (field);
5407 return data_ptr;
5409 return NULL;
5412 static void
5413 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5415 char *method_fname = mono_method_full_name (method, TRUE);
5416 char *method_code;
5417 MonoMethodHeader *header = mono_method_get_header (method);
5419 if (header->code_size == 0)
5420 method_code = g_strdup ("method body is empty.");
5421 else
5422 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5423 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5424 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5425 g_free (method_fname);
5426 g_free (method_code);
5427 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5430 static void
5431 set_exception_object (MonoCompile *cfg, MonoException *exception)
5433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5434 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5435 cfg->exception_ptr = exception;
5438 static gboolean
5439 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5441 return mini_type_is_reference (cfg, &klass->byval_arg);
5444 static void
5445 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5447 MonoInst *ins;
5448 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5449 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5450 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5451 /* Optimize reg-reg moves away */
5453 * Can't optimize other opcodes, since sp[0] might point to
5454 * the last ins of a decomposed opcode.
5456 sp [0]->dreg = (cfg)->locals [n]->dreg;
5457 } else {
5458 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5463 * ldloca inhibits many optimizations so try to get rid of it in common
5464 * cases.
5466 static inline unsigned char *
5467 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5469 int local, token;
5470 MonoClass *klass;
5472 if (size == 1) {
5473 local = ip [1];
5474 ip += 2;
5475 } else {
5476 local = read16 (ip + 2);
5477 ip += 4;
5480 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5481 gboolean skip = FALSE;
5483 /* From the INITOBJ case */
5484 token = read32 (ip + 2);
5485 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5486 CHECK_TYPELOAD (klass);
5487 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5488 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5489 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5490 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5491 } else {
5492 skip = TRUE;
5495 if (!skip)
5496 return ip + 6;
5498 load_error:
5499 return NULL;
5502 static gboolean
5503 is_exception_class (MonoClass *class)
5505 while (class) {
5506 if (class == mono_defaults.exception_class)
5507 return TRUE;
5508 class = class->parent;
5510 return FALSE;
5514 * is_jit_optimizer_disabled:
5516 * Determine whenever M's assembly has a DebuggableAttribute with the
5517 * IsJITOptimizerDisabled flag set.
5519 static gboolean
5520 is_jit_optimizer_disabled (MonoMethod *m)
5522 MonoAssembly *ass = m->klass->image->assembly;
5523 MonoCustomAttrInfo* attrs;
5524 static MonoClass *klass;
5525 int i;
5526 gboolean val = FALSE;
5528 g_assert (ass);
5529 if (ass->jit_optimizer_disabled_inited)
5530 return ass->jit_optimizer_disabled;
5532 if (!klass)
5533 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5534 if (!klass) {
5535 /* Linked away */
5536 ass->jit_optimizer_disabled = FALSE;
5537 mono_memory_barrier ();
5538 ass->jit_optimizer_disabled_inited = TRUE;
5539 return FALSE;
5542 attrs = mono_custom_attrs_from_assembly (ass);
5543 if (attrs) {
5544 for (i = 0; i < attrs->num_attrs; ++i) {
5545 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5546 const gchar *p;
5547 int len;
5548 MonoMethodSignature *sig;
5550 if (!attr->ctor || attr->ctor->klass != klass)
5551 continue;
5552 /* Decode the attribute. See reflection.c */
5553 len = attr->data_size;
5554 p = (const char*)attr->data;
5555 g_assert (read16 (p) == 0x0001);
5556 p += 2;
5558 // FIXME: Support named parameters
5559 sig = mono_method_signature (attr->ctor);
5560 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5561 continue;
5562 /* Two boolean arguments */
5563 p ++;
5564 val = *p;
5566 mono_custom_attrs_free (attrs);
5569 ass->jit_optimizer_disabled = val;
5570 mono_memory_barrier ();
5571 ass->jit_optimizer_disabled_inited = TRUE;
5573 return val;
5576 static gboolean
5577 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5579 gboolean supported_tail_call;
5580 int i;
5582 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5583 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5584 #else
5585 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5586 #endif
5588 for (i = 0; i < fsig->param_count; ++i) {
5589 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5590 /* These can point to the current method's stack */
5591 supported_tail_call = FALSE;
5593 if (fsig->hasthis && cmethod->klass->valuetype)
5594 /* this might point to the current method's stack */
5595 supported_tail_call = FALSE;
5596 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5597 supported_tail_call = FALSE;
5598 if (cfg->method->save_lmf)
5599 supported_tail_call = FALSE;
5600 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5601 supported_tail_call = FALSE;
5603 /* Debugging support */
5604 #if 0
5605 if (supported_tail_call) {
5606 static int count = 0;
5607 count ++;
5608 if (getenv ("COUNT")) {
5609 if (count == atoi (getenv ("COUNT")))
5610 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5611 if (count > atoi (getenv ("COUNT")))
5612 supported_tail_call = FALSE;
5615 #endif
5617 return supported_tail_call;
5620 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5621 * it to the thread local value based on the tls_offset field. Every other kind of access to
5622 * the field causes an assert.
5624 static gboolean
5625 is_magic_tls_access (MonoClassField *field)
5627 if (strcmp (field->name, "tlsdata"))
5628 return FALSE;
5629 if (strcmp (field->parent->name, "ThreadLocal`1"))
5630 return FALSE;
5631 return field->parent->image == mono_defaults.corlib;
5634 /* emits the code needed to access a managed tls var (like ThreadStatic)
5635 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5636 * pointer for the current thread.
5637 * Returns the MonoInst* representing the address of the tls var.
5639 static MonoInst*
5640 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5642 MonoInst *addr;
5643 int static_data_reg, array_reg, dreg;
5644 int offset2_reg, idx_reg;
5645 // inlined access to the tls data
5646 // idx = (offset >> 24) - 1;
5647 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
5648 static_data_reg = alloc_ireg (cfg);
5649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
5650 idx_reg = alloc_ireg (cfg);
5651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5654 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
5655 array_reg = alloc_ireg (cfg);
5656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
5657 offset2_reg = alloc_ireg (cfg);
5658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5659 dreg = alloc_ireg (cfg);
5660 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5661 return addr;
5665 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5666 * this address is cached per-method in cached_tls_addr.
5668 static MonoInst*
5669 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5671 MonoInst *load, *addr, *temp, *store, *thread_ins;
5672 MonoClassField *offset_field;
5674 if (*cached_tls_addr) {
5675 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
5676 return addr;
5678 thread_ins = mono_get_thread_intrinsic (cfg);
5679 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5682 if (thread_ins) {
5683 MONO_ADD_INS (cfg->cbb, thread_ins);
5684 } else {
5685 MonoMethod *thread_method;
5686 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5687 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
5689 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5690 addr->klass = mono_class_from_mono_type (tls_field->type);
5691 addr->type = STACK_MP;
5692 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5693 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5695 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5696 return addr;
5700 * mono_method_to_ir:
5702 * Translate the .net IL into linear IR.
5705 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5706 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5707 guint inline_offset, gboolean is_virtual_call)
5709 MonoError error;
5710 MonoInst *ins, **sp, **stack_start;
5711 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5712 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5713 MonoMethod *cmethod, *method_definition;
5714 MonoInst **arg_array;
5715 MonoMethodHeader *header;
5716 MonoImage *image;
5717 guint32 token, ins_flag;
5718 MonoClass *klass;
5719 MonoClass *constrained_call = NULL;
5720 unsigned char *ip, *end, *target, *err_pos;
5721 static double r8_0 = 0.0;
5722 MonoMethodSignature *sig;
5723 MonoGenericContext *generic_context = NULL;
5724 MonoGenericContainer *generic_container = NULL;
5725 MonoType **param_types;
5726 int i, n, start_new_bblock, dreg;
5727 int num_calls = 0, inline_costs = 0;
5728 int breakpoint_id = 0;
5729 guint num_args;
5730 MonoBoolean security, pinvoke;
5731 MonoSecurityManager* secman = NULL;
5732 MonoDeclSecurityActions actions;
5733 GSList *class_inits = NULL;
5734 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5735 int context_used;
5736 gboolean init_locals, seq_points, skip_dead_blocks;
5737 gboolean disable_inline;
5738 MonoInst *cached_tls_addr = NULL;
5740 disable_inline = is_jit_optimizer_disabled (method);
5742 /* serialization and xdomain stuff may need access to private fields and methods */
5743 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5744 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5745 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5746 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5747 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5748 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5750 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5752 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5753 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5754 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5755 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5756 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5758 image = method->klass->image;
5759 header = mono_method_get_header (method);
5760 if (!header) {
5761 MonoLoaderError *error;
5763 if ((error = mono_loader_get_last_error ())) {
5764 mono_cfg_set_exception (cfg, error->exception_type);
5765 } else {
5766 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5767 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5769 goto exception_exit;
5771 generic_container = mono_method_get_generic_container (method);
5772 sig = mono_method_signature (method);
5773 num_args = sig->hasthis + sig->param_count;
5774 ip = (unsigned char*)header->code;
5775 cfg->cil_start = ip;
5776 end = ip + header->code_size;
5777 cfg->stat_cil_code_size += header->code_size;
5778 init_locals = header->init_locals;
5780 seq_points = cfg->gen_seq_points && cfg->method == method;
5783 * Methods without init_locals set could cause asserts in various passes
5784 * (#497220).
5786 init_locals = TRUE;
5788 method_definition = method;
5789 while (method_definition->is_inflated) {
5790 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5791 method_definition = imethod->declaring;
5794 /* SkipVerification is not allowed if core-clr is enabled */
5795 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5796 dont_verify = TRUE;
5797 dont_verify_stloc = TRUE;
5800 if (mono_debug_using_mono_debugger ())
5801 cfg->keep_cil_nops = TRUE;
5803 if (sig->is_inflated)
5804 generic_context = mono_method_get_context (method);
5805 else if (generic_container)
5806 generic_context = &generic_container->context;
5807 cfg->generic_context = generic_context;
5809 if (!cfg->generic_sharing_context)
5810 g_assert (!sig->has_type_parameters);
5812 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5813 g_assert (method->is_inflated);
5814 g_assert (mono_method_get_context (method)->method_inst);
5816 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5817 g_assert (sig->generic_param_count);
5819 if (cfg->method == method) {
5820 cfg->real_offset = 0;
5821 } else {
5822 cfg->real_offset = inline_offset;
5825 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5826 cfg->cil_offset_to_bb_len = header->code_size;
5828 cfg->current_method = method;
5830 if (cfg->verbose_level > 2)
5831 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5833 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5834 if (sig->hasthis)
5835 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5836 for (n = 0; n < sig->param_count; ++n)
5837 param_types [n + sig->hasthis] = sig->params [n];
5838 cfg->arg_types = param_types;
5840 dont_inline = g_list_prepend (dont_inline, method);
5841 if (cfg->method == method) {
5843 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5844 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5846 /* ENTRY BLOCK */
5847 NEW_BBLOCK (cfg, start_bblock);
5848 cfg->bb_entry = start_bblock;
5849 start_bblock->cil_code = NULL;
5850 start_bblock->cil_length = 0;
5851 #if defined(__native_client_codegen__)
5852 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5853 ins->dreg = alloc_dreg (cfg, STACK_I4);
5854 MONO_ADD_INS (start_bblock, ins);
5855 #endif
5857 /* EXIT BLOCK */
5858 NEW_BBLOCK (cfg, end_bblock);
5859 cfg->bb_exit = end_bblock;
5860 end_bblock->cil_code = NULL;
5861 end_bblock->cil_length = 0;
5862 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5863 g_assert (cfg->num_bblocks == 2);
5865 arg_array = cfg->args;
5867 if (header->num_clauses) {
5868 cfg->spvars = g_hash_table_new (NULL, NULL);
5869 cfg->exvars = g_hash_table_new (NULL, NULL);
5871 /* handle exception clauses */
5872 for (i = 0; i < header->num_clauses; ++i) {
5873 MonoBasicBlock *try_bb;
5874 MonoExceptionClause *clause = &header->clauses [i];
5875 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5876 try_bb->real_offset = clause->try_offset;
5877 try_bb->try_start = TRUE;
5878 try_bb->region = ((i + 1) << 8) | clause->flags;
5879 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5880 tblock->real_offset = clause->handler_offset;
5881 tblock->flags |= BB_EXCEPTION_HANDLER;
5883 link_bblock (cfg, try_bb, tblock);
5885 if (*(ip + clause->handler_offset) == CEE_POP)
5886 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5888 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5889 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5890 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5891 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5892 MONO_ADD_INS (tblock, ins);
5894 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5895 /* finally clauses already have a seq point */
5896 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5897 MONO_ADD_INS (tblock, ins);
5900 /* todo: is a fault block unsafe to optimize? */
5901 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5902 tblock->flags |= BB_EXCEPTION_UNSAFE;
5906 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5907 while (p < end) {
5908 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5910 /* catch and filter blocks get the exception object on the stack */
5911 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5912 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5913 MonoInst *dummy_use;
5915 /* mostly like handle_stack_args (), but just sets the input args */
5916 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5917 tblock->in_scount = 1;
5918 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5919 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5922 * Add a dummy use for the exvar so its liveness info will be
5923 * correct.
5925 cfg->cbb = tblock;
5926 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5928 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5929 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5930 tblock->flags |= BB_EXCEPTION_HANDLER;
5931 tblock->real_offset = clause->data.filter_offset;
5932 tblock->in_scount = 1;
5933 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5934 /* The filter block shares the exvar with the handler block */
5935 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5936 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5937 MONO_ADD_INS (tblock, ins);
5941 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5942 clause->data.catch_class &&
5943 cfg->generic_sharing_context &&
5944 mono_class_check_context_used (clause->data.catch_class)) {
5946 * In shared generic code with catch
5947 * clauses containing type variables
5948 * the exception handling code has to
5949 * be able to get to the rgctx.
5950 * Therefore we have to make sure that
5951 * the vtable/mrgctx argument (for
5952 * static or generic methods) or the
5953 * "this" argument (for non-static
5954 * methods) are live.
5956 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5957 mini_method_get_context (method)->method_inst ||
5958 method->klass->valuetype) {
5959 mono_get_vtable_var (cfg);
5960 } else {
5961 MonoInst *dummy_use;
5963 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5967 } else {
5968 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5969 cfg->cbb = start_bblock;
5970 cfg->args = arg_array;
5971 mono_save_args (cfg, sig, inline_args);
5974 /* FIRST CODE BLOCK */
5975 NEW_BBLOCK (cfg, bblock);
5976 bblock->cil_code = ip;
5977 cfg->cbb = bblock;
5978 cfg->ip = ip;
5980 ADD_BBLOCK (cfg, bblock);
5982 if (cfg->method == method) {
5983 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5984 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5985 MONO_INST_NEW (cfg, ins, OP_BREAK);
5986 MONO_ADD_INS (bblock, ins);
5990 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5991 secman = mono_security_manager_get_methods ();
5993 security = (secman && mono_method_has_declsec (method));
5994 /* at this point having security doesn't mean we have any code to generate */
5995 if (security && (cfg->method == method)) {
5996 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5997 * And we do not want to enter the next section (with allocation) if we
5998 * have nothing to generate */
5999 security = mono_declsec_get_demands (method, &actions);
6002 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6003 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6004 if (pinvoke) {
6005 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6006 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6007 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6009 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6010 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6011 pinvoke = FALSE;
6013 if (custom)
6014 mono_custom_attrs_free (custom);
6016 if (pinvoke) {
6017 custom = mono_custom_attrs_from_class (wrapped->klass);
6018 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6019 pinvoke = FALSE;
6021 if (custom)
6022 mono_custom_attrs_free (custom);
6024 } else {
6025 /* not a P/Invoke after all */
6026 pinvoke = FALSE;
6030 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6031 /* we use a separate basic block for the initialization code */
6032 NEW_BBLOCK (cfg, init_localsbb);
6033 cfg->bb_init = init_localsbb;
6034 init_localsbb->real_offset = cfg->real_offset;
6035 start_bblock->next_bb = init_localsbb;
6036 init_localsbb->next_bb = bblock;
6037 link_bblock (cfg, start_bblock, init_localsbb);
6038 link_bblock (cfg, init_localsbb, bblock);
6040 cfg->cbb = init_localsbb;
6041 } else {
6042 start_bblock->next_bb = bblock;
6043 link_bblock (cfg, start_bblock, bblock);
6046 /* at this point we know, if security is TRUE, that some code needs to be generated */
6047 if (security && (cfg->method == method)) {
6048 MonoInst *args [2];
6050 cfg->stat_cas_demand_generation++;
6052 if (actions.demand.blob) {
6053 /* Add code for SecurityAction.Demand */
6054 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6055 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6056 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6057 mono_emit_method_call (cfg, secman->demand, args, NULL);
6059 if (actions.noncasdemand.blob) {
6060 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6061 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6062 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6063 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6064 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6065 mono_emit_method_call (cfg, secman->demand, args, NULL);
6067 if (actions.demandchoice.blob) {
6068 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6069 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6070 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6071 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6072 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6076 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6077 if (pinvoke) {
6078 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6081 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6082 /* check if this is native code, e.g. an icall or a p/invoke */
6083 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6084 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6085 if (wrapped) {
6086 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6087 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6089 /* if this is a native call then it can only be JITted from platform code */
6090 if ((icall || pinvk) && method->klass && method->klass->image) {
6091 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6092 MonoException *ex = icall ? mono_get_exception_security () :
6093 mono_get_exception_method_access ();
6094 emit_throw_exception (cfg, ex);
6101 if (header->code_size == 0)
6102 UNVERIFIED;
6104 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6105 ip = err_pos;
6106 UNVERIFIED;
6109 if (cfg->method == method)
6110 mono_debug_init_method (cfg, bblock, breakpoint_id);
6112 for (n = 0; n < header->num_locals; ++n) {
6113 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6114 UNVERIFIED;
6116 class_inits = NULL;
6118 /* We force the vtable variable here for all shared methods
6119 for the possibility that they might show up in a stack
6120 trace where their exact instantiation is needed. */
6121 if (cfg->generic_sharing_context && method == cfg->method) {
6122 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6123 mini_method_get_context (method)->method_inst ||
6124 method->klass->valuetype) {
6125 mono_get_vtable_var (cfg);
6126 } else {
6127 /* FIXME: Is there a better way to do this?
6128 We need the variable live for the duration
6129 of the whole method. */
6130 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6134 /* add a check for this != NULL to inlined methods */
6135 if (is_virtual_call) {
6136 MonoInst *arg_ins;
6138 NEW_ARGLOAD (cfg, arg_ins, 0);
6139 MONO_ADD_INS (cfg->cbb, arg_ins);
6140 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6143 skip_dead_blocks = !dont_verify;
6144 if (skip_dead_blocks) {
6145 original_bb = bb = mono_basic_block_split (method, &error);
6146 if (!mono_error_ok (&error)) {
6147 mono_error_cleanup (&error);
6148 UNVERIFIED;
6150 g_assert (bb);
6153 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6154 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6156 ins_flag = 0;
6157 start_new_bblock = 0;
6158 cfg->cbb = bblock;
6159 while (ip < end) {
6160 if (cfg->method == method)
6161 cfg->real_offset = ip - header->code;
6162 else
6163 cfg->real_offset = inline_offset;
6164 cfg->ip = ip;
6166 context_used = 0;
6168 if (start_new_bblock) {
6169 bblock->cil_length = ip - bblock->cil_code;
6170 if (start_new_bblock == 2) {
6171 g_assert (ip == tblock->cil_code);
6172 } else {
6173 GET_BBLOCK (cfg, tblock, ip);
6175 bblock->next_bb = tblock;
6176 bblock = tblock;
6177 cfg->cbb = bblock;
6178 start_new_bblock = 0;
6179 for (i = 0; i < bblock->in_scount; ++i) {
6180 if (cfg->verbose_level > 3)
6181 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6182 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6183 *sp++ = ins;
6185 if (class_inits)
6186 g_slist_free (class_inits);
6187 class_inits = NULL;
6188 } else {
6189 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6190 link_bblock (cfg, bblock, tblock);
6191 if (sp != stack_start) {
6192 handle_stack_args (cfg, stack_start, sp - stack_start);
6193 sp = stack_start;
6194 CHECK_UNVERIFIABLE (cfg);
6196 bblock->next_bb = tblock;
6197 bblock = tblock;
6198 cfg->cbb = bblock;
6199 for (i = 0; i < bblock->in_scount; ++i) {
6200 if (cfg->verbose_level > 3)
6201 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6202 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6203 *sp++ = ins;
6205 g_slist_free (class_inits);
6206 class_inits = NULL;
6210 if (skip_dead_blocks) {
6211 int ip_offset = ip - header->code;
6213 if (ip_offset == bb->end)
6214 bb = bb->next;
6216 if (bb->dead) {
6217 int op_size = mono_opcode_size (ip, end);
6218 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6220 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6222 if (ip_offset + op_size == bb->end) {
6223 MONO_INST_NEW (cfg, ins, OP_NOP);
6224 MONO_ADD_INS (bblock, ins);
6225 start_new_bblock = 1;
6228 ip += op_size;
6229 continue;
6233 * Sequence points are points where the debugger can place a breakpoint.
6234 * Currently, we generate these automatically at points where the IL
6235 * stack is empty.
6237 if (seq_points && sp == stack_start) {
6238 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6239 MONO_ADD_INS (cfg->cbb, ins);
6242 bblock->real_offset = cfg->real_offset;
6244 if ((cfg->method == method) && cfg->coverage_info) {
6245 guint32 cil_offset = ip - header->code;
6246 cfg->coverage_info->data [cil_offset].cil_code = ip;
6248 /* TODO: Use an increment here */
6249 #if defined(TARGET_X86)
6250 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6251 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6252 ins->inst_imm = 1;
6253 MONO_ADD_INS (cfg->cbb, ins);
6254 #else
6255 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6256 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6257 #endif
6260 if (cfg->verbose_level > 3)
6261 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6263 switch (*ip) {
6264 case CEE_NOP:
6265 if (cfg->keep_cil_nops)
6266 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6267 else
6268 MONO_INST_NEW (cfg, ins, OP_NOP);
6269 ip++;
6270 MONO_ADD_INS (bblock, ins);
6271 break;
6272 case CEE_BREAK:
6273 if (should_insert_brekpoint (cfg->method)) {
6274 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6275 } else {
6276 MONO_INST_NEW (cfg, ins, OP_NOP);
6278 ip++;
6279 MONO_ADD_INS (bblock, ins);
6280 break;
6281 case CEE_LDARG_0:
6282 case CEE_LDARG_1:
6283 case CEE_LDARG_2:
6284 case CEE_LDARG_3:
6285 CHECK_STACK_OVF (1);
6286 n = (*ip)-CEE_LDARG_0;
6287 CHECK_ARG (n);
6288 EMIT_NEW_ARGLOAD (cfg, ins, n);
6289 ip++;
6290 *sp++ = ins;
6291 break;
6292 case CEE_LDLOC_0:
6293 case CEE_LDLOC_1:
6294 case CEE_LDLOC_2:
6295 case CEE_LDLOC_3:
6296 CHECK_STACK_OVF (1);
6297 n = (*ip)-CEE_LDLOC_0;
6298 CHECK_LOCAL (n);
6299 EMIT_NEW_LOCLOAD (cfg, ins, n);
6300 ip++;
6301 *sp++ = ins;
6302 break;
6303 case CEE_STLOC_0:
6304 case CEE_STLOC_1:
6305 case CEE_STLOC_2:
6306 case CEE_STLOC_3: {
6307 CHECK_STACK (1);
6308 n = (*ip)-CEE_STLOC_0;
6309 CHECK_LOCAL (n);
6310 --sp;
6311 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6312 UNVERIFIED;
6313 emit_stloc_ir (cfg, sp, header, n);
6314 ++ip;
6315 inline_costs += 1;
6316 break;
6318 case CEE_LDARG_S:
6319 CHECK_OPSIZE (2);
6320 CHECK_STACK_OVF (1);
6321 n = ip [1];
6322 CHECK_ARG (n);
6323 EMIT_NEW_ARGLOAD (cfg, ins, n);
6324 *sp++ = ins;
6325 ip += 2;
6326 break;
6327 case CEE_LDARGA_S:
6328 CHECK_OPSIZE (2);
6329 CHECK_STACK_OVF (1);
6330 n = ip [1];
6331 CHECK_ARG (n);
6332 NEW_ARGLOADA (cfg, ins, n);
6333 MONO_ADD_INS (cfg->cbb, ins);
6334 *sp++ = ins;
6335 ip += 2;
6336 break;
6337 case CEE_STARG_S:
6338 CHECK_OPSIZE (2);
6339 CHECK_STACK (1);
6340 --sp;
6341 n = ip [1];
6342 CHECK_ARG (n);
6343 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6344 UNVERIFIED;
6345 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6346 ip += 2;
6347 break;
6348 case CEE_LDLOC_S:
6349 CHECK_OPSIZE (2);
6350 CHECK_STACK_OVF (1);
6351 n = ip [1];
6352 CHECK_LOCAL (n);
6353 EMIT_NEW_LOCLOAD (cfg, ins, n);
6354 *sp++ = ins;
6355 ip += 2;
6356 break;
6357 case CEE_LDLOCA_S: {
6358 unsigned char *tmp_ip;
6359 CHECK_OPSIZE (2);
6360 CHECK_STACK_OVF (1);
6361 CHECK_LOCAL (ip [1]);
6363 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6364 ip = tmp_ip;
6365 inline_costs += 1;
6366 break;
6369 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6370 *sp++ = ins;
6371 ip += 2;
6372 break;
6374 case CEE_STLOC_S:
6375 CHECK_OPSIZE (2);
6376 CHECK_STACK (1);
6377 --sp;
6378 CHECK_LOCAL (ip [1]);
6379 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6380 UNVERIFIED;
6381 emit_stloc_ir (cfg, sp, header, ip [1]);
6382 ip += 2;
6383 inline_costs += 1;
6384 break;
6385 case CEE_LDNULL:
6386 CHECK_STACK_OVF (1);
6387 EMIT_NEW_PCONST (cfg, ins, NULL);
6388 ins->type = STACK_OBJ;
6389 ++ip;
6390 *sp++ = ins;
6391 break;
6392 case CEE_LDC_I4_M1:
6393 CHECK_STACK_OVF (1);
6394 EMIT_NEW_ICONST (cfg, ins, -1);
6395 ++ip;
6396 *sp++ = ins;
6397 break;
6398 case CEE_LDC_I4_0:
6399 case CEE_LDC_I4_1:
6400 case CEE_LDC_I4_2:
6401 case CEE_LDC_I4_3:
6402 case CEE_LDC_I4_4:
6403 case CEE_LDC_I4_5:
6404 case CEE_LDC_I4_6:
6405 case CEE_LDC_I4_7:
6406 case CEE_LDC_I4_8:
6407 CHECK_STACK_OVF (1);
6408 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6409 ++ip;
6410 *sp++ = ins;
6411 break;
6412 case CEE_LDC_I4_S:
6413 CHECK_OPSIZE (2);
6414 CHECK_STACK_OVF (1);
6415 ++ip;
6416 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6417 ++ip;
6418 *sp++ = ins;
6419 break;
6420 case CEE_LDC_I4:
6421 CHECK_OPSIZE (5);
6422 CHECK_STACK_OVF (1);
6423 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6424 ip += 5;
6425 *sp++ = ins;
6426 break;
6427 case CEE_LDC_I8:
6428 CHECK_OPSIZE (9);
6429 CHECK_STACK_OVF (1);
6430 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6431 ins->type = STACK_I8;
6432 ins->dreg = alloc_dreg (cfg, STACK_I8);
6433 ++ip;
6434 ins->inst_l = (gint64)read64 (ip);
6435 MONO_ADD_INS (bblock, ins);
6436 ip += 8;
6437 *sp++ = ins;
6438 break;
6439 case CEE_LDC_R4: {
6440 float *f;
6441 gboolean use_aotconst = FALSE;
6443 #ifdef TARGET_POWERPC
6444 /* FIXME: Clean this up */
6445 if (cfg->compile_aot)
6446 use_aotconst = TRUE;
6447 #endif
6449 /* FIXME: we should really allocate this only late in the compilation process */
6450 f = mono_domain_alloc (cfg->domain, sizeof (float));
6451 CHECK_OPSIZE (5);
6452 CHECK_STACK_OVF (1);
6454 if (use_aotconst) {
6455 MonoInst *cons;
6456 int dreg;
6458 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6460 dreg = alloc_freg (cfg);
6461 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6462 ins->type = STACK_R8;
6463 } else {
6464 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6465 ins->type = STACK_R8;
6466 ins->dreg = alloc_dreg (cfg, STACK_R8);
6467 ins->inst_p0 = f;
6468 MONO_ADD_INS (bblock, ins);
6470 ++ip;
6471 readr4 (ip, f);
6472 ip += 4;
6473 *sp++ = ins;
6474 break;
6476 case CEE_LDC_R8: {
6477 double *d;
6478 gboolean use_aotconst = FALSE;
6480 #ifdef TARGET_POWERPC
6481 /* FIXME: Clean this up */
6482 if (cfg->compile_aot)
6483 use_aotconst = TRUE;
6484 #endif
6486 /* FIXME: we should really allocate this only late in the compilation process */
6487 d = mono_domain_alloc (cfg->domain, sizeof (double));
6488 CHECK_OPSIZE (9);
6489 CHECK_STACK_OVF (1);
6491 if (use_aotconst) {
6492 MonoInst *cons;
6493 int dreg;
6495 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6497 dreg = alloc_freg (cfg);
6498 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6499 ins->type = STACK_R8;
6500 } else {
6501 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6502 ins->type = STACK_R8;
6503 ins->dreg = alloc_dreg (cfg, STACK_R8);
6504 ins->inst_p0 = d;
6505 MONO_ADD_INS (bblock, ins);
6507 ++ip;
6508 readr8 (ip, d);
6509 ip += 8;
6510 *sp++ = ins;
6511 break;
6513 case CEE_DUP: {
6514 MonoInst *temp, *store;
6515 CHECK_STACK (1);
6516 CHECK_STACK_OVF (1);
6517 sp--;
6518 ins = *sp;
6520 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6521 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6523 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6524 *sp++ = ins;
6526 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6527 *sp++ = ins;
6529 ++ip;
6530 inline_costs += 2;
6531 break;
6533 case CEE_POP:
6534 CHECK_STACK (1);
6535 ip++;
6536 --sp;
6538 #ifdef TARGET_X86
6539 if (sp [0]->type == STACK_R8)
6540 /* we need to pop the value from the x86 FP stack */
6541 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6542 #endif
6543 break;
6544 case CEE_JMP: {
6545 MonoCallInst *call;
6547 INLINE_FAILURE;
6549 CHECK_OPSIZE (5);
6550 if (stack_start != sp)
6551 UNVERIFIED;
6552 token = read32 (ip + 1);
6553 /* FIXME: check the signature matches */
6554 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6556 if (!cmethod || mono_loader_get_last_error ())
6557 LOAD_ERROR;
6559 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6560 GENERIC_SHARING_FAILURE (CEE_JMP);
6562 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6563 CHECK_CFG_EXCEPTION;
6565 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6567 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6568 int i, n;
6570 /* Handle tail calls similarly to calls */
6571 n = fsig->param_count + fsig->hasthis;
6573 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6574 call->method = cmethod;
6575 call->tail_call = TRUE;
6576 call->signature = mono_method_signature (cmethod);
6577 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6578 call->inst.inst_p0 = cmethod;
6579 for (i = 0; i < n; ++i)
6580 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6582 mono_arch_emit_call (cfg, call);
6583 MONO_ADD_INS (bblock, (MonoInst*)call);
6585 #else
6586 for (i = 0; i < num_args; ++i)
6587 /* Prevent arguments from being optimized away */
6588 arg_array [i]->flags |= MONO_INST_VOLATILE;
6590 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6591 ins = (MonoInst*)call;
6592 ins->inst_p0 = cmethod;
6593 MONO_ADD_INS (bblock, ins);
6594 #endif
6596 ip += 5;
6597 start_new_bblock = 1;
6598 break;
6600 case CEE_CALLI:
6601 case CEE_CALL:
6602 case CEE_CALLVIRT: {
6603 MonoInst *addr = NULL;
6604 MonoMethodSignature *fsig = NULL;
6605 int array_rank = 0;
6606 int virtual = *ip == CEE_CALLVIRT;
6607 int calli = *ip == CEE_CALLI;
6608 gboolean pass_imt_from_rgctx = FALSE;
6609 MonoInst *imt_arg = NULL;
6610 gboolean pass_vtable = FALSE;
6611 gboolean pass_mrgctx = FALSE;
6612 MonoInst *vtable_arg = NULL;
6613 gboolean check_this = FALSE;
6614 gboolean supported_tail_call = FALSE;
6616 CHECK_OPSIZE (5);
6617 token = read32 (ip + 1);
6619 if (calli) {
6620 cmethod = NULL;
6621 CHECK_STACK (1);
6622 --sp;
6623 addr = *sp;
6624 if (method->wrapper_type != MONO_WRAPPER_NONE)
6625 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6626 else
6627 fsig = mono_metadata_parse_signature (image, token);
6629 n = fsig->param_count + fsig->hasthis;
6631 if (method->dynamic && fsig->pinvoke) {
6632 MonoInst *args [3];
6635 * This is a call through a function pointer using a pinvoke
6636 * signature. Have to create a wrapper and call that instead.
6637 * FIXME: This is very slow, need to create a wrapper at JIT time
6638 * instead based on the signature.
6640 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6641 EMIT_NEW_PCONST (cfg, args [1], fsig);
6642 args [2] = addr;
6643 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6645 } else {
6646 MonoMethod *cil_method;
6648 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6649 if (constrained_call && cfg->verbose_level > 2)
6650 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6651 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6652 cil_method = cmethod;
6653 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6654 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6655 cfg->generic_sharing_context)) {
6656 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6658 } else if (constrained_call) {
6659 if (cfg->verbose_level > 2)
6660 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6662 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6664 * This is needed since get_method_constrained can't find
6665 * the method in klass representing a type var.
6666 * The type var is guaranteed to be a reference type in this
6667 * case.
6669 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6670 cil_method = cmethod;
6671 g_assert (!cmethod->klass->valuetype);
6672 } else {
6673 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6675 } else {
6676 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6677 cil_method = cmethod;
6680 if (!cmethod || mono_loader_get_last_error ())
6681 LOAD_ERROR;
6682 if (!dont_verify && !cfg->skip_visibility) {
6683 MonoMethod *target_method = cil_method;
6684 if (method->is_inflated) {
6685 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6687 if (!mono_method_can_access_method (method_definition, target_method) &&
6688 !mono_method_can_access_method (method, cil_method))
6689 METHOD_ACCESS_FAILURE;
6692 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6693 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6695 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6696 /* MS.NET seems to silently convert this to a callvirt */
6697 virtual = 1;
6701 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6702 * converts to a callvirt.
6704 * tests/bug-515884.il is an example of this behavior
6706 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6707 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6708 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6709 virtual = 1;
6712 if (!cmethod->klass->inited)
6713 if (!mono_class_init (cmethod->klass))
6714 LOAD_ERROR;
6716 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6717 mini_class_is_system_array (cmethod->klass)) {
6718 array_rank = cmethod->klass->rank;
6719 fsig = mono_method_signature (cmethod);
6720 } else {
6721 fsig = mono_method_signature (cmethod);
6723 if (!fsig)
6724 LOAD_ERROR;
6726 if (fsig->pinvoke) {
6727 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6728 check_for_pending_exc, FALSE);
6729 fsig = mono_method_signature (wrapper);
6730 } else if (constrained_call) {
6731 fsig = mono_method_signature (cmethod);
6732 } else {
6733 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6737 mono_save_token_info (cfg, image, token, cil_method);
6739 n = fsig->param_count + fsig->hasthis;
6741 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6742 if (check_linkdemand (cfg, method, cmethod))
6743 INLINE_FAILURE;
6744 CHECK_CFG_EXCEPTION;
6747 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6748 g_assert_not_reached ();
6751 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6752 UNVERIFIED;
6754 if (!cfg->generic_sharing_context && cmethod)
6755 g_assert (!mono_method_check_context_used (cmethod));
6757 CHECK_STACK (n);
6759 //g_assert (!virtual || fsig->hasthis);
6761 sp -= n;
6763 if (constrained_call) {
6765 * We have the `constrained.' prefix opcode.
6767 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6769 * The type parameter is instantiated as a valuetype,
6770 * but that type doesn't override the method we're
6771 * calling, so we need to box `this'.
6773 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6774 ins->klass = constrained_call;
6775 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6776 CHECK_CFG_EXCEPTION;
6777 } else if (!constrained_call->valuetype) {
6778 int dreg = alloc_ireg_ref (cfg);
6781 * The type parameter is instantiated as a reference
6782 * type. We have a managed pointer on the stack, so
6783 * we need to dereference it here.
6785 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6786 ins->type = STACK_OBJ;
6787 sp [0] = ins;
6788 } else if (cmethod->klass->valuetype)
6789 virtual = 0;
6790 constrained_call = NULL;
6793 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6794 UNVERIFIED;
6797 * If the callee is a shared method, then its static cctor
6798 * might not get called after the call was patched.
6800 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6801 emit_generic_class_init (cfg, cmethod->klass);
6802 CHECK_TYPELOAD (cmethod->klass);
6805 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6806 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6807 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6808 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6809 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6812 * Pass vtable iff target method might
6813 * be shared, which means that sharing
6814 * is enabled for its class and its
6815 * context is sharable (and it's not a
6816 * generic method).
6818 if (sharing_enabled && context_sharable &&
6819 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6820 pass_vtable = TRUE;
6823 if (cmethod && mini_method_get_context (cmethod) &&
6824 mini_method_get_context (cmethod)->method_inst) {
6825 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6826 MonoGenericContext *context = mini_method_get_context (cmethod);
6827 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6829 g_assert (!pass_vtable);
6831 if (sharing_enabled && context_sharable)
6832 pass_mrgctx = TRUE;
6835 if (cfg->generic_sharing_context && cmethod) {
6836 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6838 context_used = mono_method_check_context_used (cmethod);
6840 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6841 /* Generic method interface
6842 calls are resolved via a
6843 helper function and don't
6844 need an imt. */
6845 if (!cmethod_context || !cmethod_context->method_inst)
6846 pass_imt_from_rgctx = TRUE;
6850 * If a shared method calls another
6851 * shared method then the caller must
6852 * have a generic sharing context
6853 * because the magic trampoline
6854 * requires it. FIXME: We shouldn't
6855 * have to force the vtable/mrgctx
6856 * variable here. Instead there
6857 * should be a flag in the cfg to
6858 * request a generic sharing context.
6860 if (context_used &&
6861 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6862 mono_get_vtable_var (cfg);
6865 if (pass_vtable) {
6866 if (context_used) {
6867 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6868 } else {
6869 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6871 CHECK_TYPELOAD (cmethod->klass);
6872 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6876 if (pass_mrgctx) {
6877 g_assert (!vtable_arg);
6879 if (!cfg->compile_aot) {
6881 * emit_get_rgctx_method () calls mono_class_vtable () so check
6882 * for type load errors before.
6884 mono_class_setup_vtable (cmethod->klass);
6885 CHECK_TYPELOAD (cmethod->klass);
6888 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6890 /* !marshalbyref is needed to properly handle generic methods + remoting */
6891 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6892 MONO_METHOD_IS_FINAL (cmethod)) &&
6893 !cmethod->klass->marshalbyref) {
6894 if (virtual)
6895 check_this = TRUE;
6896 virtual = 0;
6900 if (pass_imt_from_rgctx) {
6901 g_assert (!pass_vtable);
6902 g_assert (cmethod);
6904 imt_arg = emit_get_rgctx_method (cfg, context_used,
6905 cmethod, MONO_RGCTX_INFO_METHOD);
6908 if (check_this)
6909 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6911 /* Calling virtual generic methods */
6912 if (cmethod && virtual &&
6913 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6914 !(MONO_METHOD_IS_FINAL (cmethod) &&
6915 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6916 mono_method_signature (cmethod)->generic_param_count) {
6917 MonoInst *this_temp, *this_arg_temp, *store;
6918 MonoInst *iargs [4];
6920 g_assert (mono_method_signature (cmethod)->is_inflated);
6922 /* Prevent inlining of methods that contain indirect calls */
6923 INLINE_FAILURE;
6925 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6926 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6927 g_assert (!imt_arg);
6928 if (!context_used)
6929 g_assert (cmethod->is_inflated);
6930 imt_arg = emit_get_rgctx_method (cfg, context_used,
6931 cmethod, MONO_RGCTX_INFO_METHOD);
6932 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6933 } else
6934 #endif
6936 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6937 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6938 MONO_ADD_INS (bblock, store);
6940 /* FIXME: This should be a managed pointer */
6941 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6943 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6944 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6945 cmethod, MONO_RGCTX_INFO_METHOD);
6946 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6947 addr = mono_emit_jit_icall (cfg,
6948 mono_helper_compile_generic_method, iargs);
6950 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6952 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6955 if (!MONO_TYPE_IS_VOID (fsig->ret))
6956 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6958 CHECK_CFG_EXCEPTION;
6960 ip += 5;
6961 ins_flag = 0;
6962 break;
6966 * Implement a workaround for the inherent races involved in locking:
6967 * Monitor.Enter ()
6968 * try {
6969 * } finally {
6970 * Monitor.Exit ()
6972 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6973 * try block, the Exit () won't be executed, see:
6974 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6975 * To work around this, we extend such try blocks to include the last x bytes
6976 * of the Monitor.Enter () call.
6978 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6979 MonoBasicBlock *tbb;
6981 GET_BBLOCK (cfg, tbb, ip + 5);
6983 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6984 * from Monitor.Enter like ArgumentNullException.
6986 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6987 /* Mark this bblock as needing to be extended */
6988 tbb->extend_try_block = TRUE;
6992 /* Conversion to a JIT intrinsic */
6993 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6994 bblock = cfg->cbb;
6995 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6996 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6997 *sp = ins;
6998 sp++;
7001 CHECK_CFG_EXCEPTION;
7003 ip += 5;
7004 ins_flag = 0;
7005 break;
7008 /* Inlining */
7009 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7010 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7011 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7012 !g_list_find (dont_inline, cmethod)) {
7013 int costs;
7014 gboolean always = FALSE;
7016 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7017 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7018 /* Prevent inlining of methods that call wrappers */
7019 INLINE_FAILURE;
7020 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7021 always = TRUE;
7024 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7025 ip += 5;
7026 cfg->real_offset += 5;
7027 bblock = cfg->cbb;
7029 if (!MONO_TYPE_IS_VOID (fsig->ret))
7030 /* *sp is already set by inline_method */
7031 sp++;
7033 inline_costs += costs;
7034 ins_flag = 0;
7035 break;
7039 inline_costs += 10 * num_calls++;
7041 /* Tail recursion elimination */
7042 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7043 gboolean has_vtargs = FALSE;
7044 int i;
7046 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7047 INLINE_FAILURE;
7049 /* keep it simple */
7050 for (i = fsig->param_count - 1; i >= 0; i--) {
7051 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7052 has_vtargs = TRUE;
7055 if (!has_vtargs) {
7056 for (i = 0; i < n; ++i)
7057 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7058 MONO_INST_NEW (cfg, ins, OP_BR);
7059 MONO_ADD_INS (bblock, ins);
7060 tblock = start_bblock->out_bb [0];
7061 link_bblock (cfg, bblock, tblock);
7062 ins->inst_target_bb = tblock;
7063 start_new_bblock = 1;
7065 /* skip the CEE_RET, too */
7066 if (ip_in_bb (cfg, bblock, ip + 5))
7067 ip += 6;
7068 else
7069 ip += 5;
7071 ins_flag = 0;
7072 break;
7076 /* Generic sharing */
7077 /* FIXME: only do this for generic methods if
7078 they are not shared! */
7079 if (context_used && !imt_arg && !array_rank &&
7080 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7081 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7082 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7083 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7084 INLINE_FAILURE;
7086 g_assert (cfg->generic_sharing_context && cmethod);
7087 g_assert (!addr);
7090 * We are compiling a call to a
7091 * generic method from shared code,
7092 * which means that we have to look up
7093 * the method in the rgctx and do an
7094 * indirect call.
7096 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7099 /* Indirect calls */
7100 if (addr) {
7101 g_assert (!imt_arg);
7103 if (*ip == CEE_CALL)
7104 g_assert (context_used);
7105 else if (*ip == CEE_CALLI)
7106 g_assert (!vtable_arg);
7107 else
7108 /* FIXME: what the hell is this??? */
7109 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7110 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7112 /* Prevent inlining of methods with indirect calls */
7113 INLINE_FAILURE;
7115 if (vtable_arg) {
7116 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7117 } else {
7118 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7120 * Instead of emitting an indirect call, emit a direct call
7121 * with the contents of the aotconst as the patch info.
7123 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7124 NULLIFY_INS (addr);
7125 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7126 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7127 NULLIFY_INS (addr);
7128 } else {
7129 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7132 if (!MONO_TYPE_IS_VOID (fsig->ret))
7133 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7135 CHECK_CFG_EXCEPTION;
7137 ip += 5;
7138 ins_flag = 0;
7139 break;
7142 /* Array methods */
7143 if (array_rank) {
7144 MonoInst *addr;
7146 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7147 MonoInst *val = sp [fsig->param_count];
7149 if (val->type == STACK_OBJ) {
7150 MonoInst *iargs [2];
7152 iargs [0] = sp [0];
7153 iargs [1] = val;
7155 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7158 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7159 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7160 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7161 emit_write_barrier (cfg, addr, val, 0);
7162 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7163 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7167 *sp++ = ins;
7168 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7169 if (!cmethod->klass->element_class->valuetype && !readonly)
7170 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7171 CHECK_TYPELOAD (cmethod->klass);
7173 readonly = FALSE;
7174 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7175 *sp++ = addr;
7176 } else {
7177 g_assert_not_reached ();
7180 CHECK_CFG_EXCEPTION;
7182 ip += 5;
7183 ins_flag = 0;
7184 break;
7187 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7188 if (ins) {
7189 if (!MONO_TYPE_IS_VOID (fsig->ret))
7190 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7192 CHECK_CFG_EXCEPTION;
7194 ip += 5;
7195 ins_flag = 0;
7196 break;
7199 /* Tail prefix / tail call optimization */
7201 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7202 /* FIXME: runtime generic context pointer for jumps? */
7203 /* FIXME: handle this for generic sharing eventually */
7204 supported_tail_call = cmethod &&
7205 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7206 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7207 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7209 if (supported_tail_call) {
7210 MonoCallInst *call;
7212 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7213 INLINE_FAILURE;
7215 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7217 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7218 /* Handle tail calls similarly to calls */
7219 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7220 #else
7221 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7222 call->tail_call = TRUE;
7223 call->method = cmethod;
7224 call->signature = mono_method_signature (cmethod);
7227 * We implement tail calls by storing the actual arguments into the
7228 * argument variables, then emitting a CEE_JMP.
7230 for (i = 0; i < n; ++i) {
7231 /* Prevent argument from being register allocated */
7232 arg_array [i]->flags |= MONO_INST_VOLATILE;
7233 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7235 #endif
7237 ins = (MonoInst*)call;
7238 ins->inst_p0 = cmethod;
7239 ins->inst_p1 = arg_array [0];
7240 MONO_ADD_INS (bblock, ins);
7241 link_bblock (cfg, bblock, end_bblock);
7242 start_new_bblock = 1;
7244 CHECK_CFG_EXCEPTION;
7246 ip += 5;
7247 ins_flag = 0;
7249 // FIXME: Eliminate unreachable epilogs
7252 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7253 * only reachable from this call.
7255 GET_BBLOCK (cfg, tblock, ip);
7256 if (tblock == bblock || tblock->in_count == 0)
7257 ip += 1;
7258 break;
7261 /* Common call */
7262 INLINE_FAILURE;
7263 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7264 imt_arg, vtable_arg);
7266 if (!MONO_TYPE_IS_VOID (fsig->ret))
7267 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7269 CHECK_CFG_EXCEPTION;
7271 ip += 5;
7272 ins_flag = 0;
7273 break;
7275 case CEE_RET:
7276 if (cfg->method != method) {
7277 /* return from inlined method */
7279 * If in_count == 0, that means the ret is unreachable due to
7280 * being preceeded by a throw. In that case, inline_method () will
7281 * handle setting the return value
7282 * (test case: test_0_inline_throw ()).
7284 if (return_var && cfg->cbb->in_count) {
7285 MonoInst *store;
7286 CHECK_STACK (1);
7287 --sp;
7288 //g_assert (returnvar != -1);
7289 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7290 cfg->ret_var_set = TRUE;
7292 } else {
7293 if (cfg->ret) {
7294 MonoType *ret_type = mono_method_signature (method)->ret;
7296 if (seq_points) {
7298 * Place a seq point here too even through the IL stack is not
7299 * empty, so a step over on
7300 * call <FOO>
7301 * ret
7302 * will work correctly.
7304 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7305 MONO_ADD_INS (cfg->cbb, ins);
7308 g_assert (!return_var);
7309 CHECK_STACK (1);
7310 --sp;
7312 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7313 UNVERIFIED;
7315 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7316 MonoInst *ret_addr;
7318 if (!cfg->vret_addr) {
7319 MonoInst *ins;
7321 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7322 } else {
7323 EMIT_NEW_RETLOADA (cfg, ret_addr);
7325 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7326 ins->klass = mono_class_from_mono_type (ret_type);
7328 } else {
7329 #ifdef MONO_ARCH_SOFT_FLOAT
7330 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7331 MonoInst *iargs [1];
7332 MonoInst *conv;
7334 iargs [0] = *sp;
7335 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7336 mono_arch_emit_setret (cfg, method, conv);
7337 } else {
7338 mono_arch_emit_setret (cfg, method, *sp);
7340 #else
7341 mono_arch_emit_setret (cfg, method, *sp);
7342 #endif
7346 if (sp != stack_start)
7347 UNVERIFIED;
7348 MONO_INST_NEW (cfg, ins, OP_BR);
7349 ip++;
7350 ins->inst_target_bb = end_bblock;
7351 MONO_ADD_INS (bblock, ins);
7352 link_bblock (cfg, bblock, end_bblock);
7353 start_new_bblock = 1;
7354 break;
7355 case CEE_BR_S:
7356 CHECK_OPSIZE (2);
7357 MONO_INST_NEW (cfg, ins, OP_BR);
7358 ip++;
7359 target = ip + 1 + (signed char)(*ip);
7360 ++ip;
7361 GET_BBLOCK (cfg, tblock, target);
7362 link_bblock (cfg, bblock, tblock);
7363 ins->inst_target_bb = tblock;
7364 if (sp != stack_start) {
7365 handle_stack_args (cfg, stack_start, sp - stack_start);
7366 sp = stack_start;
7367 CHECK_UNVERIFIABLE (cfg);
7369 MONO_ADD_INS (bblock, ins);
7370 start_new_bblock = 1;
7371 inline_costs += BRANCH_COST;
7372 break;
7373 case CEE_BEQ_S:
7374 case CEE_BGE_S:
7375 case CEE_BGT_S:
7376 case CEE_BLE_S:
7377 case CEE_BLT_S:
7378 case CEE_BNE_UN_S:
7379 case CEE_BGE_UN_S:
7380 case CEE_BGT_UN_S:
7381 case CEE_BLE_UN_S:
7382 case CEE_BLT_UN_S:
7383 CHECK_OPSIZE (2);
7384 CHECK_STACK (2);
7385 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7386 ip++;
7387 target = ip + 1 + *(signed char*)ip;
7388 ip++;
7390 ADD_BINCOND (NULL);
7392 sp = stack_start;
7393 inline_costs += BRANCH_COST;
7394 break;
7395 case CEE_BR:
7396 CHECK_OPSIZE (5);
7397 MONO_INST_NEW (cfg, ins, OP_BR);
7398 ip++;
7400 target = ip + 4 + (gint32)read32(ip);
7401 ip += 4;
7402 GET_BBLOCK (cfg, tblock, target);
7403 link_bblock (cfg, bblock, tblock);
7404 ins->inst_target_bb = tblock;
7405 if (sp != stack_start) {
7406 handle_stack_args (cfg, stack_start, sp - stack_start);
7407 sp = stack_start;
7408 CHECK_UNVERIFIABLE (cfg);
7411 MONO_ADD_INS (bblock, ins);
7413 start_new_bblock = 1;
7414 inline_costs += BRANCH_COST;
7415 break;
7416 case CEE_BRFALSE_S:
7417 case CEE_BRTRUE_S:
7418 case CEE_BRFALSE:
7419 case CEE_BRTRUE: {
7420 MonoInst *cmp;
7421 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7422 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7423 guint32 opsize = is_short ? 1 : 4;
7425 CHECK_OPSIZE (opsize);
7426 CHECK_STACK (1);
7427 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7428 UNVERIFIED;
7429 ip ++;
7430 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7431 ip += opsize;
7433 sp--;
7435 GET_BBLOCK (cfg, tblock, target);
7436 link_bblock (cfg, bblock, tblock);
7437 GET_BBLOCK (cfg, tblock, ip);
7438 link_bblock (cfg, bblock, tblock);
7440 if (sp != stack_start) {
7441 handle_stack_args (cfg, stack_start, sp - stack_start);
7442 CHECK_UNVERIFIABLE (cfg);
7445 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7446 cmp->sreg1 = sp [0]->dreg;
7447 type_from_op (cmp, sp [0], NULL);
7448 CHECK_TYPE (cmp);
7450 #if SIZEOF_REGISTER == 4
7451 if (cmp->opcode == OP_LCOMPARE_IMM) {
7452 /* Convert it to OP_LCOMPARE */
7453 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7454 ins->type = STACK_I8;
7455 ins->dreg = alloc_dreg (cfg, STACK_I8);
7456 ins->inst_l = 0;
7457 MONO_ADD_INS (bblock, ins);
7458 cmp->opcode = OP_LCOMPARE;
7459 cmp->sreg2 = ins->dreg;
7461 #endif
7462 MONO_ADD_INS (bblock, cmp);
7464 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7465 type_from_op (ins, sp [0], NULL);
7466 MONO_ADD_INS (bblock, ins);
7467 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7468 GET_BBLOCK (cfg, tblock, target);
7469 ins->inst_true_bb = tblock;
7470 GET_BBLOCK (cfg, tblock, ip);
7471 ins->inst_false_bb = tblock;
7472 start_new_bblock = 2;
7474 sp = stack_start;
7475 inline_costs += BRANCH_COST;
7476 break;
7478 case CEE_BEQ:
7479 case CEE_BGE:
7480 case CEE_BGT:
7481 case CEE_BLE:
7482 case CEE_BLT:
7483 case CEE_BNE_UN:
7484 case CEE_BGE_UN:
7485 case CEE_BGT_UN:
7486 case CEE_BLE_UN:
7487 case CEE_BLT_UN:
7488 CHECK_OPSIZE (5);
7489 CHECK_STACK (2);
7490 MONO_INST_NEW (cfg, ins, *ip);
7491 ip++;
7492 target = ip + 4 + (gint32)read32(ip);
7493 ip += 4;
7495 ADD_BINCOND (NULL);
7497 sp = stack_start;
7498 inline_costs += BRANCH_COST;
7499 break;
7500 case CEE_SWITCH: {
7501 MonoInst *src1;
7502 MonoBasicBlock **targets;
7503 MonoBasicBlock *default_bblock;
7504 MonoJumpInfoBBTable *table;
7505 int offset_reg = alloc_preg (cfg);
7506 int target_reg = alloc_preg (cfg);
7507 int table_reg = alloc_preg (cfg);
7508 int sum_reg = alloc_preg (cfg);
7509 gboolean use_op_switch;
7511 CHECK_OPSIZE (5);
7512 CHECK_STACK (1);
7513 n = read32 (ip + 1);
7514 --sp;
7515 src1 = sp [0];
7516 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7517 UNVERIFIED;
7519 ip += 5;
7520 CHECK_OPSIZE (n * sizeof (guint32));
7521 target = ip + n * sizeof (guint32);
7523 GET_BBLOCK (cfg, default_bblock, target);
7524 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7526 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7527 for (i = 0; i < n; ++i) {
7528 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7529 targets [i] = tblock;
7530 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7531 ip += 4;
7534 if (sp != stack_start) {
7536 * Link the current bb with the targets as well, so handle_stack_args
7537 * will set their in_stack correctly.
7539 link_bblock (cfg, bblock, default_bblock);
7540 for (i = 0; i < n; ++i)
7541 link_bblock (cfg, bblock, targets [i]);
7543 handle_stack_args (cfg, stack_start, sp - stack_start);
7544 sp = stack_start;
7545 CHECK_UNVERIFIABLE (cfg);
7548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7550 bblock = cfg->cbb;
7552 for (i = 0; i < n; ++i)
7553 link_bblock (cfg, bblock, targets [i]);
7555 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7556 table->table = targets;
7557 table->table_size = n;
7559 use_op_switch = FALSE;
7560 #ifdef TARGET_ARM
7561 /* ARM implements SWITCH statements differently */
7562 /* FIXME: Make it use the generic implementation */
7563 if (!cfg->compile_aot)
7564 use_op_switch = TRUE;
7565 #endif
7567 if (COMPILE_LLVM (cfg))
7568 use_op_switch = TRUE;
7570 cfg->cbb->has_jump_table = 1;
7572 if (use_op_switch) {
7573 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7574 ins->sreg1 = src1->dreg;
7575 ins->inst_p0 = table;
7576 ins->inst_many_bb = targets;
7577 ins->klass = GUINT_TO_POINTER (n);
7578 MONO_ADD_INS (cfg->cbb, ins);
7579 } else {
7580 if (sizeof (gpointer) == 8)
7581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7582 else
7583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7585 #if SIZEOF_REGISTER == 8
7586 /* The upper word might not be zero, and we add it to a 64 bit address later */
7587 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7588 #endif
7590 if (cfg->compile_aot) {
7591 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7592 } else {
7593 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7594 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7595 ins->inst_p0 = table;
7596 ins->dreg = table_reg;
7597 MONO_ADD_INS (cfg->cbb, ins);
7600 /* FIXME: Use load_memindex */
7601 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7603 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7605 start_new_bblock = 1;
7606 inline_costs += (BRANCH_COST * 2);
7607 break;
7609 case CEE_LDIND_I1:
7610 case CEE_LDIND_U1:
7611 case CEE_LDIND_I2:
7612 case CEE_LDIND_U2:
7613 case CEE_LDIND_I4:
7614 case CEE_LDIND_U4:
7615 case CEE_LDIND_I8:
7616 case CEE_LDIND_I:
7617 case CEE_LDIND_R4:
7618 case CEE_LDIND_R8:
7619 case CEE_LDIND_REF:
7620 CHECK_STACK (1);
7621 --sp;
7623 switch (*ip) {
7624 case CEE_LDIND_R4:
7625 case CEE_LDIND_R8:
7626 dreg = alloc_freg (cfg);
7627 break;
7628 case CEE_LDIND_I8:
7629 dreg = alloc_lreg (cfg);
7630 break;
7631 case CEE_LDIND_REF:
7632 dreg = alloc_ireg_ref (cfg);
7633 break;
7634 default:
7635 dreg = alloc_preg (cfg);
7638 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7639 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7640 ins->flags |= ins_flag;
7641 ins_flag = 0;
7642 MONO_ADD_INS (bblock, ins);
7643 *sp++ = ins;
7644 if (ins->flags & MONO_INST_VOLATILE) {
7645 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7646 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7647 emit_memory_barrier (cfg, FullBarrier);
7649 ++ip;
7650 break;
7651 case CEE_STIND_REF:
7652 case CEE_STIND_I1:
7653 case CEE_STIND_I2:
7654 case CEE_STIND_I4:
7655 case CEE_STIND_I8:
7656 case CEE_STIND_R4:
7657 case CEE_STIND_R8:
7658 case CEE_STIND_I:
7659 CHECK_STACK (2);
7660 sp -= 2;
7662 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7663 ins->flags |= ins_flag;
7664 ins_flag = 0;
7666 if (ins->flags & MONO_INST_VOLATILE) {
7667 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7668 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7669 emit_memory_barrier (cfg, FullBarrier);
7672 MONO_ADD_INS (bblock, ins);
7674 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7675 emit_write_barrier (cfg, sp [0], sp [1], -1);
7677 inline_costs += 1;
7678 ++ip;
7679 break;
7681 case CEE_MUL:
7682 CHECK_STACK (2);
7684 MONO_INST_NEW (cfg, ins, (*ip));
7685 sp -= 2;
7686 ins->sreg1 = sp [0]->dreg;
7687 ins->sreg2 = sp [1]->dreg;
7688 type_from_op (ins, sp [0], sp [1]);
7689 CHECK_TYPE (ins);
7690 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7692 /* Use the immediate opcodes if possible */
7693 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7694 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7695 if (imm_opcode != -1) {
7696 ins->opcode = imm_opcode;
7697 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7698 ins->sreg2 = -1;
7700 sp [1]->opcode = OP_NOP;
7704 MONO_ADD_INS ((cfg)->cbb, (ins));
7706 *sp++ = mono_decompose_opcode (cfg, ins);
7707 ip++;
7708 break;
7709 case CEE_ADD:
7710 case CEE_SUB:
7711 case CEE_DIV:
7712 case CEE_DIV_UN:
7713 case CEE_REM:
7714 case CEE_REM_UN:
7715 case CEE_AND:
7716 case CEE_OR:
7717 case CEE_XOR:
7718 case CEE_SHL:
7719 case CEE_SHR:
7720 case CEE_SHR_UN:
7721 CHECK_STACK (2);
7723 MONO_INST_NEW (cfg, ins, (*ip));
7724 sp -= 2;
7725 ins->sreg1 = sp [0]->dreg;
7726 ins->sreg2 = sp [1]->dreg;
7727 type_from_op (ins, sp [0], sp [1]);
7728 CHECK_TYPE (ins);
7729 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7730 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7732 /* FIXME: Pass opcode to is_inst_imm */
7734 /* Use the immediate opcodes if possible */
7735 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7736 int imm_opcode;
7738 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7739 if (imm_opcode != -1) {
7740 ins->opcode = imm_opcode;
7741 if (sp [1]->opcode == OP_I8CONST) {
7742 #if SIZEOF_REGISTER == 8
7743 ins->inst_imm = sp [1]->inst_l;
7744 #else
7745 ins->inst_ls_word = sp [1]->inst_ls_word;
7746 ins->inst_ms_word = sp [1]->inst_ms_word;
7747 #endif
7749 else
7750 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7751 ins->sreg2 = -1;
7753 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7754 if (sp [1]->next == NULL)
7755 sp [1]->opcode = OP_NOP;
7758 MONO_ADD_INS ((cfg)->cbb, (ins));
7760 *sp++ = mono_decompose_opcode (cfg, ins);
7761 ip++;
7762 break;
7763 case CEE_NEG:
7764 case CEE_NOT:
7765 case CEE_CONV_I1:
7766 case CEE_CONV_I2:
7767 case CEE_CONV_I4:
7768 case CEE_CONV_R4:
7769 case CEE_CONV_R8:
7770 case CEE_CONV_U4:
7771 case CEE_CONV_I8:
7772 case CEE_CONV_U8:
7773 case CEE_CONV_OVF_I8:
7774 case CEE_CONV_OVF_U8:
7775 case CEE_CONV_R_UN:
7776 CHECK_STACK (1);
7778 /* Special case this earlier so we have long constants in the IR */
7779 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7780 int data = sp [-1]->inst_c0;
7781 sp [-1]->opcode = OP_I8CONST;
7782 sp [-1]->type = STACK_I8;
7783 #if SIZEOF_REGISTER == 8
7784 if ((*ip) == CEE_CONV_U8)
7785 sp [-1]->inst_c0 = (guint32)data;
7786 else
7787 sp [-1]->inst_c0 = data;
7788 #else
7789 sp [-1]->inst_ls_word = data;
7790 if ((*ip) == CEE_CONV_U8)
7791 sp [-1]->inst_ms_word = 0;
7792 else
7793 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7794 #endif
7795 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7797 else {
7798 ADD_UNOP (*ip);
7800 ip++;
7801 break;
7802 case CEE_CONV_OVF_I4:
7803 case CEE_CONV_OVF_I1:
7804 case CEE_CONV_OVF_I2:
7805 case CEE_CONV_OVF_I:
7806 case CEE_CONV_OVF_U:
7807 CHECK_STACK (1);
7809 if (sp [-1]->type == STACK_R8) {
7810 ADD_UNOP (CEE_CONV_OVF_I8);
7811 ADD_UNOP (*ip);
7812 } else {
7813 ADD_UNOP (*ip);
7815 ip++;
7816 break;
7817 case CEE_CONV_OVF_U1:
7818 case CEE_CONV_OVF_U2:
7819 case CEE_CONV_OVF_U4:
7820 CHECK_STACK (1);
7822 if (sp [-1]->type == STACK_R8) {
7823 ADD_UNOP (CEE_CONV_OVF_U8);
7824 ADD_UNOP (*ip);
7825 } else {
7826 ADD_UNOP (*ip);
7828 ip++;
7829 break;
7830 case CEE_CONV_OVF_I1_UN:
7831 case CEE_CONV_OVF_I2_UN:
7832 case CEE_CONV_OVF_I4_UN:
7833 case CEE_CONV_OVF_I8_UN:
7834 case CEE_CONV_OVF_U1_UN:
7835 case CEE_CONV_OVF_U2_UN:
7836 case CEE_CONV_OVF_U4_UN:
7837 case CEE_CONV_OVF_U8_UN:
7838 case CEE_CONV_OVF_I_UN:
7839 case CEE_CONV_OVF_U_UN:
7840 case CEE_CONV_U2:
7841 case CEE_CONV_U1:
7842 case CEE_CONV_I:
7843 case CEE_CONV_U:
7844 CHECK_STACK (1);
7845 ADD_UNOP (*ip);
7846 CHECK_CFG_EXCEPTION;
7847 ip++;
7848 break;
7849 case CEE_ADD_OVF:
7850 case CEE_ADD_OVF_UN:
7851 case CEE_MUL_OVF:
7852 case CEE_MUL_OVF_UN:
7853 case CEE_SUB_OVF:
7854 case CEE_SUB_OVF_UN:
7855 CHECK_STACK (2);
7856 ADD_BINOP (*ip);
7857 ip++;
7858 break;
7859 case CEE_CPOBJ:
7860 CHECK_OPSIZE (5);
7861 CHECK_STACK (2);
7862 token = read32 (ip + 1);
7863 klass = mini_get_class (method, token, generic_context);
7864 CHECK_TYPELOAD (klass);
7865 sp -= 2;
7866 if (generic_class_is_reference_type (cfg, klass)) {
7867 MonoInst *store, *load;
7868 int dreg = alloc_ireg_ref (cfg);
7870 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7871 load->flags |= ins_flag;
7872 MONO_ADD_INS (cfg->cbb, load);
7874 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7875 store->flags |= ins_flag;
7876 MONO_ADD_INS (cfg->cbb, store);
7878 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7879 emit_write_barrier (cfg, sp [0], sp [1], -1);
7880 } else {
7881 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7883 ins_flag = 0;
7884 ip += 5;
7885 break;
7886 case CEE_LDOBJ: {
7887 int loc_index = -1;
7888 int stloc_len = 0;
7890 CHECK_OPSIZE (5);
7891 CHECK_STACK (1);
7892 --sp;
7893 token = read32 (ip + 1);
7894 klass = mini_get_class (method, token, generic_context);
7895 CHECK_TYPELOAD (klass);
7897 /* Optimize the common ldobj+stloc combination */
7898 switch (ip [5]) {
7899 case CEE_STLOC_S:
7900 loc_index = ip [6];
7901 stloc_len = 2;
7902 break;
7903 case CEE_STLOC_0:
7904 case CEE_STLOC_1:
7905 case CEE_STLOC_2:
7906 case CEE_STLOC_3:
7907 loc_index = ip [5] - CEE_STLOC_0;
7908 stloc_len = 1;
7909 break;
7910 default:
7911 break;
7914 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7915 CHECK_LOCAL (loc_index);
7917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7918 ins->dreg = cfg->locals [loc_index]->dreg;
7919 ip += 5;
7920 ip += stloc_len;
7921 break;
7924 /* Optimize the ldobj+stobj combination */
7925 /* The reference case ends up being a load+store anyway */
7926 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7927 CHECK_STACK (1);
7929 sp --;
7931 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7933 ip += 5 + 5;
7934 ins_flag = 0;
7935 break;
7938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7939 *sp++ = ins;
7941 ip += 5;
7942 ins_flag = 0;
7943 inline_costs += 1;
7944 break;
7946 case CEE_LDSTR:
7947 CHECK_STACK_OVF (1);
7948 CHECK_OPSIZE (5);
7949 n = read32 (ip + 1);
7951 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7952 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7953 ins->type = STACK_OBJ;
7954 *sp = ins;
7956 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7957 MonoInst *iargs [1];
7959 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7960 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7961 } else {
7962 if (cfg->opt & MONO_OPT_SHARED) {
7963 MonoInst *iargs [3];
7965 if (cfg->compile_aot) {
7966 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7968 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7969 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7970 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7971 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7972 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7973 } else {
7974 if (bblock->out_of_line) {
7975 MonoInst *iargs [2];
7977 if (image == mono_defaults.corlib) {
7979 * Avoid relocations in AOT and save some space by using a
7980 * version of helper_ldstr specialized to mscorlib.
7982 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7983 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7984 } else {
7985 /* Avoid creating the string object */
7986 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7987 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7988 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7991 else
7992 if (cfg->compile_aot) {
7993 NEW_LDSTRCONST (cfg, ins, image, n);
7994 *sp = ins;
7995 MONO_ADD_INS (bblock, ins);
7997 else {
7998 NEW_PCONST (cfg, ins, NULL);
7999 ins->type = STACK_OBJ;
8000 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8001 if (!ins->inst_p0)
8002 OUT_OF_MEMORY_FAILURE;
8004 *sp = ins;
8005 MONO_ADD_INS (bblock, ins);
8010 sp++;
8011 ip += 5;
8012 break;
8013 case CEE_NEWOBJ: {
8014 MonoInst *iargs [2];
8015 MonoMethodSignature *fsig;
8016 MonoInst this_ins;
8017 MonoInst *alloc;
8018 MonoInst *vtable_arg = NULL;
8020 CHECK_OPSIZE (5);
8021 token = read32 (ip + 1);
8022 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8023 if (!cmethod || mono_loader_get_last_error ())
8024 LOAD_ERROR;
8025 fsig = mono_method_get_signature (cmethod, image, token);
8026 if (!fsig)
8027 LOAD_ERROR;
8029 mono_save_token_info (cfg, image, token, cmethod);
8031 if (!mono_class_init (cmethod->klass))
8032 LOAD_ERROR;
8034 if (cfg->generic_sharing_context)
8035 context_used = mono_method_check_context_used (cmethod);
8037 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8038 if (check_linkdemand (cfg, method, cmethod))
8039 INLINE_FAILURE;
8040 CHECK_CFG_EXCEPTION;
8041 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8042 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8045 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8046 emit_generic_class_init (cfg, cmethod->klass);
8047 CHECK_TYPELOAD (cmethod->klass);
8050 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8051 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8052 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8053 mono_class_vtable (cfg->domain, cmethod->klass);
8054 CHECK_TYPELOAD (cmethod->klass);
8056 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8057 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8058 } else {
8059 if (context_used) {
8060 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8061 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8062 } else {
8063 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8065 CHECK_TYPELOAD (cmethod->klass);
8066 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8071 n = fsig->param_count;
8072 CHECK_STACK (n);
8075 * Generate smaller code for the common newobj <exception> instruction in
8076 * argument checking code.
8078 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8079 is_exception_class (cmethod->klass) && n <= 2 &&
8080 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8081 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8082 MonoInst *iargs [3];
8084 g_assert (!vtable_arg);
8086 sp -= n;
8088 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8089 switch (n) {
8090 case 0:
8091 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8092 break;
8093 case 1:
8094 iargs [1] = sp [0];
8095 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8096 break;
8097 case 2:
8098 iargs [1] = sp [0];
8099 iargs [2] = sp [1];
8100 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8101 break;
8102 default:
8103 g_assert_not_reached ();
8106 ip += 5;
8107 inline_costs += 5;
8108 break;
8111 /* move the args to allow room for 'this' in the first position */
8112 while (n--) {
8113 --sp;
8114 sp [1] = sp [0];
8117 /* check_call_signature () requires sp[0] to be set */
8118 this_ins.type = STACK_OBJ;
8119 sp [0] = &this_ins;
8120 if (check_call_signature (cfg, fsig, sp))
8121 UNVERIFIED;
8123 iargs [0] = NULL;
8125 if (mini_class_is_system_array (cmethod->klass)) {
8126 g_assert (!vtable_arg);
8128 *sp = emit_get_rgctx_method (cfg, context_used,
8129 cmethod, MONO_RGCTX_INFO_METHOD);
8131 /* Avoid varargs in the common case */
8132 if (fsig->param_count == 1)
8133 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8134 else if (fsig->param_count == 2)
8135 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8136 else if (fsig->param_count == 3)
8137 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8138 else
8139 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8140 } else if (cmethod->string_ctor) {
8141 g_assert (!context_used);
8142 g_assert (!vtable_arg);
8143 /* we simply pass a null pointer */
8144 EMIT_NEW_PCONST (cfg, *sp, NULL);
8145 /* now call the string ctor */
8146 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8147 } else {
8148 MonoInst* callvirt_this_arg = NULL;
8150 if (cmethod->klass->valuetype) {
8151 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8152 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8153 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8155 alloc = NULL;
8158 * The code generated by mini_emit_virtual_call () expects
8159 * iargs [0] to be a boxed instance, but luckily the vcall
8160 * will be transformed into a normal call there.
8162 } else if (context_used) {
8163 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8164 *sp = alloc;
8165 } else {
8166 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8168 CHECK_TYPELOAD (cmethod->klass);
8171 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8172 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8173 * As a workaround, we call class cctors before allocating objects.
8175 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8176 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8177 if (cfg->verbose_level > 2)
8178 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8179 class_inits = g_slist_prepend (class_inits, vtable);
8182 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8183 *sp = alloc;
8185 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8187 if (alloc)
8188 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8190 /* Now call the actual ctor */
8191 /* Avoid virtual calls to ctors if possible */
8192 if (cmethod->klass->marshalbyref)
8193 callvirt_this_arg = sp [0];
8196 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8197 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8198 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8199 *sp = ins;
8200 sp++;
8203 CHECK_CFG_EXCEPTION;
8204 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8205 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8206 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8207 !g_list_find (dont_inline, cmethod)) {
8208 int costs;
8210 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8211 cfg->real_offset += 5;
8212 bblock = cfg->cbb;
8214 inline_costs += costs - 5;
8215 } else {
8216 INLINE_FAILURE;
8217 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8219 } else if (context_used &&
8220 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8221 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8222 MonoInst *cmethod_addr;
8224 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8225 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8227 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8228 } else {
8229 INLINE_FAILURE;
8230 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8231 callvirt_this_arg, NULL, vtable_arg);
8235 if (alloc == NULL) {
8236 /* Valuetype */
8237 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8238 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8239 *sp++= ins;
8241 else
8242 *sp++ = alloc;
8244 ip += 5;
8245 inline_costs += 5;
8246 break;
8248 case CEE_CASTCLASS:
8249 CHECK_STACK (1);
8250 --sp;
8251 CHECK_OPSIZE (5);
8252 token = read32 (ip + 1);
8253 klass = mini_get_class (method, token, generic_context);
8254 CHECK_TYPELOAD (klass);
8255 if (sp [0]->type != STACK_OBJ)
8256 UNVERIFIED;
8258 if (cfg->generic_sharing_context)
8259 context_used = mono_class_check_context_used (klass);
8261 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8262 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8263 MonoInst *args [3];
8265 /* obj */
8266 args [0] = *sp;
8268 /* klass */
8269 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8271 /* inline cache*/
8272 if (cfg->compile_aot)
8273 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8274 else
8275 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8277 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8278 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8279 ip += 5;
8280 inline_costs += 2;
8281 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8282 MonoMethod *mono_castclass;
8283 MonoInst *iargs [1];
8284 int costs;
8286 mono_castclass = mono_marshal_get_castclass (klass);
8287 iargs [0] = sp [0];
8289 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8290 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8291 CHECK_CFG_EXCEPTION;
8292 g_assert (costs > 0);
8294 ip += 5;
8295 cfg->real_offset += 5;
8296 bblock = cfg->cbb;
8298 *sp++ = iargs [0];
8300 inline_costs += costs;
8302 else {
8303 ins = handle_castclass (cfg, klass, *sp, context_used);
8304 CHECK_CFG_EXCEPTION;
8305 bblock = cfg->cbb;
8306 *sp ++ = ins;
8307 ip += 5;
8309 break;
8310 case CEE_ISINST: {
8311 CHECK_STACK (1);
8312 --sp;
8313 CHECK_OPSIZE (5);
8314 token = read32 (ip + 1);
8315 klass = mini_get_class (method, token, generic_context);
8316 CHECK_TYPELOAD (klass);
8317 if (sp [0]->type != STACK_OBJ)
8318 UNVERIFIED;
8320 if (cfg->generic_sharing_context)
8321 context_used = mono_class_check_context_used (klass);
8323 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8324 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8325 MonoInst *args [3];
8327 /* obj */
8328 args [0] = *sp;
8330 /* klass */
8331 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8333 /* inline cache*/
8334 if (cfg->compile_aot)
8335 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8336 else
8337 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8339 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8340 ip += 5;
8341 inline_costs += 2;
8342 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8343 MonoMethod *mono_isinst;
8344 MonoInst *iargs [1];
8345 int costs;
8347 mono_isinst = mono_marshal_get_isinst (klass);
8348 iargs [0] = sp [0];
8350 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8351 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8352 CHECK_CFG_EXCEPTION;
8353 g_assert (costs > 0);
8355 ip += 5;
8356 cfg->real_offset += 5;
8357 bblock = cfg->cbb;
8359 *sp++= iargs [0];
8361 inline_costs += costs;
8363 else {
8364 ins = handle_isinst (cfg, klass, *sp, context_used);
8365 CHECK_CFG_EXCEPTION;
8366 bblock = cfg->cbb;
8367 *sp ++ = ins;
8368 ip += 5;
8370 break;
8372 case CEE_UNBOX_ANY: {
8373 CHECK_STACK (1);
8374 --sp;
8375 CHECK_OPSIZE (5);
8376 token = read32 (ip + 1);
8377 klass = mini_get_class (method, token, generic_context);
8378 CHECK_TYPELOAD (klass);
8380 mono_save_token_info (cfg, image, token, klass);
8382 if (cfg->generic_sharing_context)
8383 context_used = mono_class_check_context_used (klass);
8385 if (generic_class_is_reference_type (cfg, klass)) {
8386 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8387 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8388 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8389 MonoInst *args [3];
8391 /* obj */
8392 args [0] = *sp;
8394 /* klass */
8395 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8397 /* inline cache*/
8398 /*FIXME AOT support*/
8399 if (cfg->compile_aot)
8400 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8401 else
8402 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8404 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8405 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8406 ip += 5;
8407 inline_costs += 2;
8408 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8409 MonoMethod *mono_castclass;
8410 MonoInst *iargs [1];
8411 int costs;
8413 mono_castclass = mono_marshal_get_castclass (klass);
8414 iargs [0] = sp [0];
8416 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8417 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8418 CHECK_CFG_EXCEPTION;
8419 g_assert (costs > 0);
8421 ip += 5;
8422 cfg->real_offset += 5;
8423 bblock = cfg->cbb;
8425 *sp++ = iargs [0];
8426 inline_costs += costs;
8427 } else {
8428 ins = handle_castclass (cfg, klass, *sp, context_used);
8429 CHECK_CFG_EXCEPTION;
8430 bblock = cfg->cbb;
8431 *sp ++ = ins;
8432 ip += 5;
8434 break;
8437 if (mono_class_is_nullable (klass)) {
8438 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8439 *sp++= ins;
8440 ip += 5;
8441 break;
8444 /* UNBOX */
8445 ins = handle_unbox (cfg, klass, sp, context_used);
8446 *sp = ins;
8448 ip += 5;
8450 /* LDOBJ */
8451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8452 *sp++ = ins;
8454 inline_costs += 2;
8455 break;
8457 case CEE_BOX: {
8458 MonoInst *val;
8460 CHECK_STACK (1);
8461 --sp;
8462 val = *sp;
8463 CHECK_OPSIZE (5);
8464 token = read32 (ip + 1);
8465 klass = mini_get_class (method, token, generic_context);
8466 CHECK_TYPELOAD (klass);
8468 mono_save_token_info (cfg, image, token, klass);
8470 if (cfg->generic_sharing_context)
8471 context_used = mono_class_check_context_used (klass);
8473 if (generic_class_is_reference_type (cfg, klass)) {
8474 *sp++ = val;
8475 ip += 5;
8476 break;
8479 if (klass == mono_defaults.void_class)
8480 UNVERIFIED;
8481 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8482 UNVERIFIED;
8483 /* frequent check in generic code: box (struct), brtrue */
8485 // FIXME: LLVM can't handle the inconsistent bb linking
8486 if (!mono_class_is_nullable (klass) &&
8487 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8488 (ip [5] == CEE_BRTRUE ||
8489 ip [5] == CEE_BRTRUE_S ||
8490 ip [5] == CEE_BRFALSE ||
8491 ip [5] == CEE_BRFALSE_S)) {
8492 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8493 int dreg;
8494 MonoBasicBlock *true_bb, *false_bb;
8496 ip += 5;
8498 if (cfg->verbose_level > 3) {
8499 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8500 printf ("<box+brtrue opt>\n");
8503 switch (*ip) {
8504 case CEE_BRTRUE_S:
8505 case CEE_BRFALSE_S:
8506 CHECK_OPSIZE (2);
8507 ip++;
8508 target = ip + 1 + (signed char)(*ip);
8509 ip++;
8510 break;
8511 case CEE_BRTRUE:
8512 case CEE_BRFALSE:
8513 CHECK_OPSIZE (5);
8514 ip++;
8515 target = ip + 4 + (gint)(read32 (ip));
8516 ip += 4;
8517 break;
8518 default:
8519 g_assert_not_reached ();
8523 * We need to link both bblocks, since it is needed for handling stack
8524 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8525 * Branching to only one of them would lead to inconsistencies, so
8526 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8528 GET_BBLOCK (cfg, true_bb, target);
8529 GET_BBLOCK (cfg, false_bb, ip);
8531 mono_link_bblock (cfg, cfg->cbb, true_bb);
8532 mono_link_bblock (cfg, cfg->cbb, false_bb);
8534 if (sp != stack_start) {
8535 handle_stack_args (cfg, stack_start, sp - stack_start);
8536 sp = stack_start;
8537 CHECK_UNVERIFIABLE (cfg);
8540 if (COMPILE_LLVM (cfg)) {
8541 dreg = alloc_ireg (cfg);
8542 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8545 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8546 } else {
8547 /* The JIT can't eliminate the iconst+compare */
8548 MONO_INST_NEW (cfg, ins, OP_BR);
8549 ins->inst_target_bb = is_true ? true_bb : false_bb;
8550 MONO_ADD_INS (cfg->cbb, ins);
8553 start_new_bblock = 1;
8554 break;
8557 *sp++ = handle_box (cfg, val, klass, context_used);
8559 CHECK_CFG_EXCEPTION;
8560 ip += 5;
8561 inline_costs += 1;
8562 break;
8564 case CEE_UNBOX: {
8565 CHECK_STACK (1);
8566 --sp;
8567 CHECK_OPSIZE (5);
8568 token = read32 (ip + 1);
8569 klass = mini_get_class (method, token, generic_context);
8570 CHECK_TYPELOAD (klass);
8572 mono_save_token_info (cfg, image, token, klass);
8574 if (cfg->generic_sharing_context)
8575 context_used = mono_class_check_context_used (klass);
8577 if (mono_class_is_nullable (klass)) {
8578 MonoInst *val;
8580 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8581 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8583 *sp++= ins;
8584 } else {
8585 ins = handle_unbox (cfg, klass, sp, context_used);
8586 *sp++ = ins;
8588 ip += 5;
8589 inline_costs += 2;
8590 break;
8592 case CEE_LDFLD:
8593 case CEE_LDFLDA:
8594 case CEE_STFLD: {
8595 MonoClassField *field;
8596 int costs;
8597 guint foffset;
8599 if (*ip == CEE_STFLD) {
8600 CHECK_STACK (2);
8601 sp -= 2;
8602 } else {
8603 CHECK_STACK (1);
8604 --sp;
8606 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8607 UNVERIFIED;
8608 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8609 UNVERIFIED;
8610 CHECK_OPSIZE (5);
8611 token = read32 (ip + 1);
8612 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8613 field = mono_method_get_wrapper_data (method, token);
8614 klass = field->parent;
8616 else {
8617 field = mono_field_from_token (image, token, &klass, generic_context);
8619 if (!field)
8620 LOAD_ERROR;
8621 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8622 FIELD_ACCESS_FAILURE;
8623 mono_class_init (klass);
8625 if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
8626 UNVERIFIED;
8627 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8628 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8629 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8630 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8633 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8634 if (*ip == CEE_STFLD) {
8635 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8636 UNVERIFIED;
8637 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8638 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8639 MonoInst *iargs [5];
8641 iargs [0] = sp [0];
8642 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8643 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8644 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8645 field->offset);
8646 iargs [4] = sp [1];
8648 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8649 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8650 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8651 CHECK_CFG_EXCEPTION;
8652 g_assert (costs > 0);
8654 cfg->real_offset += 5;
8655 bblock = cfg->cbb;
8657 inline_costs += costs;
8658 } else {
8659 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8661 } else {
8662 MonoInst *store;
8664 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8666 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8667 if (sp [0]->opcode != OP_LDADDR)
8668 store->flags |= MONO_INST_FAULT;
8670 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8671 /* insert call to write barrier */
8672 MonoInst *ptr;
8673 int dreg;
8675 dreg = alloc_ireg_mp (cfg);
8676 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8677 emit_write_barrier (cfg, ptr, sp [1], -1);
8680 store->flags |= ins_flag;
8682 ins_flag = 0;
8683 ip += 5;
8684 break;
8687 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8688 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8689 MonoInst *iargs [4];
8691 iargs [0] = sp [0];
8692 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8693 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8694 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8695 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8696 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8697 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8698 CHECK_CFG_EXCEPTION;
8699 bblock = cfg->cbb;
8700 g_assert (costs > 0);
8702 cfg->real_offset += 5;
8704 *sp++ = iargs [0];
8706 inline_costs += costs;
8707 } else {
8708 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8709 *sp++ = ins;
8711 } else {
8712 if (sp [0]->type == STACK_VTYPE) {
8713 MonoInst *var;
8715 /* Have to compute the address of the variable */
8717 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8718 if (!var)
8719 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8720 else
8721 g_assert (var->klass == klass);
8723 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8724 sp [0] = ins;
8727 if (*ip == CEE_LDFLDA) {
8728 if (is_magic_tls_access (field)) {
8729 ins = sp [0];
8730 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8731 } else {
8732 if (sp [0]->type == STACK_OBJ) {
8733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8734 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8737 dreg = alloc_ireg_mp (cfg);
8739 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8740 ins->klass = mono_class_from_mono_type (field->type);
8741 ins->type = STACK_MP;
8742 *sp++ = ins;
8744 } else {
8745 MonoInst *load;
8747 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8749 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8750 load->flags |= ins_flag;
8751 if (sp [0]->opcode != OP_LDADDR)
8752 load->flags |= MONO_INST_FAULT;
8753 *sp++ = load;
8756 ins_flag = 0;
8757 ip += 5;
8758 break;
8760 case CEE_LDSFLD:
8761 case CEE_LDSFLDA:
8762 case CEE_STSFLD: {
8763 MonoClassField *field;
8764 gpointer addr = NULL;
8765 gboolean is_special_static;
8766 MonoType *ftype;
8768 CHECK_OPSIZE (5);
8769 token = read32 (ip + 1);
8771 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8772 field = mono_method_get_wrapper_data (method, token);
8773 klass = field->parent;
8775 else
8776 field = mono_field_from_token (image, token, &klass, generic_context);
8777 if (!field)
8778 LOAD_ERROR;
8779 mono_class_init (klass);
8780 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8781 FIELD_ACCESS_FAILURE;
8783 /* if the class is Critical then transparent code cannot access it's fields */
8784 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8785 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8788 * We can only support shared generic static
8789 * field access on architectures where the
8790 * trampoline code has been extended to handle
8791 * the generic class init.
8793 #ifndef MONO_ARCH_VTABLE_REG
8794 GENERIC_SHARING_FAILURE (*ip);
8795 #endif
8797 if (cfg->generic_sharing_context)
8798 context_used = mono_class_check_context_used (klass);
8800 ftype = mono_field_get_type (field);
8802 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8804 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8805 * to be called here.
8807 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8808 mono_class_vtable (cfg->domain, klass);
8809 CHECK_TYPELOAD (klass);
8811 mono_domain_lock (cfg->domain);
8812 if (cfg->domain->special_static_fields)
8813 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8814 mono_domain_unlock (cfg->domain);
8816 is_special_static = mono_class_field_is_special_static (field);
8818 /* Generate IR to compute the field address */
8819 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8821 * Fast access to TLS data
8822 * Inline version of get_thread_static_data () in
8823 * threads.c.
8825 guint32 offset;
8826 int idx, static_data_reg, array_reg, dreg;
8827 MonoInst *thread_ins;
8829 // offset &= 0x7fffffff;
8830 // idx = (offset >> 24) - 1;
8831 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8833 thread_ins = mono_get_thread_intrinsic (cfg);
8834 MONO_ADD_INS (cfg->cbb, thread_ins);
8835 static_data_reg = alloc_ireg (cfg);
8836 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8838 if (cfg->compile_aot) {
8839 int offset_reg, offset2_reg, idx_reg;
8841 /* For TLS variables, this will return the TLS offset */
8842 EMIT_NEW_SFLDACONST (cfg, ins, field);
8843 offset_reg = ins->dreg;
8844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8845 idx_reg = alloc_ireg (cfg);
8846 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8849 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8850 array_reg = alloc_ireg (cfg);
8851 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8852 offset2_reg = alloc_ireg (cfg);
8853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8854 dreg = alloc_ireg (cfg);
8855 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8856 } else {
8857 offset = (gsize)addr & 0x7fffffff;
8858 idx = (offset >> 24) - 1;
8860 array_reg = alloc_ireg (cfg);
8861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8862 dreg = alloc_ireg (cfg);
8863 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8865 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8866 (cfg->compile_aot && is_special_static) ||
8867 (context_used && is_special_static)) {
8868 MonoInst *iargs [2];
8870 g_assert (field->parent);
8871 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8872 if (context_used) {
8873 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8874 field, MONO_RGCTX_INFO_CLASS_FIELD);
8875 } else {
8876 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8878 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8879 } else if (context_used) {
8880 MonoInst *static_data;
8883 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8884 method->klass->name_space, method->klass->name, method->name,
8885 depth, field->offset);
8888 if (mono_class_needs_cctor_run (klass, method))
8889 emit_generic_class_init (cfg, klass);
8892 * The pointer we're computing here is
8894 * super_info.static_data + field->offset
8896 static_data = emit_get_rgctx_klass (cfg, context_used,
8897 klass, MONO_RGCTX_INFO_STATIC_DATA);
8899 if (field->offset == 0) {
8900 ins = static_data;
8901 } else {
8902 int addr_reg = mono_alloc_preg (cfg);
8903 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8905 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8906 MonoInst *iargs [2];
8908 g_assert (field->parent);
8909 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8910 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8911 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8912 } else {
8913 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8915 CHECK_TYPELOAD (klass);
8916 if (!addr) {
8917 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8918 if (!(g_slist_find (class_inits, vtable))) {
8919 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8920 if (cfg->verbose_level > 2)
8921 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8922 class_inits = g_slist_prepend (class_inits, vtable);
8924 } else {
8925 if (cfg->run_cctors) {
8926 MonoException *ex;
8927 /* This makes so that inline cannot trigger */
8928 /* .cctors: too many apps depend on them */
8929 /* running with a specific order... */
8930 if (! vtable->initialized)
8931 INLINE_FAILURE;
8932 ex = mono_runtime_class_init_full (vtable, FALSE);
8933 if (ex) {
8934 set_exception_object (cfg, ex);
8935 goto exception_exit;
8939 addr = (char*)vtable->data + field->offset;
8941 if (cfg->compile_aot)
8942 EMIT_NEW_SFLDACONST (cfg, ins, field);
8943 else
8944 EMIT_NEW_PCONST (cfg, ins, addr);
8945 } else {
8946 MonoInst *iargs [1];
8947 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8948 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8952 /* Generate IR to do the actual load/store operation */
8954 if (*ip == CEE_LDSFLDA) {
8955 ins->klass = mono_class_from_mono_type (ftype);
8956 ins->type = STACK_PTR;
8957 *sp++ = ins;
8958 } else if (*ip == CEE_STSFLD) {
8959 MonoInst *store;
8960 CHECK_STACK (1);
8961 sp--;
8963 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8964 store->flags |= ins_flag;
8965 } else {
8966 gboolean is_const = FALSE;
8967 MonoVTable *vtable = NULL;
8969 if (!context_used) {
8970 vtable = mono_class_vtable (cfg->domain, klass);
8971 CHECK_TYPELOAD (klass);
8973 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8974 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8975 gpointer addr = (char*)vtable->data + field->offset;
8976 int ro_type = ftype->type;
8977 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8978 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8980 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8981 is_const = TRUE;
8982 switch (ro_type) {
8983 case MONO_TYPE_BOOLEAN:
8984 case MONO_TYPE_U1:
8985 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8986 sp++;
8987 break;
8988 case MONO_TYPE_I1:
8989 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8990 sp++;
8991 break;
8992 case MONO_TYPE_CHAR:
8993 case MONO_TYPE_U2:
8994 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8995 sp++;
8996 break;
8997 case MONO_TYPE_I2:
8998 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8999 sp++;
9000 break;
9001 break;
9002 case MONO_TYPE_I4:
9003 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9004 sp++;
9005 break;
9006 case MONO_TYPE_U4:
9007 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9008 sp++;
9009 break;
9010 case MONO_TYPE_I:
9011 case MONO_TYPE_U:
9012 case MONO_TYPE_PTR:
9013 case MONO_TYPE_FNPTR:
9014 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9015 type_to_eval_stack_type ((cfg), field->type, *sp);
9016 sp++;
9017 break;
9018 case MONO_TYPE_STRING:
9019 case MONO_TYPE_OBJECT:
9020 case MONO_TYPE_CLASS:
9021 case MONO_TYPE_SZARRAY:
9022 case MONO_TYPE_ARRAY:
9023 if (!mono_gc_is_moving ()) {
9024 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9025 type_to_eval_stack_type ((cfg), field->type, *sp);
9026 sp++;
9027 } else {
9028 is_const = FALSE;
9030 break;
9031 case MONO_TYPE_I8:
9032 case MONO_TYPE_U8:
9033 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9034 sp++;
9035 break;
9036 case MONO_TYPE_R4:
9037 case MONO_TYPE_R8:
9038 case MONO_TYPE_VALUETYPE:
9039 default:
9040 is_const = FALSE;
9041 break;
9045 if (!is_const) {
9046 MonoInst *load;
9048 CHECK_STACK_OVF (1);
9050 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9051 load->flags |= ins_flag;
9052 ins_flag = 0;
9053 *sp++ = load;
9056 ins_flag = 0;
9057 ip += 5;
9058 break;
9060 case CEE_STOBJ:
9061 CHECK_STACK (2);
9062 sp -= 2;
9063 CHECK_OPSIZE (5);
9064 token = read32 (ip + 1);
9065 klass = mini_get_class (method, token, generic_context);
9066 CHECK_TYPELOAD (klass);
9067 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9068 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9069 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9070 generic_class_is_reference_type (cfg, klass)) {
9071 /* insert call to write barrier */
9072 emit_write_barrier (cfg, sp [0], sp [1], -1);
9074 ins_flag = 0;
9075 ip += 5;
9076 inline_costs += 1;
9077 break;
9080 * Array opcodes
9082 case CEE_NEWARR: {
9083 MonoInst *len_ins;
9084 const char *data_ptr;
9085 int data_size = 0;
9086 guint32 field_token;
9088 CHECK_STACK (1);
9089 --sp;
9091 CHECK_OPSIZE (5);
9092 token = read32 (ip + 1);
9094 klass = mini_get_class (method, token, generic_context);
9095 CHECK_TYPELOAD (klass);
9097 if (cfg->generic_sharing_context)
9098 context_used = mono_class_check_context_used (klass);
9100 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9101 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9102 ins->sreg1 = sp [0]->dreg;
9103 ins->type = STACK_I4;
9104 ins->dreg = alloc_ireg (cfg);
9105 MONO_ADD_INS (cfg->cbb, ins);
9106 *sp = mono_decompose_opcode (cfg, ins);
9109 if (context_used) {
9110 MonoInst *args [3];
9111 MonoClass *array_class = mono_array_class_get (klass, 1);
9112 /* FIXME: we cannot get a managed
9113 allocator because we can't get the
9114 open generic class's vtable. We
9115 have the same problem in
9116 handle_alloc(). This
9117 needs to be solved so that we can
9118 have managed allocs of shared
9119 generic classes. */
9121 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9122 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9124 MonoMethod *managed_alloc = NULL;
9126 /* FIXME: Decompose later to help abcrem */
9128 /* vtable */
9129 args [0] = emit_get_rgctx_klass (cfg, context_used,
9130 array_class, MONO_RGCTX_INFO_VTABLE);
9131 /* array len */
9132 args [1] = sp [0];
9134 if (managed_alloc)
9135 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9136 else
9137 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9138 } else {
9139 if (cfg->opt & MONO_OPT_SHARED) {
9140 /* Decompose now to avoid problems with references to the domainvar */
9141 MonoInst *iargs [3];
9143 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9144 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9145 iargs [2] = sp [0];
9147 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9148 } else {
9149 /* Decompose later since it is needed by abcrem */
9150 MonoClass *array_type = mono_array_class_get (klass, 1);
9151 mono_class_vtable (cfg->domain, array_type);
9152 CHECK_TYPELOAD (array_type);
9154 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9155 ins->dreg = alloc_ireg_ref (cfg);
9156 ins->sreg1 = sp [0]->dreg;
9157 ins->inst_newa_class = klass;
9158 ins->type = STACK_OBJ;
9159 ins->klass = klass;
9160 MONO_ADD_INS (cfg->cbb, ins);
9161 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9162 cfg->cbb->has_array_access = TRUE;
9164 /* Needed so mono_emit_load_get_addr () gets called */
9165 mono_get_got_var (cfg);
9169 len_ins = sp [0];
9170 ip += 5;
9171 *sp++ = ins;
9172 inline_costs += 1;
9175 * we inline/optimize the initialization sequence if possible.
9176 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9177 * for small sizes open code the memcpy
9178 * ensure the rva field is big enough
9180 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9181 MonoMethod *memcpy_method = get_memcpy_method ();
9182 MonoInst *iargs [3];
9183 int add_reg = alloc_ireg_mp (cfg);
9185 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9186 if (cfg->compile_aot) {
9187 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9188 } else {
9189 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9191 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9192 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9193 ip += 11;
9196 break;
9198 case CEE_LDLEN:
9199 CHECK_STACK (1);
9200 --sp;
9201 if (sp [0]->type != STACK_OBJ)
9202 UNVERIFIED;
9204 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9205 ins->dreg = alloc_preg (cfg);
9206 ins->sreg1 = sp [0]->dreg;
9207 ins->type = STACK_I4;
9208 /* This flag will be inherited by the decomposition */
9209 ins->flags |= MONO_INST_FAULT;
9210 MONO_ADD_INS (cfg->cbb, ins);
9211 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9212 cfg->cbb->has_array_access = TRUE;
9213 ip ++;
9214 *sp++ = ins;
9215 break;
9216 case CEE_LDELEMA:
9217 CHECK_STACK (2);
9218 sp -= 2;
9219 CHECK_OPSIZE (5);
9220 if (sp [0]->type != STACK_OBJ)
9221 UNVERIFIED;
9223 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9225 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9226 CHECK_TYPELOAD (klass);
9227 /* we need to make sure that this array is exactly the type it needs
9228 * to be for correctness. the wrappers are lax with their usage
9229 * so we need to ignore them here
9231 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9232 MonoClass *array_class = mono_array_class_get (klass, 1);
9233 mini_emit_check_array_type (cfg, sp [0], array_class);
9234 CHECK_TYPELOAD (array_class);
9237 readonly = FALSE;
9238 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9239 *sp++ = ins;
9240 ip += 5;
9241 break;
9242 case CEE_LDELEM:
9243 case CEE_LDELEM_I1:
9244 case CEE_LDELEM_U1:
9245 case CEE_LDELEM_I2:
9246 case CEE_LDELEM_U2:
9247 case CEE_LDELEM_I4:
9248 case CEE_LDELEM_U4:
9249 case CEE_LDELEM_I8:
9250 case CEE_LDELEM_I:
9251 case CEE_LDELEM_R4:
9252 case CEE_LDELEM_R8:
9253 case CEE_LDELEM_REF: {
9254 MonoInst *addr;
9256 CHECK_STACK (2);
9257 sp -= 2;
9259 if (*ip == CEE_LDELEM) {
9260 CHECK_OPSIZE (5);
9261 token = read32 (ip + 1);
9262 klass = mini_get_class (method, token, generic_context);
9263 CHECK_TYPELOAD (klass);
9264 mono_class_init (klass);
9266 else
9267 klass = array_access_to_klass (*ip);
9269 if (sp [0]->type != STACK_OBJ)
9270 UNVERIFIED;
9272 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9274 if (sp [1]->opcode == OP_ICONST) {
9275 int array_reg = sp [0]->dreg;
9276 int index_reg = sp [1]->dreg;
9277 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9279 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9280 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9281 } else {
9282 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9283 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9285 *sp++ = ins;
9286 if (*ip == CEE_LDELEM)
9287 ip += 5;
9288 else
9289 ++ip;
9290 break;
9292 case CEE_STELEM_I:
9293 case CEE_STELEM_I1:
9294 case CEE_STELEM_I2:
9295 case CEE_STELEM_I4:
9296 case CEE_STELEM_I8:
9297 case CEE_STELEM_R4:
9298 case CEE_STELEM_R8:
9299 case CEE_STELEM_REF:
9300 case CEE_STELEM: {
9301 MonoInst *addr;
9303 CHECK_STACK (3);
9304 sp -= 3;
9306 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9308 if (*ip == CEE_STELEM) {
9309 CHECK_OPSIZE (5);
9310 token = read32 (ip + 1);
9311 klass = mini_get_class (method, token, generic_context);
9312 CHECK_TYPELOAD (klass);
9313 mono_class_init (klass);
9315 else
9316 klass = array_access_to_klass (*ip);
9318 if (sp [0]->type != STACK_OBJ)
9319 UNVERIFIED;
9321 /* storing a NULL doesn't need any of the complex checks in stelemref */
9322 if (generic_class_is_reference_type (cfg, klass) &&
9323 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9324 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9325 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9326 MonoInst *iargs [3];
9328 if (!helper->slot)
9329 mono_class_setup_vtable (obj_array);
9330 g_assert (helper->slot);
9332 if (sp [0]->type != STACK_OBJ)
9333 UNVERIFIED;
9334 if (sp [2]->type != STACK_OBJ)
9335 UNVERIFIED;
9337 iargs [2] = sp [2];
9338 iargs [1] = sp [1];
9339 iargs [0] = sp [0];
9341 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9342 } else {
9343 if (sp [1]->opcode == OP_ICONST) {
9344 int array_reg = sp [0]->dreg;
9345 int index_reg = sp [1]->dreg;
9346 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9348 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9349 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9350 } else {
9351 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9352 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9356 if (*ip == CEE_STELEM)
9357 ip += 5;
9358 else
9359 ++ip;
9360 inline_costs += 1;
9361 break;
9363 case CEE_CKFINITE: {
9364 CHECK_STACK (1);
9365 --sp;
9367 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9368 ins->sreg1 = sp [0]->dreg;
9369 ins->dreg = alloc_freg (cfg);
9370 ins->type = STACK_R8;
9371 MONO_ADD_INS (bblock, ins);
9373 *sp++ = mono_decompose_opcode (cfg, ins);
9375 ++ip;
9376 break;
9378 case CEE_REFANYVAL: {
9379 MonoInst *src_var, *src;
9381 int klass_reg = alloc_preg (cfg);
9382 int dreg = alloc_preg (cfg);
9384 CHECK_STACK (1);
9385 MONO_INST_NEW (cfg, ins, *ip);
9386 --sp;
9387 CHECK_OPSIZE (5);
9388 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9389 CHECK_TYPELOAD (klass);
9390 mono_class_init (klass);
9392 if (cfg->generic_sharing_context)
9393 context_used = mono_class_check_context_used (klass);
9395 // FIXME:
9396 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9397 if (!src_var)
9398 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9399 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9402 if (context_used) {
9403 MonoInst *klass_ins;
9405 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9406 klass, MONO_RGCTX_INFO_KLASS);
9408 // FIXME:
9409 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9410 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9411 } else {
9412 mini_emit_class_check (cfg, klass_reg, klass);
9414 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9415 ins->type = STACK_MP;
9416 *sp++ = ins;
9417 ip += 5;
9418 break;
9420 case CEE_MKREFANY: {
9421 MonoInst *loc, *addr;
9423 CHECK_STACK (1);
9424 MONO_INST_NEW (cfg, ins, *ip);
9425 --sp;
9426 CHECK_OPSIZE (5);
9427 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9428 CHECK_TYPELOAD (klass);
9429 mono_class_init (klass);
9431 if (cfg->generic_sharing_context)
9432 context_used = mono_class_check_context_used (klass);
9434 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9435 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9437 if (context_used) {
9438 MonoInst *const_ins;
9439 int type_reg = alloc_preg (cfg);
9441 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9442 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9444 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9445 } else if (cfg->compile_aot) {
9446 int const_reg = alloc_preg (cfg);
9447 int type_reg = alloc_preg (cfg);
9449 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9450 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9452 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9453 } else {
9454 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9455 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9457 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9459 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9460 ins->type = STACK_VTYPE;
9461 ins->klass = mono_defaults.typed_reference_class;
9462 *sp++ = ins;
9463 ip += 5;
9464 break;
9466 case CEE_LDTOKEN: {
9467 gpointer handle;
9468 MonoClass *handle_class;
9470 CHECK_STACK_OVF (1);
9472 CHECK_OPSIZE (5);
9473 n = read32 (ip + 1);
9475 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9476 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9477 handle = mono_method_get_wrapper_data (method, n);
9478 handle_class = mono_method_get_wrapper_data (method, n + 1);
9479 if (handle_class == mono_defaults.typehandle_class)
9480 handle = &((MonoClass*)handle)->byval_arg;
9482 else {
9483 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9485 if (!handle)
9486 LOAD_ERROR;
9487 mono_class_init (handle_class);
9488 if (cfg->generic_sharing_context) {
9489 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9490 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9491 /* This case handles ldtoken
9492 of an open type, like for
9493 typeof(Gen<>). */
9494 context_used = 0;
9495 } else if (handle_class == mono_defaults.typehandle_class) {
9496 /* If we get a MONO_TYPE_CLASS
9497 then we need to provide the
9498 open type, not an
9499 instantiation of it. */
9500 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9501 context_used = 0;
9502 else
9503 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9504 } else if (handle_class == mono_defaults.fieldhandle_class)
9505 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9506 else if (handle_class == mono_defaults.methodhandle_class)
9507 context_used = mono_method_check_context_used (handle);
9508 else
9509 g_assert_not_reached ();
9512 if ((cfg->opt & MONO_OPT_SHARED) &&
9513 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9514 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9515 MonoInst *addr, *vtvar, *iargs [3];
9516 int method_context_used;
9518 if (cfg->generic_sharing_context)
9519 method_context_used = mono_method_check_context_used (method);
9520 else
9521 method_context_used = 0;
9523 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9525 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9526 EMIT_NEW_ICONST (cfg, iargs [1], n);
9527 if (method_context_used) {
9528 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9529 method, MONO_RGCTX_INFO_METHOD);
9530 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9531 } else {
9532 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9533 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9535 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9539 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9540 } else {
9541 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9542 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9543 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9544 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9545 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9546 MonoClass *tclass = mono_class_from_mono_type (handle);
9548 mono_class_init (tclass);
9549 if (context_used) {
9550 ins = emit_get_rgctx_klass (cfg, context_used,
9551 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9552 } else if (cfg->compile_aot) {
9553 if (method->wrapper_type) {
9554 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9555 /* Special case for static synchronized wrappers */
9556 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9557 } else {
9558 /* FIXME: n is not a normal token */
9559 cfg->disable_aot = TRUE;
9560 EMIT_NEW_PCONST (cfg, ins, NULL);
9562 } else {
9563 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9565 } else {
9566 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9568 ins->type = STACK_OBJ;
9569 ins->klass = cmethod->klass;
9570 ip += 5;
9571 } else {
9572 MonoInst *addr, *vtvar;
9574 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9576 if (context_used) {
9577 if (handle_class == mono_defaults.typehandle_class) {
9578 ins = emit_get_rgctx_klass (cfg, context_used,
9579 mono_class_from_mono_type (handle),
9580 MONO_RGCTX_INFO_TYPE);
9581 } else if (handle_class == mono_defaults.methodhandle_class) {
9582 ins = emit_get_rgctx_method (cfg, context_used,
9583 handle, MONO_RGCTX_INFO_METHOD);
9584 } else if (handle_class == mono_defaults.fieldhandle_class) {
9585 ins = emit_get_rgctx_field (cfg, context_used,
9586 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9587 } else {
9588 g_assert_not_reached ();
9590 } else if (cfg->compile_aot) {
9591 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9592 } else {
9593 EMIT_NEW_PCONST (cfg, ins, handle);
9595 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9596 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9597 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9601 *sp++ = ins;
9602 ip += 5;
9603 break;
9605 case CEE_THROW:
9606 CHECK_STACK (1);
9607 MONO_INST_NEW (cfg, ins, OP_THROW);
9608 --sp;
9609 ins->sreg1 = sp [0]->dreg;
9610 ip++;
9611 bblock->out_of_line = TRUE;
9612 MONO_ADD_INS (bblock, ins);
9613 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9614 MONO_ADD_INS (bblock, ins);
9615 sp = stack_start;
9617 link_bblock (cfg, bblock, end_bblock);
9618 start_new_bblock = 1;
9619 break;
9620 case CEE_ENDFINALLY:
9621 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9622 MONO_ADD_INS (bblock, ins);
9623 ip++;
9624 start_new_bblock = 1;
9627 * Control will leave the method so empty the stack, otherwise
9628 * the next basic block will start with a nonempty stack.
9630 while (sp != stack_start) {
9631 sp--;
9633 break;
9634 case CEE_LEAVE:
9635 case CEE_LEAVE_S: {
9636 GList *handlers;
9638 if (*ip == CEE_LEAVE) {
9639 CHECK_OPSIZE (5);
9640 target = ip + 5 + (gint32)read32(ip + 1);
9641 } else {
9642 CHECK_OPSIZE (2);
9643 target = ip + 2 + (signed char)(ip [1]);
9646 /* empty the stack */
9647 while (sp != stack_start) {
9648 sp--;
9652 * If this leave statement is in a catch block, check for a
9653 * pending exception, and rethrow it if necessary.
9654 * We avoid doing this in runtime invoke wrappers, since those are called
9655 * by native code which excepts the wrapper to catch all exceptions.
9657 for (i = 0; i < header->num_clauses; ++i) {
9658 MonoExceptionClause *clause = &header->clauses [i];
9661 * Use <= in the final comparison to handle clauses with multiple
9662 * leave statements, like in bug #78024.
9663 * The ordering of the exception clauses guarantees that we find the
9664 * innermost clause.
9666 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9667 MonoInst *exc_ins;
9668 MonoBasicBlock *dont_throw;
9671 MonoInst *load;
9673 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9676 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9678 NEW_BBLOCK (cfg, dont_throw);
9681 * Currently, we always rethrow the abort exception, despite the
9682 * fact that this is not correct. See thread6.cs for an example.
9683 * But propagating the abort exception is more important than
9684 * getting the sematics right.
9686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9688 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9690 MONO_START_BB (cfg, dont_throw);
9691 bblock = cfg->cbb;
9695 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9696 GList *tmp;
9697 MonoExceptionClause *clause;
9699 for (tmp = handlers; tmp; tmp = tmp->next) {
9700 clause = tmp->data;
9701 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9702 g_assert (tblock);
9703 link_bblock (cfg, bblock, tblock);
9704 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9705 ins->inst_target_bb = tblock;
9706 ins->inst_eh_block = clause;
9707 MONO_ADD_INS (bblock, ins);
9708 bblock->has_call_handler = 1;
9709 if (COMPILE_LLVM (cfg)) {
9710 MonoBasicBlock *target_bb;
9713 * Link the finally bblock with the target, since it will
9714 * conceptually branch there.
9715 * FIXME: Have to link the bblock containing the endfinally.
9717 GET_BBLOCK (cfg, target_bb, target);
9718 link_bblock (cfg, tblock, target_bb);
9721 g_list_free (handlers);
9724 MONO_INST_NEW (cfg, ins, OP_BR);
9725 MONO_ADD_INS (bblock, ins);
9726 GET_BBLOCK (cfg, tblock, target);
9727 link_bblock (cfg, bblock, tblock);
9728 ins->inst_target_bb = tblock;
9729 start_new_bblock = 1;
9731 if (*ip == CEE_LEAVE)
9732 ip += 5;
9733 else
9734 ip += 2;
9736 break;
9740 * Mono specific opcodes
9742 case MONO_CUSTOM_PREFIX: {
9744 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9746 CHECK_OPSIZE (2);
9747 switch (ip [1]) {
9748 case CEE_MONO_ICALL: {
9749 gpointer func;
9750 MonoJitICallInfo *info;
9752 token = read32 (ip + 2);
9753 func = mono_method_get_wrapper_data (method, token);
9754 info = mono_find_jit_icall_by_addr (func);
9755 g_assert (info);
9757 CHECK_STACK (info->sig->param_count);
9758 sp -= info->sig->param_count;
9760 ins = mono_emit_jit_icall (cfg, info->func, sp);
9761 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9762 *sp++ = ins;
9764 ip += 6;
9765 inline_costs += 10 * num_calls++;
9767 break;
9769 case CEE_MONO_LDPTR: {
9770 gpointer ptr;
9772 CHECK_STACK_OVF (1);
9773 CHECK_OPSIZE (6);
9774 token = read32 (ip + 2);
9776 ptr = mono_method_get_wrapper_data (method, token);
9777 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9778 MonoJitICallInfo *callinfo;
9779 const char *icall_name;
9781 icall_name = method->name + strlen ("__icall_wrapper_");
9782 g_assert (icall_name);
9783 callinfo = mono_find_jit_icall_by_name (icall_name);
9784 g_assert (callinfo);
9786 if (ptr == callinfo->func) {
9787 /* Will be transformed into an AOTCONST later */
9788 EMIT_NEW_PCONST (cfg, ins, ptr);
9789 *sp++ = ins;
9790 ip += 6;
9791 break;
9794 /* FIXME: Generalize this */
9795 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9796 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9797 *sp++ = ins;
9798 ip += 6;
9799 break;
9801 EMIT_NEW_PCONST (cfg, ins, ptr);
9802 *sp++ = ins;
9803 ip += 6;
9804 inline_costs += 10 * num_calls++;
9805 /* Can't embed random pointers into AOT code */
9806 cfg->disable_aot = 1;
9807 break;
9809 case CEE_MONO_ICALL_ADDR: {
9810 MonoMethod *cmethod;
9811 gpointer ptr;
9813 CHECK_STACK_OVF (1);
9814 CHECK_OPSIZE (6);
9815 token = read32 (ip + 2);
9817 cmethod = mono_method_get_wrapper_data (method, token);
9819 if (cfg->compile_aot) {
9820 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9821 } else {
9822 ptr = mono_lookup_internal_call (cmethod);
9823 g_assert (ptr);
9824 EMIT_NEW_PCONST (cfg, ins, ptr);
9826 *sp++ = ins;
9827 ip += 6;
9828 break;
9830 case CEE_MONO_VTADDR: {
9831 MonoInst *src_var, *src;
9833 CHECK_STACK (1);
9834 --sp;
9836 // FIXME:
9837 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9838 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9839 *sp++ = src;
9840 ip += 2;
9841 break;
9843 case CEE_MONO_NEWOBJ: {
9844 MonoInst *iargs [2];
9846 CHECK_STACK_OVF (1);
9847 CHECK_OPSIZE (6);
9848 token = read32 (ip + 2);
9849 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9850 mono_class_init (klass);
9851 NEW_DOMAINCONST (cfg, iargs [0]);
9852 MONO_ADD_INS (cfg->cbb, iargs [0]);
9853 NEW_CLASSCONST (cfg, iargs [1], klass);
9854 MONO_ADD_INS (cfg->cbb, iargs [1]);
9855 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9856 ip += 6;
9857 inline_costs += 10 * num_calls++;
9858 break;
9860 case CEE_MONO_OBJADDR:
9861 CHECK_STACK (1);
9862 --sp;
9863 MONO_INST_NEW (cfg, ins, OP_MOVE);
9864 ins->dreg = alloc_ireg_mp (cfg);
9865 ins->sreg1 = sp [0]->dreg;
9866 ins->type = STACK_MP;
9867 MONO_ADD_INS (cfg->cbb, ins);
9868 *sp++ = ins;
9869 ip += 2;
9870 break;
9871 case CEE_MONO_LDNATIVEOBJ:
9873 * Similar to LDOBJ, but instead load the unmanaged
9874 * representation of the vtype to the stack.
9876 CHECK_STACK (1);
9877 CHECK_OPSIZE (6);
9878 --sp;
9879 token = read32 (ip + 2);
9880 klass = mono_method_get_wrapper_data (method, token);
9881 g_assert (klass->valuetype);
9882 mono_class_init (klass);
9885 MonoInst *src, *dest, *temp;
9887 src = sp [0];
9888 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9889 temp->backend.is_pinvoke = 1;
9890 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9891 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9893 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9894 dest->type = STACK_VTYPE;
9895 dest->klass = klass;
9897 *sp ++ = dest;
9898 ip += 6;
9900 break;
9901 case CEE_MONO_RETOBJ: {
9903 * Same as RET, but return the native representation of a vtype
9904 * to the caller.
9906 g_assert (cfg->ret);
9907 g_assert (mono_method_signature (method)->pinvoke);
9908 CHECK_STACK (1);
9909 --sp;
9911 CHECK_OPSIZE (6);
9912 token = read32 (ip + 2);
9913 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9915 if (!cfg->vret_addr) {
9916 g_assert (cfg->ret_var_is_local);
9918 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9919 } else {
9920 EMIT_NEW_RETLOADA (cfg, ins);
9922 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9924 if (sp != stack_start)
9925 UNVERIFIED;
9927 MONO_INST_NEW (cfg, ins, OP_BR);
9928 ins->inst_target_bb = end_bblock;
9929 MONO_ADD_INS (bblock, ins);
9930 link_bblock (cfg, bblock, end_bblock);
9931 start_new_bblock = 1;
9932 ip += 6;
9933 break;
9935 case CEE_MONO_CISINST:
9936 case CEE_MONO_CCASTCLASS: {
9937 int token;
9938 CHECK_STACK (1);
9939 --sp;
9940 CHECK_OPSIZE (6);
9941 token = read32 (ip + 2);
9942 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9943 if (ip [1] == CEE_MONO_CISINST)
9944 ins = handle_cisinst (cfg, klass, sp [0]);
9945 else
9946 ins = handle_ccastclass (cfg, klass, sp [0]);
9947 bblock = cfg->cbb;
9948 *sp++ = ins;
9949 ip += 6;
9950 break;
9952 case CEE_MONO_SAVE_LMF:
9953 case CEE_MONO_RESTORE_LMF:
9954 #ifdef MONO_ARCH_HAVE_LMF_OPS
9955 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9956 MONO_ADD_INS (bblock, ins);
9957 cfg->need_lmf_area = TRUE;
9958 #endif
9959 ip += 2;
9960 break;
9961 case CEE_MONO_CLASSCONST:
9962 CHECK_STACK_OVF (1);
9963 CHECK_OPSIZE (6);
9964 token = read32 (ip + 2);
9965 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9966 *sp++ = ins;
9967 ip += 6;
9968 inline_costs += 10 * num_calls++;
9969 break;
9970 case CEE_MONO_NOT_TAKEN:
9971 bblock->out_of_line = TRUE;
9972 ip += 2;
9973 break;
9974 case CEE_MONO_TLS:
9975 CHECK_STACK_OVF (1);
9976 CHECK_OPSIZE (6);
9977 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9978 ins->dreg = alloc_preg (cfg);
9979 ins->inst_offset = (gint32)read32 (ip + 2);
9980 ins->type = STACK_PTR;
9981 MONO_ADD_INS (bblock, ins);
9982 *sp++ = ins;
9983 ip += 6;
9984 break;
9985 case CEE_MONO_DYN_CALL: {
9986 MonoCallInst *call;
9988 /* It would be easier to call a trampoline, but that would put an
9989 * extra frame on the stack, confusing exception handling. So
9990 * implement it inline using an opcode for now.
9993 if (!cfg->dyn_call_var) {
9994 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9995 /* prevent it from being register allocated */
9996 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9999 /* Has to use a call inst since it local regalloc expects it */
10000 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10001 ins = (MonoInst*)call;
10002 sp -= 2;
10003 ins->sreg1 = sp [0]->dreg;
10004 ins->sreg2 = sp [1]->dreg;
10005 MONO_ADD_INS (bblock, ins);
10007 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10008 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10009 #endif
10011 ip += 2;
10012 inline_costs += 10 * num_calls++;
10014 break;
10016 case CEE_MONO_MEMORY_BARRIER: {
10017 CHECK_OPSIZE (5);
10018 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10019 ip += 5;
10020 break;
10022 default:
10023 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10024 break;
10026 break;
10029 case CEE_PREFIX1: {
10030 CHECK_OPSIZE (2);
10031 switch (ip [1]) {
10032 case CEE_ARGLIST: {
10033 /* somewhat similar to LDTOKEN */
10034 MonoInst *addr, *vtvar;
10035 CHECK_STACK_OVF (1);
10036 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10038 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10039 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10041 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10042 ins->type = STACK_VTYPE;
10043 ins->klass = mono_defaults.argumenthandle_class;
10044 *sp++ = ins;
10045 ip += 2;
10046 break;
10048 case CEE_CEQ:
10049 case CEE_CGT:
10050 case CEE_CGT_UN:
10051 case CEE_CLT:
10052 case CEE_CLT_UN: {
10053 MonoInst *cmp;
10054 CHECK_STACK (2);
10056 * The following transforms:
10057 * CEE_CEQ into OP_CEQ
10058 * CEE_CGT into OP_CGT
10059 * CEE_CGT_UN into OP_CGT_UN
10060 * CEE_CLT into OP_CLT
10061 * CEE_CLT_UN into OP_CLT_UN
10063 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10065 MONO_INST_NEW (cfg, ins, cmp->opcode);
10066 sp -= 2;
10067 cmp->sreg1 = sp [0]->dreg;
10068 cmp->sreg2 = sp [1]->dreg;
10069 type_from_op (cmp, sp [0], sp [1]);
10070 CHECK_TYPE (cmp);
10071 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10072 cmp->opcode = OP_LCOMPARE;
10073 else if (sp [0]->type == STACK_R8)
10074 cmp->opcode = OP_FCOMPARE;
10075 else
10076 cmp->opcode = OP_ICOMPARE;
10077 MONO_ADD_INS (bblock, cmp);
10078 ins->type = STACK_I4;
10079 ins->dreg = alloc_dreg (cfg, ins->type);
10080 type_from_op (ins, sp [0], sp [1]);
10082 if (cmp->opcode == OP_FCOMPARE) {
10084 * The backends expect the fceq opcodes to do the
10085 * comparison too.
10087 cmp->opcode = OP_NOP;
10088 ins->sreg1 = cmp->sreg1;
10089 ins->sreg2 = cmp->sreg2;
10091 MONO_ADD_INS (bblock, ins);
10092 *sp++ = ins;
10093 ip += 2;
10094 break;
10096 case CEE_LDFTN: {
10097 MonoInst *argconst;
10098 MonoMethod *cil_method;
10100 CHECK_STACK_OVF (1);
10101 CHECK_OPSIZE (6);
10102 n = read32 (ip + 2);
10103 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10104 if (!cmethod || mono_loader_get_last_error ())
10105 LOAD_ERROR;
10106 mono_class_init (cmethod->klass);
10108 mono_save_token_info (cfg, image, n, cmethod);
10110 if (cfg->generic_sharing_context)
10111 context_used = mono_method_check_context_used (cmethod);
10113 cil_method = cmethod;
10114 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10115 METHOD_ACCESS_FAILURE;
10117 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10118 if (check_linkdemand (cfg, method, cmethod))
10119 INLINE_FAILURE;
10120 CHECK_CFG_EXCEPTION;
10121 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10122 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10126 * Optimize the common case of ldftn+delegate creation
10128 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10129 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10130 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10131 MonoInst *target_ins;
10132 MonoMethod *invoke;
10133 int invoke_context_used = 0;
10135 invoke = mono_get_delegate_invoke (ctor_method->klass);
10136 if (!invoke || !mono_method_signature (invoke))
10137 LOAD_ERROR;
10139 if (cfg->generic_sharing_context)
10140 invoke_context_used = mono_method_check_context_used (invoke);
10142 target_ins = sp [-1];
10144 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10145 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10147 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10148 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10149 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10150 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10151 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10155 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10156 /* FIXME: SGEN support */
10157 if (invoke_context_used == 0) {
10158 ip += 6;
10159 if (cfg->verbose_level > 3)
10160 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10161 sp --;
10162 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10163 CHECK_CFG_EXCEPTION;
10164 ip += 5;
10165 sp ++;
10166 break;
10168 #endif
10172 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10173 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10174 *sp++ = ins;
10176 ip += 6;
10177 inline_costs += 10 * num_calls++;
10178 break;
10180 case CEE_LDVIRTFTN: {
10181 MonoInst *args [2];
10183 CHECK_STACK (1);
10184 CHECK_OPSIZE (6);
10185 n = read32 (ip + 2);
10186 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10187 if (!cmethod || mono_loader_get_last_error ())
10188 LOAD_ERROR;
10189 mono_class_init (cmethod->klass);
10191 if (cfg->generic_sharing_context)
10192 context_used = mono_method_check_context_used (cmethod);
10194 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10195 if (check_linkdemand (cfg, method, cmethod))
10196 INLINE_FAILURE;
10197 CHECK_CFG_EXCEPTION;
10198 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10199 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10202 --sp;
10203 args [0] = *sp;
10205 args [1] = emit_get_rgctx_method (cfg, context_used,
10206 cmethod, MONO_RGCTX_INFO_METHOD);
10208 if (context_used)
10209 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10210 else
10211 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10213 ip += 6;
10214 inline_costs += 10 * num_calls++;
10215 break;
10217 case CEE_LDARG:
10218 CHECK_STACK_OVF (1);
10219 CHECK_OPSIZE (4);
10220 n = read16 (ip + 2);
10221 CHECK_ARG (n);
10222 EMIT_NEW_ARGLOAD (cfg, ins, n);
10223 *sp++ = ins;
10224 ip += 4;
10225 break;
10226 case CEE_LDARGA:
10227 CHECK_STACK_OVF (1);
10228 CHECK_OPSIZE (4);
10229 n = read16 (ip + 2);
10230 CHECK_ARG (n);
10231 NEW_ARGLOADA (cfg, ins, n);
10232 MONO_ADD_INS (cfg->cbb, ins);
10233 *sp++ = ins;
10234 ip += 4;
10235 break;
10236 case CEE_STARG:
10237 CHECK_STACK (1);
10238 --sp;
10239 CHECK_OPSIZE (4);
10240 n = read16 (ip + 2);
10241 CHECK_ARG (n);
10242 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10243 UNVERIFIED;
10244 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10245 ip += 4;
10246 break;
10247 case CEE_LDLOC:
10248 CHECK_STACK_OVF (1);
10249 CHECK_OPSIZE (4);
10250 n = read16 (ip + 2);
10251 CHECK_LOCAL (n);
10252 EMIT_NEW_LOCLOAD (cfg, ins, n);
10253 *sp++ = ins;
10254 ip += 4;
10255 break;
10256 case CEE_LDLOCA: {
10257 unsigned char *tmp_ip;
10258 CHECK_STACK_OVF (1);
10259 CHECK_OPSIZE (4);
10260 n = read16 (ip + 2);
10261 CHECK_LOCAL (n);
10263 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10264 ip = tmp_ip;
10265 inline_costs += 1;
10266 break;
10269 EMIT_NEW_LOCLOADA (cfg, ins, n);
10270 *sp++ = ins;
10271 ip += 4;
10272 break;
10274 case CEE_STLOC:
10275 CHECK_STACK (1);
10276 --sp;
10277 CHECK_OPSIZE (4);
10278 n = read16 (ip + 2);
10279 CHECK_LOCAL (n);
10280 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10281 UNVERIFIED;
10282 emit_stloc_ir (cfg, sp, header, n);
10283 ip += 4;
10284 inline_costs += 1;
10285 break;
10286 case CEE_LOCALLOC:
10287 CHECK_STACK (1);
10288 --sp;
10289 if (sp != stack_start)
10290 UNVERIFIED;
10291 if (cfg->method != method)
10293 * Inlining this into a loop in a parent could lead to
10294 * stack overflows which is different behavior than the
10295 * non-inlined case, thus disable inlining in this case.
10297 goto inline_failure;
10299 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10300 ins->dreg = alloc_preg (cfg);
10301 ins->sreg1 = sp [0]->dreg;
10302 ins->type = STACK_PTR;
10303 MONO_ADD_INS (cfg->cbb, ins);
10305 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10306 if (init_locals)
10307 ins->flags |= MONO_INST_INIT;
10309 *sp++ = ins;
10310 ip += 2;
10311 break;
10312 case CEE_ENDFILTER: {
10313 MonoExceptionClause *clause, *nearest;
10314 int cc, nearest_num;
10316 CHECK_STACK (1);
10317 --sp;
10318 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10319 UNVERIFIED;
10320 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10321 ins->sreg1 = (*sp)->dreg;
10322 MONO_ADD_INS (bblock, ins);
10323 start_new_bblock = 1;
10324 ip += 2;
10326 nearest = NULL;
10327 nearest_num = 0;
10328 for (cc = 0; cc < header->num_clauses; ++cc) {
10329 clause = &header->clauses [cc];
10330 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10331 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10332 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10333 nearest = clause;
10334 nearest_num = cc;
10337 g_assert (nearest);
10338 if ((ip - header->code) != nearest->handler_offset)
10339 UNVERIFIED;
10341 break;
10343 case CEE_UNALIGNED_:
10344 ins_flag |= MONO_INST_UNALIGNED;
10345 /* FIXME: record alignment? we can assume 1 for now */
10346 CHECK_OPSIZE (3);
10347 ip += 3;
10348 break;
10349 case CEE_VOLATILE_:
10350 ins_flag |= MONO_INST_VOLATILE;
10351 ip += 2;
10352 break;
10353 case CEE_TAIL_:
10354 ins_flag |= MONO_INST_TAILCALL;
10355 cfg->flags |= MONO_CFG_HAS_TAIL;
10356 /* Can't inline tail calls at this time */
10357 inline_costs += 100000;
10358 ip += 2;
10359 break;
10360 case CEE_INITOBJ:
10361 CHECK_STACK (1);
10362 --sp;
10363 CHECK_OPSIZE (6);
10364 token = read32 (ip + 2);
10365 klass = mini_get_class (method, token, generic_context);
10366 CHECK_TYPELOAD (klass);
10367 if (generic_class_is_reference_type (cfg, klass))
10368 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10369 else
10370 mini_emit_initobj (cfg, *sp, NULL, klass);
10371 ip += 6;
10372 inline_costs += 1;
10373 break;
10374 case CEE_CONSTRAINED_:
10375 CHECK_OPSIZE (6);
10376 token = read32 (ip + 2);
10377 if (method->wrapper_type != MONO_WRAPPER_NONE)
10378 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10379 else
10380 constrained_call = mono_class_get_full (image, token, generic_context);
10381 CHECK_TYPELOAD (constrained_call);
10382 ip += 6;
10383 break;
10384 case CEE_CPBLK:
10385 case CEE_INITBLK: {
10386 MonoInst *iargs [3];
10387 CHECK_STACK (3);
10388 sp -= 3;
10390 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10391 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10392 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10393 /* emit_memset only works when val == 0 */
10394 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10395 } else {
10396 iargs [0] = sp [0];
10397 iargs [1] = sp [1];
10398 iargs [2] = sp [2];
10399 if (ip [1] == CEE_CPBLK) {
10400 MonoMethod *memcpy_method = get_memcpy_method ();
10401 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10402 } else {
10403 MonoMethod *memset_method = get_memset_method ();
10404 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10407 ip += 2;
10408 inline_costs += 1;
10409 break;
10411 case CEE_NO_:
10412 CHECK_OPSIZE (3);
10413 if (ip [2] & 0x1)
10414 ins_flag |= MONO_INST_NOTYPECHECK;
10415 if (ip [2] & 0x2)
10416 ins_flag |= MONO_INST_NORANGECHECK;
10417 /* we ignore the no-nullcheck for now since we
10418 * really do it explicitly only when doing callvirt->call
10420 ip += 3;
10421 break;
10422 case CEE_RETHROW: {
10423 MonoInst *load;
10424 int handler_offset = -1;
10426 for (i = 0; i < header->num_clauses; ++i) {
10427 MonoExceptionClause *clause = &header->clauses [i];
10428 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10429 handler_offset = clause->handler_offset;
10430 break;
10434 bblock->flags |= BB_EXCEPTION_UNSAFE;
10436 g_assert (handler_offset != -1);
10438 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10439 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10440 ins->sreg1 = load->dreg;
10441 MONO_ADD_INS (bblock, ins);
10443 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10444 MONO_ADD_INS (bblock, ins);
10446 sp = stack_start;
10447 link_bblock (cfg, bblock, end_bblock);
10448 start_new_bblock = 1;
10449 ip += 2;
10450 break;
10452 case CEE_SIZEOF: {
10453 guint32 align;
10454 int ialign;
10456 CHECK_STACK_OVF (1);
10457 CHECK_OPSIZE (6);
10458 token = read32 (ip + 2);
10459 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10460 MonoType *type = mono_type_create_from_typespec (image, token);
10461 token = mono_type_size (type, &ialign);
10462 } else {
10463 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10464 CHECK_TYPELOAD (klass);
10465 mono_class_init (klass);
10466 token = mono_class_value_size (klass, &align);
10468 EMIT_NEW_ICONST (cfg, ins, token);
10469 *sp++= ins;
10470 ip += 6;
10471 break;
10473 case CEE_REFANYTYPE: {
10474 MonoInst *src_var, *src;
10476 CHECK_STACK (1);
10477 --sp;
10479 // FIXME:
10480 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10481 if (!src_var)
10482 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10483 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10485 *sp++ = ins;
10486 ip += 2;
10487 break;
10489 case CEE_READONLY_:
10490 readonly = TRUE;
10491 ip += 2;
10492 break;
10494 case CEE_UNUSED56:
10495 case CEE_UNUSED57:
10496 case CEE_UNUSED70:
10497 case CEE_UNUSED:
10498 case CEE_UNUSED99:
10499 UNVERIFIED;
10501 default:
10502 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10503 UNVERIFIED;
10505 break;
10507 case CEE_UNUSED58:
10508 case CEE_UNUSED1:
10509 UNVERIFIED;
10511 default:
10512 g_warning ("opcode 0x%02x not handled", *ip);
10513 UNVERIFIED;
10516 if (start_new_bblock != 1)
10517 UNVERIFIED;
10519 bblock->cil_length = ip - bblock->cil_code;
10520 if (bblock->next_bb) {
10521 /* This could already be set because of inlining, #693905 */
10522 MonoBasicBlock *bb = bblock;
10524 while (bb->next_bb)
10525 bb = bb->next_bb;
10526 bb->next_bb = end_bblock;
10527 } else {
10528 bblock->next_bb = end_bblock;
10531 if (cfg->method == method && cfg->domainvar) {
10532 MonoInst *store;
10533 MonoInst *get_domain;
10535 cfg->cbb = init_localsbb;
10537 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10538 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10540 else {
10541 get_domain->dreg = alloc_preg (cfg);
10542 MONO_ADD_INS (cfg->cbb, get_domain);
10544 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10545 MONO_ADD_INS (cfg->cbb, store);
10548 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10549 if (cfg->compile_aot)
10550 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10551 mono_get_got_var (cfg);
10552 #endif
10554 if (cfg->method == method && cfg->got_var)
10555 mono_emit_load_got_addr (cfg);
10557 if (init_locals) {
10558 MonoInst *store;
10560 cfg->cbb = init_localsbb;
10561 cfg->ip = NULL;
10562 for (i = 0; i < header->num_locals; ++i) {
10563 MonoType *ptype = header->locals [i];
10564 int t = ptype->type;
10565 dreg = cfg->locals [i]->dreg;
10567 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10568 t = mono_class_enum_basetype (ptype->data.klass)->type;
10569 if (ptype->byref) {
10570 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10571 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10572 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10573 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10574 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10575 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10576 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10577 ins->type = STACK_R8;
10578 ins->inst_p0 = (void*)&r8_0;
10579 ins->dreg = alloc_dreg (cfg, STACK_R8);
10580 MONO_ADD_INS (init_localsbb, ins);
10581 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10582 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10583 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10584 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10585 } else {
10586 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10591 if (cfg->init_ref_vars && cfg->method == method) {
10592 /* Emit initialization for ref vars */
10593 // FIXME: Avoid duplication initialization for IL locals.
10594 for (i = 0; i < cfg->num_varinfo; ++i) {
10595 MonoInst *ins = cfg->varinfo [i];
10597 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10598 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10602 /* Add a sequence point for method entry/exit events */
10603 if (seq_points) {
10604 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10605 MONO_ADD_INS (init_localsbb, ins);
10606 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10607 MONO_ADD_INS (cfg->bb_exit, ins);
10610 cfg->ip = NULL;
10612 if (cfg->method == method) {
10613 MonoBasicBlock *bb;
10614 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10615 bb->region = mono_find_block_region (cfg, bb->real_offset);
10616 if (cfg->spvars)
10617 mono_create_spvar_for_region (cfg, bb->region);
10618 if (cfg->verbose_level > 2)
10619 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10623 g_slist_free (class_inits);
10624 dont_inline = g_list_remove (dont_inline, method);
10626 if (inline_costs < 0) {
10627 char *mname;
10629 /* Method is too large */
10630 mname = mono_method_full_name (method, TRUE);
10631 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10632 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10633 g_free (mname);
10634 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10635 mono_basic_block_free (original_bb);
10636 return -1;
10639 if ((cfg->verbose_level > 2) && (cfg->method == method))
10640 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10642 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10643 mono_basic_block_free (original_bb);
10644 return inline_costs;
10646 exception_exit:
10647 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10648 goto cleanup;
10650 inline_failure:
10651 goto cleanup;
10653 load_error:
10654 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10655 goto cleanup;
10657 unverified:
10658 set_exception_type_from_invalid_il (cfg, method, ip);
10659 goto cleanup;
10661 cleanup:
10662 g_slist_free (class_inits);
10663 mono_basic_block_free (original_bb);
10664 dont_inline = g_list_remove (dont_inline, method);
10665 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10666 return -1;
10669 static int
10670 store_membase_reg_to_store_membase_imm (int opcode)
10672 switch (opcode) {
10673 case OP_STORE_MEMBASE_REG:
10674 return OP_STORE_MEMBASE_IMM;
10675 case OP_STOREI1_MEMBASE_REG:
10676 return OP_STOREI1_MEMBASE_IMM;
10677 case OP_STOREI2_MEMBASE_REG:
10678 return OP_STOREI2_MEMBASE_IMM;
10679 case OP_STOREI4_MEMBASE_REG:
10680 return OP_STOREI4_MEMBASE_IMM;
10681 case OP_STOREI8_MEMBASE_REG:
10682 return OP_STOREI8_MEMBASE_IMM;
10683 default:
10684 g_assert_not_reached ();
10687 return -1;
10690 #endif /* DISABLE_JIT */
10693 mono_op_to_op_imm (int opcode)
10695 switch (opcode) {
10696 case OP_IADD:
10697 return OP_IADD_IMM;
10698 case OP_ISUB:
10699 return OP_ISUB_IMM;
10700 case OP_IDIV:
10701 return OP_IDIV_IMM;
10702 case OP_IDIV_UN:
10703 return OP_IDIV_UN_IMM;
10704 case OP_IREM:
10705 return OP_IREM_IMM;
10706 case OP_IREM_UN:
10707 return OP_IREM_UN_IMM;
10708 case OP_IMUL:
10709 return OP_IMUL_IMM;
10710 case OP_IAND:
10711 return OP_IAND_IMM;
10712 case OP_IOR:
10713 return OP_IOR_IMM;
10714 case OP_IXOR:
10715 return OP_IXOR_IMM;
10716 case OP_ISHL:
10717 return OP_ISHL_IMM;
10718 case OP_ISHR:
10719 return OP_ISHR_IMM;
10720 case OP_ISHR_UN:
10721 return OP_ISHR_UN_IMM;
10723 case OP_LADD:
10724 return OP_LADD_IMM;
10725 case OP_LSUB:
10726 return OP_LSUB_IMM;
10727 case OP_LAND:
10728 return OP_LAND_IMM;
10729 case OP_LOR:
10730 return OP_LOR_IMM;
10731 case OP_LXOR:
10732 return OP_LXOR_IMM;
10733 case OP_LSHL:
10734 return OP_LSHL_IMM;
10735 case OP_LSHR:
10736 return OP_LSHR_IMM;
10737 case OP_LSHR_UN:
10738 return OP_LSHR_UN_IMM;
10740 case OP_COMPARE:
10741 return OP_COMPARE_IMM;
10742 case OP_ICOMPARE:
10743 return OP_ICOMPARE_IMM;
10744 case OP_LCOMPARE:
10745 return OP_LCOMPARE_IMM;
10747 case OP_STORE_MEMBASE_REG:
10748 return OP_STORE_MEMBASE_IMM;
10749 case OP_STOREI1_MEMBASE_REG:
10750 return OP_STOREI1_MEMBASE_IMM;
10751 case OP_STOREI2_MEMBASE_REG:
10752 return OP_STOREI2_MEMBASE_IMM;
10753 case OP_STOREI4_MEMBASE_REG:
10754 return OP_STOREI4_MEMBASE_IMM;
10756 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10757 case OP_X86_PUSH:
10758 return OP_X86_PUSH_IMM;
10759 case OP_X86_COMPARE_MEMBASE_REG:
10760 return OP_X86_COMPARE_MEMBASE_IMM;
10761 #endif
10762 #if defined(TARGET_AMD64)
10763 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10764 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10765 #endif
10766 case OP_VOIDCALL_REG:
10767 return OP_VOIDCALL;
10768 case OP_CALL_REG:
10769 return OP_CALL;
10770 case OP_LCALL_REG:
10771 return OP_LCALL;
10772 case OP_FCALL_REG:
10773 return OP_FCALL;
10774 case OP_LOCALLOC:
10775 return OP_LOCALLOC_IMM;
10778 return -1;
10781 static int
10782 ldind_to_load_membase (int opcode)
10784 switch (opcode) {
10785 case CEE_LDIND_I1:
10786 return OP_LOADI1_MEMBASE;
10787 case CEE_LDIND_U1:
10788 return OP_LOADU1_MEMBASE;
10789 case CEE_LDIND_I2:
10790 return OP_LOADI2_MEMBASE;
10791 case CEE_LDIND_U2:
10792 return OP_LOADU2_MEMBASE;
10793 case CEE_LDIND_I4:
10794 return OP_LOADI4_MEMBASE;
10795 case CEE_LDIND_U4:
10796 return OP_LOADU4_MEMBASE;
10797 case CEE_LDIND_I:
10798 return OP_LOAD_MEMBASE;
10799 case CEE_LDIND_REF:
10800 return OP_LOAD_MEMBASE;
10801 case CEE_LDIND_I8:
10802 return OP_LOADI8_MEMBASE;
10803 case CEE_LDIND_R4:
10804 return OP_LOADR4_MEMBASE;
10805 case CEE_LDIND_R8:
10806 return OP_LOADR8_MEMBASE;
10807 default:
10808 g_assert_not_reached ();
10811 return -1;
10814 static int
10815 stind_to_store_membase (int opcode)
10817 switch (opcode) {
10818 case CEE_STIND_I1:
10819 return OP_STOREI1_MEMBASE_REG;
10820 case CEE_STIND_I2:
10821 return OP_STOREI2_MEMBASE_REG;
10822 case CEE_STIND_I4:
10823 return OP_STOREI4_MEMBASE_REG;
10824 case CEE_STIND_I:
10825 case CEE_STIND_REF:
10826 return OP_STORE_MEMBASE_REG;
10827 case CEE_STIND_I8:
10828 return OP_STOREI8_MEMBASE_REG;
10829 case CEE_STIND_R4:
10830 return OP_STORER4_MEMBASE_REG;
10831 case CEE_STIND_R8:
10832 return OP_STORER8_MEMBASE_REG;
10833 default:
10834 g_assert_not_reached ();
10837 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a *_MEMBASE load opcode to the *_MEM variant which loads from an
 * absolute address. Returns -1 when no such variant exists, or on
 * architectures without OP_*_MEM support.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is stored by STORE_OPCODE, return an
 * x86/amd64 read-modify-write opcode operating directly on the destination
 * memory location, or -1 if no such combination exists. OP_MOVE becomes a
 * no-op since the store itself already transfers the value.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer-sized/32 bit stores can be folded on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store already copies the value */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	/* 32 bit register sources */
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	/* 32 bit immediates */
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit register sources */
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR: return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	/* 64 bit immediates */
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store already copies the value */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode into the x86/amd64 SETcc-to-memory form
 * when the result is stored with an 8 bit store. Returns -1 when the
 * combination cannot be folded.
 *
 * Fixed: the original switch fell through from OP_ICEQ into OP_CNE without
 * a break. The behavior happened to be identical (the re-tested condition
 * was already false on the fallthrough path), but the implicit fallthrough
 * was fragile and triggers -Wimplicit-fallthrough; make it explicit.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the first source of OPCODE
 * into a single memory-operand instruction on x86/amd64. Returns -1 when
 * the pair cannot be folded.
 *
 * NOTE(review): the LOADU1 + ICOMPARE_IMM folds below are deliberately
 * disabled (sign extension issues per the original FIXME) — keep them
 * commented out.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the second source of OPCODE
 * into a single reg,mem instruction on x86/amd64. Returns -1 when the
 * pair cannot be folded.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE: return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD: return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB: return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND: return OP_X86_AND_REG_MEMBASE;
	case OP_IOR: return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR: return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* 32 bit loads pair with 32 bit ops, 64 bit loads with 64 bit ops;
	 * under ILP32 the pointer-sized load counts as 32 bit. */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE: return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD: return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB: return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND: return OP_X86_AND_REG_MEMBASE;
		case OP_IOR: return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR: return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE: return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD: return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB: return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND: return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR: return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR: return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are
 * software-emulated on this architecture, since the emulation helpers
 * have no immediate variants.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Division/remainder is emulated on this target */
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
11143 #ifndef DISABLE_JIT
11146 * mono_handle_global_vregs:
11148 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11149 * for them.
11151 void
11152 mono_handle_global_vregs (MonoCompile *cfg)
11154 gint32 *vreg_to_bb;
11155 MonoBasicBlock *bb;
11156 int i, pos;
/* vreg_to_bb [vreg] == 0: vreg not seen yet; == block_num + 1: seen only in
 * that bblock so far; == -1: seen in more than one bblock (made global). */
/* NOTE(review): the element size here is sizeof (gint32*) (pointer size)
 * rather than sizeof (gint32), and the "+ 1" adds one byte, not one
 * element.  On 64 bit this merely over-allocates, so it is harmless, but
 * it looks unintended — confirm. */
11158 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11160 #ifdef MONO_ARCH_SIMD_INTRINSICS
11161 if (cfg->uses_simd_intrinsics)
11162 mono_simd_simplify_indirection (cfg);
11163 #endif
11165 /* Find local vregs used in more than one bb */
11166 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11167 MonoInst *ins = bb->code;
11168 int block_num = bb->block_num;
11170 if (cfg->verbose_level > 2)
11171 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11173 cfg->cbb = bb;
11174 for (; ins; ins = ins->next) {
11175 const char *spec = INS_INFO (ins->opcode);
11176 int regtype = 0, regindex;
11177 gint32 prev_bb;
11179 if (G_UNLIKELY (cfg->verbose_level > 2))
11180 mono_print_ins (ins);
/* The IR must already be lowered: no CIL-level opcodes allowed here. */
11182 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 is the dest register, 1-3 are the three possible source
 * registers; a ' ' in the ins spec means the register slot is unused. */
11184 for (regindex = 0; regindex < 4; regindex ++) {
11185 int vreg = 0;
11187 if (regindex == 0) {
11188 regtype = spec [MONO_INST_DEST];
11189 if (regtype == ' ')
11190 continue;
11191 vreg = ins->dreg;
11192 } else if (regindex == 1) {
11193 regtype = spec [MONO_INST_SRC1];
11194 if (regtype == ' ')
11195 continue;
11196 vreg = ins->sreg1;
11197 } else if (regindex == 2) {
11198 regtype = spec [MONO_INST_SRC2];
11199 if (regtype == ' ')
11200 continue;
11201 vreg = ins->sreg2;
11202 } else if (regindex == 3) {
11203 regtype = spec [MONO_INST_SRC3];
11204 if (regtype == ' ')
11205 continue;
11206 vreg = ins->sreg3;
11209 #if SIZEOF_REGISTER == 4
11210 /* In the LLVM case, the long opcodes are not decomposed */
11211 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11213 * Since some instructions reference the original long vreg,
11214 * and some reference the two component vregs, it is quite hard
11215 * to determine when it needs to be global. So be conservative.
11217 if (!get_vreg_to_inst (cfg, vreg)) {
11218 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11220 if (cfg->verbose_level > 2)
11221 printf ("LONG VREG R%d made global.\n", vreg);
11225 * Make the component vregs volatile since the optimizations can
11226 * get confused otherwise.
11228 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11229 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11231 #endif
11233 g_assert (vreg != -1);
11235 prev_bb = vreg_to_bb [vreg];
11236 if (prev_bb == 0) {
11237 /* 0 is a valid block num */
11238 vreg_to_bb [vreg] = block_num + 1;
11239 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers (below MONO_MAX_IREGS/MONO_MAX_FREGS) are never
 * turned into variables. */
11240 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11241 continue;
11243 if (!get_vreg_to_inst (cfg, vreg)) {
11244 if (G_UNLIKELY (cfg->verbose_level > 2))
11245 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for the vreg, making it
 * global. */
11247 switch (regtype) {
11248 case 'i':
11249 if (vreg_is_ref (cfg, vreg))
11250 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11251 else
11252 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11253 break;
11254 case 'l':
11255 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11256 break;
11257 case 'f':
11258 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11259 break;
11260 case 'v':
11261 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11262 break;
11263 default:
11264 g_assert_not_reached ();
11268 /* Flag as having been used in more than one bb */
11269 vreg_to_bb [vreg] = -1;
11275 /* If a variable is used in only one bblock, convert it into a local vreg */
11276 for (i = 0; i < cfg->num_varinfo; i++) {
11277 MonoInst *var = cfg->varinfo [i];
11278 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11280 switch (var->type) {
11281 case STACK_I4:
11282 case STACK_OBJ:
11283 case STACK_PTR:
11284 case STACK_MP:
11285 case STACK_VTYPE:
11286 #if SIZEOF_REGISTER == 8
11287 case STACK_I8:
11288 #endif
11289 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11290 /* Enabling this screws up the fp stack on x86 */
11291 case STACK_R8:
11292 #endif
11293 /* Arguments are implicitly global */
11294 /* Putting R4 vars into registers doesn't work currently */
11295 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11297 * Make that the variable's liveness interval doesn't contain a call, since
11298 * that would cause the lvreg to be spilled, making the whole optimization
11299 * useless.
11301 /* This is too slow for JIT compilation */
11302 #if 0
11303 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11304 MonoInst *ins;
11305 int def_index, call_index, ins_index;
11306 gboolean spilled = FALSE;
11308 def_index = -1;
11309 call_index = -1;
11310 ins_index = 0;
11311 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11312 const char *spec = INS_INFO (ins->opcode);
11314 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11315 def_index = ins_index;
/* NOTE(review): the second clause below duplicates the SRC1/sreg1 test;
 * SRC2/sreg2 was presumably intended.  Dead code (#if 0), noted only. */
11317 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11318 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11319 if (call_index > def_index) {
11320 spilled = TRUE;
11321 break;
11325 if (MONO_IS_CALL (ins))
11326 call_index = ins_index;
11328 ins_index ++;
11331 if (spilled)
11332 break;
11334 #endif
11336 if (G_UNLIKELY (cfg->verbose_level > 2))
11337 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote to a local vreg: mark the variable dead so the compression loop
 * below drops it from the varinfo tables. */
11338 var->flags |= MONO_INST_IS_DEAD;
11339 cfg->vreg_to_inst [var->dreg] = NULL;
11341 break;
11346 * Compress the varinfo and vars tables so the liveness computation is faster and
11347 * takes up less space.
11349 pos = 0;
11350 for (i = 0; i < cfg->num_varinfo; ++i) {
11351 MonoInst *var = cfg->varinfo [i];
11352 if (pos < i && cfg->locals_start == i)
11353 cfg->locals_start = pos;
11354 if (!(var->flags & MONO_INST_IS_DEAD)) {
11355 if (pos < i) {
/* Shift the surviving entry down and keep its recorded index in sync. */
11356 cfg->varinfo [pos] = cfg->varinfo [i];
11357 cfg->varinfo [pos]->inst_c0 = pos;
11358 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11359 cfg->vars [pos].idx = pos;
11360 #if SIZEOF_REGISTER == 4
11361 if (cfg->varinfo [pos]->type == STACK_I8) {
11362 /* Modify the two component vars too */
11363 MonoInst *var1;
11365 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11366 var1->inst_c0 = pos;
11367 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11368 var1->inst_c0 = pos;
11370 #endif
11372 pos ++;
11375 cfg->num_varinfo = pos;
11376 if (cfg->locals_start > cfg->num_varinfo)
11377 cfg->locals_start = cfg->num_varinfo;
11381 * mono_spill_global_vars:
11383 * Generate spill code for variables which are not allocated to registers,
11384 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11385 * code is generated which could be optimized by the local optimization passes.
11387 void
11388 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/* Replace accesses to global variables which were not allocated to hardware
 * registers with loads/stores from/to their stack slots, and rewrite vregs
 * to the allocated hregs.  Sets *need_local_opts when it emits code which
 * the local optimization passes could clean up further. */
11390 MonoBasicBlock *bb;
/* Scratch ins spec used when dreg/sreg2 are swapped for store opcodes. */
11391 char spec2 [16];
11392 int orig_next_vreg;
11393 guint32 *vreg_to_lvreg;
11394 guint32 *lvregs;
11395 guint32 i, lvregs_len;
11396 gboolean dest_has_lvreg = FALSE;
11397 guint32 stacktypes [128];
11398 MonoInst **live_range_start, **live_range_end;
11399 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11401 *need_local_opts = FALSE;
11403 memset (spec2, 0, sizeof (spec2));
11405 /* FIXME: Move this function to mini.c */
/* Map regtype spec characters to the stack type used when allocating an
 * lvreg of that kind. */
11406 stacktypes ['i'] = STACK_PTR;
11407 stacktypes ['l'] = STACK_I8;
11408 stacktypes ['f'] = STACK_R8;
11409 #ifdef MONO_ARCH_SIMD_INTRINSICS
11410 stacktypes ['x'] = STACK_VTYPE;
11411 #endif
11413 #if SIZEOF_REGISTER == 4
11414 /* Create MonoInsts for longs */
11415 for (i = 0; i < cfg->num_varinfo; i++) {
11416 MonoInst *ins = cfg->varinfo [i];
11418 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11419 switch (ins->type) {
11420 case STACK_R8:
11421 case STACK_I8: {
11422 MonoInst *tree;
11424 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11425 break;
11427 g_assert (ins->opcode == OP_REGOFFSET);
/* Point the two 32 bit component vars at the low/high words of the
 * variable's stack slot. */
11429 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11430 g_assert (tree);
11431 tree->opcode = OP_REGOFFSET;
11432 tree->inst_basereg = ins->inst_basereg;
11433 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11435 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11436 g_assert (tree);
11437 tree->opcode = OP_REGOFFSET;
11438 tree->inst_basereg = ins->inst_basereg;
11439 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11440 break;
11442 default:
11443 break;
11447 #endif
11449 if (cfg->compute_gc_maps) {
11450 /* registers need liveness info even for !non refs */
11451 for (i = 0; i < cfg->num_varinfo; i++) {
11452 MonoInst *ins = cfg->varinfo [i];
11454 if (ins->opcode == OP_REGVAR)
11455 ins->flags |= MONO_INST_GC_TRACK;
11459 /* FIXME: widening and truncation */
11462 * As an optimization, when a variable allocated to the stack is first loaded into
11463 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11464 * the variable again.
11466 orig_next_vreg = cfg->next_vreg;
11467 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11468 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11469 lvregs_len = 0;
11472 * These arrays contain the first and last instructions accessing a given
11473 * variable.
11474 * Since we emit bblocks in the same order we process them here, and we
11475 * don't split live ranges, these will precisely describe the live range of
11476 * the variable, i.e. the instruction range where a valid value can be found
11477 * in the variables location.
11478 * The live range is computed using the liveness info computed by the liveness pass.
11479 * We can't use vmv->range, since that is an abstract live range, and we need
11480 * one which is instruction precise.
11481 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11483 /* FIXME: Only do this if debugging info is requested */
11484 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11485 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11486 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11487 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11489 /* Add spill loads/stores */
11490 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11491 MonoInst *ins;
11493 if (cfg->verbose_level > 2)
11494 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within a single bblock. */
11496 /* Clear vreg_to_lvreg array */
11497 for (i = 0; i < lvregs_len; i++)
11498 vreg_to_lvreg [lvregs [i]] = 0;
11499 lvregs_len = 0;
11501 cfg->cbb = bb;
11502 MONO_BB_FOR_EACH_INS (bb, ins) {
11503 const char *spec = INS_INFO (ins->opcode);
11504 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11505 gboolean store, no_lvreg;
11506 int sregs [MONO_MAX_SRC_REGS];
11508 if (G_UNLIKELY (cfg->verbose_level > 2))
11509 mono_print_ins (ins);
11511 if (ins->opcode == OP_NOP)
11512 continue;
11515 * We handle LDADDR here as well, since it can only be decomposed
11516 * when variable addresses are known.
11518 if (ins->opcode == OP_LDADDR) {
11519 MonoInst *var = ins->inst_p0;
11521 if (var->opcode == OP_VTARG_ADDR) {
11522 /* Happens on SPARC/S390 where vtypes are passed by reference */
11523 MonoInst *vtaddr = var->inst_left;
11524 if (vtaddr->opcode == OP_REGVAR) {
11525 ins->opcode = OP_MOVE;
11526 ins->sreg1 = vtaddr->dreg;
11528 else if (var->inst_left->opcode == OP_REGOFFSET) {
11529 ins->opcode = OP_LOAD_MEMBASE;
11530 ins->inst_basereg = vtaddr->inst_basereg;
11531 ins->inst_offset = vtaddr->inst_offset;
11532 } else
11533 NOT_IMPLEMENTED;
11534 } else {
11535 g_assert (var->opcode == OP_REGOFFSET);
/* The address of a stack variable is base register + offset. */
11537 ins->opcode = OP_ADD_IMM;
11538 ins->sreg1 = var->inst_basereg;
11539 ins->inst_imm = var->inst_offset;
11542 *need_local_opts = TRUE;
11543 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes must have been lowered before this pass. */
11546 if (ins->opcode < MONO_CEE_LAST) {
11547 mono_print_ins (ins);
11548 g_assert_not_reached ();
11552 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11553 * src register.
11554 * FIXME:
11556 if (MONO_IS_STORE_MEMBASE (ins)) {
11557 tmp_reg = ins->dreg;
11558 ins->dreg = ins->sreg2;
11559 ins->sreg2 = tmp_reg;
11560 store = TRUE;
/* Synthesize a spec matching the swapped registers. */
11562 spec2 [MONO_INST_DEST] = ' ';
11563 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11564 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11565 spec2 [MONO_INST_SRC3] = ' ';
11566 spec = spec2;
11567 } else if (MONO_IS_STORE_MEMINDEX (ins))
11568 g_assert_not_reached ();
11569 else
11570 store = FALSE;
11571 no_lvreg = FALSE;
11573 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11574 printf ("\t %.3s %d", spec, ins->dreg);
11575 num_sregs = mono_inst_get_src_registers (ins, sregs);
11576 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11577 printf (" %d", sregs [srcindex]);
11578 printf ("\n");
11581 /***************/
11582 /* DREG */
11583 /***************/
11584 regtype = spec [MONO_INST_DEST];
11585 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11586 prev_dreg = -1;
11588 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11589 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11590 MonoInst *store_ins;
11591 int store_opcode;
11592 MonoInst *def_ins = ins;
11593 int dreg = ins->dreg; /* The original vreg */
11595 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
/* Variable allocated to a hardware register: just rewrite the dreg. */
11597 if (var->opcode == OP_REGVAR) {
11598 ins->dreg = var->dreg;
11599 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11601 * Instead of emitting a load+store, use a _membase opcode.
11603 g_assert (var->opcode == OP_REGOFFSET);
11604 if (ins->opcode == OP_MOVE) {
11605 NULLIFY_INS (ins);
11606 def_ins = NULL;
11607 } else {
11608 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11609 ins->inst_basereg = var->inst_basereg;
11610 ins->inst_offset = var->inst_offset;
11611 ins->dreg = -1;
11613 spec = INS_INFO (ins->opcode);
11614 } else {
11615 guint32 lvreg;
11617 g_assert (var->opcode == OP_REGOFFSET);
11619 prev_dreg = ins->dreg;
11621 /* Invalidate any previous lvreg for this vreg */
11622 vreg_to_lvreg [ins->dreg] = 0;
11624 lvreg = 0;
/* Soft float stores R8 values as 64 bit integers. */
11626 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11627 regtype = 'l';
11628 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Give the instruction a fresh lvreg as dest and spill it to the
 * variable's stack slot after the instruction. */
11631 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11633 if (regtype == 'l') {
11634 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11635 mono_bblock_insert_after_ins (bb, ins, store_ins);
11636 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11637 mono_bblock_insert_after_ins (bb, ins, store_ins);
11638 def_ins = store_ins;
11640 else {
11641 g_assert (store_opcode != OP_STOREV_MEMBASE);
11643 /* Try to fuse the store into the instruction itself */
11644 /* FIXME: Add more instructions */
11645 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11646 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11647 ins->inst_imm = ins->inst_c0;
11648 ins->inst_destbasereg = var->inst_basereg;
11649 ins->inst_offset = var->inst_offset;
11650 spec = INS_INFO (ins->opcode);
11651 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11652 ins->opcode = store_opcode;
11653 ins->inst_destbasereg = var->inst_basereg;
11654 ins->inst_offset = var->inst_offset;
11656 no_lvreg = TRUE;
/* The ins became a store: swap dreg/sreg2 like for other store opcodes
 * and synthesize a matching spec. */
11658 tmp_reg = ins->dreg;
11659 ins->dreg = ins->sreg2;
11660 ins->sreg2 = tmp_reg;
11661 store = TRUE;
11663 spec2 [MONO_INST_DEST] = ' ';
11664 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11665 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11666 spec2 [MONO_INST_SRC3] = ' ';
11667 spec = spec2;
11668 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11669 // FIXME: The backends expect the base reg to be in inst_basereg
11670 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11671 ins->dreg = -1;
11672 ins->inst_basereg = var->inst_basereg;
11673 ins->inst_offset = var->inst_offset;
11674 spec = INS_INFO (ins->opcode);
11675 } else {
11676 /* printf ("INS: "); mono_print_ins (ins); */
11677 /* Create a store instruction */
11678 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11680 /* Insert it after the instruction */
11681 mono_bblock_insert_after_ins (bb, ins, store_ins);
11683 def_ins = store_ins;
11686 * We can't assign ins->dreg to var->dreg here, since the
11687 * sregs could use it. So set a flag, and do it after
11688 * the sregs.
11690 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11691 dest_has_lvreg = TRUE;
11696 if (def_ins && !live_range_start [dreg]) {
11697 live_range_start [dreg] = def_ins;
11698 live_range_start_bb [dreg] = bb;
11701 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11702 MonoInst *tmp;
11704 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11705 tmp->inst_c1 = dreg;
11706 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11710 /************/
11711 /* SREGS */
11712 /************/
11713 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* Process the three possible source registers, guided by the ins spec. */
11714 for (srcindex = 0; srcindex < 3; ++srcindex) {
11715 regtype = spec [MONO_INST_SRC1 + srcindex];
11716 sreg = sregs [srcindex];
11718 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11719 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11720 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11721 MonoInst *use_ins = ins;
11722 MonoInst *load_ins;
11723 guint32 load_opcode;
11725 if (var->opcode == OP_REGVAR) {
11726 sregs [srcindex] = var->dreg;
11727 //mono_inst_set_src_registers (ins, sregs);
11728 live_range_end [sreg] = use_ins;
11729 live_range_end_bb [sreg] = bb;
11731 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11732 MonoInst *tmp;
11734 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11735 /* var->dreg is a hreg */
11736 tmp->inst_c1 = sreg;
11737 mono_bblock_insert_after_ins (bb, ins, tmp);
11740 continue;
11743 g_assert (var->opcode == OP_REGOFFSET);
11745 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11747 g_assert (load_opcode != OP_LOADV_MEMBASE);
11749 if (vreg_to_lvreg [sreg]) {
11750 g_assert (vreg_to_lvreg [sreg] != -1);
11752 /* The variable is already loaded to an lvreg */
11753 if (G_UNLIKELY (cfg->verbose_level > 2))
11754 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11755 sregs [srcindex] = vreg_to_lvreg [sreg];
11756 //mono_inst_set_src_registers (ins, sregs);
11757 continue;
11760 /* Try to fuse the load into the instruction */
11761 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11762 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11763 sregs [0] = var->inst_basereg;
11764 //mono_inst_set_src_registers (ins, sregs);
11765 ins->inst_offset = var->inst_offset;
11766 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11767 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11768 sregs [1] = var->inst_basereg;
11769 //mono_inst_set_src_registers (ins, sregs);
11770 ins->inst_offset = var->inst_offset;
11771 } else {
/* For a plain move, load directly into the move's own dreg and turn the
 * move itself into a nop. */
11772 if (MONO_IS_REAL_MOVE (ins)) {
11773 ins->opcode = OP_NOP;
11774 sreg = ins->dreg;
11775 } else {
11776 //printf ("%d ", srcindex); mono_print_ins (ins);
11778 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11780 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11781 if (var->dreg == prev_dreg) {
11783 * sreg refers to the value loaded by the load
11784 * emitted below, but we need to use ins->dreg
11785 * since it refers to the store emitted earlier.
11787 sreg = ins->dreg;
11789 g_assert (sreg != -1);
/* Remember the lvreg so later uses in this bblock skip the reload. */
11790 vreg_to_lvreg [var->dreg] = sreg;
11791 g_assert (lvregs_len < 1024);
11792 lvregs [lvregs_len ++] = var->dreg;
11796 sregs [srcindex] = sreg;
11797 //mono_inst_set_src_registers (ins, sregs);
11799 if (regtype == 'l') {
11800 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11801 mono_bblock_insert_before_ins (bb, ins, load_ins);
11802 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11803 mono_bblock_insert_before_ins (bb, ins, load_ins);
11804 use_ins = load_ins;
11806 else {
11807 #if SIZEOF_REGISTER == 4
11808 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11809 #endif
11810 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11811 mono_bblock_insert_before_ins (bb, ins, load_ins);
11812 use_ins = load_ins;
11816 if (var->dreg < orig_next_vreg) {
11817 live_range_end [var->dreg] = use_ins;
11818 live_range_end_bb [var->dreg] = bb;
11821 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11822 MonoInst *tmp;
11824 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11825 tmp->inst_c1 = var->dreg;
11826 mono_bblock_insert_after_ins (bb, ins, tmp);
11830 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: record the dest lvreg only after the
 * sregs have been processed (see the comment above dest_has_lvreg). */
11832 if (dest_has_lvreg) {
11833 g_assert (ins->dreg != -1);
11834 vreg_to_lvreg [prev_dreg] = ins->dreg;
11835 g_assert (lvregs_len < 1024);
11836 lvregs [lvregs_len ++] = prev_dreg;
11837 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
11840 if (store) {
11841 tmp_reg = ins->dreg;
11842 ins->dreg = ins->sreg2;
11843 ins->sreg2 = tmp_reg;
/* Cached lvregs cannot be reused across a call. */
11846 if (MONO_IS_CALL (ins)) {
11847 /* Clear vreg_to_lvreg array */
11848 for (i = 0; i < lvregs_len; i++)
11849 vreg_to_lvreg [lvregs [i]] = 0;
11850 lvregs_len = 0;
11851 } else if (ins->opcode == OP_NOP) {
11852 ins->dreg = -1;
11853 MONO_INST_NULLIFY_SREGS (ins);
11856 if (cfg->verbose_level > 2)
11857 mono_print_ins_index (1, ins);
11860 /* Extend the live range based on the liveness info */
11861 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11862 for (i = 0; i < cfg->num_varinfo; i ++) {
11863 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11865 if (vreg_is_volatile (cfg, vi->vreg))
11866 /* The liveness info is incomplete */
11867 continue;
11869 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11870 /* Live from at least the first ins of this bb */
11871 live_range_start [vi->vreg] = bb->code;
11872 live_range_start_bb [vi->vreg] = bb;
11875 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11876 /* Live at least until the last ins of this bb */
11877 live_range_end [vi->vreg] = bb->last_ins;
11878 live_range_end_bb [vi->vreg] = bb;
11884 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11886 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11887 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11889 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11890 for (i = 0; i < cfg->num_varinfo; ++i) {
11891 int vreg = MONO_VARINFO (cfg, i)->vreg;
11892 MonoInst *ins;
11894 if (live_range_start [vreg]) {
11895 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11896 ins->inst_c0 = i;
11897 ins->inst_c1 = vreg;
11898 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11900 if (live_range_end [vreg]) {
11901 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11902 ins->inst_c0 = i;
11903 ins->inst_c1 = vreg;
11904 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11905 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11906 else
11907 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11911 #endif
11913 g_free (live_range_start);
11914 g_free (live_range_end);
11915 g_free (live_range_start_bb);
11916 g_free (live_range_end_bb);
11920 * FIXME:
11921 * - use 'iadd' instead of 'int_add'
11922 * - handling ovf opcodes: decompose in method_to_ir.
11923 * - unify iregs/fregs
11924 * -> partly done, the missing parts are:
11925 * - a more complete unification would involve unifying the hregs as well, so
11926 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11927 * would no longer map to the machine hregs, so the code generators would need to
11928 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11929 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11930 * fp/non-fp branches speeds it up by about 15%.
11931 * - use sext/zext opcodes instead of shifts
11932 * - add OP_ICALL
11933 * - get rid of TEMPLOADs if possible and use vregs instead
11934 * - clean up usage of OP_P/OP_ opcodes
11935 * - cleanup usage of DUMMY_USE
11936 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11937 * stack
11938 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11939 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11940 * - make sure handle_stack_args () is called before the branch is emitted
11941 * - when the new IR is done, get rid of all unused stuff
11942 * - COMPARE/BEQ as separate instructions or unify them ?
11943 * - keeping them separate allows specialized compare instructions like
11944 * compare_imm, compare_membase
11945 * - most back ends unify fp compare+branch, fp compare+ceq
11946 * - integrate mono_save_args into inline_method
11947 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11948 * - handle long shift opts on 32 bit platforms somehow: they require
11949 * 3 sregs (2 for arg1 and 1 for arg2)
11950 * - make byref a 'normal' type.
11951 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11952 * variable if needed.
11953 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11954 * like inline_method.
11955 * - remove inlining restrictions
11956 * - fix LNEG and enable cfold of INEG
11957 * - generalize x86 optimizations like ldelema as a peephole optimization
11958 * - add store_mem_imm for amd64
11959 * - optimize the loading of the interruption flag in the managed->native wrappers
11960 * - avoid special handling of OP_NOP in passes
11961 * - move code inserting instructions into one function/macro.
11962 * - try a coalescing phase after liveness analysis
11963 * - add float -> vreg conversion + local optimizations on !x86
11964 * - figure out how to handle decomposed branches during optimizations, ie.
11965 * compare+branch, op_jump_table+op_br etc.
11966 * - promote RuntimeXHandles to vregs
11967 * - vtype cleanups:
11968 * - add a NEW_VARLOADA_VREG macro
11969 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11970 * accessing vtype fields.
11971 * - get rid of I8CONST on 64 bit platforms
11972 * - dealing with the increase in code size due to branches created during opcode
11973 * decomposition:
11974 * - use extended basic blocks
11975 * - all parts of the JIT
11976 * - handle_global_vregs () && local regalloc
11977 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11978 * - sources of increase in code size:
11979 * - vtypes
11980 * - long compares
11981 * - isinst and castclass
11982 * - lvregs not allocated to global registers even if used multiple times
11983 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11984 * meaningful.
11985 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11986 * - add all micro optimizations from the old JIT
11987 * - put tree optimizations into the deadce pass
11988 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11989 * specific function.
11990 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11991 * fcompare + branchCC.
11992 * - create a helper function for allocating a stack slot, taking into account
11993 * MONO_CFG_HAS_SPILLUP.
11994 * - merge r68207.
11995 * - merge the ia64 switch changes.
11996 * - optimize mono_regstate2_alloc_int/float.
11997 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11998 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11999 * parts of the tree could be separated by other instructions, killing the tree
12000 * arguments, or stores killing loads etc. Also, should we fold loads into other
12001 * instructions if the result of the load is used multiple times ?
12002 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12003 * - LAST MERGE: 108395.
12004 * - when returning vtypes in registers, generate IR and append it to the end of the
12005 * last bb instead of doing it in the epilog.
12006 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12011 NOTES
12012 -----
12014 - When to decompose opcodes:
12015 - earlier: this makes some optimizations hard to implement, since the low level IR
12016 no longer contains the necessary information. But it is easier to do.
12017 - later: harder to implement, enables more optimizations.
12018 - Branches inside bblocks:
12019 - created when decomposing complex opcodes.
12020 - branches to another bblock: harmless, but not tracked by the branch
12021 optimizations, so need to branch to a label at the start of the bblock.
12022 - branches to inside the same bblock: very problematic, trips up the local
12023 reg allocator. Can be fixed by splitting the current bblock, but that is a
12024 complex operation, since some local vregs can become global vregs etc.
12025 - Local/global vregs:
12026 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12027 local register allocator.
12028 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12029 structure, created by mono_create_var (). Assigned to hregs or the stack by
12030 the global register allocator.
12031 - When to do optimizations like alu->alu_imm:
12032 - earlier -> saves work later on since the IR will be smaller/simpler
12033 - later -> can work on more instructions
12034 - Handling of valuetypes:
12035 - When a vtype is pushed on the stack, a new temporary is created, an
12036 instruction computing its address (LDADDR) is emitted and pushed on
12037 the stack. Need to optimize cases when the vtype is used immediately as in
12038 argument passing, stloc etc.
12039 - Instead of the to_end stuff in the old JIT, simply call the function handling
12040 the values on the stack before emitting the last instruction of the bb.
12043 #endif /* DISABLE_JIT */