/*
 * Commit note: Extract the code to emit a memory barrier; add a
 * memory_barrier_kind field to MonoInst.
 * Source: mono-project.git / mono/mini/method-to-ir.c
 * Blob: 19210fe71ff32934ec051a4f5912f68916940a9b
 */
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#include <config.h>
#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#include <mono/utils/memcheck.h>

#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-memory-model.h>
#include <mono/metadata/mono-basic-block.h>

#include "mini.h"
#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of the current callee (only when we ARE inlining, i.e.
 * cfg->method != method) and jump to the inline_failure label. */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)
/* Bail out of the IR conversion loop if an exception was recorded on CFG. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)
/* Record a MethodAccessException on CFG and abort compilation. */
#define METHOD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
		g_free (method_fname); \
		g_free (cil_method_fname); \
		goto exception_exit; \
	} while (0)
/* Record a FieldAccessException on CFG and abort compilation. */
#define FIELD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *field_fname = mono_field_full_name (field); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
		g_free (method_fname); \
		g_free (field_fname); \
		goto exception_exit; \
	} while (0)
/* Give up on generic sharing for this method: only acts when compiling with a
 * generic sharing context. (Closing brace of the inner `if` was lost in the
 * extraction and is restored here.) */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->generic_sharing_context) { \
			if (cfg->verbose_level > 2) \
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit; \
		} \
	} while (0)
/* Record an OutOfMemoryException on CFG and abort compilation. */
#define OUT_OF_MEMORY_FAILURE do { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
		goto exception_exit; \
	} while (0)
/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
111 static int ldind_to_load_membase (int opcode);
112 static int stind_to_store_membase (int opcode);
114 int mono_op_to_op_imm (int opcode);
115 int mono_op_to_op_imm_noemul (int opcode);
117 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
118 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
119 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
121 /* helper methods signatures */
122 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
123 static MonoMethodSignature *helper_sig_domain_get = NULL;
124 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
126 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
127 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
128 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
131 * Instruction metadata
133 #ifdef MINI_OP
134 #undef MINI_OP
135 #endif
136 #ifdef MINI_OP3
137 #undef MINI_OP3
138 #endif
139 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
140 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
141 #define NONE ' '
142 #define IREG 'i'
143 #define FREG 'f'
144 #define VREG 'v'
145 #define XREG 'x'
146 #if SIZEOF_REGISTER == 8
147 #define LREG IREG
148 #else
149 #define LREG 'l'
150 #endif
151 /* keep in sync with the enum in mini.h */
152 const char
153 ins_info[] = {
154 #include "mini-ops.h"
156 #undef MINI_OP
157 #undef MINI_OP3
159 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
160 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
162 * This should contain the index of the last sreg + 1. This is not the same
163 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
165 const gint8 ins_sreg_counts[] = {
166 #include "mini-ops.h"
168 #undef MINI_OP
169 #undef MINI_OP3
/* Reset a variable-info record: first-use unset, no hard register, index ID. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
177 void
178 mono_inst_set_src_registers (MonoInst *ins, int *regs)
180 ins->sreg1 = regs [0];
181 ins->sreg2 = regs [1];
182 ins->sreg3 = regs [2];
185 guint32
186 mono_alloc_ireg (MonoCompile *cfg)
188 return alloc_ireg (cfg);
191 guint32
192 mono_alloc_freg (MonoCompile *cfg)
194 return alloc_freg (cfg);
197 guint32
198 mono_alloc_preg (MonoCompile *cfg)
200 return alloc_preg (cfg);
203 guint32
204 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
206 return alloc_dreg (cfg, stack_type);
210 * mono_alloc_ireg_ref:
212 * Allocate an IREG, and mark it as holding a GC ref.
214 guint32
215 mono_alloc_ireg_ref (MonoCompile *cfg)
217 return alloc_ireg_ref (cfg);
221 * mono_alloc_ireg_mp:
223 * Allocate an IREG, and mark it as holding a managed pointer.
225 guint32
226 mono_alloc_ireg_mp (MonoCompile *cfg)
228 return alloc_ireg_mp (cfg);
232 * mono_alloc_ireg_copy:
234 * Allocate an IREG with the same GC type as VREG.
236 guint32
237 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
239 if (vreg_is_ref (cfg, vreg))
240 return alloc_ireg_ref (cfg);
241 else if (vreg_is_mp (cfg, vreg))
242 return alloc_ireg_mp (cfg);
243 else
244 return alloc_ireg (cfg);
247 guint
248 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
250 if (type->byref)
251 return OP_MOVE;
253 handle_enum:
254 switch (type->type) {
255 case MONO_TYPE_I1:
256 case MONO_TYPE_U1:
257 case MONO_TYPE_BOOLEAN:
258 return OP_MOVE;
259 case MONO_TYPE_I2:
260 case MONO_TYPE_U2:
261 case MONO_TYPE_CHAR:
262 return OP_MOVE;
263 case MONO_TYPE_I4:
264 case MONO_TYPE_U4:
265 return OP_MOVE;
266 case MONO_TYPE_I:
267 case MONO_TYPE_U:
268 case MONO_TYPE_PTR:
269 case MONO_TYPE_FNPTR:
270 return OP_MOVE;
271 case MONO_TYPE_CLASS:
272 case MONO_TYPE_STRING:
273 case MONO_TYPE_OBJECT:
274 case MONO_TYPE_SZARRAY:
275 case MONO_TYPE_ARRAY:
276 return OP_MOVE;
277 case MONO_TYPE_I8:
278 case MONO_TYPE_U8:
279 #if SIZEOF_REGISTER == 8
280 return OP_MOVE;
281 #else
282 return OP_LMOVE;
283 #endif
284 case MONO_TYPE_R4:
285 return OP_FMOVE;
286 case MONO_TYPE_R8:
287 return OP_FMOVE;
288 case MONO_TYPE_VALUETYPE:
289 if (type->data.klass->enumtype) {
290 type = mono_class_enum_basetype (type->data.klass);
291 goto handle_enum;
293 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
294 return OP_XMOVE;
295 return OP_VMOVE;
296 case MONO_TYPE_TYPEDBYREF:
297 return OP_VMOVE;
298 case MONO_TYPE_GENERICINST:
299 type = &type->data.generic_class->container_class->byval_arg;
300 goto handle_enum;
301 case MONO_TYPE_VAR:
302 case MONO_TYPE_MVAR:
303 g_assert (cfg->generic_sharing_context);
304 return OP_MOVE;
305 default:
306 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
308 return -1;
311 void
312 mono_print_bb (MonoBasicBlock *bb, const char *msg)
314 int i;
315 MonoInst *tree;
317 printf ("\n%s %d: [IN: ", msg, bb->block_num);
318 for (i = 0; i < bb->in_count; ++i)
319 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
320 printf (", OUT: ");
321 for (i = 0; i < bb->out_count; ++i)
322 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
323 printf (" ]\n");
324 for (tree = bb->code; tree; tree = tree->next)
325 mono_print_ins_index (-1, tree);
328 void
329 mono_create_helper_signatures (void)
331 helper_sig_domain_get = mono_create_icall_signature ("ptr");
332 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
333 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
334 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
335 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
336 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
337 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
341 * Can't put this at the beginning, since other files reference stuff from this
342 * file.
344 #ifndef DISABLE_JIT
346 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
348 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP. Out-of-range IPs mark the method unverified. (Closing brace of
 * the inner `if` lost in extraction restored.) */
#define GET_BBLOCK(cfg,tblock,ip) do { \
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) { \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \
		} \
	} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dreg = sr1 + (sr2 << shift) + imm, appended to the
 * current basic block. The result is a managed pointer register. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
#if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand when combined with a native
 * pointer so both sources have register width. (Closing brace of the inner
 * `if` lost in extraction restored.) */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
/* Pop two operands off the eval stack, emit the type-specialized binary op
 * OP, and push the (possibly decomposed) result back. */
#define ADD_BINOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp -= 2; \
		ins->sreg1 = sp [0]->dreg; \
		ins->sreg2 = sp [1]->dreg; \
		type_from_op (ins, sp [0], sp [1]); \
		CHECK_TYPE (ins); \
		/* Have to insert a widening op */ \
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)
/* Pop one operand off the eval stack, emit the type-specialized unary op
 * OP, and push the (possibly decomposed) result back. */
#define ADD_UNOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp--; \
		ins->sreg1 = sp [0]->dreg; \
		type_from_op (ins, sp [0], NULL); \
		CHECK_TYPE (ins); \
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)
/* Emit a two-operand conditional branch: a compare of the two stack tops
 * followed by the branch INS, wiring up the true/false basic-block edges.
 * NEXT_BLOCK, when non-NULL, is the fall-through block. (Closing braces of
 * the if/else and the stack-flush `if` lost in extraction restored.) */
#define ADD_BINCOND(next_block) do { \
		MonoInst *cmp; \
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
		cmp->sreg1 = sp [0]->dreg; \
		cmp->sreg2 = sp [1]->dreg; \
		type_from_op (cmp, sp [0], sp [1]); \
		CHECK_TYPE (cmp); \
		type_from_op (ins, sp [0], sp [1]); \
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
		GET_BBLOCK (cfg, tblock, target); \
		link_bblock (cfg, bblock, tblock); \
		ins->inst_true_bb = tblock; \
		if ((next_block)) { \
			link_bblock (cfg, bblock, (next_block)); \
			ins->inst_false_bb = (next_block); \
			start_new_bblock = 1; \
		} else { \
			GET_BBLOCK (cfg, tblock, ip); \
			link_bblock (cfg, bblock, tblock); \
			ins->inst_false_bb = tblock; \
			start_new_bblock = 2; \
		} \
		if (sp != stack_start) { \
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins); \
	} while (0)
442 /* *
443 * link_bblock: Links two basic blocks
445 * links two basic blocks in the control flow graph, the 'from'
446 * argument is the starting block and the 'to' argument is the block
447 * the control flow ends to after 'from'.
449 static void
450 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
452 MonoBasicBlock **newa;
453 int i, found;
455 #if 0
456 if (from->cil_code) {
457 if (to->cil_code)
458 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
459 else
460 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
461 } else {
462 if (to->cil_code)
463 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
464 else
465 printf ("edge from entry to exit\n");
467 #endif
469 found = FALSE;
470 for (i = 0; i < from->out_count; ++i) {
471 if (to == from->out_bb [i]) {
472 found = TRUE;
473 break;
476 if (!found) {
477 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
478 for (i = 0; i < from->out_count; ++i) {
479 newa [i] = from->out_bb [i];
481 newa [i] = to;
482 from->out_count++;
483 from->out_bb = newa;
486 found = FALSE;
487 for (i = 0; i < to->in_count; ++i) {
488 if (from == to->in_bb [i]) {
489 found = TRUE;
490 break;
493 if (!found) {
494 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
495 for (i = 0; i < to->in_count; ++i) {
496 newa [i] = to->in_bb [i];
498 newa [i] = from;
499 to->in_count++;
500 to->in_bb = newa;
504 void
505 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
507 link_bblock (cfg, from, to);
511 * mono_find_block_region:
513 * We mark each basic block with a region ID. We use that to avoid BB
514 * optimizations when blocks are in different regions.
516 * Returns:
517 * A region token that encodes where this region is, and information
518 * about the clause owner for this block.
520 * The region encodes the try/catch/filter clause that owns this block
521 * as well as the type. -1 is a special value that represents a block
522 * that is in none of try/catch/filter.
524 static int
525 mono_find_block_region (MonoCompile *cfg, int offset)
527 MonoMethodHeader *header = cfg->header;
528 MonoExceptionClause *clause;
529 int i;
531 for (i = 0; i < header->num_clauses; ++i) {
532 clause = &header->clauses [i];
533 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
534 (offset < (clause->handler_offset)))
535 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
537 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
538 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
539 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
540 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
541 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
542 else
543 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
546 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
547 return ((i + 1) << 8) | clause->flags;
550 return -1;
553 static GList*
554 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
556 MonoMethodHeader *header = cfg->header;
557 MonoExceptionClause *clause;
558 int i;
559 GList *res = NULL;
561 for (i = 0; i < header->num_clauses; ++i) {
562 clause = &header->clauses [i];
563 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
564 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
565 if (clause->flags == type)
566 res = g_list_append (res, clause);
569 return res;
572 static void
573 mono_create_spvar_for_region (MonoCompile *cfg, int region)
575 MonoInst *var;
577 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
578 if (var)
579 return;
581 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
582 /* prevent it from being register allocated */
583 var->flags |= MONO_INST_INDIRECT;
585 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
588 MonoInst *
589 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
591 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
594 static MonoInst*
595 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
597 MonoInst *var;
599 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
600 if (var)
601 return var;
603 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
604 /* prevent it from being register allocated */
605 var->flags |= MONO_INST_INDIRECT;
607 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
609 return var;
613 * Returns the type used in the eval stack when @type is loaded.
614 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
616 void
617 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
619 MonoClass *klass;
621 inst->klass = klass = mono_class_from_mono_type (type);
622 if (type->byref) {
623 inst->type = STACK_MP;
624 return;
627 handle_enum:
628 switch (type->type) {
629 case MONO_TYPE_VOID:
630 inst->type = STACK_INV;
631 return;
632 case MONO_TYPE_I1:
633 case MONO_TYPE_U1:
634 case MONO_TYPE_BOOLEAN:
635 case MONO_TYPE_I2:
636 case MONO_TYPE_U2:
637 case MONO_TYPE_CHAR:
638 case MONO_TYPE_I4:
639 case MONO_TYPE_U4:
640 inst->type = STACK_I4;
641 return;
642 case MONO_TYPE_I:
643 case MONO_TYPE_U:
644 case MONO_TYPE_PTR:
645 case MONO_TYPE_FNPTR:
646 inst->type = STACK_PTR;
647 return;
648 case MONO_TYPE_CLASS:
649 case MONO_TYPE_STRING:
650 case MONO_TYPE_OBJECT:
651 case MONO_TYPE_SZARRAY:
652 case MONO_TYPE_ARRAY:
653 inst->type = STACK_OBJ;
654 return;
655 case MONO_TYPE_I8:
656 case MONO_TYPE_U8:
657 inst->type = STACK_I8;
658 return;
659 case MONO_TYPE_R4:
660 case MONO_TYPE_R8:
661 inst->type = STACK_R8;
662 return;
663 case MONO_TYPE_VALUETYPE:
664 if (type->data.klass->enumtype) {
665 type = mono_class_enum_basetype (type->data.klass);
666 goto handle_enum;
667 } else {
668 inst->klass = klass;
669 inst->type = STACK_VTYPE;
670 return;
672 case MONO_TYPE_TYPEDBYREF:
673 inst->klass = mono_defaults.typed_reference_class;
674 inst->type = STACK_VTYPE;
675 return;
676 case MONO_TYPE_GENERICINST:
677 type = &type->data.generic_class->container_class->byval_arg;
678 goto handle_enum;
679 case MONO_TYPE_VAR :
680 case MONO_TYPE_MVAR :
681 /* FIXME: all the arguments must be references for now,
682 * later look inside cfg and see if the arg num is
683 * really a reference
685 g_assert (cfg->generic_sharing_context);
686 inst->type = STACK_OBJ;
687 return;
688 default:
689 g_error ("unknown type 0x%02x in eval stack type", type->type);
694 * The following tables are used to quickly validate the IL code in type_from_op ().
696 static const char
697 bin_num_table [STACK_MAX] [STACK_MAX] = {
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
708 static const char
709 neg_table [] = {
710 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
713 /* reduce the size of this table */
714 static const char
715 bin_int_table [STACK_MAX] [STACK_MAX] = {
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
722 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
726 static const char
727 bin_comp_table [STACK_MAX] [STACK_MAX] = {
728 /* Inv i L p F & O vt */
729 {0},
730 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
731 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
732 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
733 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
734 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
735 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
736 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
739 /* reduce the size of this table */
740 static const char
741 shift_table [STACK_MAX] [STACK_MAX] = {
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
753 * Tables to map from the non-specific opcode to the matching
754 * type-specific opcode.
756 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
757 static const guint16
758 binops_op_map [STACK_MAX] = {
759 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
762 /* handles from CEE_NEG to CEE_CONV_U8 */
763 static const guint16
764 unops_op_map [STACK_MAX] = {
765 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
768 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
769 static const guint16
770 ovfops_op_map [STACK_MAX] = {
771 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
774 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
775 static const guint16
776 ovf2ops_op_map [STACK_MAX] = {
777 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
780 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
781 static const guint16
782 ovf3ops_op_map [STACK_MAX] = {
783 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
786 /* handles from CEE_BEQ to CEE_BLT_UN */
787 static const guint16
788 beqops_op_map [STACK_MAX] = {
789 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
792 /* handles from CEE_CEQ to CEE_CLT_UN */
793 static const guint16
794 ceqops_op_map [STACK_MAX] = {
795 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
799 * Sets ins->type (the type on the eval stack) according to the
800 * type of the opcode and the arguments to it.
801 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
803 * FIXME: this function sets ins->type unconditionally in some cases, but
804 * it should set it to invalid for some types (a conv.x on an object)
806 static void
807 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
809 switch (ins->opcode) {
810 /* binops */
811 case CEE_ADD:
812 case CEE_SUB:
813 case CEE_MUL:
814 case CEE_DIV:
815 case CEE_REM:
816 /* FIXME: check unverifiable args for STACK_MP */
817 ins->type = bin_num_table [src1->type] [src2->type];
818 ins->opcode += binops_op_map [ins->type];
819 break;
820 case CEE_DIV_UN:
821 case CEE_REM_UN:
822 case CEE_AND:
823 case CEE_OR:
824 case CEE_XOR:
825 ins->type = bin_int_table [src1->type] [src2->type];
826 ins->opcode += binops_op_map [ins->type];
827 break;
828 case CEE_SHL:
829 case CEE_SHR:
830 case CEE_SHR_UN:
831 ins->type = shift_table [src1->type] [src2->type];
832 ins->opcode += binops_op_map [ins->type];
833 break;
834 case OP_COMPARE:
835 case OP_LCOMPARE:
836 case OP_ICOMPARE:
837 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
838 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
839 ins->opcode = OP_LCOMPARE;
840 else if (src1->type == STACK_R8)
841 ins->opcode = OP_FCOMPARE;
842 else
843 ins->opcode = OP_ICOMPARE;
844 break;
845 case OP_ICOMPARE_IMM:
846 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
847 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
848 ins->opcode = OP_LCOMPARE_IMM;
849 break;
850 case CEE_BEQ:
851 case CEE_BGE:
852 case CEE_BGT:
853 case CEE_BLE:
854 case CEE_BLT:
855 case CEE_BNE_UN:
856 case CEE_BGE_UN:
857 case CEE_BGT_UN:
858 case CEE_BLE_UN:
859 case CEE_BLT_UN:
860 ins->opcode += beqops_op_map [src1->type];
861 break;
862 case OP_CEQ:
863 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
864 ins->opcode += ceqops_op_map [src1->type];
865 break;
866 case OP_CGT:
867 case OP_CGT_UN:
868 case OP_CLT:
869 case OP_CLT_UN:
870 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
871 ins->opcode += ceqops_op_map [src1->type];
872 break;
873 /* unops */
874 case CEE_NEG:
875 ins->type = neg_table [src1->type];
876 ins->opcode += unops_op_map [ins->type];
877 break;
878 case CEE_NOT:
879 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
880 ins->type = src1->type;
881 else
882 ins->type = STACK_INV;
883 ins->opcode += unops_op_map [ins->type];
884 break;
885 case CEE_CONV_I1:
886 case CEE_CONV_I2:
887 case CEE_CONV_I4:
888 case CEE_CONV_U4:
889 ins->type = STACK_I4;
890 ins->opcode += unops_op_map [src1->type];
891 break;
892 case CEE_CONV_R_UN:
893 ins->type = STACK_R8;
894 switch (src1->type) {
895 case STACK_I4:
896 case STACK_PTR:
897 ins->opcode = OP_ICONV_TO_R_UN;
898 break;
899 case STACK_I8:
900 ins->opcode = OP_LCONV_TO_R_UN;
901 break;
903 break;
904 case CEE_CONV_OVF_I1:
905 case CEE_CONV_OVF_U1:
906 case CEE_CONV_OVF_I2:
907 case CEE_CONV_OVF_U2:
908 case CEE_CONV_OVF_I4:
909 case CEE_CONV_OVF_U4:
910 ins->type = STACK_I4;
911 ins->opcode += ovf3ops_op_map [src1->type];
912 break;
913 case CEE_CONV_OVF_I_UN:
914 case CEE_CONV_OVF_U_UN:
915 ins->type = STACK_PTR;
916 ins->opcode += ovf2ops_op_map [src1->type];
917 break;
918 case CEE_CONV_OVF_I1_UN:
919 case CEE_CONV_OVF_I2_UN:
920 case CEE_CONV_OVF_I4_UN:
921 case CEE_CONV_OVF_U1_UN:
922 case CEE_CONV_OVF_U2_UN:
923 case CEE_CONV_OVF_U4_UN:
924 ins->type = STACK_I4;
925 ins->opcode += ovf2ops_op_map [src1->type];
926 break;
927 case CEE_CONV_U:
928 ins->type = STACK_PTR;
929 switch (src1->type) {
930 case STACK_I4:
931 ins->opcode = OP_ICONV_TO_U;
932 break;
933 case STACK_PTR:
934 case STACK_MP:
935 #if SIZEOF_VOID_P == 8
936 ins->opcode = OP_LCONV_TO_U;
937 #else
938 ins->opcode = OP_MOVE;
939 #endif
940 break;
941 case STACK_I8:
942 ins->opcode = OP_LCONV_TO_U;
943 break;
944 case STACK_R8:
945 ins->opcode = OP_FCONV_TO_U;
946 break;
948 break;
949 case CEE_CONV_I8:
950 case CEE_CONV_U8:
951 ins->type = STACK_I8;
952 ins->opcode += unops_op_map [src1->type];
953 break;
954 case CEE_CONV_OVF_I8:
955 case CEE_CONV_OVF_U8:
956 ins->type = STACK_I8;
957 ins->opcode += ovf3ops_op_map [src1->type];
958 break;
959 case CEE_CONV_OVF_U8_UN:
960 case CEE_CONV_OVF_I8_UN:
961 ins->type = STACK_I8;
962 ins->opcode += ovf2ops_op_map [src1->type];
963 break;
964 case CEE_CONV_R4:
965 case CEE_CONV_R8:
966 ins->type = STACK_R8;
967 ins->opcode += unops_op_map [src1->type];
968 break;
969 case OP_CKFINITE:
970 ins->type = STACK_R8;
971 break;
972 case CEE_CONV_U2:
973 case CEE_CONV_U1:
974 ins->type = STACK_I4;
975 ins->opcode += ovfops_op_map [src1->type];
976 break;
977 case CEE_CONV_I:
978 case CEE_CONV_OVF_I:
979 case CEE_CONV_OVF_U:
980 ins->type = STACK_PTR;
981 ins->opcode += ovfops_op_map [src1->type];
982 break;
983 case CEE_ADD_OVF:
984 case CEE_ADD_OVF_UN:
985 case CEE_MUL_OVF:
986 case CEE_MUL_OVF_UN:
987 case CEE_SUB_OVF:
988 case CEE_SUB_OVF_UN:
989 ins->type = bin_num_table [src1->type] [src2->type];
990 ins->opcode += ovfops_op_map [src1->type];
991 if (ins->type == STACK_R8)
992 ins->type = STACK_INV;
993 break;
994 case OP_LOAD_MEMBASE:
995 ins->type = STACK_PTR;
996 break;
997 case OP_LOADI1_MEMBASE:
998 case OP_LOADU1_MEMBASE:
999 case OP_LOADI2_MEMBASE:
1000 case OP_LOADU2_MEMBASE:
1001 case OP_LOADI4_MEMBASE:
1002 case OP_LOADU4_MEMBASE:
1003 ins->type = STACK_PTR;
1004 break;
1005 case OP_LOADI8_MEMBASE:
1006 ins->type = STACK_I8;
1007 break;
1008 case OP_LOADR4_MEMBASE:
1009 case OP_LOADR8_MEMBASE:
1010 ins->type = STACK_R8;
1011 break;
1012 default:
1013 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1014 break;
1017 if (ins->type == STACK_MP)
1018 ins->klass = mono_defaults.object_class;
1021 static const char
1022 ldind_type [] = {
1023 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

/* Dead code kept for reference; compiled out. (Missing `};`/braces restored.) */
static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/* Check that the eval-stack values in ARGS are compatible with SIG.
 * Returns 1 when compatible, 0 otherwise. */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1090 * When we need a pointer to the current domain many times in a method, we
1091 * call mono_domain_get() once and we store the result in a local variable.
1092 * This function returns the variable that represents the MonoDomain*.
1094 inline static MonoInst *
1095 mono_get_domainvar (MonoCompile *cfg)
1097 if (!cfg->domainvar)
1098 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1099 return cfg->domainvar;
1103 * The got_var contains the address of the Global Offset Table when AOT
1104 * compiling.
1106 MonoInst *
1107 mono_get_got_var (MonoCompile *cfg)
1109 #ifdef MONO_ARCH_NEED_GOT_VAR
1110 if (!cfg->compile_aot)
1111 return NULL;
1112 if (!cfg->got_var) {
1113 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1115 return cfg->got_var;
1116 #else
1117 return NULL;
1118 #endif
1121 static MonoInst *
1122 mono_get_vtable_var (MonoCompile *cfg)
1124 g_assert (cfg->generic_sharing_context);
1126 if (!cfg->rgctx_var) {
1127 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 /* force the var to be stack allocated */
1129 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1132 return cfg->rgctx_var;
1135 static MonoType*
1136 type_from_stack_type (MonoInst *ins) {
1137 switch (ins->type) {
1138 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1139 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1140 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1141 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1142 case STACK_MP:
1143 return &ins->klass->this_arg;
1144 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1145 case STACK_VTYPE: return &ins->klass->byval_arg;
1146 default:
1147 g_error ("stack type %d to monotype not handled\n", ins->type);
1149 return NULL;
1152 static G_GNUC_UNUSED int
1153 type_to_stack_type (MonoType *t)
1155 t = mono_type_get_underlying_type (t);
1156 switch (t->type) {
1157 case MONO_TYPE_I1:
1158 case MONO_TYPE_U1:
1159 case MONO_TYPE_BOOLEAN:
1160 case MONO_TYPE_I2:
1161 case MONO_TYPE_U2:
1162 case MONO_TYPE_CHAR:
1163 case MONO_TYPE_I4:
1164 case MONO_TYPE_U4:
1165 return STACK_I4;
1166 case MONO_TYPE_I:
1167 case MONO_TYPE_U:
1168 case MONO_TYPE_PTR:
1169 case MONO_TYPE_FNPTR:
1170 return STACK_PTR;
1171 case MONO_TYPE_CLASS:
1172 case MONO_TYPE_STRING:
1173 case MONO_TYPE_OBJECT:
1174 case MONO_TYPE_SZARRAY:
1175 case MONO_TYPE_ARRAY:
1176 return STACK_OBJ;
1177 case MONO_TYPE_I8:
1178 case MONO_TYPE_U8:
1179 return STACK_I8;
1180 case MONO_TYPE_R4:
1181 case MONO_TYPE_R8:
1182 return STACK_R8;
1183 case MONO_TYPE_VALUETYPE:
1184 case MONO_TYPE_TYPEDBYREF:
1185 return STACK_VTYPE;
1186 case MONO_TYPE_GENERICINST:
1187 if (mono_type_generic_inst_is_valuetype (t))
1188 return STACK_VTYPE;
1189 else
1190 return STACK_OBJ;
1191 break;
1192 default:
1193 g_assert_not_reached ();
1196 return -1;
1199 static MonoClass*
1200 array_access_to_klass (int opcode)
1202 switch (opcode) {
1203 case CEE_LDELEM_U1:
1204 return mono_defaults.byte_class;
1205 case CEE_LDELEM_U2:
1206 return mono_defaults.uint16_class;
1207 case CEE_LDELEM_I:
1208 case CEE_STELEM_I:
1209 return mono_defaults.int_class;
1210 case CEE_LDELEM_I1:
1211 case CEE_STELEM_I1:
1212 return mono_defaults.sbyte_class;
1213 case CEE_LDELEM_I2:
1214 case CEE_STELEM_I2:
1215 return mono_defaults.int16_class;
1216 case CEE_LDELEM_I4:
1217 case CEE_STELEM_I4:
1218 return mono_defaults.int32_class;
1219 case CEE_LDELEM_U4:
1220 return mono_defaults.uint32_class;
1221 case CEE_LDELEM_I8:
1222 case CEE_STELEM_I8:
1223 return mono_defaults.int64_class;
1224 case CEE_LDELEM_R4:
1225 case CEE_STELEM_R4:
1226 return mono_defaults.single_class;
1227 case CEE_LDELEM_R8:
1228 case CEE_STELEM_R8:
1229 return mono_defaults.double_class;
1230 case CEE_LDELEM_REF:
1231 case CEE_STELEM_REF:
1232 return mono_defaults.object_class;
1233 default:
1234 g_assert_not_reached ();
1236 return NULL;
1240 * We try to share variables when possible
1242 static MonoInst *
1243 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1245 MonoInst *res;
1246 int pos, vnum;
1248 /* inlining can result in deeper stacks */
1249 if (slot >= cfg->header->max_stack)
1250 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1252 pos = ins->type - 1 + slot * STACK_MAX;
1254 switch (ins->type) {
1255 case STACK_I4:
1256 case STACK_I8:
1257 case STACK_R8:
1258 case STACK_PTR:
1259 case STACK_MP:
1260 case STACK_OBJ:
1261 if ((vnum = cfg->intvars [pos]))
1262 return cfg->varinfo [vnum];
1263 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1264 cfg->intvars [pos] = res->inst_c0;
1265 break;
1266 default:
1267 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1269 return res;
1272 static void
1273 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1276 * Don't use this if a generic_context is set, since that means AOT can't
1277 * look up the method using just the image+token.
1278 * table == 0 means this is a reference made from a wrapper.
1280 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1281 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1282 jump_info_token->image = image;
1283 jump_info_token->token = token;
1284 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1289 * This function is called to handle items that are left on the evaluation stack
1290 * at basic block boundaries. What happens is that we save the values to local variables
1291 * and we reload them later when first entering the target basic block (with the
1292 * handle_loaded_temps () function).
1293 * A single joint point will use the same variables (stored in the array bb->out_stack or
1294 * bb->in_stack, if the basic block is before or after the joint point).
1296 * This function needs to be called _before_ emitting the last instruction of
1297 * the bb (i.e. before emitting a branch).
1298 * If the stack merge fails at a join point, cfg->unverifiable is set.
1300 static void
1301 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1303 int i, bindex;
1304 MonoBasicBlock *bb = cfg->cbb;
1305 MonoBasicBlock *outb;
1306 MonoInst *inst, **locals;
1307 gboolean found;
1309 if (!count)
1310 return;
1311 if (cfg->verbose_level > 3)
1312 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1313 if (!bb->out_scount) {
1314 bb->out_scount = count;
1315 //printf ("bblock %d has out:", bb->block_num);
1316 found = FALSE;
1317 for (i = 0; i < bb->out_count; ++i) {
1318 outb = bb->out_bb [i];
1319 /* exception handlers are linked, but they should not be considered for stack args */
1320 if (outb->flags & BB_EXCEPTION_HANDLER)
1321 continue;
1322 //printf (" %d", outb->block_num);
1323 if (outb->in_stack) {
1324 found = TRUE;
1325 bb->out_stack = outb->in_stack;
1326 break;
1329 //printf ("\n");
1330 if (!found) {
1331 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1332 for (i = 0; i < count; ++i) {
1334 * try to reuse temps already allocated for this purpouse, if they occupy the same
1335 * stack slot and if they are of the same type.
1336 * This won't cause conflicts since if 'local' is used to
1337 * store one of the values in the in_stack of a bblock, then
1338 * the same variable will be used for the same outgoing stack
1339 * slot as well.
1340 * This doesn't work when inlining methods, since the bblocks
1341 * in the inlined methods do not inherit their in_stack from
1342 * the bblock they are inlined to. See bug #58863 for an
1343 * example.
1345 if (cfg->inlined_method)
1346 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1347 else
1348 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1353 for (i = 0; i < bb->out_count; ++i) {
1354 outb = bb->out_bb [i];
1355 /* exception handlers are linked, but they should not be considered for stack args */
1356 if (outb->flags & BB_EXCEPTION_HANDLER)
1357 continue;
1358 if (outb->in_scount) {
1359 if (outb->in_scount != bb->out_scount) {
1360 cfg->unverifiable = TRUE;
1361 return;
1363 continue; /* check they are the same locals */
1365 outb->in_scount = count;
1366 outb->in_stack = bb->out_stack;
1369 locals = bb->out_stack;
1370 cfg->cbb = bb;
1371 for (i = 0; i < count; ++i) {
1372 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1373 inst->cil_code = sp [i]->cil_code;
1374 sp [i] = locals [i];
1375 if (cfg->verbose_level > 3)
1376 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1380 * It is possible that the out bblocks already have in_stack assigned, and
1381 * the in_stacks differ. In this case, we will store to all the different
1382 * in_stacks.
1385 found = TRUE;
1386 bindex = 0;
1387 while (found) {
1388 /* Find a bblock which has a different in_stack */
1389 found = FALSE;
1390 while (bindex < bb->out_count) {
1391 outb = bb->out_bb [bindex];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER) {
1394 bindex++;
1395 continue;
1397 if (outb->in_stack != locals) {
1398 for (i = 0; i < count; ++i) {
1399 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1400 inst->cil_code = sp [i]->cil_code;
1401 sp [i] = locals [i];
1402 if (cfg->verbose_level > 3)
1403 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1405 locals = outb->in_stack;
1406 found = TRUE;
1407 break;
1409 bindex ++;
1414 /* Emit code which loads interface_offsets [klass->interface_id]
1415 * The array is stored in memory before vtable.
1417 static void
1418 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1420 if (cfg->compile_aot) {
1421 int ioffset_reg = alloc_preg (cfg);
1422 int iid_reg = alloc_preg (cfg);
1424 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1425 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1428 else {
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1433 static void
1434 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1436 int ibitmap_reg = alloc_preg (cfg);
1437 #ifdef COMPRESSED_INTERFACE_BITMAP
1438 MonoInst *args [2];
1439 MonoInst *res, *ins;
1440 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1441 MONO_ADD_INS (cfg->cbb, ins);
1442 args [0] = ins;
1443 if (cfg->compile_aot)
1444 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1445 else
1446 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1447 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1449 #else
1450 int ibitmap_byte_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1454 if (cfg->compile_aot) {
1455 int iid_reg = alloc_preg (cfg);
1456 int shifted_iid_reg = alloc_preg (cfg);
1457 int ibitmap_byte_address_reg = alloc_preg (cfg);
1458 int masked_iid_reg = alloc_preg (cfg);
1459 int iid_one_bit_reg = alloc_preg (cfg);
1460 int iid_bit_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1463 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1466 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1467 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1469 } else {
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1473 #endif
1477 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1478 * stored in "klass_reg" implements the interface "klass".
1480 static void
1481 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1483 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1487 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1488 * stored in "vtable_reg" implements the interface "klass".
1490 static void
1491 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1493 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1497 * Emit code which checks whenever the interface id of @klass is smaller than
1498 * than the value given by max_iid_reg.
1500 static void
1501 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1502 MonoBasicBlock *false_target)
1504 if (cfg->compile_aot) {
1505 int iid_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1509 else
1510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1511 if (false_target)
1512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1513 else
1514 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1517 /* Same as above, but obtains max_iid from a vtable */
1518 static void
1519 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1520 MonoBasicBlock *false_target)
1522 int max_iid_reg = alloc_preg (cfg);
1524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1525 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1528 /* Same as above, but obtains max_iid from a klass */
1529 static void
1530 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 int max_iid_reg = alloc_preg (cfg);
1535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1536 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1539 static void
1540 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1542 int idepth_reg = alloc_preg (cfg);
1543 int stypes_reg = alloc_preg (cfg);
1544 int stype = alloc_preg (cfg);
1546 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1547 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1553 if (klass_ins) {
1554 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1555 } else if (cfg->compile_aot) {
1556 int const_reg = alloc_preg (cfg);
1557 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1558 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1559 } else {
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1565 static void
1566 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1568 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1571 static void
1572 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1574 int intf_reg = alloc_preg (cfg);
1576 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1577 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1579 if (true_target)
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1581 else
1582 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1586 * Variant of the above that takes a register to the class, not the vtable.
1588 static void
1589 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1591 int intf_bit_reg = alloc_preg (cfg);
1593 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1594 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1596 if (true_target)
1597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1598 else
1599 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1602 static inline void
1603 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1605 if (klass_inst) {
1606 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1607 } else if (cfg->compile_aot) {
1608 int const_reg = alloc_preg (cfg);
1609 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1610 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1611 } else {
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1617 static inline void
1618 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1620 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1623 static inline void
1624 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1626 if (cfg->compile_aot) {
1627 int const_reg = alloc_preg (cfg);
1628 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1630 } else {
1631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1636 static void
1637 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1639 static void
1640 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1642 if (klass->rank) {
1643 int rank_reg = alloc_preg (cfg);
1644 int eclass_reg = alloc_preg (cfg);
1646 g_assert (!klass_inst);
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1649 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1650 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1652 if (klass->cast_class == mono_defaults.object_class) {
1653 int parent_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1655 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1656 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1657 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1658 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1659 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1660 } else if (klass->cast_class == mono_defaults.enum_class) {
1661 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1662 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1663 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1664 } else {
1665 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1666 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1669 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1670 /* Check that the object is a vector too */
1671 int bounds_reg = alloc_preg (cfg);
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1674 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1676 } else {
1677 int idepth_reg = alloc_preg (cfg);
1678 int stypes_reg = alloc_preg (cfg);
1679 int stype = alloc_preg (cfg);
1681 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1684 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1688 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1692 static void
1693 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1695 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1698 static void
1699 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1701 int val_reg;
1703 g_assert (val == 0);
1705 if (align == 0)
1706 align = 4;
1708 if ((size <= 4) && (size <= align)) {
1709 switch (size) {
1710 case 1:
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1712 return;
1713 case 2:
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1715 return;
1716 case 4:
1717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1718 return;
1719 #if SIZEOF_REGISTER == 8
1720 case 8:
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1722 return;
1723 #endif
1727 val_reg = alloc_preg (cfg);
1729 if (SIZEOF_REGISTER == 8)
1730 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1731 else
1732 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1734 if (align < 4) {
1735 /* This could be optimized further if neccesary */
1736 while (size >= 1) {
1737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1738 offset += 1;
1739 size -= 1;
1741 return;
1744 #if !NO_UNALIGNED_ACCESS
1745 if (SIZEOF_REGISTER == 8) {
1746 if (offset % 8) {
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1748 offset += 4;
1749 size -= 4;
1751 while (size >= 8) {
1752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1753 offset += 8;
1754 size -= 8;
1757 #endif
1759 while (size >= 4) {
1760 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1761 offset += 4;
1762 size -= 4;
1764 while (size >= 2) {
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1766 offset += 2;
1767 size -= 2;
1769 while (size >= 1) {
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1771 offset += 1;
1772 size -= 1;
1776 void
1777 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1779 int cur_reg;
1781 if (align == 0)
1782 align = 4;
1784 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1785 g_assert (size < 10000);
1787 if (align < 4) {
1788 /* This could be optimized further if neccesary */
1789 while (size >= 1) {
1790 cur_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1793 doffset += 1;
1794 soffset += 1;
1795 size -= 1;
1799 #if !NO_UNALIGNED_ACCESS
1800 if (SIZEOF_REGISTER == 8) {
1801 while (size >= 8) {
1802 cur_reg = alloc_preg (cfg);
1803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1804 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1805 doffset += 8;
1806 soffset += 8;
1807 size -= 8;
1810 #endif
1812 while (size >= 4) {
1813 cur_reg = alloc_preg (cfg);
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1815 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1816 doffset += 4;
1817 soffset += 4;
1818 size -= 4;
1820 while (size >= 2) {
1821 cur_reg = alloc_preg (cfg);
1822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1824 doffset += 2;
1825 soffset += 2;
1826 size -= 2;
1828 while (size >= 1) {
1829 cur_reg = alloc_preg (cfg);
1830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1831 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1832 doffset += 1;
1833 soffset += 1;
1834 size -= 1;
1838 static int
1839 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1841 if (type->byref)
1842 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1844 handle_enum:
1845 type = mini_get_basic_type_from_generic (gsctx, type);
1846 switch (type->type) {
1847 case MONO_TYPE_VOID:
1848 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1849 case MONO_TYPE_I1:
1850 case MONO_TYPE_U1:
1851 case MONO_TYPE_BOOLEAN:
1852 case MONO_TYPE_I2:
1853 case MONO_TYPE_U2:
1854 case MONO_TYPE_CHAR:
1855 case MONO_TYPE_I4:
1856 case MONO_TYPE_U4:
1857 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1858 case MONO_TYPE_I:
1859 case MONO_TYPE_U:
1860 case MONO_TYPE_PTR:
1861 case MONO_TYPE_FNPTR:
1862 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1863 case MONO_TYPE_CLASS:
1864 case MONO_TYPE_STRING:
1865 case MONO_TYPE_OBJECT:
1866 case MONO_TYPE_SZARRAY:
1867 case MONO_TYPE_ARRAY:
1868 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1869 case MONO_TYPE_I8:
1870 case MONO_TYPE_U8:
1871 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1872 case MONO_TYPE_R4:
1873 case MONO_TYPE_R8:
1874 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1875 case MONO_TYPE_VALUETYPE:
1876 if (type->data.klass->enumtype) {
1877 type = mono_class_enum_basetype (type->data.klass);
1878 goto handle_enum;
1879 } else
1880 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1881 case MONO_TYPE_TYPEDBYREF:
1882 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1883 case MONO_TYPE_GENERICINST:
1884 type = &type->data.generic_class->container_class->byval_arg;
1885 goto handle_enum;
1886 default:
1887 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1889 return -1;
1893 * target_type_is_incompatible:
1894 * @cfg: MonoCompile context
1896 * Check that the item @arg on the evaluation stack can be stored
1897 * in the target type (can be a local, or field, etc).
1898 * The cfg arg can be used to check if we need verification or just
1899 * validity checks.
1901 * Returns: non-0 value if arg can't be stored on a target.
1903 static int
1904 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1906 MonoType *simple_type;
1907 MonoClass *klass;
1909 if (target->byref) {
1910 /* FIXME: check that the pointed to types match */
1911 if (arg->type == STACK_MP)
1912 return arg->klass != mono_class_from_mono_type (target);
1913 if (arg->type == STACK_PTR)
1914 return 0;
1915 return 1;
1918 simple_type = mono_type_get_underlying_type (target);
1919 switch (simple_type->type) {
1920 case MONO_TYPE_VOID:
1921 return 1;
1922 case MONO_TYPE_I1:
1923 case MONO_TYPE_U1:
1924 case MONO_TYPE_BOOLEAN:
1925 case MONO_TYPE_I2:
1926 case MONO_TYPE_U2:
1927 case MONO_TYPE_CHAR:
1928 case MONO_TYPE_I4:
1929 case MONO_TYPE_U4:
1930 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1931 return 1;
1932 return 0;
1933 case MONO_TYPE_PTR:
1934 /* STACK_MP is needed when setting pinned locals */
1935 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1936 return 1;
1937 return 0;
1938 case MONO_TYPE_I:
1939 case MONO_TYPE_U:
1940 case MONO_TYPE_FNPTR:
1942 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1943 * in native int. (#688008).
1945 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1946 return 1;
1947 return 0;
1948 case MONO_TYPE_CLASS:
1949 case MONO_TYPE_STRING:
1950 case MONO_TYPE_OBJECT:
1951 case MONO_TYPE_SZARRAY:
1952 case MONO_TYPE_ARRAY:
1953 if (arg->type != STACK_OBJ)
1954 return 1;
1955 /* FIXME: check type compatibility */
1956 return 0;
1957 case MONO_TYPE_I8:
1958 case MONO_TYPE_U8:
1959 if (arg->type != STACK_I8)
1960 return 1;
1961 return 0;
1962 case MONO_TYPE_R4:
1963 case MONO_TYPE_R8:
1964 if (arg->type != STACK_R8)
1965 return 1;
1966 return 0;
1967 case MONO_TYPE_VALUETYPE:
1968 if (arg->type != STACK_VTYPE)
1969 return 1;
1970 klass = mono_class_from_mono_type (simple_type);
1971 if (klass != arg->klass)
1972 return 1;
1973 return 0;
1974 case MONO_TYPE_TYPEDBYREF:
1975 if (arg->type != STACK_VTYPE)
1976 return 1;
1977 klass = mono_class_from_mono_type (simple_type);
1978 if (klass != arg->klass)
1979 return 1;
1980 return 0;
1981 case MONO_TYPE_GENERICINST:
1982 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1983 if (arg->type != STACK_VTYPE)
1984 return 1;
1985 klass = mono_class_from_mono_type (simple_type);
1986 if (klass != arg->klass)
1987 return 1;
1988 return 0;
1989 } else {
1990 if (arg->type != STACK_OBJ)
1991 return 1;
1992 /* FIXME: check type compatibility */
1993 return 0;
1995 case MONO_TYPE_VAR:
1996 case MONO_TYPE_MVAR:
1997 /* FIXME: all the arguments must be references for now,
1998 * later look inside cfg and see if the arg num is
1999 * really a reference
2001 g_assert (cfg->generic_sharing_context);
2002 if (arg->type != STACK_OBJ)
2003 return 1;
2004 return 0;
2005 default:
2006 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2008 return 1;
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 */
2020 static int
2021 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2023 MonoType *simple_type;
2024 int i;
2026 if (sig->hasthis) {
2027 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2028 return 1;
2029 args++;
2031 for (i = 0; i < sig->param_count; ++i) {
2032 if (sig->params [i]->byref) {
2033 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2034 return 1;
2035 continue;
2037 simple_type = sig->params [i];
2038 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2039 handle_enum:
2040 switch (simple_type->type) {
2041 case MONO_TYPE_VOID:
2042 return 1;
2043 continue;
2044 case MONO_TYPE_I1:
2045 case MONO_TYPE_U1:
2046 case MONO_TYPE_BOOLEAN:
2047 case MONO_TYPE_I2:
2048 case MONO_TYPE_U2:
2049 case MONO_TYPE_CHAR:
2050 case MONO_TYPE_I4:
2051 case MONO_TYPE_U4:
2052 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2053 return 1;
2054 continue;
2055 case MONO_TYPE_I:
2056 case MONO_TYPE_U:
2057 case MONO_TYPE_PTR:
2058 case MONO_TYPE_FNPTR:
2059 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2060 return 1;
2061 continue;
2062 case MONO_TYPE_CLASS:
2063 case MONO_TYPE_STRING:
2064 case MONO_TYPE_OBJECT:
2065 case MONO_TYPE_SZARRAY:
2066 case MONO_TYPE_ARRAY:
2067 if (args [i]->type != STACK_OBJ)
2068 return 1;
2069 continue;
2070 case MONO_TYPE_I8:
2071 case MONO_TYPE_U8:
2072 if (args [i]->type != STACK_I8)
2073 return 1;
2074 continue;
2075 case MONO_TYPE_R4:
2076 case MONO_TYPE_R8:
2077 if (args [i]->type != STACK_R8)
2078 return 1;
2079 continue;
2080 case MONO_TYPE_VALUETYPE:
2081 if (simple_type->data.klass->enumtype) {
2082 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2083 goto handle_enum;
2085 if (args [i]->type != STACK_VTYPE)
2086 return 1;
2087 continue;
2088 case MONO_TYPE_TYPEDBYREF:
2089 if (args [i]->type != STACK_VTYPE)
2090 return 1;
2091 continue;
2092 case MONO_TYPE_GENERICINST:
2093 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2094 goto handle_enum;
2096 default:
2097 g_error ("unknown type 0x%02x in check_call_signature",
2098 simple_type->type);
2101 return 0;
2104 static int
2105 callvirt_to_call (int opcode)
2107 switch (opcode) {
2108 case OP_CALLVIRT:
2109 return OP_CALL;
2110 case OP_VOIDCALLVIRT:
2111 return OP_VOIDCALL;
2112 case OP_FCALLVIRT:
2113 return OP_FCALL;
2114 case OP_VCALLVIRT:
2115 return OP_VCALL;
2116 case OP_LCALLVIRT:
2117 return OP_LCALL;
2118 default:
2119 g_assert_not_reached ();
2122 return -1;
2125 static int
2126 callvirt_to_call_membase (int opcode)
2128 switch (opcode) {
2129 case OP_CALLVIRT:
2130 return OP_CALL_MEMBASE;
2131 case OP_VOIDCALLVIRT:
2132 return OP_VOIDCALL_MEMBASE;
2133 case OP_FCALLVIRT:
2134 return OP_FCALL_MEMBASE;
2135 case OP_LCALLVIRT:
2136 return OP_LCALL_MEMBASE;
2137 case OP_VCALLVIRT:
2138 return OP_VCALL_MEMBASE;
2139 default:
2140 g_assert_not_reached ();
2143 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Load the IMT/method argument for CALL into a fresh vreg and register it
 * as an out-of-band call argument. The value is IMT_ARG if given, otherwise
 * a constant (AOT patch or direct pointer) describing call->method.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;

			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#ifdef ENABLE_LLVM
		call->imt_arg_reg = method_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* Need this to keep the IMT arg alive */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* No dedicated IMT register: let the backend place the argument */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2201 static MonoJumpInfo *
2202 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2204 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2206 ji->ip.i = ip;
2207 ji->type = type;
2208 ji->data.target = target;
2210 return ji;
2213 inline static MonoCallInst *
2214 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2215 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2217 MonoCallInst *call;
2218 #ifdef MONO_ARCH_SOFT_FLOAT
2219 int i;
2220 #endif
2222 if (tail)
2223 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2224 else
2225 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2227 call->args = args;
2228 call->signature = sig;
2229 call->rgctx_reg = rgctx;
2231 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2233 if (tail) {
2234 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2235 call->vret_var = cfg->vret_addr;
2236 //g_assert_not_reached ();
2238 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2239 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2240 MonoInst *loada;
2242 temp->backend.is_pinvoke = sig->pinvoke;
2245 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2246 * address of return value to increase optimization opportunities.
2247 * Before vtype decomposition, the dreg of the call ins itself represents the
2248 * fact the call modifies the return value. After decomposition, the call will
2249 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2250 * will be transformed into an LDADDR.
2252 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2253 loada->dreg = alloc_preg (cfg);
2254 loada->inst_p0 = temp;
2255 /* We reference the call too since call->dreg could change during optimization */
2256 loada->inst_p1 = call;
2257 MONO_ADD_INS (cfg->cbb, loada);
2259 call->inst.dreg = temp->dreg;
2261 call->vret_var = loada;
2262 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2263 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2265 #ifdef MONO_ARCH_SOFT_FLOAT
2266 if (COMPILE_SOFT_FLOAT (cfg)) {
2268 * If the call has a float argument, we would need to do an r8->r4 conversion using
2269 * an icall, but that cannot be done during the call sequence since it would clobber
2270 * the call registers + the stack. So we do it before emitting the call.
2272 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2273 MonoType *t;
2274 MonoInst *in = call->args [i];
2276 if (i >= sig->hasthis)
2277 t = sig->params [i - sig->hasthis];
2278 else
2279 t = &mono_defaults.int_class->byval_arg;
2280 t = mono_type_get_underlying_type (t);
2282 if (!t->byref && t->type == MONO_TYPE_R4) {
2283 MonoInst *iargs [1];
2284 MonoInst *conv;
2286 iargs [0] = in;
2287 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2289 /* The result will be in an int vreg */
2290 call->args [i] = conv;
2294 #endif
2296 #ifdef ENABLE_LLVM
2297 if (COMPILE_LLVM (cfg))
2298 mono_llvm_emit_call (cfg, call);
2299 else
2300 mono_arch_emit_call (cfg, call);
2301 #else
2302 mono_arch_emit_call (cfg, call);
2303 #endif
2305 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2306 cfg->flags |= MONO_CFG_HAS_CALLS;
2308 return call;
2311 static void
2312 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2314 #ifdef MONO_ARCH_RGCTX_REG
2315 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2316 cfg->uses_rgctx_reg = TRUE;
2317 call->rgctx_reg = TRUE;
2318 #ifdef ENABLE_LLVM
2319 call->rgctx_arg_reg = rgctx_reg;
2320 #endif
2321 #else
2322 NOT_IMPLEMENTED;
2323 #endif
2326 inline static MonoInst*
2327 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2329 MonoCallInst *call;
2330 int rgctx_reg = -1;
2332 if (rgctx_arg) {
2333 rgctx_reg = mono_alloc_preg (cfg);
2334 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2337 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2339 call->inst.sreg1 = addr->dreg;
2341 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2343 if (rgctx_arg)
2344 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2346 return (MonoInst*)call;
2349 static MonoInst*
2350 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2351 static MonoInst*
2352 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2354 static MonoInst*
2355 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2356 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2358 gboolean might_be_remote;
2359 gboolean virtual = this != NULL;
2360 gboolean enable_for_aot = TRUE;
2361 int context_used;
2362 MonoCallInst *call;
2363 int rgctx_reg = 0;
2365 if (rgctx_arg) {
2366 rgctx_reg = mono_alloc_preg (cfg);
2367 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2370 if (method->string_ctor) {
2371 /* Create the real signature */
2372 /* FIXME: Cache these */
2373 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2374 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2376 sig = ctor_sig;
2379 context_used = mono_method_check_context_used (method);
2381 might_be_remote = this && sig->hasthis &&
2382 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2383 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2385 if (might_be_remote && context_used) {
2386 MonoInst *addr;
2388 g_assert (cfg->generic_sharing_context);
2390 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2392 return mono_emit_calli (cfg, sig, args, addr, NULL);
2395 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2397 if (might_be_remote)
2398 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2399 else
2400 call->method = method;
2401 call->inst.flags |= MONO_INST_HAS_METHOD;
2402 call->inst.inst_left = this;
2404 if (virtual) {
2405 int vtable_reg, slot_reg, this_reg;
2407 this_reg = this->dreg;
2409 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2410 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2411 MonoInst *dummy_use;
2413 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2415 /* Make a call to delegate->invoke_impl */
2416 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2417 call->inst.inst_basereg = this_reg;
2418 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2419 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2421 /* We must emit a dummy use here because the delegate trampoline will
2422 replace the 'this' argument with the delegate target making this activation
2423 no longer a root for the delegate.
2424 This is an issue for delegates that target collectible code such as dynamic
2425 methods of GC'able assemblies.
2427 For a test case look into #667921.
2429 FIXME: a dummy use is not the best way to do it as the local register allocator
2430 will put it on a caller save register and spil it around the call.
2431 Ideally, we would either put it on a callee save register or only do the store part.
2433 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2435 return (MonoInst*)call;
2437 #endif
2439 if ((!cfg->compile_aot || enable_for_aot) &&
2440 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2441 (MONO_METHOD_IS_FINAL (method) &&
2442 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2443 !(method->klass->marshalbyref && context_used)) {
2445 * the method is not virtual, we just need to ensure this is not null
2446 * and then we can call the method directly.
2448 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2450 * The check above ensures method is not gshared, this is needed since
2451 * gshared methods can't have wrappers.
2453 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2456 if (!method->string_ctor)
2457 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2459 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2460 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2462 * the method is virtual, but we can statically dispatch since either
2463 * it's class or the method itself are sealed.
2464 * But first we need to ensure it's not a null reference.
2466 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2468 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2469 } else {
2470 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2472 vtable_reg = alloc_preg (cfg);
2473 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2474 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2475 slot_reg = -1;
2476 #ifdef MONO_ARCH_HAVE_IMT
2477 if (mono_use_imt) {
2478 guint32 imt_slot = mono_method_get_imt_slot (method);
2479 emit_imt_argument (cfg, call, imt_arg);
2480 slot_reg = vtable_reg;
2481 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2483 #endif
2484 if (slot_reg == -1) {
2485 slot_reg = alloc_preg (cfg);
2486 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2487 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2489 } else {
2490 slot_reg = vtable_reg;
2491 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2492 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2493 #ifdef MONO_ARCH_HAVE_IMT
2494 if (imt_arg) {
2495 g_assert (mono_method_signature (method)->generic_param_count);
2496 emit_imt_argument (cfg, call, imt_arg);
2498 #endif
2501 call->inst.sreg1 = slot_reg;
2502 call->virtual = TRUE;
2506 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2508 if (rgctx_arg)
2509 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2511 return (MonoInst*)call;
2514 MonoInst*
2515 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2517 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2520 MonoInst*
2521 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2522 MonoInst **args)
2524 MonoCallInst *call;
2526 g_assert (sig);
2528 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2529 call->fptr = func;
2531 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2533 return (MonoInst*)call;
2536 MonoInst*
2537 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2539 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2541 g_assert (info);
2543 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2547 * mono_emit_abs_call:
2549 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2551 inline static MonoInst*
2552 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2553 MonoMethodSignature *sig, MonoInst **args)
2555 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2556 MonoInst *ins;
2559 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2560 * handle it.
2562 if (cfg->abs_patches == NULL)
2563 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2564 g_hash_table_insert (cfg->abs_patches, ji, ji);
2565 ins = mono_emit_native_call (cfg, ji, sig, args);
2566 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2567 return ins;
2570 static MonoInst*
2571 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2573 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2574 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2575 int widen_op = -1;
2578 * Native code might return non register sized integers
2579 * without initializing the upper bits.
2581 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2582 case OP_LOADI1_MEMBASE:
2583 widen_op = OP_ICONV_TO_I1;
2584 break;
2585 case OP_LOADU1_MEMBASE:
2586 widen_op = OP_ICONV_TO_U1;
2587 break;
2588 case OP_LOADI2_MEMBASE:
2589 widen_op = OP_ICONV_TO_I2;
2590 break;
2591 case OP_LOADU2_MEMBASE:
2592 widen_op = OP_ICONV_TO_U2;
2593 break;
2594 default:
2595 break;
2598 if (widen_op != -1) {
2599 int dreg = alloc_preg (cfg);
2600 MonoInst *widen;
2602 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2603 widen->type = ins->type;
2604 ins = widen;
2609 return ins;
2612 static MonoMethod*
2613 get_memcpy_method (void)
2615 static MonoMethod *memcpy_method = NULL;
2616 if (!memcpy_method) {
2617 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2618 if (!memcpy_method)
2619 g_error ("Old corlib found. Install a new one");
2621 return memcpy_method;
2624 static void
2625 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2627 MonoClassField *field;
2628 gpointer iter = NULL;
2630 while ((field = mono_class_get_fields (klass, &iter))) {
2631 int foffset;
2633 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2634 continue;
2635 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2636 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2637 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2638 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2639 } else {
2640 MonoClass *field_class = mono_class_from_mono_type (field->type);
2641 if (field_class->has_references)
2642 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
2647 static void
2648 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2650 int card_table_shift_bits;
2651 gpointer card_table_mask;
2652 guint8 *card_table;
2653 MonoInst *dummy_use;
2654 int nursery_shift_bits;
2655 size_t nursery_size;
2656 gboolean has_card_table_wb = FALSE;
2658 if (!cfg->gen_write_barriers)
2659 return;
2661 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2663 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2665 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2666 has_card_table_wb = TRUE;
2667 #endif
2669 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2670 MonoInst *wbarrier;
2672 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2673 wbarrier->sreg1 = ptr->dreg;
2674 if (value)
2675 wbarrier->sreg2 = value->dreg;
2676 else
2677 wbarrier->sreg2 = value_reg;
2678 MONO_ADD_INS (cfg->cbb, wbarrier);
2679 } else if (card_table) {
2680 int offset_reg = alloc_preg (cfg);
2681 int card_reg = alloc_preg (cfg);
2682 MonoInst *ins;
2684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2685 if (card_table_mask)
2686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2688 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2689 * IMM's larger than 32bits.
2691 if (cfg->compile_aot) {
2692 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2693 } else {
2694 MONO_INST_NEW (cfg, ins, OP_PCONST);
2695 ins->inst_p0 = card_table;
2696 ins->dreg = card_reg;
2697 MONO_ADD_INS (cfg->cbb, ins);
2700 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2701 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2702 } else {
2703 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2704 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2707 if (value) {
2708 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2709 } else {
2710 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2711 dummy_use->sreg1 = value_reg;
2712 MONO_ADD_INS (cfg->cbb, dummy_use);
2716 static gboolean
2717 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2719 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2720 unsigned need_wb = 0;
2722 if (align == 0)
2723 align = 4;
2725 /*types with references can't have alignment smaller than sizeof(void*) */
2726 if (align < SIZEOF_VOID_P)
2727 return FALSE;
2729 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2730 if (size > 32 * SIZEOF_VOID_P)
2731 return FALSE;
2733 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2735 /* We don't unroll more than 5 stores to avoid code bloat. */
2736 if (size > 5 * SIZEOF_VOID_P) {
2737 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2738 size += (SIZEOF_VOID_P - 1);
2739 size &= ~(SIZEOF_VOID_P - 1);
2741 EMIT_NEW_ICONST (cfg, iargs [2], size);
2742 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2743 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2744 return TRUE;
2747 destreg = iargs [0]->dreg;
2748 srcreg = iargs [1]->dreg;
2749 offset = 0;
2751 dest_ptr_reg = alloc_preg (cfg);
2752 tmp_reg = alloc_preg (cfg);
2754 /*tmp = dreg*/
2755 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2757 while (size >= SIZEOF_VOID_P) {
2758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2761 if (need_wb & 0x1)
2762 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2764 offset += SIZEOF_VOID_P;
2765 size -= SIZEOF_VOID_P;
2766 need_wb >>= 1;
2768 /*tmp += sizeof (void*)*/
2769 if (size >= SIZEOF_VOID_P) {
2770 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2771 MONO_ADD_INS (cfg->cbb, iargs [0]);
2775 /* Those cannot be references since size < sizeof (void*) */
2776 while (size >= 4) {
2777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2779 offset += 4;
2780 size -= 4;
2783 while (size >= 2) {
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2786 offset += 2;
2787 size -= 2;
2790 while (size >= 1) {
2791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2793 offset += 1;
2794 size -= 1;
2797 return TRUE;
2801 * Emit code to copy a valuetype of type @klass whose address is stored in
2802 * @src->dreg to memory whose address is stored at @dest->dreg.
2804 void
2805 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2807 MonoInst *iargs [4];
2808 int n;
2809 guint32 align = 0;
2810 MonoMethod *memcpy_method;
2812 g_assert (klass);
2814 * This check breaks with spilled vars... need to handle it during verification anyway.
2815 * g_assert (klass && klass == src->klass && klass == dest->klass);
2818 if (native)
2819 n = mono_class_native_size (klass, &align);
2820 else
2821 n = mono_class_value_size (klass, &align);
2823 /* if native is true there should be no references in the struct */
2824 if (cfg->gen_write_barriers && klass->has_references && !native) {
2825 /* Avoid barriers when storing to the stack */
2826 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2827 (dest->opcode == OP_LDADDR))) {
2828 int context_used = 0;
2830 iargs [0] = dest;
2831 iargs [1] = src;
2833 if (cfg->generic_sharing_context)
2834 context_used = mono_class_check_context_used (klass);
2836 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2837 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2838 return;
2839 } else if (context_used) {
2840 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2841 } else {
2842 if (cfg->compile_aot) {
2843 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2844 } else {
2845 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2846 mono_class_compute_gc_descriptor (klass);
2850 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2851 return;
2855 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2856 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2857 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2858 } else {
2859 iargs [0] = dest;
2860 iargs [1] = src;
2861 EMIT_NEW_ICONST (cfg, iargs [2], n);
2863 memcpy_method = get_memcpy_method ();
2864 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2868 static MonoMethod*
2869 get_memset_method (void)
2871 static MonoMethod *memset_method = NULL;
2872 if (!memset_method) {
2873 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2874 if (!memset_method)
2875 g_error ("Old corlib found. Install a new one");
2877 return memset_method;
2880 void
2881 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2883 MonoInst *iargs [3];
2884 int n;
2885 guint32 align;
2886 MonoMethod *memset_method;
2888 /* FIXME: Optimize this for the case when dest is an LDADDR */
2890 mono_class_init (klass);
2891 n = mono_class_value_size (klass, &align);
2893 if (n <= sizeof (gpointer) * 5) {
2894 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2896 else {
2897 memset_method = get_memset_method ();
2898 iargs [0] = dest;
2899 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2900 EMIT_NEW_ICONST (cfg, iargs [2], n);
2901 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2905 static MonoInst*
2906 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2908 MonoInst *this = NULL;
2910 g_assert (cfg->generic_sharing_context);
2912 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2913 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2914 !method->klass->valuetype)
2915 EMIT_NEW_ARGLOAD (cfg, this, 0);
2917 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2918 MonoInst *mrgctx_loc, *mrgctx_var;
2920 g_assert (!this);
2921 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2923 mrgctx_loc = mono_get_vtable_var (cfg);
2924 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2926 return mrgctx_var;
2927 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2928 MonoInst *vtable_loc, *vtable_var;
2930 g_assert (!this);
2932 vtable_loc = mono_get_vtable_var (cfg);
2933 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2935 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2936 MonoInst *mrgctx_var = vtable_var;
2937 int vtable_reg;
2939 vtable_reg = alloc_preg (cfg);
2940 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2941 vtable_var->type = STACK_PTR;
2944 return vtable_var;
2945 } else {
2946 MonoInst *ins;
2947 int vtable_reg;
2949 vtable_reg = alloc_preg (cfg);
2950 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2951 return ins;
2955 static MonoJumpInfoRgctxEntry *
2956 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2958 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2959 res->method = method;
2960 res->in_mrgctx = in_mrgctx;
2961 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2962 res->data->type = patch_type;
2963 res->data->data.target = patch_data;
2964 res->info_type = info_type;
2966 return res;
2969 static inline MonoInst*
2970 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2972 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2975 static MonoInst*
2976 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2977 MonoClass *klass, int rgctx_type)
2979 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2980 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2982 return emit_rgctx_fetch (cfg, rgctx, entry);
2986 * emit_get_rgctx_method:
2988 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2989 * normal constants, else emit a load from the rgctx.
2991 static MonoInst*
2992 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2993 MonoMethod *cmethod, int rgctx_type)
2995 if (!context_used) {
2996 MonoInst *ins;
2998 switch (rgctx_type) {
2999 case MONO_RGCTX_INFO_METHOD:
3000 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3001 return ins;
3002 case MONO_RGCTX_INFO_METHOD_RGCTX:
3003 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3004 return ins;
3005 default:
3006 g_assert_not_reached ();
3008 } else {
3009 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3010 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3012 return emit_rgctx_fetch (cfg, rgctx, entry);
3016 static MonoInst*
3017 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3018 MonoClassField *field, int rgctx_type)
3020 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3021 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3023 return emit_rgctx_fetch (cfg, rgctx, entry);
3027 * On return the caller must check @klass for load errors.
3029 static void
3030 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3032 MonoInst *vtable_arg;
3033 MonoCallInst *call;
3034 int context_used = 0;
3036 if (cfg->generic_sharing_context)
3037 context_used = mono_class_check_context_used (klass);
3039 if (context_used) {
3040 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3041 klass, MONO_RGCTX_INFO_VTABLE);
3042 } else {
3043 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3045 if (!vtable)
3046 return;
3047 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3050 if (COMPILE_LLVM (cfg))
3051 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3052 else
3053 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3054 #ifdef MONO_ARCH_VTABLE_REG
3055 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3056 cfg->uses_vtable_reg = TRUE;
3057 #else
3058 NOT_IMPLEMENTED;
3059 #endif
3062 static void
3063 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3065 if (mini_get_debug_options ()->better_cast_details) {
3066 int to_klass_reg = alloc_preg (cfg);
3067 int vtable_reg = alloc_preg (cfg);
3068 int klass_reg = alloc_preg (cfg);
3069 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3071 if (!tls_get) {
3072 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3073 exit (1);
3076 MONO_ADD_INS (cfg->cbb, tls_get);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3081 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3082 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3086 static void
3087 reset_cast_details (MonoCompile *cfg)
3089 /* Reset the variables holding the cast details */
3090 if (mini_get_debug_options ()->better_cast_details) {
3091 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3093 MONO_ADD_INS (cfg->cbb, tls_get);
3094 /* It is enough to reset the from field */
3095 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3100 * On return the caller must check @array_class for load errors
3102 static void
3103 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3105 int vtable_reg = alloc_preg (cfg);
3106 int context_used = 0;
3108 if (cfg->generic_sharing_context)
3109 context_used = mono_class_check_context_used (array_class);
3111 save_cast_details (cfg, array_class, obj->dreg);
3113 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3115 if (cfg->opt & MONO_OPT_SHARED) {
3116 int class_reg = alloc_preg (cfg);
3117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3118 if (cfg->compile_aot) {
3119 int klass_reg = alloc_preg (cfg);
3120 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3121 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3122 } else {
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3125 } else if (context_used) {
3126 MonoInst *vtable_ins;
3128 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3129 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3130 } else {
3131 if (cfg->compile_aot) {
3132 int vt_reg;
3133 MonoVTable *vtable;
3135 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3136 return;
3137 vt_reg = alloc_preg (cfg);
3138 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3139 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3140 } else {
3141 MonoVTable *vtable;
3142 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3143 return;
3144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3148 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3150 reset_cast_details (cfg);
3154 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3155 * generic code is generated.
3157 static MonoInst*
3158 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3160 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3162 if (context_used) {
3163 MonoInst *rgctx, *addr;
3165 /* FIXME: What if the class is shared? We might not
3166 have to get the address of the method from the
3167 RGCTX. */
3168 addr = emit_get_rgctx_method (cfg, context_used, method,
3169 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3171 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3173 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3174 } else {
3175 return mono_emit_method_call (cfg, method, &val, NULL);
3179 static MonoInst*
3180 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3182 MonoInst *add;
3183 int obj_reg;
3184 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3185 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3186 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3187 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3189 obj_reg = sp [0]->dreg;
3190 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3191 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3193 /* FIXME: generics */
3194 g_assert (klass->rank == 0);
3196 // Check rank == 0
3197 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3198 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3200 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3203 if (context_used) {
3204 MonoInst *element_class;
3206 /* This assertion is from the unboxcast insn */
3207 g_assert (klass->rank == 0);
3209 element_class = emit_get_rgctx_klass (cfg, context_used,
3210 klass->element_class, MONO_RGCTX_INFO_KLASS);
3212 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3213 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3214 } else {
3215 save_cast_details (cfg, klass->element_class, obj_reg);
3216 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3217 reset_cast_details (cfg);
3220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3221 MONO_ADD_INS (cfg->cbb, add);
3222 add->type = STACK_MP;
3223 add->klass = klass;
3225 return add;
3229 * Returns NULL and set the cfg exception on error.
3231 static MonoInst*
3232 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3234 MonoInst *iargs [2];
3235 void *alloc_ftn;
3237 if (context_used) {
3238 MonoInst *data;
3239 int rgctx_info;
3240 MonoInst *iargs [2];
3243 FIXME: we cannot get managed_alloc here because we can't get
3244 the class's vtable (because it's not a closed class)
3246 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3247 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3250 if (cfg->opt & MONO_OPT_SHARED)
3251 rgctx_info = MONO_RGCTX_INFO_KLASS;
3252 else
3253 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3254 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3256 if (cfg->opt & MONO_OPT_SHARED) {
3257 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3258 iargs [1] = data;
3259 alloc_ftn = mono_object_new;
3260 } else {
3261 iargs [0] = data;
3262 alloc_ftn = mono_object_new_specific;
3265 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3268 if (cfg->opt & MONO_OPT_SHARED) {
3269 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3270 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3272 alloc_ftn = mono_object_new;
3273 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3274 /* This happens often in argument checking code, eg. throw new FooException... */
3275 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3276 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3277 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3278 } else {
3279 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3280 MonoMethod *managed_alloc = NULL;
3281 gboolean pass_lw;
3283 if (!vtable) {
3284 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3285 cfg->exception_ptr = klass;
3286 return NULL;
3289 #ifndef MONO_CROSS_COMPILE
3290 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3291 #endif
3293 if (managed_alloc) {
3294 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3295 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3297 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3298 if (pass_lw) {
3299 guint32 lw = vtable->klass->instance_size;
3300 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3301 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3302 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3304 else {
3305 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3309 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3313 * Returns NULL and set the cfg exception on error.
3315 static MonoInst*
3316 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3318 MonoInst *alloc, *ins;
3320 if (mono_class_is_nullable (klass)) {
3321 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3323 if (context_used) {
3324 /* FIXME: What if the class is shared? We might not
3325 have to get the method address from the RGCTX. */
3326 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3327 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3328 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3330 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3331 } else {
3332 return mono_emit_method_call (cfg, method, &val, NULL);
3336 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3337 if (!alloc)
3338 return NULL;
3340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3342 return alloc;
3346 static gboolean
3347 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3349 int i;
3350 MonoGenericContainer *container;
3351 MonoGenericInst *ginst;
3353 if (klass->generic_class) {
3354 container = klass->generic_class->container_class->generic_container;
3355 ginst = klass->generic_class->context.class_inst;
3356 } else if (klass->generic_container && context_used) {
3357 container = klass->generic_container;
3358 ginst = container->context.class_inst;
3359 } else {
3360 return FALSE;
3363 for (i = 0; i < container->type_argc; ++i) {
3364 MonoType *type;
3365 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3366 continue;
3367 type = ginst->type_argv [i];
3368 if (mini_type_is_reference (cfg, type))
3369 return TRUE;
3371 return FALSE;
// FIXME: This doesn't work yet (class libs tests fail?)
/*
 * is_complex_isinst:
 *
 *   TRUE when the isinst/castclass check for KLASS is too complex for the
 * inline fast path and must go through an icall. The leading TRUE currently
 * forces the icall path for every class (see the FIXME above).
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3378 * Returns NULL and set the cfg exception on error.
3380 static MonoInst*
3381 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3383 MonoBasicBlock *is_null_bb;
3384 int obj_reg = src->dreg;
3385 int vtable_reg = alloc_preg (cfg);
3386 MonoInst *klass_inst = NULL;
3388 if (context_used) {
3389 MonoInst *args [3];
3391 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3392 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3393 MonoInst *cache_ins;
3395 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3397 /* obj */
3398 args [0] = src;
3400 /* klass - it's the second element of the cache entry*/
3401 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3403 /* cache */
3404 args [2] = cache_ins;
3406 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3409 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3411 if (is_complex_isinst (klass)) {
3412 /* Complex case, handle by an icall */
3414 /* obj */
3415 args [0] = src;
3417 /* klass */
3418 args [1] = klass_inst;
3420 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3421 } else {
3422 /* Simple case, handled by the code below */
3426 NEW_BBLOCK (cfg, is_null_bb);
3428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3429 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3431 save_cast_details (cfg, klass, obj_reg);
3433 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3435 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3436 } else {
3437 int klass_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3441 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3442 /* the remoting code is broken, access the class for now */
3443 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3444 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3445 if (!vt) {
3446 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3447 cfg->exception_ptr = klass;
3448 return NULL;
3450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3451 } else {
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3455 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3456 } else {
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3458 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3462 MONO_START_BB (cfg, is_null_bb);
3464 reset_cast_details (cfg);
3466 return src;
3470 * Returns NULL and set the cfg exception on error.
3472 static MonoInst*
3473 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3475 MonoInst *ins;
3476 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3477 int obj_reg = src->dreg;
3478 int vtable_reg = alloc_preg (cfg);
3479 int res_reg = alloc_ireg_ref (cfg);
3480 MonoInst *klass_inst = NULL;
3482 if (context_used) {
3483 MonoInst *args [3];
3485 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3486 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3487 MonoInst *cache_ins;
3489 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3491 /* obj */
3492 args [0] = src;
3494 /* klass - it's the second element of the cache entry*/
3495 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3497 /* cache */
3498 args [2] = cache_ins;
3500 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3503 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3505 if (is_complex_isinst (klass)) {
3506 /* Complex case, handle by an icall */
3508 /* obj */
3509 args [0] = src;
3511 /* klass */
3512 args [1] = klass_inst;
3514 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3515 } else {
3516 /* Simple case, the code below can handle it */
3520 NEW_BBLOCK (cfg, is_null_bb);
3521 NEW_BBLOCK (cfg, false_bb);
3522 NEW_BBLOCK (cfg, end_bb);
3524 /* Do the assignment at the beginning, so the other assignment can be if converted */
3525 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3526 ins->type = STACK_OBJ;
3527 ins->klass = klass;
3529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3534 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3535 g_assert (!context_used);
3536 /* the is_null_bb target simply copies the input register to the output */
3537 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3538 } else {
3539 int klass_reg = alloc_preg (cfg);
3541 if (klass->rank) {
3542 int rank_reg = alloc_preg (cfg);
3543 int eclass_reg = alloc_preg (cfg);
3545 g_assert (!context_used);
3546 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3551 if (klass->cast_class == mono_defaults.object_class) {
3552 int parent_reg = alloc_preg (cfg);
3553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3554 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3555 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3557 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3558 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3559 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3561 } else if (klass->cast_class == mono_defaults.enum_class) {
3562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3564 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3565 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3566 } else {
3567 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3568 /* Check that the object is a vector too */
3569 int bounds_reg = alloc_preg (cfg);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3575 /* the is_null_bb target simply copies the input register to the output */
3576 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3578 } else if (mono_class_is_nullable (klass)) {
3579 g_assert (!context_used);
3580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3581 /* the is_null_bb target simply copies the input register to the output */
3582 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3583 } else {
3584 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3585 g_assert (!context_used);
3586 /* the remoting code is broken, access the class for now */
3587 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3588 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3589 if (!vt) {
3590 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3591 cfg->exception_ptr = klass;
3592 return NULL;
3594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3595 } else {
3596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3601 } else {
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3603 /* the is_null_bb target simply copies the input register to the output */
3604 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3609 MONO_START_BB (cfg, false_bb);
3611 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3612 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3614 MONO_START_BB (cfg, is_null_bb);
3616 MONO_START_BB (cfg, end_bb);
3618 return ins;
3621 static MonoInst*
3622 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3624 /* This opcode takes as input an object reference and a class, and returns:
3625 0) if the object is an instance of the class,
3626 1) if the object is not instance of the class,
3627 2) if the object is a proxy whose type cannot be determined */
3629 MonoInst *ins;
3630 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3631 int obj_reg = src->dreg;
3632 int dreg = alloc_ireg (cfg);
3633 int tmp_reg;
3634 int klass_reg = alloc_preg (cfg);
3636 NEW_BBLOCK (cfg, true_bb);
3637 NEW_BBLOCK (cfg, false_bb);
3638 NEW_BBLOCK (cfg, false2_bb);
3639 NEW_BBLOCK (cfg, end_bb);
3640 NEW_BBLOCK (cfg, no_proxy_bb);
3642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3645 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3646 NEW_BBLOCK (cfg, interface_fail_bb);
3648 tmp_reg = alloc_preg (cfg);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3650 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3651 MONO_START_BB (cfg, interface_fail_bb);
3652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3654 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3656 tmp_reg = alloc_preg (cfg);
3657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3660 } else {
3661 tmp_reg = alloc_preg (cfg);
3662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3665 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3666 tmp_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3670 tmp_reg = alloc_preg (cfg);
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3675 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3678 MONO_START_BB (cfg, no_proxy_bb);
3680 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3683 MONO_START_BB (cfg, false_bb);
3685 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3688 MONO_START_BB (cfg, false2_bb);
3690 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3693 MONO_START_BB (cfg, true_bb);
3695 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3697 MONO_START_BB (cfg, end_bb);
3699 /* FIXME: */
3700 MONO_INST_NEW (cfg, ins, OP_ICONST);
3701 ins->dreg = dreg;
3702 ins->type = STACK_I4;
3704 return ins;
3707 static MonoInst*
3708 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3710 /* This opcode takes as input an object reference and a class, and returns:
3711 0) if the object is an instance of the class,
3712 1) if the object is a proxy whose type cannot be determined
3713 an InvalidCastException exception is thrown otherwhise*/
3715 MonoInst *ins;
3716 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3717 int obj_reg = src->dreg;
3718 int dreg = alloc_ireg (cfg);
3719 int tmp_reg = alloc_preg (cfg);
3720 int klass_reg = alloc_preg (cfg);
3722 NEW_BBLOCK (cfg, end_bb);
3723 NEW_BBLOCK (cfg, ok_result_bb);
3725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3728 save_cast_details (cfg, klass, obj_reg);
3730 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3731 NEW_BBLOCK (cfg, interface_fail_bb);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3734 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3735 MONO_START_BB (cfg, interface_fail_bb);
3736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3738 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3740 tmp_reg = alloc_preg (cfg);
3741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3743 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3745 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3748 } else {
3749 NEW_BBLOCK (cfg, no_proxy_bb);
3751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3753 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3755 tmp_reg = alloc_preg (cfg);
3756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3759 tmp_reg = alloc_preg (cfg);
3760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3764 NEW_BBLOCK (cfg, fail_1_bb);
3766 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3768 MONO_START_BB (cfg, fail_1_bb);
3770 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3773 MONO_START_BB (cfg, no_proxy_bb);
3775 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3778 MONO_START_BB (cfg, ok_result_bb);
3780 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3782 MONO_START_BB (cfg, end_bb);
3784 /* FIXME: */
3785 MONO_INST_NEW (cfg, ins, OP_ICONST);
3786 ins->dreg = dreg;
3787 ins->type = STACK_I4;
3789 return ins;
3793 * Returns NULL and set the cfg exception on error.
3795 static G_GNUC_UNUSED MonoInst*
3796 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3798 MonoInst *ptr;
3799 int dreg;
3800 gpointer *trampoline;
3801 MonoInst *obj, *method_ins, *tramp_ins;
3802 MonoDomain *domain;
3803 guint8 **code_slot;
3805 obj = handle_alloc (cfg, klass, FALSE, 0);
3806 if (!obj)
3807 return NULL;
3809 /* Inline the contents of mono_delegate_ctor */
3811 /* Set target field */
3812 /* Optimize away setting of NULL target */
3813 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3815 if (cfg->gen_write_barriers) {
3816 dreg = alloc_preg (cfg);
3817 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3818 emit_write_barrier (cfg, ptr, target, 0);
3822 /* Set method field */
3823 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3825 if (cfg->gen_write_barriers) {
3826 dreg = alloc_preg (cfg);
3827 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3828 emit_write_barrier (cfg, ptr, method_ins, 0);
3831 * To avoid looking up the compiled code belonging to the target method
3832 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3833 * store it, and we fill it after the method has been compiled.
3835 if (!cfg->compile_aot && !method->dynamic) {
3836 MonoInst *code_slot_ins;
3838 if (context_used) {
3839 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3840 } else {
3841 domain = mono_domain_get ();
3842 mono_domain_lock (domain);
3843 if (!domain_jit_info (domain)->method_code_hash)
3844 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3845 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3846 if (!code_slot) {
3847 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3848 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3850 mono_domain_unlock (domain);
3852 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3857 /* Set invoke_impl field */
3858 if (cfg->compile_aot) {
3859 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3860 } else {
3861 trampoline = mono_create_delegate_trampoline (klass);
3862 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3866 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3868 return obj;
3871 static MonoInst*
3872 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3874 MonoJitICallInfo *info;
3876 /* Need to register the icall so it gets an icall wrapper */
3877 info = mono_get_array_new_va_icall (rank);
3879 cfg->flags |= MONO_CFG_HAS_VARARGS;
3881 /* mono_array_new_va () needs a vararg calling convention */
3882 cfg->disable_llvm = TRUE;
3884 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3885 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3888 static void
3889 mono_emit_load_got_addr (MonoCompile *cfg)
3891 MonoInst *getaddr, *dummy_use;
3893 if (!cfg->got_var || cfg->got_var_allocated)
3894 return;
3896 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3897 getaddr->dreg = cfg->got_var->dreg;
3899 /* Add it to the start of the first bblock */
3900 if (cfg->bb_entry->code) {
3901 getaddr->next = cfg->bb_entry->code;
3902 cfg->bb_entry->code = getaddr;
3904 else
3905 MONO_ADD_INS (cfg->bb_entry, getaddr);
3907 cfg->got_var_allocated = TRUE;
3910 * Add a dummy use to keep the got_var alive, since real uses might
3911 * only be generated by the back ends.
3912 * Add it to end_bblock, so the variable's lifetime covers the whole
3913 * method.
3914 * It would be better to make the usage of the got var explicit in all
3915 * cases when the backend needs it (i.e. calls, throw etc.), so this
3916 * wouldn't be needed.
3918 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3919 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size for an inline candidate; lazily initialized from the
 * MONO_INLINELIMIT environment variable (falling back to INLINE_LENGTH_LIMIT)
 * the first time mono_method_check_inlining () runs. */
static int inline_limit;
static gboolean inline_limit_inited;
3925 static gboolean
3926 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3928 MonoMethodHeaderSummary header;
3929 MonoVTable *vtable;
3930 #ifdef MONO_ARCH_SOFT_FLOAT
3931 MonoMethodSignature *sig = mono_method_signature (method);
3932 int i;
3933 #endif
3935 if (cfg->generic_sharing_context)
3936 return FALSE;
3938 if (cfg->inline_depth > 10)
3939 return FALSE;
3941 #ifdef MONO_ARCH_HAVE_LMF_OPS
3942 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3943 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3944 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3945 return TRUE;
3946 #endif
3949 if (!mono_method_get_header_summary (method, &header))
3950 return FALSE;
3952 /*runtime, icall and pinvoke are checked by summary call*/
3953 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3954 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3955 (method->klass->marshalbyref) ||
3956 header.has_clauses)
3957 return FALSE;
3959 /* also consider num_locals? */
3960 /* Do the size check early to avoid creating vtables */
3961 if (!inline_limit_inited) {
3962 if (getenv ("MONO_INLINELIMIT"))
3963 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3964 else
3965 inline_limit = INLINE_LENGTH_LIMIT;
3966 inline_limit_inited = TRUE;
3968 if (header.code_size >= inline_limit)
3969 return FALSE;
3972 * if we can initialize the class of the method right away, we do,
3973 * otherwise we don't allow inlining if the class needs initialization,
3974 * since it would mean inserting a call to mono_runtime_class_init()
3975 * inside the inlined code
3977 if (!(cfg->opt & MONO_OPT_SHARED)) {
3978 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3979 if (cfg->run_cctors && method->klass->has_cctor) {
3980 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3981 if (!method->klass->runtime_info)
3982 /* No vtable created yet */
3983 return FALSE;
3984 vtable = mono_class_vtable (cfg->domain, method->klass);
3985 if (!vtable)
3986 return FALSE;
3987 /* This makes so that inline cannot trigger */
3988 /* .cctors: too many apps depend on them */
3989 /* running with a specific order... */
3990 if (! vtable->initialized)
3991 return FALSE;
3992 mono_runtime_class_init (vtable);
3994 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3995 if (!method->klass->runtime_info)
3996 /* No vtable created yet */
3997 return FALSE;
3998 vtable = mono_class_vtable (cfg->domain, method->klass);
3999 if (!vtable)
4000 return FALSE;
4001 if (!vtable->initialized)
4002 return FALSE;
4004 } else {
4006 * If we're compiling for shared code
4007 * the cctor will need to be run at aot method load time, for example,
4008 * or at the end of the compilation of the inlining method.
4010 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4011 return FALSE;
4015 * CAS - do not inline methods with declarative security
4016 * Note: this has to be before any possible return TRUE;
4018 if (mono_method_has_declsec (method))
4019 return FALSE;
4021 #ifdef MONO_ARCH_SOFT_FLOAT
4022 /* FIXME: */
4023 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4024 return FALSE;
4025 for (i = 0; i < sig->param_count; ++i)
4026 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4027 return FALSE;
4028 #endif
4030 return TRUE;
4033 static gboolean
4034 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4036 if (vtable->initialized && !cfg->compile_aot)
4037 return FALSE;
4039 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4040 return FALSE;
4042 if (!mono_class_needs_cctor_run (vtable->klass, method))
4043 return FALSE;
4045 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4046 /* The initialization is already done before the method is called */
4047 return FALSE;
4049 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  Emits a bounds check against
 * MonoArray.max_length when BCHECK is TRUE.  Returns the address
 * instruction (type STACK_MP, klass set to the element class).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* Narrow a 64-bit index to 32 bits on 32-bit targets. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes can be computed with a single LEA. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* fast_log2 [size] gives the LEA shift amount; -1 entries are unused. */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
	add_reg = alloc_ireg_mp (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
4111 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the MonoArrayBounds pairs.
 * Depends on OP_PMUL, so it is compiled out on MONO_ARCH_EMULATE_MUL_DIV.
 * Returns the address instruction (type STACK_MP).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx1 = index1 - lower_bound; check realidx1 < length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	/* Unsigned compare also catches negative adjusted indexes. */
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: second MonoArrayBounds entry lives right after the first. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
4168 #endif
4170 static MonoInst*
4171 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4173 int rank;
4174 MonoInst *addr;
4175 MonoMethod *addr_method;
4176 int element_size;
4178 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4180 if (rank == 1)
4181 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4183 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4184 /* emit_ldelema_2 depends on OP_LMUL */
4185 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4186 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4188 #endif
4190 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4191 addr_method = mono_marshal_get_array_address (rank, element_size);
4192 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4194 return addr;
/* Default break policy: honor every breakpoint, whatever the method. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}
/* Active break-point policy callback; replaced via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4206 * mono_set_break_policy:
4207 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4210 * (both break IL instructions and Debugger.Break () method calls), for example
4211 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4212 * untrusted or semi-trusted code.
4214 * @policy_callback will be called every time a break point instruction needs to
4215 * be inserted with the method argument being the method that calls Debugger.Break()
4216 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4217 * if it wants the breakpoint to not be effective in the given method.
4218 * #MONO_BREAK_POLICY_ALWAYS is the default.
4220 void
4221 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4223 if (policy_callback)
4224 break_policy_func = policy_callback;
4225 else
4226 break_policy_func = always_insert_breakpoint;
4229 static gboolean
4230 should_insert_brekpoint (MonoMethod *method) {
4231 switch (break_policy_func (method)) {
4232 case MONO_BREAK_POLICY_ALWAYS:
4233 return TRUE;
4234 case MONO_BREAK_POLICY_NEVER:
4235 return FALSE;
4236 case MONO_BREAK_POLICY_ON_DBG:
4237 return mono_debug_using_mono_debugger ();
4238 default:
4239 g_warning ("Incorrect value returned from break policy callback");
4240 return FALSE;
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	/* Element type comes from the icall's third (byref value) parameter. */
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		/* SetGenericValueImpl: copy *args [2] into the array slot. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	} else {
		/* GetGenericValueImpl: copy the array slot into *args [2]. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with inline IR (currently
 * only SIMD intrinsics).  Returns NULL when no intrinsic applies and a
 * regular call must be emitted.
 */
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return ins;
}
4278 static MonoInst*
4279 emit_memory_barrier (MonoCompile *cfg, int kind)
4281 MonoInst *ins = NULL;
4282 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4283 MONO_ADD_INS (cfg->cbb, ins);
4284 ins->backend.memory_barrier_kind = kind;
4286 return ins;
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with inline IR (an intrinsic).
 * Returns the instruction producing the call's result, or NULL when the
 * method is not recognized and a normal call must be emitted.  Recognizes
 * selected methods of String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Debugger, Environment and Math, then falls through
 * to SIMD and per-architecture intrinsics.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			/* Inline string indexer: bounds check + 16-bit load from chars[]. */
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else 
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {
		if (strcmp (cmethod->name, "GetType") == 0) {
			/* obj->vtable->type */
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			/* Address-based hash; only valid when the GC never moves objects. */
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor () does nothing; replace the call with a NOP. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			/* A NULL bounds pointer means the array is a szarray. */
			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		/* Remaining intrinsics are all getters. */
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			/* obj->vtable->rank */
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, 
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {
		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			return emit_memory_barrier (cfg, FullBarrier);
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		/* Call the monitor trampolines, passing the object in a fixed register. */
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/* 
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			/* Increment (x) => atomic add of constant 1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			/* Decrement (x) => atomic add of constant -1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			/* Storing a reference needs a GC write barrier. */
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* Debugger.Break () - emit OP_BREAK or a NOP per the break policy. */
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
			/* Compile-time constant depending on the target platform. */
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/* 
		 * There is general branches code for Min/Max, but it does not work for 
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	/* Finally give the architecture backend a chance. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
/*
 * This entry point could be used later for arbitrary method
 * redirection.  Currently it only redirects String.InternalAllocateStr
 * to the GC's managed allocator when profiling/shared-code do not forbid
 * it.  Returns the redirected call instruction, or NULL to emit the call
 * normally.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,  
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
{
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /*Should not fail since it System.String*/
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
#endif
			if (!managed_alloc)
				return NULL;
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
		}
	}
	return NULL;
}
/*
 * mono_save_args:
 *
 *   During inlining, create a local variable for each incoming argument of
 * the inlined method (including the implicit 'this') and emit stores of the
 * values on the stack SP into them, filling cfg->args.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* 'this' has no entry in sig->params; derive its type from the stack. */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for 
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
4772 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4773 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4775 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4776 static gboolean
4777 check_inline_called_method_name_limit (MonoMethod *called_method)
4779 int strncmp_result;
4780 static char *limit = NULL;
4782 if (limit == NULL) {
4783 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4785 if (limit_string != NULL)
4786 limit = limit_string;
4787 else
4788 limit = (char *) "";
4791 if (limit [0] != '\0') {
4792 char *called_method_name = mono_method_full_name (called_method, TRUE);
4794 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4795 g_free (called_method_name);
4797 //return (strncmp_result <= 0);
4798 return (strncmp_result == 0);
4799 } else {
4800 return TRUE;
4803 #endif
4805 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4806 static gboolean
4807 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4809 int strncmp_result;
4810 static char *limit = NULL;
4812 if (limit == NULL) {
4813 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4814 if (limit_string != NULL) {
4815 limit = limit_string;
4816 } else {
4817 limit = (char *) "";
4821 if (limit [0] != '\0') {
4822 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4824 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4825 g_free (caller_method_name);
4827 //return (strncmp_result <= 0);
4828 return (strncmp_result == 0);
4829 } else {
4830 return TRUE;
4833 #endif
/*
 * emit_init_rvar:
 *
 *   Emit IR which zero-initializes the inline return variable RVAR,
 * choosing the constant opcode appropriate for its stack type; RTYPE is
 * only needed for value types (to find the MonoClass for OP_VZERO).
 */
static void
emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
{
	/* Shared 0.0 constant; OP_R8CONST carries a pointer to the value. */
	static double r8_0 = 0.0;
	MonoInst *ins;

	switch (rvar->type) {
	case STACK_I4:
		MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
		break;
	case STACK_I8:
		MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
		break;
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
		break;
	case STACK_R8:
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		ins->dreg = rvar->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		break;
	case STACK_VTYPE:
		MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
		break;
	default:
		g_assert_not_reached ();
	}
}
/*
 * inline_method:
 *
 *   Try to inline CMETHOD (with signature FSIG and arguments SP) into the
 * current compilation at IP. Saves the per-method state of CFG, recursively
 * runs mono_method_to_ir () on the callee between freshly created start/end
 * bblocks, then restores the state. Returns the inline cost + 1 on success,
 * 0 when inlining was not performed or was aborted. On success the return
 * value (if any) is pushed on *SP.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		/* Only surface the loader error when inlining is mandatory (e.g. wrappers). */
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save the caller's per-method compilation state before recursing. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	/* Restore the caller's state (mirror of the save sequence above). */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* costs < 60 is the inline acceptance threshold; negative costs mean abort. */
	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/* 
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			/* 
			 * Its possible that the rvar is set in some prev bblock, but not in others.
			 * (#1835).
			 */
			if (rvar) {
				MonoBasicBlock *bb;

				for (i = 0; i < ebblock->in_count; ++i) {
					bb = ebblock->in_bb [i];

					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
						cfg->cbb = bb;

						emit_init_rvar (cfg, rvar, fsig->ret);
					}
				}
			}

			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not 
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set)
				emit_init_rvar (cfg, rvar, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as a bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed into simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with a value bigger than OP_LAST.
 * At this point the IR can be handed over to an interpreter, a dumb code generator
 * or to the optimizing code generator that will translate it to SSA form.
 *
 * Profiling directed optimizations.
 * We may compile by default with few or no optimizations and instrument the code
 * or the user may indicate what methods to optimize the most either in a config file
 * or through repeated runs where the compiler applies offline the optimizations to
 * each method and then decides if it was worth it.
 */
/*
 * Bounds/validity checks used while decoding IL in mono_method_to_ir ().
 * Each macro jumps to the local 'unverified' (UNVERIFIED) or load-error
 * (LOAD_ERROR) handling of the enclosing function when the check fails,
 * so they may only be used where those labels are in scope.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
5098 static gboolean
5099 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5101 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5103 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Walk the IL stream [START, END) once and create (via GET_BBLOCK) a basic
 * block at every branch target and at every instruction following a branch.
 * Also marks the bblock containing a CEE_THROW as out-of-line so it can be
 * moved to cold code. Returns 0 on success, 1 on malformed IL with *POS set
 * to the failing instruction.
 * NOTE(review): HEADER and REAL_OFFSET are unused here — presumably kept for
 * signature symmetry with other passes; confirm before removing.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		/* mono_opcode_value () advances ip past any opcode prefix bytes. */
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Skip the operand according to its encoding kind. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement, relative to the next instruction. */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement. */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the end of the whole jump table. */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
5199 static inline MonoMethod *
5200 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5202 MonoMethod *method;
5204 if (m->wrapper_type != MONO_WRAPPER_NONE)
5205 return mono_method_get_wrapper_data (m, token);
5207 method = mono_get_method_full (m->klass->image, token, klass, context);
5209 return method;
5212 static inline MonoMethod *
5213 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5215 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5217 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5218 return NULL;
5220 return method;
5223 static inline MonoClass*
5224 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5226 MonoClass *klass;
5228 if (method->wrapper_type != MONO_WRAPPER_NONE)
5229 klass = mono_method_get_wrapper_data (method, token);
5230 else
5231 klass = mono_class_get_full (method->klass->image, token, context);
5232 if (klass)
5233 mono_class_init (klass);
5234 return klass;
/*
 * Returns TRUE if the JIT should abort inlining because "callee" 
 * is influenced by security attributes.
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* When already inlining (caller differs from the method being compiled),
	 * any declarative security on the callee forces an abort. */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
5272 static MonoMethod*
5273 throw_exception (void)
5275 static MonoMethod *method = NULL;
5277 if (!method) {
5278 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5279 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5281 g_assert (method);
5282 return method;
5285 static void
5286 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5288 MonoMethod *thrower = throw_exception ();
5289 MonoInst *args [1];
5291 EMIT_NEW_PCONST (cfg, args [0], ex);
5292 mono_emit_method_call (cfg, thrower, args, NULL);
5296 * Return the original method is a wrapper is specified. We can only access
5297 * the custom attributes from the original method.
5299 static MonoMethod*
5300 get_original_method (MonoMethod *method)
5302 if (method->wrapper_type == MONO_WRAPPER_NONE)
5303 return method;
5305 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5306 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5307 return NULL;
5309 /* in other cases we need to find the original method */
5310 return mono_marshal_method_from_wrapper (method);
5313 static void
5314 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5315 MonoBasicBlock *bblock, unsigned char *ip)
5317 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5318 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5319 if (ex)
5320 emit_throw_exception (cfg, ex);
5323 static void
5324 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5325 MonoBasicBlock *bblock, unsigned char *ip)
5327 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5328 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5329 if (ex)
5330 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * LEN is the element count of the freshly created array; *OUT_SIZE receives
 * the byte size of the initializer blob and *OUT_FIELD_TOKEN the RVA field
 * token. Returns NULL if the pattern does not match or cannot be optimized.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* The called method must really be RuntimeHelpers.InitializeArray from corlib. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The blob must be at least as large as the data we intend to copy. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5417 static void
5418 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5420 char *method_fname = mono_method_full_name (method, TRUE);
5421 char *method_code;
5422 MonoMethodHeader *header = mono_method_get_header (method);
5424 if (header->code_size == 0)
5425 method_code = g_strdup ("method body is empty.");
5426 else
5427 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5428 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5429 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5430 g_free (method_fname);
5431 g_free (method_code);
5432 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compilation with a pre-built exception object. The exception_ptr
 * slot is registered as a GC root BEFORE the managed object is stored into it,
 * so the object stays alive; do not reorder these statements.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, taking generic sharing in CFG
 * into account (delegates to mini_type_is_reference).
 */
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
	return mini_type_is_reference (cfg, &klass->byval_arg);
}
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local variable N.
 * When the value was just produced by a constant instruction at the end of
 * the current bblock, retarget that instruction's destination register
 * instead of emitting a separate move.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);

	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/* 
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 * Handles the "ldloca N; initobj T" sequence by zeroing local N directly.
 * SIZE is the ldloca operand width (1 = ldloca.s). Returns the IP past the
 * consumed initobj on success, NULL when the pattern was not optimized
 * (including type-load failure via CHECK_TYPELOAD -> load_error).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* The initobj must be in the same bblock for this rewrite to be valid. */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
			/* initobj on a reference type stores null. */
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5507 static gboolean
5508 is_exception_class (MonoClass *class)
5510 while (class) {
5511 if (class == mono_defaults.exception_class)
5512 return TRUE;
5513 class = class->parent;
5515 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
 * The result is cached on the assembly; the write barrier before setting
 * jit_optimizer_disabled_inited makes the value visible to other threads
 * before the inited flag.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;	/* NOTE(review): assigned below but never read */
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			/* Custom attribute blobs start with the 0x0001 prolog. */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments; the second is IsJITOptimizerDisabled. */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
/*
 * is_supported_tail_call:
 *
 *   Return TRUE if a tail call from METHOD to CMETHOD (signature FSIG) can
 * be emitted. Starts from an arch-specific or signature-equality check and
 * then vetoes the cases where callee arguments could reference the current
 * method's stack frame, or where the callee/caller require special frames.
 */
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		static int count = 0;
		count ++;
		if (getenv ("COUNT")) {
			if (count == atoi (getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
			if (count > atoi (getenv ("COUNT")))
				supported_tail_call = FALSE;
		}
	}
#endif

	return supported_tail_call;
}
5626 * mono_method_to_ir:
5628 * Translate the .net IL into linear IR.
5631 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5632 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5633 guint inline_offset, gboolean is_virtual_call)
5635 MonoError error;
5636 MonoInst *ins, **sp, **stack_start;
5637 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5638 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5639 MonoMethod *cmethod, *method_definition;
5640 MonoInst **arg_array;
5641 MonoMethodHeader *header;
5642 MonoImage *image;
5643 guint32 token, ins_flag;
5644 MonoClass *klass;
5645 MonoClass *constrained_call = NULL;
5646 unsigned char *ip, *end, *target, *err_pos;
5647 static double r8_0 = 0.0;
5648 MonoMethodSignature *sig;
5649 MonoGenericContext *generic_context = NULL;
5650 MonoGenericContainer *generic_container = NULL;
5651 MonoType **param_types;
5652 int i, n, start_new_bblock, dreg;
5653 int num_calls = 0, inline_costs = 0;
5654 int breakpoint_id = 0;
5655 guint num_args;
5656 MonoBoolean security, pinvoke;
5657 MonoSecurityManager* secman = NULL;
5658 MonoDeclSecurityActions actions;
5659 GSList *class_inits = NULL;
5660 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5661 int context_used;
5662 gboolean init_locals, seq_points, skip_dead_blocks;
5663 gboolean disable_inline;
5665 disable_inline = is_jit_optimizer_disabled (method);
5667 /* serialization and xdomain stuff may need access to private fields and methods */
5668 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5669 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5670 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5671 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5672 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5673 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5675 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5677 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5678 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5679 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5680 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5681 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5683 image = method->klass->image;
5684 header = mono_method_get_header (method);
5685 if (!header) {
5686 MonoLoaderError *error;
5688 if ((error = mono_loader_get_last_error ())) {
5689 mono_cfg_set_exception (cfg, error->exception_type);
5690 } else {
5691 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5692 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5694 goto exception_exit;
5696 generic_container = mono_method_get_generic_container (method);
5697 sig = mono_method_signature (method);
5698 num_args = sig->hasthis + sig->param_count;
5699 ip = (unsigned char*)header->code;
5700 cfg->cil_start = ip;
5701 end = ip + header->code_size;
5702 mono_jit_stats.cil_code_size += header->code_size;
5703 init_locals = header->init_locals;
5705 seq_points = cfg->gen_seq_points && cfg->method == method;
5708 * Methods without init_locals set could cause asserts in various passes
5709 * (#497220).
5711 init_locals = TRUE;
5713 method_definition = method;
5714 while (method_definition->is_inflated) {
5715 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5716 method_definition = imethod->declaring;
5719 /* SkipVerification is not allowed if core-clr is enabled */
5720 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5721 dont_verify = TRUE;
5722 dont_verify_stloc = TRUE;
5725 if (mono_debug_using_mono_debugger ())
5726 cfg->keep_cil_nops = TRUE;
5728 if (sig->is_inflated)
5729 generic_context = mono_method_get_context (method);
5730 else if (generic_container)
5731 generic_context = &generic_container->context;
5732 cfg->generic_context = generic_context;
5734 if (!cfg->generic_sharing_context)
5735 g_assert (!sig->has_type_parameters);
5737 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5738 g_assert (method->is_inflated);
5739 g_assert (mono_method_get_context (method)->method_inst);
5741 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5742 g_assert (sig->generic_param_count);
5744 if (cfg->method == method) {
5745 cfg->real_offset = 0;
5746 } else {
5747 cfg->real_offset = inline_offset;
5750 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5751 cfg->cil_offset_to_bb_len = header->code_size;
5753 cfg->current_method = method;
5755 if (cfg->verbose_level > 2)
5756 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5758 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5759 if (sig->hasthis)
5760 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5761 for (n = 0; n < sig->param_count; ++n)
5762 param_types [n + sig->hasthis] = sig->params [n];
5763 cfg->arg_types = param_types;
5765 dont_inline = g_list_prepend (dont_inline, method);
5766 if (cfg->method == method) {
5768 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5769 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5771 /* ENTRY BLOCK */
5772 NEW_BBLOCK (cfg, start_bblock);
5773 cfg->bb_entry = start_bblock;
5774 start_bblock->cil_code = NULL;
5775 start_bblock->cil_length = 0;
5776 #if defined(__native_client_codegen__)
5777 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5778 ins->dreg = alloc_dreg (cfg, STACK_I4);
5779 MONO_ADD_INS (start_bblock, ins);
5780 #endif
5782 /* EXIT BLOCK */
5783 NEW_BBLOCK (cfg, end_bblock);
5784 cfg->bb_exit = end_bblock;
5785 end_bblock->cil_code = NULL;
5786 end_bblock->cil_length = 0;
5787 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5788 g_assert (cfg->num_bblocks == 2);
5790 arg_array = cfg->args;
5792 if (header->num_clauses) {
5793 cfg->spvars = g_hash_table_new (NULL, NULL);
5794 cfg->exvars = g_hash_table_new (NULL, NULL);
5796 /* handle exception clauses */
5797 for (i = 0; i < header->num_clauses; ++i) {
5798 MonoBasicBlock *try_bb;
5799 MonoExceptionClause *clause = &header->clauses [i];
5800 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5801 try_bb->real_offset = clause->try_offset;
5802 try_bb->try_start = TRUE;
5803 try_bb->region = ((i + 1) << 8) | clause->flags;
5804 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5805 tblock->real_offset = clause->handler_offset;
5806 tblock->flags |= BB_EXCEPTION_HANDLER;
5808 link_bblock (cfg, try_bb, tblock);
5810 if (*(ip + clause->handler_offset) == CEE_POP)
5811 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5813 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5814 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5815 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5816 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5817 MONO_ADD_INS (tblock, ins);
5819 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5820 /* finally clauses already have a seq point */
5821 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5822 MONO_ADD_INS (tblock, ins);
5825 /* todo: is a fault block unsafe to optimize? */
5826 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5827 tblock->flags |= BB_EXCEPTION_UNSAFE;
5831 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5832 while (p < end) {
5833 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5835 /* catch and filter blocks get the exception object on the stack */
5836 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5837 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5838 MonoInst *dummy_use;
5840 /* mostly like handle_stack_args (), but just sets the input args */
5841 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5842 tblock->in_scount = 1;
5843 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5844 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5847 * Add a dummy use for the exvar so its liveness info will be
5848 * correct.
5850 cfg->cbb = tblock;
5851 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5853 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5854 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5855 tblock->flags |= BB_EXCEPTION_HANDLER;
5856 tblock->real_offset = clause->data.filter_offset;
5857 tblock->in_scount = 1;
5858 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5859 /* The filter block shares the exvar with the handler block */
5860 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5861 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5862 MONO_ADD_INS (tblock, ins);
5866 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5867 clause->data.catch_class &&
5868 cfg->generic_sharing_context &&
5869 mono_class_check_context_used (clause->data.catch_class)) {
5871 * In shared generic code with catch
5872 * clauses containing type variables
5873 * the exception handling code has to
5874 * be able to get to the rgctx.
5875 * Therefore we have to make sure that
5876 * the vtable/mrgctx argument (for
5877 * static or generic methods) or the
5878 * "this" argument (for non-static
5879 * methods) are live.
5881 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5882 mini_method_get_context (method)->method_inst ||
5883 method->klass->valuetype) {
5884 mono_get_vtable_var (cfg);
5885 } else {
5886 MonoInst *dummy_use;
5888 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5892 } else {
5893 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5894 cfg->cbb = start_bblock;
5895 cfg->args = arg_array;
5896 mono_save_args (cfg, sig, inline_args);
5899 /* FIRST CODE BLOCK */
5900 NEW_BBLOCK (cfg, bblock);
5901 bblock->cil_code = ip;
5902 cfg->cbb = bblock;
5903 cfg->ip = ip;
5905 ADD_BBLOCK (cfg, bblock);
5907 if (cfg->method == method) {
5908 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5909 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5910 MONO_INST_NEW (cfg, ins, OP_BREAK);
5911 MONO_ADD_INS (bblock, ins);
5915 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5916 secman = mono_security_manager_get_methods ();
5918 security = (secman && mono_method_has_declsec (method));
5919 /* at this point having security doesn't mean we have any code to generate */
5920 if (security && (cfg->method == method)) {
5921 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5922 * And we do not want to enter the next section (with allocation) if we
5923 * have nothing to generate */
5924 security = mono_declsec_get_demands (method, &actions);
5927 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5928 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5929 if (pinvoke) {
5930 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5931 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5932 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5934 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5935 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5936 pinvoke = FALSE;
5938 if (custom)
5939 mono_custom_attrs_free (custom);
5941 if (pinvoke) {
5942 custom = mono_custom_attrs_from_class (wrapped->klass);
5943 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5944 pinvoke = FALSE;
5946 if (custom)
5947 mono_custom_attrs_free (custom);
5949 } else {
5950 /* not a P/Invoke after all */
5951 pinvoke = FALSE;
5955 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5956 /* we use a separate basic block for the initialization code */
5957 NEW_BBLOCK (cfg, init_localsbb);
5958 cfg->bb_init = init_localsbb;
5959 init_localsbb->real_offset = cfg->real_offset;
5960 start_bblock->next_bb = init_localsbb;
5961 init_localsbb->next_bb = bblock;
5962 link_bblock (cfg, start_bblock, init_localsbb);
5963 link_bblock (cfg, init_localsbb, bblock);
5965 cfg->cbb = init_localsbb;
5966 } else {
5967 start_bblock->next_bb = bblock;
5968 link_bblock (cfg, start_bblock, bblock);
5971 /* at this point we know, if security is TRUE, that some code needs to be generated */
5972 if (security && (cfg->method == method)) {
5973 MonoInst *args [2];
5975 mono_jit_stats.cas_demand_generation++;
5977 if (actions.demand.blob) {
5978 /* Add code for SecurityAction.Demand */
5979 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5980 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5981 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5982 mono_emit_method_call (cfg, secman->demand, args, NULL);
5984 if (actions.noncasdemand.blob) {
5985 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5986 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5987 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5988 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5989 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5990 mono_emit_method_call (cfg, secman->demand, args, NULL);
5992 if (actions.demandchoice.blob) {
5993 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5994 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5995 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5996 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5997 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6001 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6002 if (pinvoke) {
6003 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6006 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6007 /* check if this is native code, e.g. an icall or a p/invoke */
6008 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6009 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6010 if (wrapped) {
6011 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6012 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6014 /* if this ia a native call then it can only be JITted from platform code */
6015 if ((icall || pinvk) && method->klass && method->klass->image) {
6016 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6017 MonoException *ex = icall ? mono_get_exception_security () :
6018 mono_get_exception_method_access ();
6019 emit_throw_exception (cfg, ex);
6026 if (header->code_size == 0)
6027 UNVERIFIED;
6029 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6030 ip = err_pos;
6031 UNVERIFIED;
6034 if (cfg->method == method)
6035 mono_debug_init_method (cfg, bblock, breakpoint_id);
6037 for (n = 0; n < header->num_locals; ++n) {
6038 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6039 UNVERIFIED;
6041 class_inits = NULL;
6043 /* We force the vtable variable here for all shared methods
6044 for the possibility that they might show up in a stack
6045 trace where their exact instantiation is needed. */
6046 if (cfg->generic_sharing_context && method == cfg->method) {
6047 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6048 mini_method_get_context (method)->method_inst ||
6049 method->klass->valuetype) {
6050 mono_get_vtable_var (cfg);
6051 } else {
6052 /* FIXME: Is there a better way to do this?
6053 We need the variable live for the duration
6054 of the whole method. */
6055 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6059 /* add a check for this != NULL to inlined methods */
6060 if (is_virtual_call) {
6061 MonoInst *arg_ins;
6063 NEW_ARGLOAD (cfg, arg_ins, 0);
6064 MONO_ADD_INS (cfg->cbb, arg_ins);
6065 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6068 skip_dead_blocks = !dont_verify;
6069 if (skip_dead_blocks) {
6070 original_bb = bb = mono_basic_block_split (method, &error);
6071 if (!mono_error_ok (&error)) {
6072 mono_error_cleanup (&error);
6073 UNVERIFIED;
6075 g_assert (bb);
6078 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6079 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6081 ins_flag = 0;
6082 start_new_bblock = 0;
6083 cfg->cbb = bblock;
6084 while (ip < end) {
6085 if (cfg->method == method)
6086 cfg->real_offset = ip - header->code;
6087 else
6088 cfg->real_offset = inline_offset;
6089 cfg->ip = ip;
6091 context_used = 0;
6093 if (start_new_bblock) {
6094 bblock->cil_length = ip - bblock->cil_code;
6095 if (start_new_bblock == 2) {
6096 g_assert (ip == tblock->cil_code);
6097 } else {
6098 GET_BBLOCK (cfg, tblock, ip);
6100 bblock->next_bb = tblock;
6101 bblock = tblock;
6102 cfg->cbb = bblock;
6103 start_new_bblock = 0;
6104 for (i = 0; i < bblock->in_scount; ++i) {
6105 if (cfg->verbose_level > 3)
6106 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6107 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6108 *sp++ = ins;
6110 if (class_inits)
6111 g_slist_free (class_inits);
6112 class_inits = NULL;
6113 } else {
6114 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6115 link_bblock (cfg, bblock, tblock);
6116 if (sp != stack_start) {
6117 handle_stack_args (cfg, stack_start, sp - stack_start);
6118 sp = stack_start;
6119 CHECK_UNVERIFIABLE (cfg);
6121 bblock->next_bb = tblock;
6122 bblock = tblock;
6123 cfg->cbb = bblock;
6124 for (i = 0; i < bblock->in_scount; ++i) {
6125 if (cfg->verbose_level > 3)
6126 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6127 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6128 *sp++ = ins;
6130 g_slist_free (class_inits);
6131 class_inits = NULL;
6135 if (skip_dead_blocks) {
6136 int ip_offset = ip - header->code;
6138 if (ip_offset == bb->end)
6139 bb = bb->next;
6141 if (bb->dead) {
6142 int op_size = mono_opcode_size (ip, end);
6143 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6145 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6147 if (ip_offset + op_size == bb->end) {
6148 MONO_INST_NEW (cfg, ins, OP_NOP);
6149 MONO_ADD_INS (bblock, ins);
6150 start_new_bblock = 1;
6153 ip += op_size;
6154 continue;
6158 * Sequence points are points where the debugger can place a breakpoint.
6159 * Currently, we generate these automatically at points where the IL
6160 * stack is empty.
6162 if (seq_points && sp == stack_start) {
6163 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6164 MONO_ADD_INS (cfg->cbb, ins);
6167 bblock->real_offset = cfg->real_offset;
6169 if ((cfg->method == method) && cfg->coverage_info) {
6170 guint32 cil_offset = ip - header->code;
6171 cfg->coverage_info->data [cil_offset].cil_code = ip;
6173 /* TODO: Use an increment here */
6174 #if defined(TARGET_X86)
6175 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6176 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6177 ins->inst_imm = 1;
6178 MONO_ADD_INS (cfg->cbb, ins);
6179 #else
6180 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6181 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6182 #endif
6185 if (cfg->verbose_level > 3)
6186 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6188 switch (*ip) {
6189 case CEE_NOP:
6190 if (cfg->keep_cil_nops)
6191 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6192 else
6193 MONO_INST_NEW (cfg, ins, OP_NOP);
6194 ip++;
6195 MONO_ADD_INS (bblock, ins);
6196 break;
6197 case CEE_BREAK:
6198 if (should_insert_brekpoint (cfg->method))
6199 MONO_INST_NEW (cfg, ins, OP_BREAK);
6200 else
6201 MONO_INST_NEW (cfg, ins, OP_NOP);
6202 ip++;
6203 MONO_ADD_INS (bblock, ins);
6204 break;
6205 case CEE_LDARG_0:
6206 case CEE_LDARG_1:
6207 case CEE_LDARG_2:
6208 case CEE_LDARG_3:
6209 CHECK_STACK_OVF (1);
6210 n = (*ip)-CEE_LDARG_0;
6211 CHECK_ARG (n);
6212 EMIT_NEW_ARGLOAD (cfg, ins, n);
6213 ip++;
6214 *sp++ = ins;
6215 break;
6216 case CEE_LDLOC_0:
6217 case CEE_LDLOC_1:
6218 case CEE_LDLOC_2:
6219 case CEE_LDLOC_3:
6220 CHECK_STACK_OVF (1);
6221 n = (*ip)-CEE_LDLOC_0;
6222 CHECK_LOCAL (n);
6223 EMIT_NEW_LOCLOAD (cfg, ins, n);
6224 ip++;
6225 *sp++ = ins;
6226 break;
6227 case CEE_STLOC_0:
6228 case CEE_STLOC_1:
6229 case CEE_STLOC_2:
6230 case CEE_STLOC_3: {
6231 CHECK_STACK (1);
6232 n = (*ip)-CEE_STLOC_0;
6233 CHECK_LOCAL (n);
6234 --sp;
6235 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6236 UNVERIFIED;
6237 emit_stloc_ir (cfg, sp, header, n);
6238 ++ip;
6239 inline_costs += 1;
6240 break;
6242 case CEE_LDARG_S:
6243 CHECK_OPSIZE (2);
6244 CHECK_STACK_OVF (1);
6245 n = ip [1];
6246 CHECK_ARG (n);
6247 EMIT_NEW_ARGLOAD (cfg, ins, n);
6248 *sp++ = ins;
6249 ip += 2;
6250 break;
6251 case CEE_LDARGA_S:
6252 CHECK_OPSIZE (2);
6253 CHECK_STACK_OVF (1);
6254 n = ip [1];
6255 CHECK_ARG (n);
6256 NEW_ARGLOADA (cfg, ins, n);
6257 MONO_ADD_INS (cfg->cbb, ins);
6258 *sp++ = ins;
6259 ip += 2;
6260 break;
6261 case CEE_STARG_S:
6262 CHECK_OPSIZE (2);
6263 CHECK_STACK (1);
6264 --sp;
6265 n = ip [1];
6266 CHECK_ARG (n);
6267 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6268 UNVERIFIED;
6269 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6270 ip += 2;
6271 break;
6272 case CEE_LDLOC_S:
6273 CHECK_OPSIZE (2);
6274 CHECK_STACK_OVF (1);
6275 n = ip [1];
6276 CHECK_LOCAL (n);
6277 EMIT_NEW_LOCLOAD (cfg, ins, n);
6278 *sp++ = ins;
6279 ip += 2;
6280 break;
6281 case CEE_LDLOCA_S: {
6282 unsigned char *tmp_ip;
6283 CHECK_OPSIZE (2);
6284 CHECK_STACK_OVF (1);
6285 CHECK_LOCAL (ip [1]);
6287 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6288 ip = tmp_ip;
6289 inline_costs += 1;
6290 break;
6293 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6294 *sp++ = ins;
6295 ip += 2;
6296 break;
6298 case CEE_STLOC_S:
6299 CHECK_OPSIZE (2);
6300 CHECK_STACK (1);
6301 --sp;
6302 CHECK_LOCAL (ip [1]);
6303 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6304 UNVERIFIED;
6305 emit_stloc_ir (cfg, sp, header, ip [1]);
6306 ip += 2;
6307 inline_costs += 1;
6308 break;
6309 case CEE_LDNULL:
6310 CHECK_STACK_OVF (1);
6311 EMIT_NEW_PCONST (cfg, ins, NULL);
6312 ins->type = STACK_OBJ;
6313 ++ip;
6314 *sp++ = ins;
6315 break;
6316 case CEE_LDC_I4_M1:
6317 CHECK_STACK_OVF (1);
6318 EMIT_NEW_ICONST (cfg, ins, -1);
6319 ++ip;
6320 *sp++ = ins;
6321 break;
6322 case CEE_LDC_I4_0:
6323 case CEE_LDC_I4_1:
6324 case CEE_LDC_I4_2:
6325 case CEE_LDC_I4_3:
6326 case CEE_LDC_I4_4:
6327 case CEE_LDC_I4_5:
6328 case CEE_LDC_I4_6:
6329 case CEE_LDC_I4_7:
6330 case CEE_LDC_I4_8:
6331 CHECK_STACK_OVF (1);
6332 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6333 ++ip;
6334 *sp++ = ins;
6335 break;
6336 case CEE_LDC_I4_S:
6337 CHECK_OPSIZE (2);
6338 CHECK_STACK_OVF (1);
6339 ++ip;
6340 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6341 ++ip;
6342 *sp++ = ins;
6343 break;
6344 case CEE_LDC_I4:
6345 CHECK_OPSIZE (5);
6346 CHECK_STACK_OVF (1);
6347 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6348 ip += 5;
6349 *sp++ = ins;
6350 break;
6351 case CEE_LDC_I8:
6352 CHECK_OPSIZE (9);
6353 CHECK_STACK_OVF (1);
6354 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6355 ins->type = STACK_I8;
6356 ins->dreg = alloc_dreg (cfg, STACK_I8);
6357 ++ip;
6358 ins->inst_l = (gint64)read64 (ip);
6359 MONO_ADD_INS (bblock, ins);
6360 ip += 8;
6361 *sp++ = ins;
6362 break;
6363 case CEE_LDC_R4: {
6364 float *f;
6365 gboolean use_aotconst = FALSE;
6367 #ifdef TARGET_POWERPC
6368 /* FIXME: Clean this up */
6369 if (cfg->compile_aot)
6370 use_aotconst = TRUE;
6371 #endif
6373 /* FIXME: we should really allocate this only late in the compilation process */
6374 f = mono_domain_alloc (cfg->domain, sizeof (float));
6375 CHECK_OPSIZE (5);
6376 CHECK_STACK_OVF (1);
6378 if (use_aotconst) {
6379 MonoInst *cons;
6380 int dreg;
6382 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6384 dreg = alloc_freg (cfg);
6385 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6386 ins->type = STACK_R8;
6387 } else {
6388 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6389 ins->type = STACK_R8;
6390 ins->dreg = alloc_dreg (cfg, STACK_R8);
6391 ins->inst_p0 = f;
6392 MONO_ADD_INS (bblock, ins);
6394 ++ip;
6395 readr4 (ip, f);
6396 ip += 4;
6397 *sp++ = ins;
6398 break;
6400 case CEE_LDC_R8: {
6401 double *d;
6402 gboolean use_aotconst = FALSE;
6404 #ifdef TARGET_POWERPC
6405 /* FIXME: Clean this up */
6406 if (cfg->compile_aot)
6407 use_aotconst = TRUE;
6408 #endif
6410 /* FIXME: we should really allocate this only late in the compilation process */
6411 d = mono_domain_alloc (cfg->domain, sizeof (double));
6412 CHECK_OPSIZE (9);
6413 CHECK_STACK_OVF (1);
6415 if (use_aotconst) {
6416 MonoInst *cons;
6417 int dreg;
6419 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6421 dreg = alloc_freg (cfg);
6422 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6423 ins->type = STACK_R8;
6424 } else {
6425 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6426 ins->type = STACK_R8;
6427 ins->dreg = alloc_dreg (cfg, STACK_R8);
6428 ins->inst_p0 = d;
6429 MONO_ADD_INS (bblock, ins);
6431 ++ip;
6432 readr8 (ip, d);
6433 ip += 8;
6434 *sp++ = ins;
6435 break;
6437 case CEE_DUP: {
6438 MonoInst *temp, *store;
6439 CHECK_STACK (1);
6440 CHECK_STACK_OVF (1);
6441 sp--;
6442 ins = *sp;
6444 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6445 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6447 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6448 *sp++ = ins;
6450 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6451 *sp++ = ins;
6453 ++ip;
6454 inline_costs += 2;
6455 break;
6457 case CEE_POP:
6458 CHECK_STACK (1);
6459 ip++;
6460 --sp;
6462 #ifdef TARGET_X86
6463 if (sp [0]->type == STACK_R8)
6464 /* we need to pop the value from the x86 FP stack */
6465 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6466 #endif
6467 break;
6468 case CEE_JMP: {
6469 MonoCallInst *call;
6471 INLINE_FAILURE;
6473 CHECK_OPSIZE (5);
6474 if (stack_start != sp)
6475 UNVERIFIED;
6476 token = read32 (ip + 1);
6477 /* FIXME: check the signature matches */
6478 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6480 if (!cmethod || mono_loader_get_last_error ())
6481 LOAD_ERROR;
6483 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6484 GENERIC_SHARING_FAILURE (CEE_JMP);
6486 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6487 CHECK_CFG_EXCEPTION;
6489 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6491 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6492 int i, n;
6494 /* Handle tail calls similarly to calls */
6495 n = fsig->param_count + fsig->hasthis;
6497 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6498 call->method = cmethod;
6499 call->tail_call = TRUE;
6500 call->signature = mono_method_signature (cmethod);
6501 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6502 call->inst.inst_p0 = cmethod;
6503 for (i = 0; i < n; ++i)
6504 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6506 mono_arch_emit_call (cfg, call);
6507 MONO_ADD_INS (bblock, (MonoInst*)call);
6509 #else
6510 for (i = 0; i < num_args; ++i)
6511 /* Prevent arguments from being optimized away */
6512 arg_array [i]->flags |= MONO_INST_VOLATILE;
6514 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6515 ins = (MonoInst*)call;
6516 ins->inst_p0 = cmethod;
6517 MONO_ADD_INS (bblock, ins);
6518 #endif
6520 ip += 5;
6521 start_new_bblock = 1;
6522 break;
6524 case CEE_CALLI:
6525 case CEE_CALL:
6526 case CEE_CALLVIRT: {
6527 MonoInst *addr = NULL;
6528 MonoMethodSignature *fsig = NULL;
6529 int array_rank = 0;
6530 int virtual = *ip == CEE_CALLVIRT;
6531 int calli = *ip == CEE_CALLI;
6532 gboolean pass_imt_from_rgctx = FALSE;
6533 MonoInst *imt_arg = NULL;
6534 gboolean pass_vtable = FALSE;
6535 gboolean pass_mrgctx = FALSE;
6536 MonoInst *vtable_arg = NULL;
6537 gboolean check_this = FALSE;
6538 gboolean supported_tail_call = FALSE;
6540 CHECK_OPSIZE (5);
6541 token = read32 (ip + 1);
6543 if (calli) {
6544 cmethod = NULL;
6545 CHECK_STACK (1);
6546 --sp;
6547 addr = *sp;
6548 if (method->wrapper_type != MONO_WRAPPER_NONE)
6549 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6550 else
6551 fsig = mono_metadata_parse_signature (image, token);
6553 n = fsig->param_count + fsig->hasthis;
6555 if (method->dynamic && fsig->pinvoke) {
6556 MonoInst *args [3];
6559 * This is a call through a function pointer using a pinvoke
6560 * signature. Have to create a wrapper and call that instead.
6561 * FIXME: This is very slow, need to create a wrapper at JIT time
6562 * instead based on the signature.
6564 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6565 EMIT_NEW_PCONST (cfg, args [1], fsig);
6566 args [2] = addr;
6567 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6569 } else {
6570 MonoMethod *cil_method;
6572 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6573 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6574 cil_method = cmethod;
6575 } else if (constrained_call) {
6576 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6578 * This is needed since get_method_constrained can't find
6579 * the method in klass representing a type var.
6580 * The type var is guaranteed to be a reference type in this
6581 * case.
6583 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6584 cil_method = cmethod;
6585 g_assert (!cmethod->klass->valuetype);
6586 } else {
6587 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6589 } else {
6590 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6591 cil_method = cmethod;
6594 if (!cmethod || mono_loader_get_last_error ())
6595 LOAD_ERROR;
6596 if (!dont_verify && !cfg->skip_visibility) {
6597 MonoMethod *target_method = cil_method;
6598 if (method->is_inflated) {
6599 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6601 if (!mono_method_can_access_method (method_definition, target_method) &&
6602 !mono_method_can_access_method (method, cil_method))
6603 METHOD_ACCESS_FAILURE;
6606 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6607 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6609 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6610 /* MS.NET seems to silently convert this to a callvirt */
6611 virtual = 1;
6615 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6616 * converts to a callvirt.
6618 * tests/bug-515884.il is an example of this behavior
6620 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6621 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6622 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6623 virtual = 1;
6626 if (!cmethod->klass->inited)
6627 if (!mono_class_init (cmethod->klass))
6628 LOAD_ERROR;
6630 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6631 mini_class_is_system_array (cmethod->klass)) {
6632 array_rank = cmethod->klass->rank;
6633 fsig = mono_method_signature (cmethod);
6634 } else {
6635 fsig = mono_method_signature (cmethod);
6637 if (!fsig)
6638 LOAD_ERROR;
6640 if (fsig->pinvoke) {
6641 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6642 check_for_pending_exc, FALSE);
6643 fsig = mono_method_signature (wrapper);
6644 } else if (constrained_call) {
6645 fsig = mono_method_signature (cmethod);
6646 } else {
6647 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6651 mono_save_token_info (cfg, image, token, cil_method);
6653 n = fsig->param_count + fsig->hasthis;
6655 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6656 if (check_linkdemand (cfg, method, cmethod))
6657 INLINE_FAILURE;
6658 CHECK_CFG_EXCEPTION;
6661 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6662 g_assert_not_reached ();
6665 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6666 UNVERIFIED;
6668 if (!cfg->generic_sharing_context && cmethod)
6669 g_assert (!mono_method_check_context_used (cmethod));
6671 CHECK_STACK (n);
6673 //g_assert (!virtual || fsig->hasthis);
6675 sp -= n;
6677 if (constrained_call) {
6679 * We have the `constrained.' prefix opcode.
6681 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6683 * The type parameter is instantiated as a valuetype,
6684 * but that type doesn't override the method we're
6685 * calling, so we need to box `this'.
6687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6688 ins->klass = constrained_call;
6689 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6690 CHECK_CFG_EXCEPTION;
6691 } else if (!constrained_call->valuetype) {
6692 int dreg = alloc_ireg_ref (cfg);
6695 * The type parameter is instantiated as a reference
6696 * type. We have a managed pointer on the stack, so
6697 * we need to dereference it here.
6699 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6700 ins->type = STACK_OBJ;
6701 sp [0] = ins;
6702 } else if (cmethod->klass->valuetype)
6703 virtual = 0;
6704 constrained_call = NULL;
6707 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6708 UNVERIFIED;
6711 * If the callee is a shared method, then its static cctor
6712 * might not get called after the call was patched.
6714 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6715 emit_generic_class_init (cfg, cmethod->klass);
6716 CHECK_TYPELOAD (cmethod->klass);
6719 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6720 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6721 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6722 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6723 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6726 * Pass vtable iff target method might
6727 * be shared, which means that sharing
6728 * is enabled for its class and its
6729 * context is sharable (and it's not a
6730 * generic method).
6732 if (sharing_enabled && context_sharable &&
6733 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6734 pass_vtable = TRUE;
6737 if (cmethod && mini_method_get_context (cmethod) &&
6738 mini_method_get_context (cmethod)->method_inst) {
6739 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6740 MonoGenericContext *context = mini_method_get_context (cmethod);
6741 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6743 g_assert (!pass_vtable);
6745 if (sharing_enabled && context_sharable)
6746 pass_mrgctx = TRUE;
6749 if (cfg->generic_sharing_context && cmethod) {
6750 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6752 context_used = mono_method_check_context_used (cmethod);
6754 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6755 /* Generic method interface
6756 calls are resolved via a
6757 helper function and don't
6758 need an imt. */
6759 if (!cmethod_context || !cmethod_context->method_inst)
6760 pass_imt_from_rgctx = TRUE;
6764 * If a shared method calls another
6765 * shared method then the caller must
6766 * have a generic sharing context
6767 * because the magic trampoline
6768 * requires it. FIXME: We shouldn't
6769 * have to force the vtable/mrgctx
6770 * variable here. Instead there
6771 * should be a flag in the cfg to
6772 * request a generic sharing context.
6774 if (context_used &&
6775 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6776 mono_get_vtable_var (cfg);
6779 if (pass_vtable) {
6780 if (context_used) {
6781 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6782 } else {
6783 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6785 CHECK_TYPELOAD (cmethod->klass);
6786 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6790 if (pass_mrgctx) {
6791 g_assert (!vtable_arg);
6793 if (!cfg->compile_aot) {
6795 * emit_get_rgctx_method () calls mono_class_vtable () so check
6796 * for type load errors before.
6798 mono_class_setup_vtable (cmethod->klass);
6799 CHECK_TYPELOAD (cmethod->klass);
6802 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6804 /* !marshalbyref is needed to properly handle generic methods + remoting */
6805 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6806 MONO_METHOD_IS_FINAL (cmethod)) &&
6807 !cmethod->klass->marshalbyref) {
6808 if (virtual)
6809 check_this = TRUE;
6810 virtual = 0;
6814 if (pass_imt_from_rgctx) {
6815 g_assert (!pass_vtable);
6816 g_assert (cmethod);
6818 imt_arg = emit_get_rgctx_method (cfg, context_used,
6819 cmethod, MONO_RGCTX_INFO_METHOD);
6822 if (check_this)
6823 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6825 /* Calling virtual generic methods */
6826 if (cmethod && virtual &&
6827 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6828 !(MONO_METHOD_IS_FINAL (cmethod) &&
6829 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6830 mono_method_signature (cmethod)->generic_param_count) {
6831 MonoInst *this_temp, *this_arg_temp, *store;
6832 MonoInst *iargs [4];
6834 g_assert (mono_method_signature (cmethod)->is_inflated);
6836 /* Prevent inlining of methods that contain indirect calls */
6837 INLINE_FAILURE;
6839 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6840 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6841 g_assert (!imt_arg);
6842 if (!context_used)
6843 g_assert (cmethod->is_inflated);
6844 imt_arg = emit_get_rgctx_method (cfg, context_used,
6845 cmethod, MONO_RGCTX_INFO_METHOD);
6846 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6847 } else
6848 #endif
6850 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6851 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6852 MONO_ADD_INS (bblock, store);
6854 /* FIXME: This should be a managed pointer */
6855 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6857 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6858 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6859 cmethod, MONO_RGCTX_INFO_METHOD);
6860 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6861 addr = mono_emit_jit_icall (cfg,
6862 mono_helper_compile_generic_method, iargs);
6864 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6866 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6869 if (!MONO_TYPE_IS_VOID (fsig->ret))
6870 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6872 CHECK_CFG_EXCEPTION;
6874 ip += 5;
6875 ins_flag = 0;
6876 break;
6880 * Implement a workaround for the inherent races involved in locking:
6881 * Monitor.Enter ()
6882 * try {
6883 * } finally {
6884 * Monitor.Exit ()
6886 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6887 * try block, the Exit () won't be executed, see:
6888 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6889 * To work around this, we extend such try blocks to include the last x bytes
6890 * of the Monitor.Enter () call.
6892 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6893 MonoBasicBlock *tbb;
6895 GET_BBLOCK (cfg, tbb, ip + 5);
6897 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6898 * from Monitor.Enter like ArgumentNullException.
6900 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6901 /* Mark this bblock as needing to be extended */
6902 tbb->extend_try_block = TRUE;
6906 /* Conversion to a JIT intrinsic */
6907 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6908 bblock = cfg->cbb;
6909 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6910 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6911 *sp = ins;
6912 sp++;
6915 CHECK_CFG_EXCEPTION;
6917 ip += 5;
6918 ins_flag = 0;
6919 break;
6922 /* Inlining */
6923 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6924 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6925 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6926 !g_list_find (dont_inline, cmethod)) {
6927 int costs;
6928 gboolean always = FALSE;
6930 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6931 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6932 /* Prevent inlining of methods that call wrappers */
6933 INLINE_FAILURE;
6934 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6935 always = TRUE;
6938 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6939 ip += 5;
6940 cfg->real_offset += 5;
6941 bblock = cfg->cbb;
6943 if (!MONO_TYPE_IS_VOID (fsig->ret))
6944 /* *sp is already set by inline_method */
6945 sp++;
6947 inline_costs += costs;
6948 ins_flag = 0;
6949 break;
6953 inline_costs += 10 * num_calls++;
6955 /* Tail recursion elimination */
6956 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6957 gboolean has_vtargs = FALSE;
6958 int i;
6960 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6961 INLINE_FAILURE;
6963 /* keep it simple */
6964 for (i = fsig->param_count - 1; i >= 0; i--) {
6965 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6966 has_vtargs = TRUE;
6969 if (!has_vtargs) {
6970 for (i = 0; i < n; ++i)
6971 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6972 MONO_INST_NEW (cfg, ins, OP_BR);
6973 MONO_ADD_INS (bblock, ins);
6974 tblock = start_bblock->out_bb [0];
6975 link_bblock (cfg, bblock, tblock);
6976 ins->inst_target_bb = tblock;
6977 start_new_bblock = 1;
6979 /* skip the CEE_RET, too */
6980 if (ip_in_bb (cfg, bblock, ip + 5))
6981 ip += 6;
6982 else
6983 ip += 5;
6985 ins_flag = 0;
6986 break;
6990 /* Generic sharing */
6991 /* FIXME: only do this for generic methods if
6992 they are not shared! */
6993 if (context_used && !imt_arg && !array_rank &&
6994 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6995 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6996 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6997 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6998 INLINE_FAILURE;
7000 g_assert (cfg->generic_sharing_context && cmethod);
7001 g_assert (!addr);
7004 * We are compiling a call to a
7005 * generic method from shared code,
7006 * which means that we have to look up
7007 * the method in the rgctx and do an
7008 * indirect call.
7010 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7013 /* Indirect calls */
7014 if (addr) {
7015 g_assert (!imt_arg);
7017 if (*ip == CEE_CALL)
7018 g_assert (context_used);
7019 else if (*ip == CEE_CALLI)
7020 g_assert (!vtable_arg);
7021 else
7022 /* FIXME: what the hell is this??? */
7023 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7024 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7026 /* Prevent inlining of methods with indirect calls */
7027 INLINE_FAILURE;
7029 if (vtable_arg) {
7030 MonoCallInst *call;
7032 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7033 call = (MonoCallInst*)ins;
7034 } else {
7035 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7037 * Instead of emitting an indirect call, emit a direct call
7038 * with the contents of the aotconst as the patch info.
7040 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7041 NULLIFY_INS (addr);
7042 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7043 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7044 NULLIFY_INS (addr);
7045 } else {
7046 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7049 if (!MONO_TYPE_IS_VOID (fsig->ret))
7050 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7052 CHECK_CFG_EXCEPTION;
7054 ip += 5;
7055 ins_flag = 0;
7056 break;
7059 /* Array methods */
7060 if (array_rank) {
7061 MonoInst *addr;
7063 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7064 MonoInst *val = sp [fsig->param_count];
7066 if (val->type == STACK_OBJ) {
7067 MonoInst *iargs [2];
7069 iargs [0] = sp [0];
7070 iargs [1] = val;
7072 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7075 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7076 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7077 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7078 emit_write_barrier (cfg, addr, val, 0);
7079 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7080 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7082 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7084 *sp++ = ins;
7085 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7086 if (!cmethod->klass->element_class->valuetype && !readonly)
7087 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7088 CHECK_TYPELOAD (cmethod->klass);
7090 readonly = FALSE;
7091 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7092 *sp++ = addr;
7093 } else {
7094 g_assert_not_reached ();
7097 CHECK_CFG_EXCEPTION;
7099 ip += 5;
7100 ins_flag = 0;
7101 break;
7104 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7105 if (ins) {
7106 if (!MONO_TYPE_IS_VOID (fsig->ret))
7107 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7109 CHECK_CFG_EXCEPTION;
7111 ip += 5;
7112 ins_flag = 0;
7113 break;
7116 /* Tail prefix / tail call optimization */
7118 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7119 /* FIXME: runtime generic context pointer for jumps? */
7120 /* FIXME: handle this for generic sharing eventually */
7121 supported_tail_call = cmethod &&
7122 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7123 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7124 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7126 if (supported_tail_call) {
7127 MonoCallInst *call;
7129 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7130 INLINE_FAILURE;
7132 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7134 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7135 /* Handle tail calls similarly to calls */
7136 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7137 #else
7138 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7139 call->tail_call = TRUE;
7140 call->method = cmethod;
7141 call->signature = mono_method_signature (cmethod);
7144 * We implement tail calls by storing the actual arguments into the
7145 * argument variables, then emitting a CEE_JMP.
7147 for (i = 0; i < n; ++i) {
7148 /* Prevent argument from being register allocated */
7149 arg_array [i]->flags |= MONO_INST_VOLATILE;
7150 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7152 #endif
7154 ins = (MonoInst*)call;
7155 ins->inst_p0 = cmethod;
7156 ins->inst_p1 = arg_array [0];
7157 MONO_ADD_INS (bblock, ins);
7158 link_bblock (cfg, bblock, end_bblock);
7159 start_new_bblock = 1;
7161 CHECK_CFG_EXCEPTION;
7163 ip += 5;
7164 ins_flag = 0;
7166 // FIXME: Eliminate unreachable epilogs
7169 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7170 * only reachable from this call.
7172 GET_BBLOCK (cfg, tblock, ip);
7173 if (tblock == bblock || tblock->in_count == 0)
7174 ip += 1;
7175 break;
7178 /* Common call */
7179 INLINE_FAILURE;
7180 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7181 imt_arg, vtable_arg);
7183 if (!MONO_TYPE_IS_VOID (fsig->ret))
7184 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7186 CHECK_CFG_EXCEPTION;
7188 ip += 5;
7189 ins_flag = 0;
7190 break;
/*
 * CEE_RET: return from the method being converted.
 * Two paths:
 *  - cfg->method != method: we are emitting IR for an INLINED callee, so
 *    the return value (if any) is stored into return_var instead of
 *    emitting a real return.
 *  - otherwise: the value is moved into the return convention (setret for
 *    scalars, a store through the hidden vret address for valuetypes) and
 *    an OP_BR to end_bblock is emitted.
 */
7192 case CEE_RET:
7193 if (cfg->method != method) {
7194 /* return from inlined method */
7196 * If in_count == 0, that means the ret is unreachable due to
7197 * being preceded by a throw. In that case, inline_method () will
7198 * handle setting the return value
7199 * (test case: test_0_inline_throw ()).
7201 if (return_var && cfg->cbb->in_count) {
7202 MonoInst *store;
7203 CHECK_STACK (1);
7204 --sp;
7205 //g_assert (returnvar != -1);
7206 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7207 cfg->ret_var_set = TRUE;
7209 } else {
7210 if (cfg->ret) {
7211 MonoType *ret_type = mono_method_signature (method)->ret;
7213 if (seq_points) {
7215 * Place a seq point here too even through the IL stack is not
7216 * empty, so a step over on
7217 * call <FOO>
7218 * ret
7219 * will work correctly.
7221 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7222 MONO_ADD_INS (cfg->cbb, ins);
7225 g_assert (!return_var);
7226 CHECK_STACK (1);
7227 --sp;
/* Only verify return-type compatibility for code we fully control;
 * other wrappers may legitimately play loose with the stack type. */
7229 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7230 UNVERIFIED;
/* Valuetype return: store through the vret address when the calling
 * convention provides one, else into the ret variable directly. */
7232 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7233 MonoInst *ret_addr;
7235 if (!cfg->vret_addr) {
7236 MonoInst *ins;
7238 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7239 } else {
7240 EMIT_NEW_RETLOADA (cfg, ret_addr);
7242 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7243 ins->klass = mono_class_from_mono_type (ret_type);
7245 } else {
7246 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: an R4 return travels as an int argument, converted via
 * the mono_fload_r4_arg icall before the arch setret. */
7247 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7248 MonoInst *iargs [1];
7249 MonoInst *conv;
7251 iargs [0] = *sp;
7252 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7253 mono_arch_emit_setret (cfg, method, conv);
7254 } else {
7255 mono_arch_emit_setret (cfg, method, *sp);
7257 #else
7258 mono_arch_emit_setret (cfg, method, *sp);
7259 #endif
/* The IL stack must be empty at ret (after popping the value). */
7263 if (sp != stack_start)
7264 UNVERIFIED;
7265 MONO_INST_NEW (cfg, ins, OP_BR);
7266 ip++;
7267 ins->inst_target_bb = end_bblock;
7268 MONO_ADD_INS (bblock, ins);
7269 link_bblock (cfg, bblock, end_bblock);
7270 start_new_bblock = 1;
7271 break;
/*
 * Branch opcodes. Short forms (_S) decode a 1-byte signed displacement,
 * long forms a 4-byte one; both resolve the target basic block with
 * GET_BBLOCK and link the CFG edges. If the IL evaluation stack is not
 * empty at the branch, handle_stack_args () spills it so successor
 * blocks see a consistent in-stack.
 */
7272 case CEE_BR_S:
7273 CHECK_OPSIZE (2);
7274 MONO_INST_NEW (cfg, ins, OP_BR);
7275 ip++;
7276 target = ip + 1 + (signed char)(*ip);
7277 ++ip;
7278 GET_BBLOCK (cfg, tblock, target);
7279 link_bblock (cfg, bblock, tblock);
7280 ins->inst_target_bb = tblock;
7281 if (sp != stack_start) {
7282 handle_stack_args (cfg, stack_start, sp - stack_start);
7283 sp = stack_start;
7284 CHECK_UNVERIFIABLE (cfg);
7286 MONO_ADD_INS (bblock, ins);
7287 start_new_bblock = 1;
7288 inline_costs += BRANCH_COST;
7289 break;
7290 case CEE_BEQ_S:
7291 case CEE_BGE_S:
7292 case CEE_BGT_S:
7293 case CEE_BLE_S:
7294 case CEE_BLT_S:
7295 case CEE_BNE_UN_S:
7296 case CEE_BGE_UN_S:
7297 case CEE_BGT_UN_S:
7298 case CEE_BLE_UN_S:
7299 case CEE_BLT_UN_S:
7300 CHECK_OPSIZE (2);
7301 CHECK_STACK (2);
/* BIG_BRANCH_OFFSET maps the short opcode to its long-form equivalent
 * so ADD_BINCOND sees a single canonical set of conditional opcodes. */
7302 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7303 ip++;
7304 target = ip + 1 + *(signed char*)ip;
7305 ip++;
7307 ADD_BINCOND (NULL);
7309 sp = stack_start;
7310 inline_costs += BRANCH_COST;
7311 break;
7312 case CEE_BR:
7313 CHECK_OPSIZE (5);
7314 MONO_INST_NEW (cfg, ins, OP_BR);
7315 ip++;
7317 target = ip + 4 + (gint32)read32(ip);
7318 ip += 4;
7319 GET_BBLOCK (cfg, tblock, target);
7320 link_bblock (cfg, bblock, tblock);
7321 ins->inst_target_bb = tblock;
7322 if (sp != stack_start) {
7323 handle_stack_args (cfg, stack_start, sp - stack_start);
7324 sp = stack_start;
7325 CHECK_UNVERIFIABLE (cfg);
7328 MONO_ADD_INS (bblock, ins);
7330 start_new_bblock = 1;
7331 inline_costs += BRANCH_COST;
7332 break;
/*
 * brtrue/brfalse: lowered to a compare-against-zero-immediate followed
 * by a BNE_UN (brtrue) or BEQ (brfalse) two-way branch.
 */
7333 case CEE_BRFALSE_S:
7334 case CEE_BRTRUE_S:
7335 case CEE_BRFALSE:
7336 case CEE_BRTRUE: {
7337 MonoInst *cmp;
7338 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7339 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7340 guint32 opsize = is_short ? 1 : 4;
7342 CHECK_OPSIZE (opsize);
7343 CHECK_STACK (1);
/* Valuetypes and floats cannot be branch conditions. */
7344 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7345 UNVERIFIED;
7346 ip ++;
7347 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7348 ip += opsize;
7350 sp--;
7352 GET_BBLOCK (cfg, tblock, target);
7353 link_bblock (cfg, bblock, tblock);
7354 GET_BBLOCK (cfg, tblock, ip);
7355 link_bblock (cfg, bblock, tblock);
7357 if (sp != stack_start) {
7358 handle_stack_args (cfg, stack_start, sp - stack_start);
7359 CHECK_UNVERIFIABLE (cfg);
7362 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7363 cmp->sreg1 = sp [0]->dreg;
7364 type_from_op (cmp, sp [0], NULL);
7365 CHECK_TYPE (cmp);
7367 #if SIZEOF_REGISTER == 4
/* 32-bit: there is no long compare-immediate, so materialize a 0
 * I8 constant and use a register-register OP_LCOMPARE instead. */
7368 if (cmp->opcode == OP_LCOMPARE_IMM) {
7369 /* Convert it to OP_LCOMPARE */
7370 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7371 ins->type = STACK_I8;
7372 ins->dreg = alloc_dreg (cfg, STACK_I8);
7373 ins->inst_l = 0;
7374 MONO_ADD_INS (bblock, ins);
7375 cmp->opcode = OP_LCOMPARE;
7376 cmp->sreg2 = ins->dreg;
7378 #endif
7379 MONO_ADD_INS (bblock, cmp);
7381 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7382 type_from_op (ins, sp [0], NULL);
7383 MONO_ADD_INS (bblock, ins);
7384 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7385 GET_BBLOCK (cfg, tblock, target);
7386 ins->inst_true_bb = tblock;
7387 GET_BBLOCK (cfg, tblock, ip);
7388 ins->inst_false_bb = tblock;
7389 start_new_bblock = 2;
7391 sp = stack_start;
7392 inline_costs += BRANCH_COST;
7393 break;
7395 case CEE_BEQ:
7396 case CEE_BGE:
7397 case CEE_BGT:
7398 case CEE_BLE:
7399 case CEE_BLT:
7400 case CEE_BNE_UN:
7401 case CEE_BGE_UN:
7402 case CEE_BGT_UN:
7403 case CEE_BLE_UN:
7404 case CEE_BLT_UN:
7405 CHECK_OPSIZE (5);
7406 CHECK_STACK (2);
7407 MONO_INST_NEW (cfg, ins, *ip);
7408 ip++;
7409 target = ip + 4 + (gint32)read32(ip);
7410 ip += 4;
7412 ADD_BINCOND (NULL);
7414 sp = stack_start;
7415 inline_costs += BRANCH_COST;
7416 break;
/*
 * CEE_SWITCH: n-way branch. The selector is bounds-checked against n
 * (out-of-range falls through to default_bblock); the dispatch is then
 * either a single OP_SWITCH instruction (ARM JIT, LLVM backend) or an
 * explicit computed jump: selector scaled by pointer size, added to the
 * jump-table address, loaded, and branched to via OP_BR_REG.
 */
7417 case CEE_SWITCH: {
7418 MonoInst *src1;
7419 MonoBasicBlock **targets;
7420 MonoBasicBlock *default_bblock;
7421 MonoJumpInfoBBTable *table;
7422 int offset_reg = alloc_preg (cfg);
7423 int target_reg = alloc_preg (cfg);
7424 int table_reg = alloc_preg (cfg);
7425 int sum_reg = alloc_preg (cfg);
7426 gboolean use_op_switch;
7428 CHECK_OPSIZE (5);
7429 CHECK_STACK (1);
7430 n = read32 (ip + 1);
7431 --sp;
7432 src1 = sp [0];
7433 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7434 UNVERIFIED;
7436 ip += 5;
7437 CHECK_OPSIZE (n * sizeof (guint32));
/* 'target' is the first instruction after the displacement table,
 * i.e. the base from which each 32-bit displacement is measured. */
7438 target = ip + n * sizeof (guint32);
7440 GET_BBLOCK (cfg, default_bblock, target);
7441 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7443 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7444 for (i = 0; i < n; ++i) {
7445 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7446 targets [i] = tblock;
7447 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7448 ip += 4;
7451 if (sp != stack_start) {
7453 * Link the current bb with the targets as well, so handle_stack_args
7454 * will set their in_stack correctly.
7456 link_bblock (cfg, bblock, default_bblock);
7457 for (i = 0; i < n; ++i)
7458 link_bblock (cfg, bblock, targets [i]);
7460 handle_stack_args (cfg, stack_start, sp - stack_start);
7461 sp = stack_start;
7462 CHECK_UNVERIFIABLE (cfg);
/* Unsigned bounds check: selector >= n goes to the default block. */
7465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7466 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7467 bblock = cfg->cbb;
7469 for (i = 0; i < n; ++i)
7470 link_bblock (cfg, bblock, targets [i]);
7472 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7473 table->table = targets;
7474 table->table_size = n;
7476 use_op_switch = FALSE;
7477 #ifdef TARGET_ARM
7478 /* ARM implements SWITCH statements differently */
7479 /* FIXME: Make it use the generic implementation */
7480 if (!cfg->compile_aot)
7481 use_op_switch = TRUE;
7482 #endif
7484 if (COMPILE_LLVM (cfg))
7485 use_op_switch = TRUE;
7487 cfg->cbb->has_jump_table = 1;
7489 if (use_op_switch) {
7490 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7491 ins->sreg1 = src1->dreg;
7492 ins->inst_p0 = table;
7493 ins->inst_many_bb = targets;
7494 ins->klass = GUINT_TO_POINTER (n);
7495 MONO_ADD_INS (cfg->cbb, ins);
7496 } else {
/* Scale the selector into a byte offset: <<3 for 8-byte pointers,
 * <<2 for 4-byte pointers. */
7497 if (sizeof (gpointer) == 8)
7498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7499 else
7500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7502 #if SIZEOF_REGISTER == 8
7503 /* The upper word might not be zero, and we add it to a 64 bit address later */
7504 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7505 #endif
7507 if (cfg->compile_aot) {
7508 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7509 } else {
7510 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7511 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7512 ins->inst_p0 = table;
7513 ins->dreg = table_reg;
7514 MONO_ADD_INS (cfg->cbb, ins);
7517 /* FIXME: Use load_memindex */
7518 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7520 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7522 start_new_bblock = 1;
7523 inline_costs += (BRANCH_COST * 2);
7524 break;
/*
 * ldind.*: indirect load from the address on the stack. The destination
 * register class is chosen from the opcode (float, long, object ref, or
 * native int). Volatile loads are followed by a full memory barrier
 * (ECMA-335 12.6.7 gives them acquire semantics).
 */
7526 case CEE_LDIND_I1:
7527 case CEE_LDIND_U1:
7528 case CEE_LDIND_I2:
7529 case CEE_LDIND_U2:
7530 case CEE_LDIND_I4:
7531 case CEE_LDIND_U4:
7532 case CEE_LDIND_I8:
7533 case CEE_LDIND_I:
7534 case CEE_LDIND_R4:
7535 case CEE_LDIND_R8:
7536 case CEE_LDIND_REF:
7537 CHECK_STACK (1);
7538 --sp;
7540 switch (*ip) {
7541 case CEE_LDIND_R4:
7542 case CEE_LDIND_R8:
7543 dreg = alloc_freg (cfg);
7544 break;
7545 case CEE_LDIND_I8:
7546 dreg = alloc_lreg (cfg);
7547 break;
7548 case CEE_LDIND_REF:
7549 dreg = alloc_ireg_ref (cfg);
7550 break;
7551 default:
7552 dreg = alloc_preg (cfg);
7555 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7556 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7557 ins->flags |= ins_flag;
7558 ins_flag = 0;
7559 MONO_ADD_INS (bblock, ins);
7560 *sp++ = ins;
7561 if (ins->flags & MONO_INST_VOLATILE) {
7562 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7563 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7564 emit_memory_barrier (cfg, FullBarrier);
7566 ++ip;
7567 break;
/*
 * stind.*: indirect store of value (sp[1]) through address (sp[0]).
 * Volatile stores are preceded by a full barrier; stind.ref also emits
 * a GC write barrier unless the value is a known null constant.
 */
7568 case CEE_STIND_REF:
7569 case CEE_STIND_I1:
7570 case CEE_STIND_I2:
7571 case CEE_STIND_I4:
7572 case CEE_STIND_I8:
7573 case CEE_STIND_R4:
7574 case CEE_STIND_R8:
7575 case CEE_STIND_I:
7576 CHECK_STACK (2);
7577 sp -= 2;
7579 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7580 ins->flags |= ins_flag;
7581 ins_flag = 0;
7583 if (ins->flags & MONO_INST_VOLATILE) {
7584 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
/* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
7586 emit_memory_barrier (cfg, FullBarrier);
7589 MONO_ADD_INS (bblock, ins);
7591 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7592 emit_write_barrier (cfg, sp [0], sp [1], -1);
7594 inline_costs += 1;
7595 ++ip;
7596 break;
/*
 * Binary arithmetic/logic opcodes. Both operands are popped, the result
 * type/opcode is inferred by type_from_op (), and when the right operand
 * is a constant the instruction is strength-reduced to its immediate
 * form (the now-dead constant is turned into OP_NOP).
 */
7598 case CEE_MUL:
7599 CHECK_STACK (2);
7601 MONO_INST_NEW (cfg, ins, (*ip));
7602 sp -= 2;
7603 ins->sreg1 = sp [0]->dreg;
7604 ins->sreg2 = sp [1]->dreg;
7605 type_from_op (ins, sp [0], sp [1]);
7606 CHECK_TYPE (ins);
7607 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7609 /* Use the immediate opcodes if possible */
7610 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7611 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7612 if (imm_opcode != -1) {
7613 ins->opcode = imm_opcode;
7614 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7615 ins->sreg2 = -1;
/* The constant is no longer consumed; neutralize it. */
7617 sp [1]->opcode = OP_NOP;
7621 MONO_ADD_INS ((cfg)->cbb, (ins));
7623 *sp++ = mono_decompose_opcode (cfg, ins);
7624 ip++;
7625 break;
7626 case CEE_ADD:
7627 case CEE_SUB:
7628 case CEE_DIV:
7629 case CEE_DIV_UN:
7630 case CEE_REM:
7631 case CEE_REM_UN:
7632 case CEE_AND:
7633 case CEE_OR:
7634 case CEE_XOR:
7635 case CEE_SHL:
7636 case CEE_SHR:
7637 case CEE_SHR_UN:
7638 CHECK_STACK (2);
7640 MONO_INST_NEW (cfg, ins, (*ip));
7641 sp -= 2;
7642 ins->sreg1 = sp [0]->dreg;
7643 ins->sreg2 = sp [1]->dreg;
7644 type_from_op (ins, sp [0], sp [1]);
7645 CHECK_TYPE (ins);
7646 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7647 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7649 /* FIXME: Pass opcode to is_inst_imm */
7651 /* Use the immediate opcodes if possible */
7652 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7653 int imm_opcode;
/* _noemul: do not use immediate forms that would need emulation. */
7655 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7656 if (imm_opcode != -1) {
7657 ins->opcode = imm_opcode;
7658 if (sp [1]->opcode == OP_I8CONST) {
7659 #if SIZEOF_REGISTER == 8
7660 ins->inst_imm = sp [1]->inst_l;
7661 #else
7662 ins->inst_ls_word = sp [1]->inst_ls_word;
7663 ins->inst_ms_word = sp [1]->inst_ms_word;
7664 #endif
7666 else
7667 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7668 ins->sreg2 = -1;
7670 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7671 if (sp [1]->next == NULL)
7672 sp [1]->opcode = OP_NOP;
7675 MONO_ADD_INS ((cfg)->cbb, (ins));
7677 *sp++ = mono_decompose_opcode (cfg, ins);
7678 ip++;
7679 break;
/*
 * Unary ops and numeric conversions. conv.i8/conv.u8 applied to an
 * int constant is folded directly into an OP_I8CONST at IR-build time
 * so long constants exist early in the IR; everything else goes through
 * ADD_UNOP. Overflow-checked conversions from R8 are routed through a
 * 64-bit intermediate conversion first.
 */
7680 case CEE_NEG:
7681 case CEE_NOT:
7682 case CEE_CONV_I1:
7683 case CEE_CONV_I2:
7684 case CEE_CONV_I4:
7685 case CEE_CONV_R4:
7686 case CEE_CONV_R8:
7687 case CEE_CONV_U4:
7688 case CEE_CONV_I8:
7689 case CEE_CONV_U8:
7690 case CEE_CONV_OVF_I8:
7691 case CEE_CONV_OVF_U8:
7692 case CEE_CONV_R_UN:
7693 CHECK_STACK (1);
7695 /* Special case this earlier so we have long constants in the IR */
7696 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7697 int data = sp [-1]->inst_c0;
7698 sp [-1]->opcode = OP_I8CONST;
7699 sp [-1]->type = STACK_I8;
7700 #if SIZEOF_REGISTER == 8
/* conv.u8 zero-extends, conv.i8 sign-extends the 32-bit constant. */
7701 if ((*ip) == CEE_CONV_U8)
7702 sp [-1]->inst_c0 = (guint32)data;
7703 else
7704 sp [-1]->inst_c0 = data;
7705 #else
7706 sp [-1]->inst_ls_word = data;
7707 if ((*ip) == CEE_CONV_U8)
7708 sp [-1]->inst_ms_word = 0;
7709 else
7710 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7711 #endif
7712 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7714 else {
7715 ADD_UNOP (*ip);
7717 ip++;
7718 break;
7719 case CEE_CONV_OVF_I4:
7720 case CEE_CONV_OVF_I1:
7721 case CEE_CONV_OVF_I2:
7722 case CEE_CONV_OVF_I:
7723 case CEE_CONV_OVF_U:
7724 CHECK_STACK (1);
/* From R8, convert to I8 with overflow check first, then narrow. */
7726 if (sp [-1]->type == STACK_R8) {
7727 ADD_UNOP (CEE_CONV_OVF_I8);
7728 ADD_UNOP (*ip);
7729 } else {
7730 ADD_UNOP (*ip);
7732 ip++;
7733 break;
7734 case CEE_CONV_OVF_U1:
7735 case CEE_CONV_OVF_U2:
7736 case CEE_CONV_OVF_U4:
7737 CHECK_STACK (1);
/* Same staging via U8 for the unsigned targets. */
7739 if (sp [-1]->type == STACK_R8) {
7740 ADD_UNOP (CEE_CONV_OVF_U8);
7741 ADD_UNOP (*ip);
7742 } else {
7743 ADD_UNOP (*ip);
7745 ip++;
7746 break;
7747 case CEE_CONV_OVF_I1_UN:
7748 case CEE_CONV_OVF_I2_UN:
7749 case CEE_CONV_OVF_I4_UN:
7750 case CEE_CONV_OVF_I8_UN:
7751 case CEE_CONV_OVF_U1_UN:
7752 case CEE_CONV_OVF_U2_UN:
7753 case CEE_CONV_OVF_U4_UN:
7754 case CEE_CONV_OVF_U8_UN:
7755 case CEE_CONV_OVF_I_UN:
7756 case CEE_CONV_OVF_U_UN:
7757 case CEE_CONV_U2:
7758 case CEE_CONV_U1:
7759 case CEE_CONV_I:
7760 case CEE_CONV_U:
7761 CHECK_STACK (1);
7762 ADD_UNOP (*ip);
7763 CHECK_CFG_EXCEPTION;
7764 ip++;
7765 break;
/* Overflow-checked arithmetic: plain binop emission; the opcode itself
 * carries the overflow-trapping semantics. */
7766 case CEE_ADD_OVF:
7767 case CEE_ADD_OVF_UN:
7768 case CEE_MUL_OVF:
7769 case CEE_MUL_OVF_UN:
7770 case CEE_SUB_OVF:
7771 case CEE_SUB_OVF_UN:
7772 CHECK_STACK (2);
7773 ADD_BINOP (*ip);
7774 ip++;
7775 break;
/*
 * CEE_CPOBJ: copy a value of type 'klass' from address sp[1] to address
 * sp[0]. Reference types become a pointer-sized load+store (plus a GC
 * write barrier when enabled); valuetypes are copied via
 * mini_emit_stobj ().
 */
7776 case CEE_CPOBJ:
7777 CHECK_OPSIZE (5);
7778 CHECK_STACK (2);
7779 token = read32 (ip + 1);
7780 klass = mini_get_class (method, token, generic_context);
7781 CHECK_TYPELOAD (klass);
7782 sp -= 2;
7783 if (generic_class_is_reference_type (cfg, klass)) {
7784 MonoInst *store, *load;
7785 int dreg = alloc_ireg_ref (cfg);
7787 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7788 load->flags |= ins_flag;
7789 MONO_ADD_INS (cfg->cbb, load);
7791 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7792 store->flags |= ins_flag;
7793 MONO_ADD_INS (cfg->cbb, store);
/* Write barrier wrappers must not recurse into themselves. */
7795 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7796 emit_write_barrier (cfg, sp [0], sp [1], -1);
7797 } else {
7798 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7800 ins_flag = 0;
7801 ip += 5;
7802 break;
/*
 * CEE_LDOBJ: load a value of type 'klass' from the address on the stack.
 * Two peepholes are applied when the follow-up opcode is in the same
 * basic block: ldobj+stloc loads directly into the local's register, and
 * ldobj+stobj (same token, valuetype) collapses into a single memory
 * copy via mini_emit_stobj ().
 */
7803 case CEE_LDOBJ: {
7804 int loc_index = -1;
7805 int stloc_len = 0;
7807 CHECK_OPSIZE (5);
7808 CHECK_STACK (1);
7809 --sp;
7810 token = read32 (ip + 1);
7811 klass = mini_get_class (method, token, generic_context);
7812 CHECK_TYPELOAD (klass);
7814 /* Optimize the common ldobj+stloc combination */
7815 switch (ip [5]) {
7816 case CEE_STLOC_S:
7817 loc_index = ip [6];
7818 stloc_len = 2;
7819 break;
7820 case CEE_STLOC_0:
7821 case CEE_STLOC_1:
7822 case CEE_STLOC_2:
7823 case CEE_STLOC_3:
7824 loc_index = ip [5] - CEE_STLOC_0;
7825 stloc_len = 1;
7826 break;
7827 default:
7828 break;
/* The fused form is only valid if the stloc is in the same bblock. */
7831 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7832 CHECK_LOCAL (loc_index);
7834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7835 ins->dreg = cfg->locals [loc_index]->dreg;
7836 ip += 5;
7837 ip += stloc_len;
7838 break;
7841 /* Optimize the ldobj+stobj combination */
7842 /* The reference case ends up being a load+store anyway */
7843 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7844 CHECK_STACK (1);
7846 sp --;
7848 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7850 ip += 5 + 5;
7851 ins_flag = 0;
7852 break;
/* Generic path: plain typed load, result pushed on the stack. */
7855 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7856 *sp++ = ins;
7858 ip += 5;
7859 ins_flag = 0;
7860 inline_costs += 1;
7861 break;
/*
 * CEE_LDSTR: push a string literal. The strategy depends on context:
 *  - dynamic-method wrappers embed the already-created string object as
 *    a pointer constant;
 *  - other wrappers create it at run time via mono_string_new_wrapper;
 *  - MONO_OPT_SHARED code resolves it per-domain through the mono_ldstr
 *    icall;
 *  - out-of-line (cold) bblocks defer creation with helper_ldstr (with a
 *    smaller mscorlib-only variant) to save space;
 *  - AOT uses a patched LDSTR constant, and the plain JIT case interns
 *    the string eagerly and embeds the object pointer.
 */
7863 case CEE_LDSTR:
7864 CHECK_STACK_OVF (1);
7865 CHECK_OPSIZE (5);
7866 n = read32 (ip + 1);
7868 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7869 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7870 ins->type = STACK_OBJ;
7871 *sp = ins;
7873 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7874 MonoInst *iargs [1];
7876 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7877 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7878 } else {
7879 if (cfg->opt & MONO_OPT_SHARED) {
7880 MonoInst *iargs [3];
7882 if (cfg->compile_aot) {
7883 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7885 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7886 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7887 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7888 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
/* Also intern eagerly at compile time so the icall is a fast hit. */
7889 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7890 } else {
7891 if (bblock->out_of_line) {
7892 MonoInst *iargs [2];
7894 if (image == mono_defaults.corlib) {
7896 * Avoid relocations in AOT and save some space by using a
7897 * version of helper_ldstr specialized to mscorlib.
7899 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7900 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7901 } else {
7902 /* Avoid creating the string object */
7903 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7904 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7905 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7908 else
7909 if (cfg->compile_aot) {
7910 NEW_LDSTRCONST (cfg, ins, image, n);
7911 *sp = ins;
7912 MONO_ADD_INS (bblock, ins);
7914 else {
7915 NEW_PCONST (cfg, ins, NULL);
7916 ins->type = STACK_OBJ;
7917 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7918 if (!ins->inst_p0)
7919 OUT_OF_MEMORY_FAILURE;
7921 *sp = ins;
7922 MONO_ADD_INS (bblock, ins);
7927 sp++;
7928 ip += 5;
7929 break;
7930 case CEE_NEWOBJ: {
7931 MonoInst *iargs [2];
7932 MonoMethodSignature *fsig;
7933 MonoInst this_ins;
7934 MonoInst *alloc;
7935 MonoInst *vtable_arg = NULL;
7937 CHECK_OPSIZE (5);
7938 token = read32 (ip + 1);
7939 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7940 if (!cmethod || mono_loader_get_last_error ())
7941 LOAD_ERROR;
7942 fsig = mono_method_get_signature (cmethod, image, token);
7943 if (!fsig)
7944 LOAD_ERROR;
7946 mono_save_token_info (cfg, image, token, cmethod);
7948 if (!mono_class_init (cmethod->klass))
7949 LOAD_ERROR;
7951 if (cfg->generic_sharing_context)
7952 context_used = mono_method_check_context_used (cmethod);
7954 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7955 if (check_linkdemand (cfg, method, cmethod))
7956 INLINE_FAILURE;
7957 CHECK_CFG_EXCEPTION;
7958 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7959 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7962 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7963 emit_generic_class_init (cfg, cmethod->klass);
7964 CHECK_TYPELOAD (cmethod->klass);
7967 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7968 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7969 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7970 mono_class_vtable (cfg->domain, cmethod->klass);
7971 CHECK_TYPELOAD (cmethod->klass);
7973 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7974 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7975 } else {
7976 if (context_used) {
7977 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7978 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7979 } else {
7980 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7982 CHECK_TYPELOAD (cmethod->klass);
7983 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7988 n = fsig->param_count;
7989 CHECK_STACK (n);
7992 * Generate smaller code for the common newobj <exception> instruction in
7993 * argument checking code.
7995 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7996 is_exception_class (cmethod->klass) && n <= 2 &&
7997 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7998 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7999 MonoInst *iargs [3];
8001 g_assert (!vtable_arg);
8003 sp -= n;
8005 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8006 switch (n) {
8007 case 0:
8008 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8009 break;
8010 case 1:
8011 iargs [1] = sp [0];
8012 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8013 break;
8014 case 2:
8015 iargs [1] = sp [0];
8016 iargs [2] = sp [1];
8017 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8018 break;
8019 default:
8020 g_assert_not_reached ();
8023 ip += 5;
8024 inline_costs += 5;
8025 break;
8028 /* move the args to allow room for 'this' in the first position */
8029 while (n--) {
8030 --sp;
8031 sp [1] = sp [0];
8034 /* check_call_signature () requires sp[0] to be set */
8035 this_ins.type = STACK_OBJ;
8036 sp [0] = &this_ins;
8037 if (check_call_signature (cfg, fsig, sp))
8038 UNVERIFIED;
8040 iargs [0] = NULL;
8042 if (mini_class_is_system_array (cmethod->klass)) {
8043 g_assert (!vtable_arg);
8045 *sp = emit_get_rgctx_method (cfg, context_used,
8046 cmethod, MONO_RGCTX_INFO_METHOD);
8048 /* Avoid varargs in the common case */
8049 if (fsig->param_count == 1)
8050 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8051 else if (fsig->param_count == 2)
8052 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8053 else if (fsig->param_count == 3)
8054 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8055 else
8056 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8057 } else if (cmethod->string_ctor) {
8058 g_assert (!context_used);
8059 g_assert (!vtable_arg);
8060 /* we simply pass a null pointer */
8061 EMIT_NEW_PCONST (cfg, *sp, NULL);
8062 /* now call the string ctor */
8063 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8064 } else {
8065 MonoInst* callvirt_this_arg = NULL;
8067 if (cmethod->klass->valuetype) {
8068 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8069 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8070 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8072 alloc = NULL;
8075 * The code generated by mini_emit_virtual_call () expects
8076 * iargs [0] to be a boxed instance, but luckily the vcall
8077 * will be transformed into a normal call there.
8079 } else if (context_used) {
8080 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8081 *sp = alloc;
8082 } else {
8083 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8085 CHECK_TYPELOAD (cmethod->klass);
8088 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8089 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8090 * As a workaround, we call class cctors before allocating objects.
8092 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8093 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8094 if (cfg->verbose_level > 2)
8095 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8096 class_inits = g_slist_prepend (class_inits, vtable);
8099 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8100 *sp = alloc;
8102 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8104 if (alloc)
8105 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8107 /* Now call the actual ctor */
8108 /* Avoid virtual calls to ctors if possible */
8109 if (cmethod->klass->marshalbyref)
8110 callvirt_this_arg = sp [0];
8113 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8114 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8115 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8116 *sp = ins;
8117 sp++;
8120 CHECK_CFG_EXCEPTION;
8121 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8122 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8123 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8124 !g_list_find (dont_inline, cmethod)) {
8125 int costs;
8127 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8128 cfg->real_offset += 5;
8129 bblock = cfg->cbb;
8131 inline_costs += costs - 5;
8132 } else {
8133 INLINE_FAILURE;
8134 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8136 } else if (context_used &&
8137 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8138 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8139 MonoInst *cmethod_addr;
8141 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8142 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8144 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8145 } else {
8146 INLINE_FAILURE;
8147 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8148 callvirt_this_arg, NULL, vtable_arg);
8152 if (alloc == NULL) {
8153 /* Valuetype */
8154 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8155 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8156 *sp++= ins;
8158 else
8159 *sp++ = alloc;
8161 ip += 5;
8162 inline_costs += 5;
8163 break;
8165 case CEE_CASTCLASS:
8166 CHECK_STACK (1);
8167 --sp;
8168 CHECK_OPSIZE (5);
8169 token = read32 (ip + 1);
8170 klass = mini_get_class (method, token, generic_context);
8171 CHECK_TYPELOAD (klass);
8172 if (sp [0]->type != STACK_OBJ)
8173 UNVERIFIED;
8175 if (cfg->generic_sharing_context)
8176 context_used = mono_class_check_context_used (klass);
8178 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8179 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8180 MonoInst *args [3];
8182 /* obj */
8183 args [0] = *sp;
8185 /* klass */
8186 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8188 /* inline cache*/
8189 if (cfg->compile_aot)
8190 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8191 else
8192 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8194 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8195 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8196 ip += 5;
8197 inline_costs += 2;
8198 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8199 MonoMethod *mono_castclass;
8200 MonoInst *iargs [1];
8201 int costs;
8203 mono_castclass = mono_marshal_get_castclass (klass);
8204 iargs [0] = sp [0];
8206 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8207 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8208 CHECK_CFG_EXCEPTION;
8209 g_assert (costs > 0);
8211 ip += 5;
8212 cfg->real_offset += 5;
8213 bblock = cfg->cbb;
8215 *sp++ = iargs [0];
8217 inline_costs += costs;
8219 else {
8220 ins = handle_castclass (cfg, klass, *sp, context_used);
8221 CHECK_CFG_EXCEPTION;
8222 bblock = cfg->cbb;
8223 *sp ++ = ins;
8224 ip += 5;
8226 break;
8227 case CEE_ISINST: {
8228 CHECK_STACK (1);
8229 --sp;
8230 CHECK_OPSIZE (5);
8231 token = read32 (ip + 1);
8232 klass = mini_get_class (method, token, generic_context);
8233 CHECK_TYPELOAD (klass);
8234 if (sp [0]->type != STACK_OBJ)
8235 UNVERIFIED;
8237 if (cfg->generic_sharing_context)
8238 context_used = mono_class_check_context_used (klass);
8240 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8241 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8242 MonoInst *args [3];
8244 /* obj */
8245 args [0] = *sp;
8247 /* klass */
8248 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8250 /* inline cache*/
8251 if (cfg->compile_aot)
8252 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8253 else
8254 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8256 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8257 ip += 5;
8258 inline_costs += 2;
8259 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8260 MonoMethod *mono_isinst;
8261 MonoInst *iargs [1];
8262 int costs;
8264 mono_isinst = mono_marshal_get_isinst (klass);
8265 iargs [0] = sp [0];
8267 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8268 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8269 CHECK_CFG_EXCEPTION;
8270 g_assert (costs > 0);
8272 ip += 5;
8273 cfg->real_offset += 5;
8274 bblock = cfg->cbb;
8276 *sp++= iargs [0];
8278 inline_costs += costs;
8280 else {
8281 ins = handle_isinst (cfg, klass, *sp, context_used);
8282 CHECK_CFG_EXCEPTION;
8283 bblock = cfg->cbb;
8284 *sp ++ = ins;
8285 ip += 5;
8287 break;
8289 case CEE_UNBOX_ANY: {
8290 CHECK_STACK (1);
8291 --sp;
8292 CHECK_OPSIZE (5);
8293 token = read32 (ip + 1);
8294 klass = mini_get_class (method, token, generic_context);
8295 CHECK_TYPELOAD (klass);
8297 mono_save_token_info (cfg, image, token, klass);
8299 if (cfg->generic_sharing_context)
8300 context_used = mono_class_check_context_used (klass);
8302 if (generic_class_is_reference_type (cfg, klass)) {
8303 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8304 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8305 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8306 MonoInst *args [3];
8308 /* obj */
8309 args [0] = *sp;
8311 /* klass */
8312 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8314 /* inline cache*/
8315 /*FIXME AOT support*/
8316 if (cfg->compile_aot)
8317 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8318 else
8319 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8321 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8322 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8323 ip += 5;
8324 inline_costs += 2;
8325 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8326 MonoMethod *mono_castclass;
8327 MonoInst *iargs [1];
8328 int costs;
8330 mono_castclass = mono_marshal_get_castclass (klass);
8331 iargs [0] = sp [0];
8333 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8334 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8335 CHECK_CFG_EXCEPTION;
8336 g_assert (costs > 0);
8338 ip += 5;
8339 cfg->real_offset += 5;
8340 bblock = cfg->cbb;
8342 *sp++ = iargs [0];
8343 inline_costs += costs;
8344 } else {
8345 ins = handle_castclass (cfg, klass, *sp, context_used);
8346 CHECK_CFG_EXCEPTION;
8347 bblock = cfg->cbb;
8348 *sp ++ = ins;
8349 ip += 5;
8351 break;
8354 if (mono_class_is_nullable (klass)) {
8355 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8356 *sp++= ins;
8357 ip += 5;
8358 break;
8361 /* UNBOX */
8362 ins = handle_unbox (cfg, klass, sp, context_used);
8363 *sp = ins;
8365 ip += 5;
8367 /* LDOBJ */
8368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8369 *sp++ = ins;
8371 inline_costs += 2;
8372 break;
8374 case CEE_BOX: {
8375 MonoInst *val;
8377 CHECK_STACK (1);
8378 --sp;
8379 val = *sp;
8380 CHECK_OPSIZE (5);
8381 token = read32 (ip + 1);
8382 klass = mini_get_class (method, token, generic_context);
8383 CHECK_TYPELOAD (klass);
8385 mono_save_token_info (cfg, image, token, klass);
8387 if (cfg->generic_sharing_context)
8388 context_used = mono_class_check_context_used (klass);
8390 if (generic_class_is_reference_type (cfg, klass)) {
8391 *sp++ = val;
8392 ip += 5;
8393 break;
8396 if (klass == mono_defaults.void_class)
8397 UNVERIFIED;
8398 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8399 UNVERIFIED;
8400 /* frequent check in generic code: box (struct), brtrue */
8402 // FIXME: LLVM can't handle the inconsistent bb linking
8403 if (!mono_class_is_nullable (klass) &&
8404 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8405 (ip [5] == CEE_BRTRUE ||
8406 ip [5] == CEE_BRTRUE_S ||
8407 ip [5] == CEE_BRFALSE ||
8408 ip [5] == CEE_BRFALSE_S)) {
8409 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8410 int dreg;
8411 MonoBasicBlock *true_bb, *false_bb;
8413 ip += 5;
8415 if (cfg->verbose_level > 3) {
8416 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8417 printf ("<box+brtrue opt>\n");
8420 switch (*ip) {
8421 case CEE_BRTRUE_S:
8422 case CEE_BRFALSE_S:
8423 CHECK_OPSIZE (2);
8424 ip++;
8425 target = ip + 1 + (signed char)(*ip);
8426 ip++;
8427 break;
8428 case CEE_BRTRUE:
8429 case CEE_BRFALSE:
8430 CHECK_OPSIZE (5);
8431 ip++;
8432 target = ip + 4 + (gint)(read32 (ip));
8433 ip += 4;
8434 break;
8435 default:
8436 g_assert_not_reached ();
8440 * We need to link both bblocks, since it is needed for handling stack
8441 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8442 * Branching to only one of them would lead to inconsistencies, so
8443 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8445 GET_BBLOCK (cfg, true_bb, target);
8446 GET_BBLOCK (cfg, false_bb, ip);
8448 mono_link_bblock (cfg, cfg->cbb, true_bb);
8449 mono_link_bblock (cfg, cfg->cbb, false_bb);
8451 if (sp != stack_start) {
8452 handle_stack_args (cfg, stack_start, sp - stack_start);
8453 sp = stack_start;
8454 CHECK_UNVERIFIABLE (cfg);
8457 if (COMPILE_LLVM (cfg)) {
8458 dreg = alloc_ireg (cfg);
8459 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8462 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8463 } else {
8464 /* The JIT can't eliminate the iconst+compare */
8465 MONO_INST_NEW (cfg, ins, OP_BR);
8466 ins->inst_target_bb = is_true ? true_bb : false_bb;
8467 MONO_ADD_INS (cfg->cbb, ins);
8470 start_new_bblock = 1;
8471 break;
8474 *sp++ = handle_box (cfg, val, klass, context_used);
8476 CHECK_CFG_EXCEPTION;
8477 ip += 5;
8478 inline_costs += 1;
8479 break;
8481 case CEE_UNBOX: {
8482 CHECK_STACK (1);
8483 --sp;
8484 CHECK_OPSIZE (5);
8485 token = read32 (ip + 1);
8486 klass = mini_get_class (method, token, generic_context);
8487 CHECK_TYPELOAD (klass);
8489 mono_save_token_info (cfg, image, token, klass);
8491 if (cfg->generic_sharing_context)
8492 context_used = mono_class_check_context_used (klass);
8494 if (mono_class_is_nullable (klass)) {
8495 MonoInst *val;
8497 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8498 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8500 *sp++= ins;
8501 } else {
8502 ins = handle_unbox (cfg, klass, sp, context_used);
8503 *sp++ = ins;
8505 ip += 5;
8506 inline_costs += 2;
8507 break;
8509 case CEE_LDFLD:
8510 case CEE_LDFLDA:
8511 case CEE_STFLD: {
8512 MonoClassField *field;
8513 int costs;
8514 guint foffset;
8516 if (*ip == CEE_STFLD) {
8517 CHECK_STACK (2);
8518 sp -= 2;
8519 } else {
8520 CHECK_STACK (1);
8521 --sp;
8523 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8524 UNVERIFIED;
8525 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8526 UNVERIFIED;
8527 CHECK_OPSIZE (5);
8528 token = read32 (ip + 1);
8529 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8530 field = mono_method_get_wrapper_data (method, token);
8531 klass = field->parent;
8533 else {
8534 field = mono_field_from_token (image, token, &klass, generic_context);
8536 if (!field)
8537 LOAD_ERROR;
8538 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8539 FIELD_ACCESS_FAILURE;
8540 mono_class_init (klass);
8542 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8543 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8545 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8548 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8549 if (*ip == CEE_STFLD) {
8550 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8551 UNVERIFIED;
8552 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8553 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8554 MonoInst *iargs [5];
8556 iargs [0] = sp [0];
8557 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8558 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8559 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8560 field->offset);
8561 iargs [4] = sp [1];
8563 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8564 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8565 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8566 CHECK_CFG_EXCEPTION;
8567 g_assert (costs > 0);
8569 cfg->real_offset += 5;
8570 bblock = cfg->cbb;
8572 inline_costs += costs;
8573 } else {
8574 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8576 } else {
8577 MonoInst *store;
8579 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8581 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8582 if (sp [0]->opcode != OP_LDADDR)
8583 store->flags |= MONO_INST_FAULT;
8585 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8586 /* insert call to write barrier */
8587 MonoInst *ptr;
8588 int dreg;
8590 dreg = alloc_ireg_mp (cfg);
8591 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8592 emit_write_barrier (cfg, ptr, sp [1], -1);
8595 store->flags |= ins_flag;
8597 ins_flag = 0;
8598 ip += 5;
8599 break;
8602 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8603 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8604 MonoInst *iargs [4];
8606 iargs [0] = sp [0];
8607 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8608 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8609 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8610 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8611 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8612 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8613 CHECK_CFG_EXCEPTION;
8614 bblock = cfg->cbb;
8615 g_assert (costs > 0);
8617 cfg->real_offset += 5;
8619 *sp++ = iargs [0];
8621 inline_costs += costs;
8622 } else {
8623 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8624 *sp++ = ins;
8626 } else {
8627 if (sp [0]->type == STACK_VTYPE) {
8628 MonoInst *var;
8630 /* Have to compute the address of the variable */
8632 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8633 if (!var)
8634 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8635 else
8636 g_assert (var->klass == klass);
8638 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8639 sp [0] = ins;
8642 if (*ip == CEE_LDFLDA) {
8643 if (sp [0]->type == STACK_OBJ) {
8644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8645 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8648 dreg = alloc_ireg_mp (cfg);
8650 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8651 ins->klass = mono_class_from_mono_type (field->type);
8652 ins->type = STACK_MP;
8653 *sp++ = ins;
8654 } else {
8655 MonoInst *load;
8657 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8659 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8660 load->flags |= ins_flag;
8661 if (sp [0]->opcode != OP_LDADDR)
8662 load->flags |= MONO_INST_FAULT;
8663 *sp++ = load;
8666 ins_flag = 0;
8667 ip += 5;
8668 break;
8670 case CEE_LDSFLD:
8671 case CEE_LDSFLDA:
8672 case CEE_STSFLD: {
8673 MonoClassField *field;
8674 gpointer addr = NULL;
8675 gboolean is_special_static;
8676 MonoType *ftype;
8678 CHECK_OPSIZE (5);
8679 token = read32 (ip + 1);
8681 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8682 field = mono_method_get_wrapper_data (method, token);
8683 klass = field->parent;
8685 else
8686 field = mono_field_from_token (image, token, &klass, generic_context);
8687 if (!field)
8688 LOAD_ERROR;
8689 mono_class_init (klass);
8690 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8691 FIELD_ACCESS_FAILURE;
8693 /* if the class is Critical then transparent code cannot access it's fields */
8694 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8695 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8698 * We can only support shared generic static
8699 * field access on architectures where the
8700 * trampoline code has been extended to handle
8701 * the generic class init.
8703 #ifndef MONO_ARCH_VTABLE_REG
8704 GENERIC_SHARING_FAILURE (*ip);
8705 #endif
8707 if (cfg->generic_sharing_context)
8708 context_used = mono_class_check_context_used (klass);
8710 ftype = mono_field_get_type (field);
8712 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8714 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8715 * to be called here.
8717 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8718 mono_class_vtable (cfg->domain, klass);
8719 CHECK_TYPELOAD (klass);
8721 mono_domain_lock (cfg->domain);
8722 if (cfg->domain->special_static_fields)
8723 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8724 mono_domain_unlock (cfg->domain);
8726 is_special_static = mono_class_field_is_special_static (field);
8728 /* Generate IR to compute the field address */
8729 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8731 * Fast access to TLS data
8732 * Inline version of get_thread_static_data () in
8733 * threads.c.
8735 guint32 offset;
8736 int idx, static_data_reg, array_reg, dreg;
8737 MonoInst *thread_ins;
8739 // offset &= 0x7fffffff;
8740 // idx = (offset >> 24) - 1;
8741 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8743 thread_ins = mono_get_thread_intrinsic (cfg);
8744 MONO_ADD_INS (cfg->cbb, thread_ins);
8745 static_data_reg = alloc_ireg (cfg);
8746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8748 if (cfg->compile_aot) {
8749 int offset_reg, offset2_reg, idx_reg;
8751 /* For TLS variables, this will return the TLS offset */
8752 EMIT_NEW_SFLDACONST (cfg, ins, field);
8753 offset_reg = ins->dreg;
8754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8755 idx_reg = alloc_ireg (cfg);
8756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8757 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8759 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8760 array_reg = alloc_ireg (cfg);
8761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8762 offset2_reg = alloc_ireg (cfg);
8763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8764 dreg = alloc_ireg (cfg);
8765 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8766 } else {
8767 offset = (gsize)addr & 0x7fffffff;
8768 idx = (offset >> 24) - 1;
8770 array_reg = alloc_ireg (cfg);
8771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8772 dreg = alloc_ireg (cfg);
8773 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8775 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8776 (cfg->compile_aot && is_special_static) ||
8777 (context_used && is_special_static)) {
8778 MonoInst *iargs [2];
8780 g_assert (field->parent);
8781 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8782 if (context_used) {
8783 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8784 field, MONO_RGCTX_INFO_CLASS_FIELD);
8785 } else {
8786 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8788 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8789 } else if (context_used) {
8790 MonoInst *static_data;
8793 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8794 method->klass->name_space, method->klass->name, method->name,
8795 depth, field->offset);
8798 if (mono_class_needs_cctor_run (klass, method))
8799 emit_generic_class_init (cfg, klass);
8802 * The pointer we're computing here is
8804 * super_info.static_data + field->offset
8806 static_data = emit_get_rgctx_klass (cfg, context_used,
8807 klass, MONO_RGCTX_INFO_STATIC_DATA);
8809 if (field->offset == 0) {
8810 ins = static_data;
8811 } else {
8812 int addr_reg = mono_alloc_preg (cfg);
8813 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8815 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8816 MonoInst *iargs [2];
8818 g_assert (field->parent);
8819 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8820 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8821 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8822 } else {
8823 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8825 CHECK_TYPELOAD (klass);
8826 if (!addr) {
8827 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8828 if (!(g_slist_find (class_inits, vtable))) {
8829 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8830 if (cfg->verbose_level > 2)
8831 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8832 class_inits = g_slist_prepend (class_inits, vtable);
8834 } else {
8835 if (cfg->run_cctors) {
8836 MonoException *ex;
8837 /* This makes so that inline cannot trigger */
8838 /* .cctors: too many apps depend on them */
8839 /* running with a specific order... */
8840 if (! vtable->initialized)
8841 INLINE_FAILURE;
8842 ex = mono_runtime_class_init_full (vtable, FALSE);
8843 if (ex) {
8844 set_exception_object (cfg, ex);
8845 goto exception_exit;
8849 addr = (char*)vtable->data + field->offset;
8851 if (cfg->compile_aot)
8852 EMIT_NEW_SFLDACONST (cfg, ins, field);
8853 else
8854 EMIT_NEW_PCONST (cfg, ins, addr);
8855 } else {
8856 MonoInst *iargs [1];
8857 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8858 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8862 /* Generate IR to do the actual load/store operation */
8864 if (*ip == CEE_LDSFLDA) {
8865 ins->klass = mono_class_from_mono_type (ftype);
8866 ins->type = STACK_PTR;
8867 *sp++ = ins;
8868 } else if (*ip == CEE_STSFLD) {
8869 MonoInst *store;
8870 CHECK_STACK (1);
8871 sp--;
8873 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8874 store->flags |= ins_flag;
8875 } else {
8876 gboolean is_const = FALSE;
8877 MonoVTable *vtable = NULL;
8879 if (!context_used) {
8880 vtable = mono_class_vtable (cfg->domain, klass);
8881 CHECK_TYPELOAD (klass);
8883 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8884 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8885 gpointer addr = (char*)vtable->data + field->offset;
8886 int ro_type = ftype->type;
8887 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8888 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8890 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8891 is_const = TRUE;
8892 switch (ro_type) {
8893 case MONO_TYPE_BOOLEAN:
8894 case MONO_TYPE_U1:
8895 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8896 sp++;
8897 break;
8898 case MONO_TYPE_I1:
8899 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8900 sp++;
8901 break;
8902 case MONO_TYPE_CHAR:
8903 case MONO_TYPE_U2:
8904 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8905 sp++;
8906 break;
8907 case MONO_TYPE_I2:
8908 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8909 sp++;
8910 break;
8911 break;
8912 case MONO_TYPE_I4:
8913 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8914 sp++;
8915 break;
8916 case MONO_TYPE_U4:
8917 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8918 sp++;
8919 break;
8920 case MONO_TYPE_I:
8921 case MONO_TYPE_U:
8922 case MONO_TYPE_PTR:
8923 case MONO_TYPE_FNPTR:
8924 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8925 type_to_eval_stack_type ((cfg), field->type, *sp);
8926 sp++;
8927 break;
8928 case MONO_TYPE_STRING:
8929 case MONO_TYPE_OBJECT:
8930 case MONO_TYPE_CLASS:
8931 case MONO_TYPE_SZARRAY:
8932 case MONO_TYPE_ARRAY:
8933 if (!mono_gc_is_moving ()) {
8934 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8935 type_to_eval_stack_type ((cfg), field->type, *sp);
8936 sp++;
8937 } else {
8938 is_const = FALSE;
8940 break;
8941 case MONO_TYPE_I8:
8942 case MONO_TYPE_U8:
8943 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8944 sp++;
8945 break;
8946 case MONO_TYPE_R4:
8947 case MONO_TYPE_R8:
8948 case MONO_TYPE_VALUETYPE:
8949 default:
8950 is_const = FALSE;
8951 break;
8955 if (!is_const) {
8956 MonoInst *load;
8958 CHECK_STACK_OVF (1);
8960 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8961 load->flags |= ins_flag;
8962 ins_flag = 0;
8963 *sp++ = load;
8966 ins_flag = 0;
8967 ip += 5;
8968 break;
8970 case CEE_STOBJ:
8971 CHECK_STACK (2);
8972 sp -= 2;
8973 CHECK_OPSIZE (5);
8974 token = read32 (ip + 1);
8975 klass = mini_get_class (method, token, generic_context);
8976 CHECK_TYPELOAD (klass);
8977 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8978 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8979 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8980 generic_class_is_reference_type (cfg, klass)) {
8981 /* insert call to write barrier */
8982 emit_write_barrier (cfg, sp [0], sp [1], -1);
8984 ins_flag = 0;
8985 ip += 5;
8986 inline_costs += 1;
8987 break;
8990 * Array opcodes
8992 case CEE_NEWARR: {
8993 MonoInst *len_ins;
8994 const char *data_ptr;
8995 int data_size = 0;
8996 guint32 field_token;
8998 CHECK_STACK (1);
8999 --sp;
9001 CHECK_OPSIZE (5);
9002 token = read32 (ip + 1);
9004 klass = mini_get_class (method, token, generic_context);
9005 CHECK_TYPELOAD (klass);
9007 if (cfg->generic_sharing_context)
9008 context_used = mono_class_check_context_used (klass);
9010 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9011 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9012 ins->sreg1 = sp [0]->dreg;
9013 ins->type = STACK_I4;
9014 ins->dreg = alloc_ireg (cfg);
9015 MONO_ADD_INS (cfg->cbb, ins);
9016 *sp = mono_decompose_opcode (cfg, ins);
9019 if (context_used) {
9020 MonoInst *args [3];
9021 MonoClass *array_class = mono_array_class_get (klass, 1);
9022 /* FIXME: we cannot get a managed
9023 allocator because we can't get the
9024 open generic class's vtable. We
9025 have the same problem in
9026 handle_alloc(). This
9027 needs to be solved so that we can
9028 have managed allocs of shared
9029 generic classes. */
9031 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9032 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9034 MonoMethod *managed_alloc = NULL;
9036 /* FIXME: Decompose later to help abcrem */
9038 /* vtable */
9039 args [0] = emit_get_rgctx_klass (cfg, context_used,
9040 array_class, MONO_RGCTX_INFO_VTABLE);
9041 /* array len */
9042 args [1] = sp [0];
9044 if (managed_alloc)
9045 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9046 else
9047 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9048 } else {
9049 if (cfg->opt & MONO_OPT_SHARED) {
9050 /* Decompose now to avoid problems with references to the domainvar */
9051 MonoInst *iargs [3];
9053 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9054 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9055 iargs [2] = sp [0];
9057 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9058 } else {
9059 /* Decompose later since it is needed by abcrem */
9060 MonoClass *array_type = mono_array_class_get (klass, 1);
9061 mono_class_vtable (cfg->domain, array_type);
9062 CHECK_TYPELOAD (array_type);
9064 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9065 ins->dreg = alloc_ireg_ref (cfg);
9066 ins->sreg1 = sp [0]->dreg;
9067 ins->inst_newa_class = klass;
9068 ins->type = STACK_OBJ;
9069 ins->klass = klass;
9070 MONO_ADD_INS (cfg->cbb, ins);
9071 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9072 cfg->cbb->has_array_access = TRUE;
9074 /* Needed so mono_emit_load_get_addr () gets called */
9075 mono_get_got_var (cfg);
9079 len_ins = sp [0];
9080 ip += 5;
9081 *sp++ = ins;
9082 inline_costs += 1;
9085 * we inline/optimize the initialization sequence if possible.
9086 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9087 * for small sizes open code the memcpy
9088 * ensure the rva field is big enough
9090 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9091 MonoMethod *memcpy_method = get_memcpy_method ();
9092 MonoInst *iargs [3];
9093 int add_reg = alloc_ireg_mp (cfg);
9095 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9096 if (cfg->compile_aot) {
9097 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9098 } else {
9099 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9101 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9102 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9103 ip += 11;
9106 break;
9108 case CEE_LDLEN:
9109 CHECK_STACK (1);
9110 --sp;
9111 if (sp [0]->type != STACK_OBJ)
9112 UNVERIFIED;
9114 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9115 ins->dreg = alloc_preg (cfg);
9116 ins->sreg1 = sp [0]->dreg;
9117 ins->type = STACK_I4;
9118 /* This flag will be inherited by the decomposition */
9119 ins->flags |= MONO_INST_FAULT;
9120 MONO_ADD_INS (cfg->cbb, ins);
9121 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9122 cfg->cbb->has_array_access = TRUE;
9123 ip ++;
9124 *sp++ = ins;
9125 break;
9126 case CEE_LDELEMA:
9127 CHECK_STACK (2);
9128 sp -= 2;
9129 CHECK_OPSIZE (5);
9130 if (sp [0]->type != STACK_OBJ)
9131 UNVERIFIED;
9133 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9135 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9136 CHECK_TYPELOAD (klass);
9137 /* we need to make sure that this array is exactly the type it needs
9138 * to be for correctness. the wrappers are lax with their usage
9139 * so we need to ignore them here
9141 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9142 MonoClass *array_class = mono_array_class_get (klass, 1);
9143 mini_emit_check_array_type (cfg, sp [0], array_class);
9144 CHECK_TYPELOAD (array_class);
9147 readonly = FALSE;
9148 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9149 *sp++ = ins;
9150 ip += 5;
9151 break;
9152 case CEE_LDELEM:
9153 case CEE_LDELEM_I1:
9154 case CEE_LDELEM_U1:
9155 case CEE_LDELEM_I2:
9156 case CEE_LDELEM_U2:
9157 case CEE_LDELEM_I4:
9158 case CEE_LDELEM_U4:
9159 case CEE_LDELEM_I8:
9160 case CEE_LDELEM_I:
9161 case CEE_LDELEM_R4:
9162 case CEE_LDELEM_R8:
9163 case CEE_LDELEM_REF: {
9164 MonoInst *addr;
9166 CHECK_STACK (2);
9167 sp -= 2;
9169 if (*ip == CEE_LDELEM) {
9170 CHECK_OPSIZE (5);
9171 token = read32 (ip + 1);
9172 klass = mini_get_class (method, token, generic_context);
9173 CHECK_TYPELOAD (klass);
9174 mono_class_init (klass);
9176 else
9177 klass = array_access_to_klass (*ip);
9179 if (sp [0]->type != STACK_OBJ)
9180 UNVERIFIED;
9182 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9184 if (sp [1]->opcode == OP_ICONST) {
9185 int array_reg = sp [0]->dreg;
9186 int index_reg = sp [1]->dreg;
9187 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9189 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9190 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9191 } else {
9192 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9195 *sp++ = ins;
9196 if (*ip == CEE_LDELEM)
9197 ip += 5;
9198 else
9199 ++ip;
9200 break;
9202 case CEE_STELEM_I:
9203 case CEE_STELEM_I1:
9204 case CEE_STELEM_I2:
9205 case CEE_STELEM_I4:
9206 case CEE_STELEM_I8:
9207 case CEE_STELEM_R4:
9208 case CEE_STELEM_R8:
9209 case CEE_STELEM_REF:
9210 case CEE_STELEM: {
9211 MonoInst *addr;
9213 CHECK_STACK (3);
9214 sp -= 3;
9216 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9218 if (*ip == CEE_STELEM) {
9219 CHECK_OPSIZE (5);
9220 token = read32 (ip + 1);
9221 klass = mini_get_class (method, token, generic_context);
9222 CHECK_TYPELOAD (klass);
9223 mono_class_init (klass);
9225 else
9226 klass = array_access_to_klass (*ip);
9228 if (sp [0]->type != STACK_OBJ)
9229 UNVERIFIED;
9231 /* storing a NULL doesn't need any of the complex checks in stelemref */
9232 if (generic_class_is_reference_type (cfg, klass) &&
9233 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9234 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9235 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9236 MonoInst *iargs [3];
9238 if (!helper->slot)
9239 mono_class_setup_vtable (obj_array);
9240 g_assert (helper->slot);
9242 if (sp [0]->type != STACK_OBJ)
9243 UNVERIFIED;
9244 if (sp [2]->type != STACK_OBJ)
9245 UNVERIFIED;
9247 iargs [2] = sp [2];
9248 iargs [1] = sp [1];
9249 iargs [0] = sp [0];
9251 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9252 } else {
9253 if (sp [1]->opcode == OP_ICONST) {
9254 int array_reg = sp [0]->dreg;
9255 int index_reg = sp [1]->dreg;
9256 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9258 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9259 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9260 } else {
9261 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9266 if (*ip == CEE_STELEM)
9267 ip += 5;
9268 else
9269 ++ip;
9270 inline_costs += 1;
9271 break;
9273 case CEE_CKFINITE: {
9274 CHECK_STACK (1);
9275 --sp;
9277 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9278 ins->sreg1 = sp [0]->dreg;
9279 ins->dreg = alloc_freg (cfg);
9280 ins->type = STACK_R8;
9281 MONO_ADD_INS (bblock, ins);
9283 *sp++ = mono_decompose_opcode (cfg, ins);
9285 ++ip;
9286 break;
9288 case CEE_REFANYVAL: {
9289 MonoInst *src_var, *src;
9291 int klass_reg = alloc_preg (cfg);
9292 int dreg = alloc_preg (cfg);
9294 CHECK_STACK (1);
9295 MONO_INST_NEW (cfg, ins, *ip);
9296 --sp;
9297 CHECK_OPSIZE (5);
9298 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9299 CHECK_TYPELOAD (klass);
9300 mono_class_init (klass);
9302 if (cfg->generic_sharing_context)
9303 context_used = mono_class_check_context_used (klass);
9305 // FIXME:
9306 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9307 if (!src_var)
9308 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9309 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9310 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9312 if (context_used) {
9313 MonoInst *klass_ins;
9315 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9316 klass, MONO_RGCTX_INFO_KLASS);
9318 // FIXME:
9319 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9320 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9321 } else {
9322 mini_emit_class_check (cfg, klass_reg, klass);
9324 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9325 ins->type = STACK_MP;
9326 *sp++ = ins;
9327 ip += 5;
9328 break;
9330 case CEE_MKREFANY: {
9331 MonoInst *loc, *addr;
9333 CHECK_STACK (1);
9334 MONO_INST_NEW (cfg, ins, *ip);
9335 --sp;
9336 CHECK_OPSIZE (5);
9337 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9338 CHECK_TYPELOAD (klass);
9339 mono_class_init (klass);
9341 if (cfg->generic_sharing_context)
9342 context_used = mono_class_check_context_used (klass);
9344 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9345 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9347 if (context_used) {
9348 MonoInst *const_ins;
9349 int type_reg = alloc_preg (cfg);
9351 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9352 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9354 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9355 } else if (cfg->compile_aot) {
9356 int const_reg = alloc_preg (cfg);
9357 int type_reg = alloc_preg (cfg);
9359 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9360 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9363 } else {
9364 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9365 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9369 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9370 ins->type = STACK_VTYPE;
9371 ins->klass = mono_defaults.typed_reference_class;
9372 *sp++ = ins;
9373 ip += 5;
9374 break;
9376 case CEE_LDTOKEN: {
9377 gpointer handle;
9378 MonoClass *handle_class;
9380 CHECK_STACK_OVF (1);
9382 CHECK_OPSIZE (5);
9383 n = read32 (ip + 1);
9385 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9386 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9387 handle = mono_method_get_wrapper_data (method, n);
9388 handle_class = mono_method_get_wrapper_data (method, n + 1);
9389 if (handle_class == mono_defaults.typehandle_class)
9390 handle = &((MonoClass*)handle)->byval_arg;
9392 else {
9393 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9395 if (!handle)
9396 LOAD_ERROR;
9397 mono_class_init (handle_class);
9398 if (cfg->generic_sharing_context) {
9399 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9400 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9401 /* This case handles ldtoken
9402 of an open type, like for
9403 typeof(Gen<>). */
9404 context_used = 0;
9405 } else if (handle_class == mono_defaults.typehandle_class) {
9406 /* If we get a MONO_TYPE_CLASS
9407 then we need to provide the
9408 open type, not an
9409 instantiation of it. */
9410 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9411 context_used = 0;
9412 else
9413 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9414 } else if (handle_class == mono_defaults.fieldhandle_class)
9415 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9416 else if (handle_class == mono_defaults.methodhandle_class)
9417 context_used = mono_method_check_context_used (handle);
9418 else
9419 g_assert_not_reached ();
9422 if ((cfg->opt & MONO_OPT_SHARED) &&
9423 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9424 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9425 MonoInst *addr, *vtvar, *iargs [3];
9426 int method_context_used;
9428 if (cfg->generic_sharing_context)
9429 method_context_used = mono_method_check_context_used (method);
9430 else
9431 method_context_used = 0;
9433 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9435 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9436 EMIT_NEW_ICONST (cfg, iargs [1], n);
9437 if (method_context_used) {
9438 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9439 method, MONO_RGCTX_INFO_METHOD);
9440 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9441 } else {
9442 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9443 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9445 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9447 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9449 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9450 } else {
9451 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9452 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9453 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9454 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9455 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9456 MonoClass *tclass = mono_class_from_mono_type (handle);
9458 mono_class_init (tclass);
9459 if (context_used) {
9460 ins = emit_get_rgctx_klass (cfg, context_used,
9461 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9462 } else if (cfg->compile_aot) {
9463 if (method->wrapper_type) {
9464 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9465 /* Special case for static synchronized wrappers */
9466 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9467 } else {
9468 /* FIXME: n is not a normal token */
9469 cfg->disable_aot = TRUE;
9470 EMIT_NEW_PCONST (cfg, ins, NULL);
9472 } else {
9473 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9475 } else {
9476 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9478 ins->type = STACK_OBJ;
9479 ins->klass = cmethod->klass;
9480 ip += 5;
9481 } else {
9482 MonoInst *addr, *vtvar;
9484 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9486 if (context_used) {
9487 if (handle_class == mono_defaults.typehandle_class) {
9488 ins = emit_get_rgctx_klass (cfg, context_used,
9489 mono_class_from_mono_type (handle),
9490 MONO_RGCTX_INFO_TYPE);
9491 } else if (handle_class == mono_defaults.methodhandle_class) {
9492 ins = emit_get_rgctx_method (cfg, context_used,
9493 handle, MONO_RGCTX_INFO_METHOD);
9494 } else if (handle_class == mono_defaults.fieldhandle_class) {
9495 ins = emit_get_rgctx_field (cfg, context_used,
9496 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9497 } else {
9498 g_assert_not_reached ();
9500 } else if (cfg->compile_aot) {
9501 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9502 } else {
9503 EMIT_NEW_PCONST (cfg, ins, handle);
9505 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9506 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9507 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9511 *sp++ = ins;
9512 ip += 5;
9513 break;
9515 case CEE_THROW:
9516 CHECK_STACK (1);
9517 MONO_INST_NEW (cfg, ins, OP_THROW);
9518 --sp;
9519 ins->sreg1 = sp [0]->dreg;
9520 ip++;
9521 bblock->out_of_line = TRUE;
9522 MONO_ADD_INS (bblock, ins);
9523 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9524 MONO_ADD_INS (bblock, ins);
9525 sp = stack_start;
9527 link_bblock (cfg, bblock, end_bblock);
9528 start_new_bblock = 1;
9529 break;
9530 case CEE_ENDFINALLY:
9531 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9532 MONO_ADD_INS (bblock, ins);
9533 ip++;
9534 start_new_bblock = 1;
9537 * Control will leave the method so empty the stack, otherwise
9538 * the next basic block will start with a nonempty stack.
9540 while (sp != stack_start) {
9541 sp--;
9543 break;
9544 case CEE_LEAVE:
9545 case CEE_LEAVE_S: {
9546 GList *handlers;
9548 if (*ip == CEE_LEAVE) {
9549 CHECK_OPSIZE (5);
9550 target = ip + 5 + (gint32)read32(ip + 1);
9551 } else {
9552 CHECK_OPSIZE (2);
9553 target = ip + 2 + (signed char)(ip [1]);
9556 /* empty the stack */
9557 while (sp != stack_start) {
9558 sp--;
9562 * If this leave statement is in a catch block, check for a
9563 * pending exception, and rethrow it if necessary.
9564 * We avoid doing this in runtime invoke wrappers, since those are called
9565 * by native code which excepts the wrapper to catch all exceptions.
9567 for (i = 0; i < header->num_clauses; ++i) {
9568 MonoExceptionClause *clause = &header->clauses [i];
9571 * Use <= in the final comparison to handle clauses with multiple
9572 * leave statements, like in bug #78024.
9573 * The ordering of the exception clauses guarantees that we find the
9574 * innermost clause.
9576 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9577 MonoInst *exc_ins;
9578 MonoBasicBlock *dont_throw;
9581 MonoInst *load;
9583 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9586 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9588 NEW_BBLOCK (cfg, dont_throw);
9591 * Currently, we always rethrow the abort exception, despite the
9592 * fact that this is not correct. See thread6.cs for an example.
9593 * But propagating the abort exception is more important than
9594 * getting the sematics right.
9596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9598 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9600 MONO_START_BB (cfg, dont_throw);
9601 bblock = cfg->cbb;
9605 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9606 GList *tmp;
9607 MonoExceptionClause *clause;
9609 for (tmp = handlers; tmp; tmp = tmp->next) {
9610 clause = tmp->data;
9611 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9612 g_assert (tblock);
9613 link_bblock (cfg, bblock, tblock);
9614 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9615 ins->inst_target_bb = tblock;
9616 ins->inst_eh_block = clause;
9617 MONO_ADD_INS (bblock, ins);
9618 bblock->has_call_handler = 1;
9619 if (COMPILE_LLVM (cfg)) {
9620 MonoBasicBlock *target_bb;
9623 * Link the finally bblock with the target, since it will
9624 * conceptually branch there.
9625 * FIXME: Have to link the bblock containing the endfinally.
9627 GET_BBLOCK (cfg, target_bb, target);
9628 link_bblock (cfg, tblock, target_bb);
9631 g_list_free (handlers);
9634 MONO_INST_NEW (cfg, ins, OP_BR);
9635 MONO_ADD_INS (bblock, ins);
9636 GET_BBLOCK (cfg, tblock, target);
9637 link_bblock (cfg, bblock, tblock);
9638 ins->inst_target_bb = tblock;
9639 start_new_bblock = 1;
9641 if (*ip == CEE_LEAVE)
9642 ip += 5;
9643 else
9644 ip += 2;
9646 break;
9650 * Mono specific opcodes
9652 case MONO_CUSTOM_PREFIX: {
9654 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9656 CHECK_OPSIZE (2);
9657 switch (ip [1]) {
9658 case CEE_MONO_ICALL: {
9659 gpointer func;
9660 MonoJitICallInfo *info;
9662 token = read32 (ip + 2);
9663 func = mono_method_get_wrapper_data (method, token);
9664 info = mono_find_jit_icall_by_addr (func);
9665 g_assert (info);
9667 CHECK_STACK (info->sig->param_count);
9668 sp -= info->sig->param_count;
9670 ins = mono_emit_jit_icall (cfg, info->func, sp);
9671 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9672 *sp++ = ins;
9674 ip += 6;
9675 inline_costs += 10 * num_calls++;
9677 break;
9679 case CEE_MONO_LDPTR: {
9680 gpointer ptr;
9682 CHECK_STACK_OVF (1);
9683 CHECK_OPSIZE (6);
9684 token = read32 (ip + 2);
9686 ptr = mono_method_get_wrapper_data (method, token);
9687 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9688 MonoJitICallInfo *callinfo;
9689 const char *icall_name;
9691 icall_name = method->name + strlen ("__icall_wrapper_");
9692 g_assert (icall_name);
9693 callinfo = mono_find_jit_icall_by_name (icall_name);
9694 g_assert (callinfo);
9696 if (ptr == callinfo->func) {
9697 /* Will be transformed into an AOTCONST later */
9698 EMIT_NEW_PCONST (cfg, ins, ptr);
9699 *sp++ = ins;
9700 ip += 6;
9701 break;
9704 /* FIXME: Generalize this */
9705 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9706 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9707 *sp++ = ins;
9708 ip += 6;
9709 break;
9711 EMIT_NEW_PCONST (cfg, ins, ptr);
9712 *sp++ = ins;
9713 ip += 6;
9714 inline_costs += 10 * num_calls++;
9715 /* Can't embed random pointers into AOT code */
9716 cfg->disable_aot = 1;
9717 break;
9719 case CEE_MONO_ICALL_ADDR: {
9720 MonoMethod *cmethod;
9721 gpointer ptr;
9723 CHECK_STACK_OVF (1);
9724 CHECK_OPSIZE (6);
9725 token = read32 (ip + 2);
9727 cmethod = mono_method_get_wrapper_data (method, token);
9729 if (cfg->compile_aot) {
9730 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9731 } else {
9732 ptr = mono_lookup_internal_call (cmethod);
9733 g_assert (ptr);
9734 EMIT_NEW_PCONST (cfg, ins, ptr);
9736 *sp++ = ins;
9737 ip += 6;
9738 break;
9740 case CEE_MONO_VTADDR: {
9741 MonoInst *src_var, *src;
9743 CHECK_STACK (1);
9744 --sp;
9746 // FIXME:
9747 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9748 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9749 *sp++ = src;
9750 ip += 2;
9751 break;
9753 case CEE_MONO_NEWOBJ: {
9754 MonoInst *iargs [2];
9756 CHECK_STACK_OVF (1);
9757 CHECK_OPSIZE (6);
9758 token = read32 (ip + 2);
9759 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9760 mono_class_init (klass);
9761 NEW_DOMAINCONST (cfg, iargs [0]);
9762 MONO_ADD_INS (cfg->cbb, iargs [0]);
9763 NEW_CLASSCONST (cfg, iargs [1], klass);
9764 MONO_ADD_INS (cfg->cbb, iargs [1]);
9765 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9766 ip += 6;
9767 inline_costs += 10 * num_calls++;
9768 break;
9770 case CEE_MONO_OBJADDR:
9771 CHECK_STACK (1);
9772 --sp;
9773 MONO_INST_NEW (cfg, ins, OP_MOVE);
9774 ins->dreg = alloc_ireg_mp (cfg);
9775 ins->sreg1 = sp [0]->dreg;
9776 ins->type = STACK_MP;
9777 MONO_ADD_INS (cfg->cbb, ins);
9778 *sp++ = ins;
9779 ip += 2;
9780 break;
9781 case CEE_MONO_LDNATIVEOBJ:
9783 * Similar to LDOBJ, but instead load the unmanaged
9784 * representation of the vtype to the stack.
9786 CHECK_STACK (1);
9787 CHECK_OPSIZE (6);
9788 --sp;
9789 token = read32 (ip + 2);
9790 klass = mono_method_get_wrapper_data (method, token);
9791 g_assert (klass->valuetype);
9792 mono_class_init (klass);
9795 MonoInst *src, *dest, *temp;
9797 src = sp [0];
9798 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9799 temp->backend.is_pinvoke = 1;
9800 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9801 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9803 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9804 dest->type = STACK_VTYPE;
9805 dest->klass = klass;
9807 *sp ++ = dest;
9808 ip += 6;
9810 break;
9811 case CEE_MONO_RETOBJ: {
9813 * Same as RET, but return the native representation of a vtype
9814 * to the caller.
9816 g_assert (cfg->ret);
9817 g_assert (mono_method_signature (method)->pinvoke);
9818 CHECK_STACK (1);
9819 --sp;
9821 CHECK_OPSIZE (6);
9822 token = read32 (ip + 2);
9823 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9825 if (!cfg->vret_addr) {
9826 g_assert (cfg->ret_var_is_local);
9828 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9829 } else {
9830 EMIT_NEW_RETLOADA (cfg, ins);
9832 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9834 if (sp != stack_start)
9835 UNVERIFIED;
9837 MONO_INST_NEW (cfg, ins, OP_BR);
9838 ins->inst_target_bb = end_bblock;
9839 MONO_ADD_INS (bblock, ins);
9840 link_bblock (cfg, bblock, end_bblock);
9841 start_new_bblock = 1;
9842 ip += 6;
9843 break;
9845 case CEE_MONO_CISINST:
9846 case CEE_MONO_CCASTCLASS: {
9847 int token;
9848 CHECK_STACK (1);
9849 --sp;
9850 CHECK_OPSIZE (6);
9851 token = read32 (ip + 2);
9852 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9853 if (ip [1] == CEE_MONO_CISINST)
9854 ins = handle_cisinst (cfg, klass, sp [0]);
9855 else
9856 ins = handle_ccastclass (cfg, klass, sp [0]);
9857 bblock = cfg->cbb;
9858 *sp++ = ins;
9859 ip += 6;
9860 break;
9862 case CEE_MONO_SAVE_LMF:
9863 case CEE_MONO_RESTORE_LMF:
9864 #ifdef MONO_ARCH_HAVE_LMF_OPS
9865 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9866 MONO_ADD_INS (bblock, ins);
9867 cfg->need_lmf_area = TRUE;
9868 #endif
9869 ip += 2;
9870 break;
9871 case CEE_MONO_CLASSCONST:
9872 CHECK_STACK_OVF (1);
9873 CHECK_OPSIZE (6);
9874 token = read32 (ip + 2);
9875 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9876 *sp++ = ins;
9877 ip += 6;
9878 inline_costs += 10 * num_calls++;
9879 break;
9880 case CEE_MONO_NOT_TAKEN:
9881 bblock->out_of_line = TRUE;
9882 ip += 2;
9883 break;
9884 case CEE_MONO_TLS:
9885 CHECK_STACK_OVF (1);
9886 CHECK_OPSIZE (6);
9887 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9888 ins->dreg = alloc_preg (cfg);
9889 ins->inst_offset = (gint32)read32 (ip + 2);
9890 ins->type = STACK_PTR;
9891 MONO_ADD_INS (bblock, ins);
9892 *sp++ = ins;
9893 ip += 6;
9894 break;
9895 case CEE_MONO_DYN_CALL: {
9896 MonoCallInst *call;
9898 /* It would be easier to call a trampoline, but that would put an
9899 * extra frame on the stack, confusing exception handling. So
9900 * implement it inline using an opcode for now.
9903 if (!cfg->dyn_call_var) {
9904 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9905 /* prevent it from being register allocated */
9906 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9909 /* Has to use a call inst since it local regalloc expects it */
9910 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9911 ins = (MonoInst*)call;
9912 sp -= 2;
9913 ins->sreg1 = sp [0]->dreg;
9914 ins->sreg2 = sp [1]->dreg;
9915 MONO_ADD_INS (bblock, ins);
9917 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9918 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9919 #endif
9921 ip += 2;
9922 inline_costs += 10 * num_calls++;
9924 break;
9926 default:
9927 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9928 break;
9930 break;
9933 case CEE_PREFIX1: {
9934 CHECK_OPSIZE (2);
9935 switch (ip [1]) {
9936 case CEE_ARGLIST: {
9937 /* somewhat similar to LDTOKEN */
9938 MonoInst *addr, *vtvar;
9939 CHECK_STACK_OVF (1);
9940 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9942 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9943 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9945 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9946 ins->type = STACK_VTYPE;
9947 ins->klass = mono_defaults.argumenthandle_class;
9948 *sp++ = ins;
9949 ip += 2;
9950 break;
9952 case CEE_CEQ:
9953 case CEE_CGT:
9954 case CEE_CGT_UN:
9955 case CEE_CLT:
9956 case CEE_CLT_UN: {
9957 MonoInst *cmp;
9958 CHECK_STACK (2);
9960 * The following transforms:
9961 * CEE_CEQ into OP_CEQ
9962 * CEE_CGT into OP_CGT
9963 * CEE_CGT_UN into OP_CGT_UN
9964 * CEE_CLT into OP_CLT
9965 * CEE_CLT_UN into OP_CLT_UN
9967 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9969 MONO_INST_NEW (cfg, ins, cmp->opcode);
9970 sp -= 2;
9971 cmp->sreg1 = sp [0]->dreg;
9972 cmp->sreg2 = sp [1]->dreg;
9973 type_from_op (cmp, sp [0], sp [1]);
9974 CHECK_TYPE (cmp);
9975 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9976 cmp->opcode = OP_LCOMPARE;
9977 else if (sp [0]->type == STACK_R8)
9978 cmp->opcode = OP_FCOMPARE;
9979 else
9980 cmp->opcode = OP_ICOMPARE;
9981 MONO_ADD_INS (bblock, cmp);
9982 ins->type = STACK_I4;
9983 ins->dreg = alloc_dreg (cfg, ins->type);
9984 type_from_op (ins, sp [0], sp [1]);
9986 if (cmp->opcode == OP_FCOMPARE) {
9988 * The backends expect the fceq opcodes to do the
9989 * comparison too.
9991 cmp->opcode = OP_NOP;
9992 ins->sreg1 = cmp->sreg1;
9993 ins->sreg2 = cmp->sreg2;
9995 MONO_ADD_INS (bblock, ins);
9996 *sp++ = ins;
9997 ip += 2;
9998 break;
10000 case CEE_LDFTN: {
10001 MonoInst *argconst;
10002 MonoMethod *cil_method;
10003 gboolean needs_static_rgctx_invoke;
10005 CHECK_STACK_OVF (1);
10006 CHECK_OPSIZE (6);
10007 n = read32 (ip + 2);
10008 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10009 if (!cmethod || mono_loader_get_last_error ())
10010 LOAD_ERROR;
10011 mono_class_init (cmethod->klass);
10013 mono_save_token_info (cfg, image, n, cmethod);
10015 if (cfg->generic_sharing_context)
10016 context_used = mono_method_check_context_used (cmethod);
10018 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
10020 cil_method = cmethod;
10021 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10022 METHOD_ACCESS_FAILURE;
10024 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10025 if (check_linkdemand (cfg, method, cmethod))
10026 INLINE_FAILURE;
10027 CHECK_CFG_EXCEPTION;
10028 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10029 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10033 * Optimize the common case of ldftn+delegate creation
10035 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10036 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10037 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10038 MonoInst *target_ins;
10039 MonoMethod *invoke;
10040 int invoke_context_used = 0;
10042 invoke = mono_get_delegate_invoke (ctor_method->klass);
10043 if (!invoke || !mono_method_signature (invoke))
10044 LOAD_ERROR;
10046 if (cfg->generic_sharing_context)
10047 invoke_context_used = mono_method_check_context_used (invoke);
10049 target_ins = sp [-1];
10051 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10052 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10053 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10054 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10055 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10059 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10060 /* FIXME: SGEN support */
10061 if (invoke_context_used == 0) {
10062 ip += 6;
10063 if (cfg->verbose_level > 3)
10064 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10065 sp --;
10066 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10067 CHECK_CFG_EXCEPTION;
10068 ip += 5;
10069 sp ++;
10070 break;
10072 #endif
10076 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10077 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10078 *sp++ = ins;
10080 ip += 6;
10081 inline_costs += 10 * num_calls++;
10082 break;
10084 case CEE_LDVIRTFTN: {
10085 MonoInst *args [2];
10087 CHECK_STACK (1);
10088 CHECK_OPSIZE (6);
10089 n = read32 (ip + 2);
10090 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10091 if (!cmethod || mono_loader_get_last_error ())
10092 LOAD_ERROR;
10093 mono_class_init (cmethod->klass);
10095 if (cfg->generic_sharing_context)
10096 context_used = mono_method_check_context_used (cmethod);
10098 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10099 if (check_linkdemand (cfg, method, cmethod))
10100 INLINE_FAILURE;
10101 CHECK_CFG_EXCEPTION;
10102 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10103 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10106 --sp;
10107 args [0] = *sp;
10109 args [1] = emit_get_rgctx_method (cfg, context_used,
10110 cmethod, MONO_RGCTX_INFO_METHOD);
10112 if (context_used)
10113 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10114 else
10115 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10117 ip += 6;
10118 inline_costs += 10 * num_calls++;
10119 break;
10121 case CEE_LDARG:
10122 CHECK_STACK_OVF (1);
10123 CHECK_OPSIZE (4);
10124 n = read16 (ip + 2);
10125 CHECK_ARG (n);
10126 EMIT_NEW_ARGLOAD (cfg, ins, n);
10127 *sp++ = ins;
10128 ip += 4;
10129 break;
10130 case CEE_LDARGA:
10131 CHECK_STACK_OVF (1);
10132 CHECK_OPSIZE (4);
10133 n = read16 (ip + 2);
10134 CHECK_ARG (n);
10135 NEW_ARGLOADA (cfg, ins, n);
10136 MONO_ADD_INS (cfg->cbb, ins);
10137 *sp++ = ins;
10138 ip += 4;
10139 break;
10140 case CEE_STARG:
10141 CHECK_STACK (1);
10142 --sp;
10143 CHECK_OPSIZE (4);
10144 n = read16 (ip + 2);
10145 CHECK_ARG (n);
10146 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10147 UNVERIFIED;
10148 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10149 ip += 4;
10150 break;
10151 case CEE_LDLOC:
10152 CHECK_STACK_OVF (1);
10153 CHECK_OPSIZE (4);
10154 n = read16 (ip + 2);
10155 CHECK_LOCAL (n);
10156 EMIT_NEW_LOCLOAD (cfg, ins, n);
10157 *sp++ = ins;
10158 ip += 4;
10159 break;
10160 case CEE_LDLOCA: {
10161 unsigned char *tmp_ip;
10162 CHECK_STACK_OVF (1);
10163 CHECK_OPSIZE (4);
10164 n = read16 (ip + 2);
10165 CHECK_LOCAL (n);
10167 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10168 ip = tmp_ip;
10169 inline_costs += 1;
10170 break;
10173 EMIT_NEW_LOCLOADA (cfg, ins, n);
10174 *sp++ = ins;
10175 ip += 4;
10176 break;
10178 case CEE_STLOC:
10179 CHECK_STACK (1);
10180 --sp;
10181 CHECK_OPSIZE (4);
10182 n = read16 (ip + 2);
10183 CHECK_LOCAL (n);
10184 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10185 UNVERIFIED;
10186 emit_stloc_ir (cfg, sp, header, n);
10187 ip += 4;
10188 inline_costs += 1;
10189 break;
10190 case CEE_LOCALLOC:
10191 CHECK_STACK (1);
10192 --sp;
10193 if (sp != stack_start)
10194 UNVERIFIED;
10195 if (cfg->method != method)
10197 * Inlining this into a loop in a parent could lead to
10198 * stack overflows which is different behavior than the
10199 * non-inlined case, thus disable inlining in this case.
10201 goto inline_failure;
10203 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10204 ins->dreg = alloc_preg (cfg);
10205 ins->sreg1 = sp [0]->dreg;
10206 ins->type = STACK_PTR;
10207 MONO_ADD_INS (cfg->cbb, ins);
10209 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10210 if (init_locals)
10211 ins->flags |= MONO_INST_INIT;
10213 *sp++ = ins;
10214 ip += 2;
10215 break;
10216 case CEE_ENDFILTER: {
10217 MonoExceptionClause *clause, *nearest;
10218 int cc, nearest_num;
10220 CHECK_STACK (1);
10221 --sp;
10222 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10223 UNVERIFIED;
10224 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10225 ins->sreg1 = (*sp)->dreg;
10226 MONO_ADD_INS (bblock, ins);
10227 start_new_bblock = 1;
10228 ip += 2;
10230 nearest = NULL;
10231 nearest_num = 0;
10232 for (cc = 0; cc < header->num_clauses; ++cc) {
10233 clause = &header->clauses [cc];
10234 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10235 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10236 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10237 nearest = clause;
10238 nearest_num = cc;
10241 g_assert (nearest);
10242 if ((ip - header->code) != nearest->handler_offset)
10243 UNVERIFIED;
10245 break;
10247 case CEE_UNALIGNED_:
10248 ins_flag |= MONO_INST_UNALIGNED;
10249 /* FIXME: record alignment? we can assume 1 for now */
10250 CHECK_OPSIZE (3);
10251 ip += 3;
10252 break;
10253 case CEE_VOLATILE_:
10254 ins_flag |= MONO_INST_VOLATILE;
10255 ip += 2;
10256 break;
10257 case CEE_TAIL_:
10258 ins_flag |= MONO_INST_TAILCALL;
10259 cfg->flags |= MONO_CFG_HAS_TAIL;
10260 /* Can't inline tail calls at this time */
10261 inline_costs += 100000;
10262 ip += 2;
10263 break;
10264 case CEE_INITOBJ:
10265 CHECK_STACK (1);
10266 --sp;
10267 CHECK_OPSIZE (6);
10268 token = read32 (ip + 2);
10269 klass = mini_get_class (method, token, generic_context);
10270 CHECK_TYPELOAD (klass);
10271 if (generic_class_is_reference_type (cfg, klass))
10272 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10273 else
10274 mini_emit_initobj (cfg, *sp, NULL, klass);
10275 ip += 6;
10276 inline_costs += 1;
10277 break;
10278 case CEE_CONSTRAINED_:
10279 CHECK_OPSIZE (6);
10280 token = read32 (ip + 2);
10281 if (method->wrapper_type != MONO_WRAPPER_NONE)
10282 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10283 else
10284 constrained_call = mono_class_get_full (image, token, generic_context);
10285 CHECK_TYPELOAD (constrained_call);
10286 ip += 6;
10287 break;
10288 case CEE_CPBLK:
10289 case CEE_INITBLK: {
10290 MonoInst *iargs [3];
10291 CHECK_STACK (3);
10292 sp -= 3;
10294 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10295 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10296 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10297 /* emit_memset only works when val == 0 */
10298 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10299 } else {
10300 iargs [0] = sp [0];
10301 iargs [1] = sp [1];
10302 iargs [2] = sp [2];
10303 if (ip [1] == CEE_CPBLK) {
10304 MonoMethod *memcpy_method = get_memcpy_method ();
10305 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10306 } else {
10307 MonoMethod *memset_method = get_memset_method ();
10308 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10311 ip += 2;
10312 inline_costs += 1;
10313 break;
10315 case CEE_NO_:
10316 CHECK_OPSIZE (3);
10317 if (ip [2] & 0x1)
10318 ins_flag |= MONO_INST_NOTYPECHECK;
10319 if (ip [2] & 0x2)
10320 ins_flag |= MONO_INST_NORANGECHECK;
10321 /* we ignore the no-nullcheck for now since we
10322 * really do it explicitly only when doing callvirt->call
10324 ip += 3;
10325 break;
10326 case CEE_RETHROW: {
10327 MonoInst *load;
10328 int handler_offset = -1;
10330 for (i = 0; i < header->num_clauses; ++i) {
10331 MonoExceptionClause *clause = &header->clauses [i];
10332 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10333 handler_offset = clause->handler_offset;
10334 break;
10338 bblock->flags |= BB_EXCEPTION_UNSAFE;
10340 g_assert (handler_offset != -1);
10342 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10343 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10344 ins->sreg1 = load->dreg;
10345 MONO_ADD_INS (bblock, ins);
10347 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10348 MONO_ADD_INS (bblock, ins);
10350 sp = stack_start;
10351 link_bblock (cfg, bblock, end_bblock);
10352 start_new_bblock = 1;
10353 ip += 2;
10354 break;
10356 case CEE_SIZEOF: {
10357 guint32 align;
10358 int ialign;
10360 CHECK_STACK_OVF (1);
10361 CHECK_OPSIZE (6);
10362 token = read32 (ip + 2);
10363 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10364 MonoType *type = mono_type_create_from_typespec (image, token);
10365 token = mono_type_size (type, &ialign);
10366 } else {
10367 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10368 CHECK_TYPELOAD (klass);
10369 mono_class_init (klass);
10370 token = mono_class_value_size (klass, &align);
10372 EMIT_NEW_ICONST (cfg, ins, token);
10373 *sp++= ins;
10374 ip += 6;
10375 break;
10377 case CEE_REFANYTYPE: {
10378 MonoInst *src_var, *src;
10380 CHECK_STACK (1);
10381 --sp;
10383 // FIXME:
10384 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10385 if (!src_var)
10386 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10387 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10389 *sp++ = ins;
10390 ip += 2;
10391 break;
10393 case CEE_READONLY_:
10394 readonly = TRUE;
10395 ip += 2;
10396 break;
10398 case CEE_UNUSED56:
10399 case CEE_UNUSED57:
10400 case CEE_UNUSED70:
10401 case CEE_UNUSED:
10402 case CEE_UNUSED99:
10403 UNVERIFIED;
10405 default:
10406 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10407 UNVERIFIED;
10409 break;
10411 case CEE_UNUSED58:
10412 case CEE_UNUSED1:
10413 UNVERIFIED;
10415 default:
10416 g_warning ("opcode 0x%02x not handled", *ip);
10417 UNVERIFIED;
10420 if (start_new_bblock != 1)
10421 UNVERIFIED;
10423 bblock->cil_length = ip - bblock->cil_code;
10424 if (bblock->next_bb) {
10425 /* This could already be set because of inlining, #693905 */
10426 MonoBasicBlock *bb = bblock;
10428 while (bb->next_bb)
10429 bb = bb->next_bb;
10430 bb->next_bb = end_bblock;
10431 } else {
10432 bblock->next_bb = end_bblock;
10435 if (cfg->method == method && cfg->domainvar) {
10436 MonoInst *store;
10437 MonoInst *get_domain;
10439 cfg->cbb = init_localsbb;
10441 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10442 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10444 else {
10445 get_domain->dreg = alloc_preg (cfg);
10446 MONO_ADD_INS (cfg->cbb, get_domain);
10448 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10449 MONO_ADD_INS (cfg->cbb, store);
10452 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10453 if (cfg->compile_aot)
10454 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10455 mono_get_got_var (cfg);
10456 #endif
10458 if (cfg->method == method && cfg->got_var)
10459 mono_emit_load_got_addr (cfg);
10461 if (init_locals) {
10462 MonoInst *store;
10464 cfg->cbb = init_localsbb;
10465 cfg->ip = NULL;
10466 for (i = 0; i < header->num_locals; ++i) {
10467 MonoType *ptype = header->locals [i];
10468 int t = ptype->type;
10469 dreg = cfg->locals [i]->dreg;
10471 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10472 t = mono_class_enum_basetype (ptype->data.klass)->type;
10473 if (ptype->byref) {
10474 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10475 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10476 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10477 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10478 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10479 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10480 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10481 ins->type = STACK_R8;
10482 ins->inst_p0 = (void*)&r8_0;
10483 ins->dreg = alloc_dreg (cfg, STACK_R8);
10484 MONO_ADD_INS (init_localsbb, ins);
10485 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10486 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10487 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10488 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10489 } else {
10490 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10495 if (cfg->init_ref_vars && cfg->method == method) {
10496 /* Emit initialization for ref vars */
10497 // FIXME: Avoid duplication initialization for IL locals.
10498 for (i = 0; i < cfg->num_varinfo; ++i) {
10499 MonoInst *ins = cfg->varinfo [i];
10501 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10502 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10506 /* Add a sequence point for method entry/exit events */
10507 if (seq_points) {
10508 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10509 MONO_ADD_INS (init_localsbb, ins);
10510 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10511 MONO_ADD_INS (cfg->bb_exit, ins);
10514 cfg->ip = NULL;
10516 if (cfg->method == method) {
10517 MonoBasicBlock *bb;
10518 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10519 bb->region = mono_find_block_region (cfg, bb->real_offset);
10520 if (cfg->spvars)
10521 mono_create_spvar_for_region (cfg, bb->region);
10522 if (cfg->verbose_level > 2)
10523 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10527 g_slist_free (class_inits);
10528 dont_inline = g_list_remove (dont_inline, method);
10530 if (inline_costs < 0) {
10531 char *mname;
10533 /* Method is too large */
10534 mname = mono_method_full_name (method, TRUE);
10535 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10536 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10537 g_free (mname);
10538 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10539 mono_basic_block_free (original_bb);
10540 return -1;
10543 if ((cfg->verbose_level > 2) && (cfg->method == method))
10544 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10546 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10547 mono_basic_block_free (original_bb);
10548 return inline_costs;
10550 exception_exit:
10551 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10552 goto cleanup;
10554 inline_failure:
10555 goto cleanup;
10557 load_error:
10558 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10559 goto cleanup;
10561 unverified:
10562 set_exception_type_from_invalid_il (cfg, method, ip);
10563 goto cleanup;
10565 cleanup:
10566 g_slist_free (class_inits);
10567 mono_basic_block_free (original_bb);
10568 dont_inline = g_list_remove (dont_inline, method);
10569 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10570 return -1;
10573 static int
10574 store_membase_reg_to_store_membase_imm (int opcode)
10576 switch (opcode) {
10577 case OP_STORE_MEMBASE_REG:
10578 return OP_STORE_MEMBASE_IMM;
10579 case OP_STOREI1_MEMBASE_REG:
10580 return OP_STOREI1_MEMBASE_IMM;
10581 case OP_STOREI2_MEMBASE_REG:
10582 return OP_STOREI2_MEMBASE_IMM;
10583 case OP_STOREI4_MEMBASE_REG:
10584 return OP_STOREI4_MEMBASE_IMM;
10585 case OP_STOREI8_MEMBASE_REG:
10586 return OP_STOREI8_MEMBASE_IMM;
10587 default:
10588 g_assert_not_reached ();
10591 return -1;
10594 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, i.e. the opcode which takes
 * an inline constant instead of a second source register. Returns -1 if
 * OPCODE has no immediate form, in which case the caller must keep the
 * register-register version.
 */
int
mono_op_to_op_imm (int opcode)
{
	switch (opcode) {
	case OP_IADD:
		return OP_IADD_IMM;
	case OP_ISUB:
		return OP_ISUB_IMM;
	case OP_IDIV:
		return OP_IDIV_IMM;
	case OP_IDIV_UN:
		return OP_IDIV_UN_IMM;
	case OP_IREM:
		return OP_IREM_IMM;
	case OP_IREM_UN:
		return OP_IREM_UN_IMM;
	case OP_IMUL:
		return OP_IMUL_IMM;
	case OP_IAND:
		return OP_IAND_IMM;
	case OP_IOR:
		return OP_IOR_IMM;
	case OP_IXOR:
		return OP_IXOR_IMM;
	case OP_ISHL:
		return OP_ISHL_IMM;
	case OP_ISHR:
		return OP_ISHR_IMM;
	case OP_ISHR_UN:
		return OP_ISHR_UN_IMM;

	case OP_LADD:
		return OP_LADD_IMM;
	case OP_LSUB:
		return OP_LSUB_IMM;
	case OP_LAND:
		return OP_LAND_IMM;
	case OP_LOR:
		return OP_LOR_IMM;
	case OP_LXOR:
		return OP_LXOR_IMM;
	case OP_LSHL:
		return OP_LSHL_IMM;
	case OP_LSHR:
		return OP_LSHR_IMM;
	case OP_LSHR_UN:
		return OP_LSHR_UN_IMM;

	case OP_COMPARE:
		return OP_COMPARE_IMM;
	case OP_ICOMPARE:
		return OP_ICOMPARE_IMM;
	case OP_LCOMPARE:
		return OP_LCOMPARE_IMM;

	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;

	/* x86-family targets also have immediate forms of push/compare */
#if defined(TARGET_X86) || defined (TARGET_AMD64)
	case OP_X86_PUSH:
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
	/* Indirect calls to a constant address become direct calls */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
	case OP_CALL_REG:
		return OP_CALL;
	case OP_LCALL_REG:
		return OP_LCALL;
	case OP_FCALL_REG:
		return OP_FCALL;
	case OP_LOCALLOC:
		return OP_LOCALLOC_IMM;
	}

	return -1;
}
10685 static int
10686 ldind_to_load_membase (int opcode)
10688 switch (opcode) {
10689 case CEE_LDIND_I1:
10690 return OP_LOADI1_MEMBASE;
10691 case CEE_LDIND_U1:
10692 return OP_LOADU1_MEMBASE;
10693 case CEE_LDIND_I2:
10694 return OP_LOADI2_MEMBASE;
10695 case CEE_LDIND_U2:
10696 return OP_LOADU2_MEMBASE;
10697 case CEE_LDIND_I4:
10698 return OP_LOADI4_MEMBASE;
10699 case CEE_LDIND_U4:
10700 return OP_LOADU4_MEMBASE;
10701 case CEE_LDIND_I:
10702 return OP_LOAD_MEMBASE;
10703 case CEE_LDIND_REF:
10704 return OP_LOAD_MEMBASE;
10705 case CEE_LDIND_I8:
10706 return OP_LOADI8_MEMBASE;
10707 case CEE_LDIND_R4:
10708 return OP_LOADR4_MEMBASE;
10709 case CEE_LDIND_R8:
10710 return OP_LOADR8_MEMBASE;
10711 default:
10712 g_assert_not_reached ();
10715 return -1;
10718 static int
10719 stind_to_store_membase (int opcode)
10721 switch (opcode) {
10722 case CEE_STIND_I1:
10723 return OP_STOREI1_MEMBASE_REG;
10724 case CEE_STIND_I2:
10725 return OP_STOREI2_MEMBASE_REG;
10726 case CEE_STIND_I4:
10727 return OP_STOREI4_MEMBASE_REG;
10728 case CEE_STIND_I:
10729 case CEE_STIND_REF:
10730 return OP_STORE_MEMBASE_REG;
10731 case CEE_STIND_I8:
10732 return OP_STOREI8_MEMBASE_REG;
10733 case CEE_STIND_R4:
10734 return OP_STORER4_MEMBASE_REG;
10735 case CEE_STIND_R8:
10736 return OP_STORER8_MEMBASE_REG;
10737 default:
10738 g_assert_not_reached ();
10741 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to
 * the corresponding OP_LOAD*_MEM opcode which loads from an absolute
 * address. Returns -1 if the target has no such opcode, or for load
 * sizes with no _MEM form.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 64 bit absolute loads only exist on 64 bit targets */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return a fused read-modify-write opcode which applies OPCODE
 * directly to a memory destination, for the case where the result would
 * otherwise be written back with STORE_OPCODE. Returns -1 when no such
 * fused opcode exists on this target, or when the store is not a
 * register-sized integer store. OP_MOVE maps to OP_NOP because the
 * store itself already performs the move.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only full-word (32 bit) stores can be folded on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* 32 bit and 64 bit integer stores can both be folded on amd64 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the boolean result of OPCODE directly
 * into memory (x86 SETcc with a membase destination), given that the
 * result would otherwise be stored with STORE_OPCODE. Only byte-sized
 * stores can be fused; returns -1 in every other case.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Explicit break: previously fell through into OP_CNE, which was
		 * harmless only because both cases test the same store opcode. */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode equivalent to OPCODE but which reads its first
 * source operand directly from memory, for the case where that operand
 * would otherwise be loaded with LOAD_OPCODE. Returns -1 if the load
 * cannot be folded on this target.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer/32-bit loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Under ILP32, OP_LOAD_MEMBASE is only 32 bits wide and can't be pushed */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		/* Pointer-sized loads are 32 bits wide under ILP32 */
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode equivalent to OPCODE but which reads its second
 * source operand directly from memory, for the case where that operand
 * would otherwise be loaded with LOAD_OPCODE. Returns -1 if the load
 * cannot be folded on this target.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only pointer/32-bit loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* Under ILP32, pointer-sized loads count as 32 bit operands */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but returns -1 for opcodes whose
 * immediate variant would be emulated in software on this target
 * (long shifts on 32 bit registers, division/remainder on targets
 * which emulate them), so callers keep the non-immediate form.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
11047 #ifndef DISABLE_JIT
11050 * mono_handle_global_vregs:
11052 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11053 * for them.
11055 void
11056 mono_handle_global_vregs (MonoCompile *cfg)
11058 gint32 *vreg_to_bb;
11059 MonoBasicBlock *bb;
11060 int i, pos;
11062 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11064 #ifdef MONO_ARCH_SIMD_INTRINSICS
11065 if (cfg->uses_simd_intrinsics)
11066 mono_simd_simplify_indirection (cfg);
11067 #endif
11069 /* Find local vregs used in more than one bb */
11070 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11071 MonoInst *ins = bb->code;
11072 int block_num = bb->block_num;
11074 if (cfg->verbose_level > 2)
11075 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11077 cfg->cbb = bb;
11078 for (; ins; ins = ins->next) {
11079 const char *spec = INS_INFO (ins->opcode);
11080 int regtype = 0, regindex;
11081 gint32 prev_bb;
11083 if (G_UNLIKELY (cfg->verbose_level > 2))
11084 mono_print_ins (ins);
11086 g_assert (ins->opcode >= MONO_CEE_LAST);
11088 for (regindex = 0; regindex < 4; regindex ++) {
11089 int vreg = 0;
11091 if (regindex == 0) {
11092 regtype = spec [MONO_INST_DEST];
11093 if (regtype == ' ')
11094 continue;
11095 vreg = ins->dreg;
11096 } else if (regindex == 1) {
11097 regtype = spec [MONO_INST_SRC1];
11098 if (regtype == ' ')
11099 continue;
11100 vreg = ins->sreg1;
11101 } else if (regindex == 2) {
11102 regtype = spec [MONO_INST_SRC2];
11103 if (regtype == ' ')
11104 continue;
11105 vreg = ins->sreg2;
11106 } else if (regindex == 3) {
11107 regtype = spec [MONO_INST_SRC3];
11108 if (regtype == ' ')
11109 continue;
11110 vreg = ins->sreg3;
11113 #if SIZEOF_REGISTER == 4
11114 /* In the LLVM case, the long opcodes are not decomposed */
11115 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11117 * Since some instructions reference the original long vreg,
11118 * and some reference the two component vregs, it is quite hard
11119 * to determine when it needs to be global. So be conservative.
11121 if (!get_vreg_to_inst (cfg, vreg)) {
11122 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11124 if (cfg->verbose_level > 2)
11125 printf ("LONG VREG R%d made global.\n", vreg);
11129 * Make the component vregs volatile since the optimizations can
11130 * get confused otherwise.
11132 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11133 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11135 #endif
11137 g_assert (vreg != -1);
11139 prev_bb = vreg_to_bb [vreg];
11140 if (prev_bb == 0) {
11141 /* 0 is a valid block num */
11142 vreg_to_bb [vreg] = block_num + 1;
11143 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11144 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11145 continue;
11147 if (!get_vreg_to_inst (cfg, vreg)) {
11148 if (G_UNLIKELY (cfg->verbose_level > 2))
11149 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11151 switch (regtype) {
11152 case 'i':
11153 if (vreg_is_ref (cfg, vreg))
11154 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11155 else
11156 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11157 break;
11158 case 'l':
11159 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11160 break;
11161 case 'f':
11162 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11163 break;
11164 case 'v':
11165 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11166 break;
11167 default:
11168 g_assert_not_reached ();
11172 /* Flag as having been used in more than one bb */
11173 vreg_to_bb [vreg] = -1;
11179 /* If a variable is used in only one bblock, convert it into a local vreg */
11180 for (i = 0; i < cfg->num_varinfo; i++) {
11181 MonoInst *var = cfg->varinfo [i];
11182 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11184 switch (var->type) {
11185 case STACK_I4:
11186 case STACK_OBJ:
11187 case STACK_PTR:
11188 case STACK_MP:
11189 case STACK_VTYPE:
11190 #if SIZEOF_REGISTER == 8
11191 case STACK_I8:
11192 #endif
11193 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11194 /* Enabling this screws up the fp stack on x86 */
11195 case STACK_R8:
11196 #endif
11197 /* Arguments are implicitly global */
11198 /* Putting R4 vars into registers doesn't work currently */
11199 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11201 * Make that the variable's liveness interval doesn't contain a call, since
11202 * that would cause the lvreg to be spilled, making the whole optimization
11203 * useless.
11205 /* This is too slow for JIT compilation */
11206 #if 0
11207 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11208 MonoInst *ins;
11209 int def_index, call_index, ins_index;
11210 gboolean spilled = FALSE;
11212 def_index = -1;
11213 call_index = -1;
11214 ins_index = 0;
11215 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11216 const char *spec = INS_INFO (ins->opcode);
11218 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11219 def_index = ins_index;
11221 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11222 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11223 if (call_index > def_index) {
11224 spilled = TRUE;
11225 break;
11229 if (MONO_IS_CALL (ins))
11230 call_index = ins_index;
11232 ins_index ++;
11235 if (spilled)
11236 break;
11238 #endif
11240 if (G_UNLIKELY (cfg->verbose_level > 2))
11241 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11242 var->flags |= MONO_INST_IS_DEAD;
11243 cfg->vreg_to_inst [var->dreg] = NULL;
11245 break;
11250 * Compress the varinfo and vars tables so the liveness computation is faster and
11251 * takes up less space.
11253 pos = 0;
11254 for (i = 0; i < cfg->num_varinfo; ++i) {
11255 MonoInst *var = cfg->varinfo [i];
11256 if (pos < i && cfg->locals_start == i)
11257 cfg->locals_start = pos;
11258 if (!(var->flags & MONO_INST_IS_DEAD)) {
11259 if (pos < i) {
11260 cfg->varinfo [pos] = cfg->varinfo [i];
11261 cfg->varinfo [pos]->inst_c0 = pos;
11262 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11263 cfg->vars [pos].idx = pos;
11264 #if SIZEOF_REGISTER == 4
11265 if (cfg->varinfo [pos]->type == STACK_I8) {
11266 /* Modify the two component vars too */
11267 MonoInst *var1;
11269 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11270 var1->inst_c0 = pos;
11271 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11272 var1->inst_c0 = pos;
11274 #endif
11276 pos ++;
11279 cfg->num_varinfo = pos;
11280 if (cfg->locals_start > cfg->num_varinfo)
11281 cfg->locals_start = cfg->num_varinfo;
11285 * mono_spill_global_vars:
11287 * Generate spill code for variables which are not allocated to registers,
11288 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11289 * code is generated which could be optimized by the local optimization passes.
 *
 * NOTE(review): this pass also rewrites OP_LDADDR opcodes here (they can only
 * be decomposed once variable stack locations are known), and, when
 * cfg->compute_precise_live_ranges is set, records instruction-precise live
 * ranges and emits OP_LIVERANGE_START/END opcodes.
11291 void
11292 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11294 MonoBasicBlock *bb;
11295 char spec2 [16]; /* scratch ins-spec used when treating a store's base reg as sreg2 */
11296 int orig_next_vreg;
11297 guint32 *vreg_to_lvreg;
11298 guint32 *lvregs;
11299 guint32 i, lvregs_len;
11300 gboolean dest_has_lvreg = FALSE;
11301 guint32 stacktypes [128];
11302 MonoInst **live_range_start, **live_range_end;
11303 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11305 *need_local_opts = FALSE;
11307 memset (spec2, 0, sizeof (spec2));
11309 /* FIXME: Move this function to mini.c */
 /* Map ins-spec regtype characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg () */
11310 stacktypes ['i'] = STACK_PTR;
11311 stacktypes ['l'] = STACK_I8;
11312 stacktypes ['f'] = STACK_R8;
11313 #ifdef MONO_ARCH_SIMD_INTRINSICS
11314 stacktypes ['x'] = STACK_VTYPE;
11315 #endif
11317 #if SIZEOF_REGISTER == 4
11318 /* Create MonoInsts for longs */
11319 for (i = 0; i < cfg->num_varinfo; i++) {
11320 MonoInst *ins = cfg->varinfo [i];
11322 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11323 switch (ins->type) {
11324 case STACK_R8:
11325 case STACK_I8: {
11326 MonoInst *tree;
11328 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11329 break;
11331 g_assert (ins->opcode == OP_REGOFFSET);
 /* Point the two component vregs (dreg + 1 / dreg + 2) at the two word halves of the stack slot */
11333 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11334 g_assert (tree);
11335 tree->opcode = OP_REGOFFSET;
11336 tree->inst_basereg = ins->inst_basereg;
11337 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11339 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11340 g_assert (tree);
11341 tree->opcode = OP_REGOFFSET;
11342 tree->inst_basereg = ins->inst_basereg;
11343 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11344 break;
11346 default:
11347 break;
11351 #endif
11353 if (cfg->compute_gc_maps) {
11354 /* registers need liveness info even for !non refs */
11355 for (i = 0; i < cfg->num_varinfo; i++) {
11356 MonoInst *ins = cfg->varinfo [i];
11358 if (ins->opcode == OP_REGVAR)
11359 ins->flags |= MONO_INST_GC_TRACK;
11363 /* FIXME: widening and truncation */
11366 * As an optimization, when a variable allocated to the stack is first loaded into
11367 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11368 * the variable again.
11370 orig_next_vreg = cfg->next_vreg;
11371 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11372 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
 /* At most 1024 cached lvregs are tracked at a time (enforced by the g_asserts below) */
11373 lvregs_len = 0;
11376 * These arrays contain the first and last instructions accessing a given
11377 * variable.
11378 * Since we emit bblocks in the same order we process them here, and we
11379 * don't split live ranges, these will precisely describe the live range of
11380 * the variable, i.e. the instruction range where a valid value can be found
11381 * in the variables location.
11382 * The live range is computed using the liveness info computed by the liveness pass.
11383 * We can't use vmv->range, since that is an abstract live range, and we need
11384 * one which is instruction precise.
11385 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11387 /* FIXME: Only do this if debugging info is requested */
11388 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11389 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11390 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11391 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11393 /* Add spill loads/stores */
11394 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11395 MonoInst *ins;
11397 if (cfg->verbose_level > 2)
11398 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11400 /* Clear vreg_to_lvreg array */
 /* (cached lvregs are only valid within one bblock) */
11401 for (i = 0; i < lvregs_len; i++)
11402 vreg_to_lvreg [lvregs [i]] = 0;
11403 lvregs_len = 0;
11405 cfg->cbb = bb;
11406 MONO_BB_FOR_EACH_INS (bb, ins) {
11407 const char *spec = INS_INFO (ins->opcode);
11408 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11409 gboolean store, no_lvreg;
11410 int sregs [MONO_MAX_SRC_REGS];
11412 if (G_UNLIKELY (cfg->verbose_level > 2))
11413 mono_print_ins (ins);
11415 if (ins->opcode == OP_NOP)
11416 continue;
11419 * We handle LDADDR here as well, since it can only be decomposed
11420 * when variable addresses are known.
11422 if (ins->opcode == OP_LDADDR) {
11423 MonoInst *var = ins->inst_p0;
11425 if (var->opcode == OP_VTARG_ADDR) {
11426 /* Happens on SPARC/S390 where vtypes are passed by reference */
11427 MonoInst *vtaddr = var->inst_left;
11428 if (vtaddr->opcode == OP_REGVAR) {
11429 ins->opcode = OP_MOVE;
11430 ins->sreg1 = vtaddr->dreg;
11432 else if (var->inst_left->opcode == OP_REGOFFSET) {
11433 ins->opcode = OP_LOAD_MEMBASE;
11434 ins->inst_basereg = vtaddr->inst_basereg;
11435 ins->inst_offset = vtaddr->inst_offset;
11436 } else
11437 NOT_IMPLEMENTED;
11438 } else {
11439 g_assert (var->opcode == OP_REGOFFSET);
 /* The address of a stack variable is simply basereg + offset */
11441 ins->opcode = OP_ADD_IMM;
11442 ins->sreg1 = var->inst_basereg;
11443 ins->inst_imm = var->inst_offset;
11446 *need_local_opts = TRUE;
11447 spec = INS_INFO (ins->opcode);
 /* All IL level opcodes should have been lowered before this pass */
11450 if (ins->opcode < MONO_CEE_LAST) {
11451 mono_print_ins (ins);
11452 g_assert_not_reached ();
11456 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11457 * src register.
11458 * FIXME:
11460 if (MONO_IS_STORE_MEMBASE (ins)) {
 /* Temporarily swap dreg <-> sreg2 and use a fake spec so the base reg is processed as a source */
11461 tmp_reg = ins->dreg;
11462 ins->dreg = ins->sreg2;
11463 ins->sreg2 = tmp_reg;
11464 store = TRUE;
11466 spec2 [MONO_INST_DEST] = ' ';
11467 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11468 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11469 spec2 [MONO_INST_SRC3] = ' ';
11470 spec = spec2;
11471 } else if (MONO_IS_STORE_MEMINDEX (ins))
11472 g_assert_not_reached ();
11473 else
11474 store = FALSE;
11475 no_lvreg = FALSE;
11477 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11478 printf ("\t %.3s %d", spec, ins->dreg);
11479 num_sregs = mono_inst_get_src_registers (ins, sregs);
11480 for (srcindex = 0; srcindex < 3; ++srcindex)
11481 printf (" %d", sregs [srcindex]);
11482 printf ("\n");
11485 /***************/
11486 /* DREG */
11487 /***************/
11488 regtype = spec [MONO_INST_DEST];
11489 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11490 prev_dreg = -1;
11492 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
 /* The dreg is a global variable: either rename it to its hreg, fuse the store, or emit a spill store */
11493 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11494 MonoInst *store_ins;
11495 int store_opcode;
11496 MonoInst *def_ins = ins;
11497 int dreg = ins->dreg; /* The original vreg */
11499 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11501 if (var->opcode == OP_REGVAR) {
11502 ins->dreg = var->dreg;
11503 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11505 * Instead of emitting a load+store, use a _membase opcode.
11507 g_assert (var->opcode == OP_REGOFFSET);
11508 if (ins->opcode == OP_MOVE) {
11509 NULLIFY_INS (ins);
11510 def_ins = NULL;
11511 } else {
11512 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11513 ins->inst_basereg = var->inst_basereg;
11514 ins->inst_offset = var->inst_offset;
11515 ins->dreg = -1;
11517 spec = INS_INFO (ins->opcode);
11518 } else {
11519 guint32 lvreg;
11521 g_assert (var->opcode == OP_REGOFFSET);
11523 prev_dreg = ins->dreg;
11525 /* Invalidate any previous lvreg for this vreg */
11526 vreg_to_lvreg [ins->dreg] = 0;
11528 lvreg = 0;
11530 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
 /* Soft-float: treat the R8 value as an I8 pair */
11531 regtype = 'l';
11532 store_opcode = OP_STOREI8_MEMBASE_REG;
11535 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11537 if (regtype == 'l') {
 /* Store the two word halves separately */
11538 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11539 mono_bblock_insert_after_ins (bb, ins, store_ins);
11540 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11541 mono_bblock_insert_after_ins (bb, ins, store_ins);
11542 def_ins = store_ins;
11544 else {
11545 g_assert (store_opcode != OP_STOREV_MEMBASE);
11547 /* Try to fuse the store into the instruction itself */
11548 /* FIXME: Add more instructions */
11549 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11550 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11551 ins->inst_imm = ins->inst_c0;
11552 ins->inst_destbasereg = var->inst_basereg;
11553 ins->inst_offset = var->inst_offset;
11554 spec = INS_INFO (ins->opcode);
11555 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
 /* Turn the move itself into the store */
11556 ins->opcode = store_opcode;
11557 ins->inst_destbasereg = var->inst_basereg;
11558 ins->inst_offset = var->inst_offset;
11560 no_lvreg = TRUE;
 /* The ins is now a store: redo the dreg <-> sreg2 swap/fake-spec dance from above */
11562 tmp_reg = ins->dreg;
11563 ins->dreg = ins->sreg2;
11564 ins->sreg2 = tmp_reg;
11565 store = TRUE;
11567 spec2 [MONO_INST_DEST] = ' ';
11568 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11569 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11570 spec2 [MONO_INST_SRC3] = ' ';
11571 spec = spec2;
11572 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11573 // FIXME: The backends expect the base reg to be in inst_basereg
11574 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11575 ins->dreg = -1;
11576 ins->inst_basereg = var->inst_basereg;
11577 ins->inst_offset = var->inst_offset;
11578 spec = INS_INFO (ins->opcode);
11579 } else {
11580 /* printf ("INS: "); mono_print_ins (ins); */
11581 /* Create a store instruction */
11582 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11584 /* Insert it after the instruction */
11585 mono_bblock_insert_after_ins (bb, ins, store_ins);
11587 def_ins = store_ins;
11590 * We can't assign ins->dreg to var->dreg here, since the
11591 * sregs could use it. So set a flag, and do it after
11592 * the sregs.
11594 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11595 dest_has_lvreg = TRUE;
 /* Record the first definition for the instruction-precise live range */
11600 if (def_ins && !live_range_start [dreg]) {
11601 live_range_start [dreg] = def_ins;
11602 live_range_start_bb [dreg] = bb;
11605 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11606 MonoInst *tmp;
11608 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11609 tmp->inst_c1 = dreg;
11610 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11614 /************/
11615 /* SREGS */
11616 /************/
11617 num_sregs = mono_inst_get_src_registers (ins, sregs);
11618 for (srcindex = 0; srcindex < 3; ++srcindex) {
11619 regtype = spec [MONO_INST_SRC1 + srcindex];
11620 sreg = sregs [srcindex];
11622 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11623 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
 /* The sreg is a global variable: rename it to its hreg, reuse a cached lvreg, fuse, or emit a load */
11624 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11625 MonoInst *use_ins = ins;
11626 MonoInst *load_ins;
11627 guint32 load_opcode;
11629 if (var->opcode == OP_REGVAR) {
11630 sregs [srcindex] = var->dreg;
11631 //mono_inst_set_src_registers (ins, sregs);
11632 live_range_end [sreg] = use_ins;
11633 live_range_end_bb [sreg] = bb;
11635 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11636 MonoInst *tmp;
11638 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11639 /* var->dreg is a hreg */
11640 tmp->inst_c1 = sreg;
11641 mono_bblock_insert_after_ins (bb, ins, tmp);
11644 continue;
11647 g_assert (var->opcode == OP_REGOFFSET);
11649 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11651 g_assert (load_opcode != OP_LOADV_MEMBASE);
11653 if (vreg_to_lvreg [sreg]) {
11654 g_assert (vreg_to_lvreg [sreg] != -1);
11656 /* The variable is already loaded to an lvreg */
11657 if (G_UNLIKELY (cfg->verbose_level > 2))
11658 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11659 sregs [srcindex] = vreg_to_lvreg [sreg];
11660 //mono_inst_set_src_registers (ins, sregs);
11661 continue;
11664 /* Try to fuse the load into the instruction */
11665 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11666 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11667 sregs [0] = var->inst_basereg;
11668 //mono_inst_set_src_registers (ins, sregs);
11669 ins->inst_offset = var->inst_offset;
11670 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11671 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11672 sregs [1] = var->inst_basereg;
11673 //mono_inst_set_src_registers (ins, sregs);
11674 ins->inst_offset = var->inst_offset;
11675 } else {
11676 if (MONO_IS_REAL_MOVE (ins)) {
 /* A move from the variable: load directly into the move's dreg and drop the move */
11677 ins->opcode = OP_NOP;
11678 sreg = ins->dreg;
11679 } else {
11680 //printf ("%d ", srcindex); mono_print_ins (ins);
11682 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11684 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11685 if (var->dreg == prev_dreg) {
11687 * sreg refers to the value loaded by the load
11688 * emitted below, but we need to use ins->dreg
11689 * since it refers to the store emitted earlier.
11691 sreg = ins->dreg;
11693 g_assert (sreg != -1);
11694 vreg_to_lvreg [var->dreg] = sreg;
11695 g_assert (lvregs_len < 1024);
11696 lvregs [lvregs_len ++] = var->dreg;
11700 sregs [srcindex] = sreg;
11701 //mono_inst_set_src_registers (ins, sregs);
11703 if (regtype == 'l') {
 /* Load the two word halves separately */
11704 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11705 mono_bblock_insert_before_ins (bb, ins, load_ins);
11706 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11707 mono_bblock_insert_before_ins (bb, ins, load_ins);
11708 use_ins = load_ins;
11710 else {
11711 #if SIZEOF_REGISTER == 4
11712 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11713 #endif
11714 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11715 mono_bblock_insert_before_ins (bb, ins, load_ins);
11716 use_ins = load_ins;
 /* Record the last use for the instruction-precise live range */
11720 if (var->dreg < orig_next_vreg) {
11721 live_range_end [var->dreg] = use_ins;
11722 live_range_end_bb [var->dreg] = bb;
11725 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11726 MonoInst *tmp;
11728 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11729 tmp->inst_c1 = var->dreg;
11730 mono_bblock_insert_after_ins (bb, ins, tmp);
11734 mono_inst_set_src_registers (ins, sregs);
 /* Now that the sregs are processed, the new lvreg can be cached for the dreg variable */
11736 if (dest_has_lvreg) {
11737 g_assert (ins->dreg != -1);
11738 vreg_to_lvreg [prev_dreg] = ins->dreg;
11739 g_assert (lvregs_len < 1024);
11740 lvregs [lvregs_len ++] = prev_dreg;
11741 dest_has_lvreg = FALSE;
 /* Undo the earlier dreg <-> sreg2 swap done for store opcodes */
11744 if (store) {
11745 tmp_reg = ins->dreg;
11746 ins->dreg = ins->sreg2;
11747 ins->sreg2 = tmp_reg;
11750 if (MONO_IS_CALL (ins)) {
11751 /* Clear vreg_to_lvreg array */
 /* (a call can clobber the hregs the lvregs would be allocated to) */
11752 for (i = 0; i < lvregs_len; i++)
11753 vreg_to_lvreg [lvregs [i]] = 0;
11754 lvregs_len = 0;
11755 } else if (ins->opcode == OP_NOP) {
 /* The ins was nullified above; clear its registers so later passes ignore them */
11756 ins->dreg = -1;
11757 MONO_INST_NULLIFY_SREGS (ins);
11760 if (cfg->verbose_level > 2)
11761 mono_print_ins_index (1, ins);
11764 /* Extend the live range based on the liveness info */
11765 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11766 for (i = 0; i < cfg->num_varinfo; i ++) {
11767 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11769 if (vreg_is_volatile (cfg, vi->vreg))
11770 /* The liveness info is incomplete */
11771 continue;
11773 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11774 /* Live from at least the first ins of this bb */
11775 live_range_start [vi->vreg] = bb->code;
11776 live_range_start_bb [vi->vreg] = bb;
11779 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11780 /* Live at least until the last ins of this bb */
11781 live_range_end [vi->vreg] = bb->last_ins;
11782 live_range_end_bb [vi->vreg] = bb;
11788 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11790 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11791 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11793 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11794 for (i = 0; i < cfg->num_varinfo; ++i) {
11795 int vreg = MONO_VARINFO (cfg, i)->vreg;
11796 MonoInst *ins;
11798 if (live_range_start [vreg]) {
11799 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11800 ins->inst_c0 = i;
11801 ins->inst_c1 = vreg;
11802 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11804 if (live_range_end [vreg]) {
11805 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11806 ins->inst_c0 = i;
11807 ins->inst_c1 = vreg;
11808 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11809 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11810 else
11811 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11815 #endif
11817 g_free (live_range_start);
11818 g_free (live_range_end);
11819 g_free (live_range_start_bb);
11820 g_free (live_range_end_bb);
11824 * FIXME:
11825 * - use 'iadd' instead of 'int_add'
11826 * - handling ovf opcodes: decompose in method_to_ir.
11827 * - unify iregs/fregs
11828 * -> partly done, the missing parts are:
11829 * - a more complete unification would involve unifying the hregs as well, so
11830 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11831 * would no longer map to the machine hregs, so the code generators would need to
11832 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11833 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11834 * fp/non-fp branches speeds it up by about 15%.
11835 * - use sext/zext opcodes instead of shifts
11836 * - add OP_ICALL
11837 * - get rid of TEMPLOADs if possible and use vregs instead
11838 * - clean up usage of OP_P/OP_ opcodes
11839 * - cleanup usage of DUMMY_USE
11840 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11841 * stack
11842 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11843 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11844 * - make sure handle_stack_args () is called before the branch is emitted
11845 * - when the new IR is done, get rid of all unused stuff
11846 * - COMPARE/BEQ as separate instructions or unify them ?
11847 * - keeping them separate allows specialized compare instructions like
11848 * compare_imm, compare_membase
11849 * - most back ends unify fp compare+branch, fp compare+ceq
11850 * - integrate mono_save_args into inline_method
11851 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11852 * - handle long shift opts on 32 bit platforms somehow: they require
11853 * 3 sregs (2 for arg1 and 1 for arg2)
11854 * - make byref a 'normal' type.
11855 * - use vregs for bb->out_stacks if possible, handle_global_vregs will make them a
11856 * variable if needed.
11857 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11858 * like inline_method.
11859 * - remove inlining restrictions
11860 * - fix LNEG and enable cfold of INEG
11861 * - generalize x86 optimizations like ldelema as a peephole optimization
11862 * - add store_mem_imm for amd64
11863 * - optimize the loading of the interruption flag in the managed->native wrappers
11864 * - avoid special handling of OP_NOP in passes
11865 * - move code inserting instructions into one function/macro.
11866 * - try a coalescing phase after liveness analysis
11867 * - add float -> vreg conversion + local optimizations on !x86
11868 * - figure out how to handle decomposed branches during optimizations, ie.
11869 * compare+branch, op_jump_table+op_br etc.
11870 * - promote RuntimeXHandles to vregs
11871 * - vtype cleanups:
11872 * - add a NEW_VARLOADA_VREG macro
11873 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11874 * accessing vtype fields.
11875 * - get rid of I8CONST on 64 bit platforms
11876 * - dealing with the increase in code size due to branches created during opcode
11877 * decomposition:
11878 * - use extended basic blocks
11879 * - all parts of the JIT
11880 * - handle_global_vregs () && local regalloc
11881 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11882 * - sources of increase in code size:
11883 * - vtypes
11884 * - long compares
11885 * - isinst and castclass
11886 * - lvregs not allocated to global registers even if used multiple times
11887 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11888 * meaningful.
11889 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11890 * - add all micro optimizations from the old JIT
11891 * - put tree optimizations into the deadce pass
11892 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11893 * specific function.
11894 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11895 * fcompare + branchCC.
11896 * - create a helper function for allocating a stack slot, taking into account
11897 * MONO_CFG_HAS_SPILLUP.
11898 * - merge r68207.
11899 * - merge the ia64 switch changes.
11900 * - optimize mono_regstate2_alloc_int/float.
11901 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11902 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11903 * parts of the tree could be separated by other instructions, killing the tree
11904 * arguments, or stores killing loads etc. Also, should we fold loads into other
11905 * instructions if the result of the load is used multiple times ?
11906 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11907 * - LAST MERGE: 108395.
11908 * - when returning vtypes in registers, generate IR and append it to the end of the
11909 * last bb instead of doing it in the epilog.
11910 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11915 NOTES
11916 -----
11918 - When to decompose opcodes:
11919 - earlier: this makes some optimizations hard to implement, since the low level IR
11920 no longer contains the necessary information. But it is easier to do.
11921 - later: harder to implement, enables more optimizations.
11922 - Branches inside bblocks:
11923 - created when decomposing complex opcodes.
11924 - branches to another bblock: harmless, but not tracked by the branch
11925 optimizations, so need to branch to a label at the start of the bblock.
11926 - branches to inside the same bblock: very problematic, trips up the local
11927 reg allocator. Can be fixed by splitting the current bblock, but that is a
11928 complex operation, since some local vregs can become global vregs etc.
11929 - Local/global vregs:
11930 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11931 local register allocator.
11932 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11933 structure, created by mono_create_var (). Assigned to hregs or the stack by
11934 the global register allocator.
11935 - When to do optimizations like alu->alu_imm:
11936 - earlier -> saves work later on since the IR will be smaller/simpler
11937 - later -> can work on more instructions
11938 - Handling of valuetypes:
11939 - When a vtype is pushed on the stack, a new temporary is created, an
11940 instruction computing its address (LDADDR) is emitted and pushed on
11941 the stack. Need to optimize cases when the vtype is used immediately as in
11942 argument passing, stloc etc.
11943 - Instead of the to_end stuff in the old JIT, simply call the function handling
11944 the values on the stack before emitting the last instruction of the bb.
11947 #endif /* DISABLE_JIT */