/*
 * 2010-06-17 Rodrigo Kumpera <rkumpera@novell.com>
 * mono-project.git / mono / mini / method-to-ir.c
 * blob 2f51b2e4431550d6cc23f6bc084d256ae4b16385
 */
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
55 #include "mini.h"
56 #include "trace.h"
58 #include "ir-emit.h"
60 #include "jit-icalls.h"
61 #include "jit.h"
62 #include "debugger-agent.h"
/* Heuristic weights used by the inliner when sizing candidate methods. */
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20

/*
 * Abort inlining of the current method. Only jumps when we are actually
 * inlining (cfg->method != method) and the callee is not a wrapper.
 */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of method-to-IR conversion if an exception was recorded on cfg. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException on cfg and abort compilation. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException on cfg and abort compilation. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/*
 * Give up on generic code sharing for this opcode: when a generic sharing
 * context is active, record the failure on cfg and abort compilation.
 */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;	\
			goto exception_exit;	\
		}	\
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
126 #ifdef MINI_OP
127 #undef MINI_OP
128 #endif
129 #ifdef MINI_OP3
130 #undef MINI_OP3
131 #endif
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #define NONE ' '
135 #define IREG 'i'
136 #define FREG 'f'
137 #define VREG 'v'
138 #define XREG 'x'
139 #if SIZEOF_REGISTER == 8
140 #define LREG IREG
141 #else
142 #define LREG 'l'
143 #endif
144 /* keep in sync with the enum in mini.h */
145 const char
146 ins_info[] = {
147 #include "mini-ops.h"
149 #undef MINI_OP
150 #undef MINI_OP3
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
161 #undef MINI_OP
162 #undef MINI_OP3
/* Initialize a MonoMethodVar: no uses seen yet, no hard register, index 'id'. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
170 void
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
178 guint32
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
184 guint32
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
190 guint32
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
196 guint32
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
202 guint
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
205 if (type->byref)
206 return OP_MOVE;
208 handle_enum:
209 switch (type->type) {
210 case MONO_TYPE_I1:
211 case MONO_TYPE_U1:
212 case MONO_TYPE_BOOLEAN:
213 return OP_MOVE;
214 case MONO_TYPE_I2:
215 case MONO_TYPE_U2:
216 case MONO_TYPE_CHAR:
217 return OP_MOVE;
218 case MONO_TYPE_I4:
219 case MONO_TYPE_U4:
220 return OP_MOVE;
221 case MONO_TYPE_I:
222 case MONO_TYPE_U:
223 case MONO_TYPE_PTR:
224 case MONO_TYPE_FNPTR:
225 return OP_MOVE;
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
231 return OP_MOVE;
232 case MONO_TYPE_I8:
233 case MONO_TYPE_U8:
234 #if SIZEOF_REGISTER == 8
235 return OP_MOVE;
236 #else
237 return OP_LMOVE;
238 #endif
239 case MONO_TYPE_R4:
240 return OP_FMOVE;
241 case MONO_TYPE_R8:
242 return OP_FMOVE;
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
246 goto handle_enum;
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
249 return OP_XMOVE;
250 return OP_VMOVE;
251 case MONO_TYPE_TYPEDBYREF:
252 return OP_VMOVE;
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
255 goto handle_enum;
256 case MONO_TYPE_VAR:
257 case MONO_TYPE_MVAR:
258 g_assert (cfg->generic_sharing_context);
259 return OP_MOVE;
260 default:
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
263 return -1;
266 void
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 int i;
270 MonoInst *tree;
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 printf (", OUT: ");
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 printf (" ]\n");
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
285 * file.
287 #ifndef DISABLE_JIT
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if (!(tblock)) { \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
299 } while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_preg ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
#if SIZEOF_REGISTER == 8
/*
 * On 64-bit targets, sign-extend a 32-bit second operand when it is mixed
 * with a pointer-sized first operand, rewriting ins->sreg2 to the widened vreg.
 */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
/*
 * Pop two operands, emit the type-specialized binary op, push the result.
 * type_from_op() both validates the operand types and specializes the opcode.
 */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

/* Pop one operand, emit the type-specialized unary op, push the result. */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)
/*
 * Emit a compare + conditional branch from the top two stack entries,
 * linking the current bblock to both the taken target and the fall-through
 * block (next_block, or the block at the next ip when next_block is NULL).
 */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {	\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
383 /* *
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
390 static void
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
394 int i, found;
396 #if 0
397 if (from->cil_code) {
398 if (to->cil_code)
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 else
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
402 } else {
403 if (to->cil_code)
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 else
406 printf ("edge from entry to exit\n");
408 #endif
410 found = FALSE;
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
413 found = TRUE;
414 break;
417 if (!found) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
422 newa [i] = to;
423 from->out_count++;
424 from->out_bb = newa;
427 found = FALSE;
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
430 found = TRUE;
431 break;
434 if (!found) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
439 newa [i] = from;
440 to->in_count++;
441 to->in_bb = newa;
445 void
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
457 * Returns:
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
465 static int
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
470 int i;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 else
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
491 return -1;
494 static GList*
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
499 int i;
500 GList *res = NULL;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
510 return res;
513 static void
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
516 MonoInst *var;
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
519 if (var)
520 return;
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
529 MonoInst *
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 static MonoInst*
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
538 MonoInst *var;
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
541 if (var)
542 return var;
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
550 return var;
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 void
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
560 MonoClass *klass;
562 inst->klass = klass = mono_class_from_mono_type (type);
563 if (type->byref) {
564 inst->type = STACK_MP;
565 return;
568 handle_enum:
569 switch (type->type) {
570 case MONO_TYPE_VOID:
571 inst->type = STACK_INV;
572 return;
573 case MONO_TYPE_I1:
574 case MONO_TYPE_U1:
575 case MONO_TYPE_BOOLEAN:
576 case MONO_TYPE_I2:
577 case MONO_TYPE_U2:
578 case MONO_TYPE_CHAR:
579 case MONO_TYPE_I4:
580 case MONO_TYPE_U4:
581 inst->type = STACK_I4;
582 return;
583 case MONO_TYPE_I:
584 case MONO_TYPE_U:
585 case MONO_TYPE_PTR:
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
588 return;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
595 return;
596 case MONO_TYPE_I8:
597 case MONO_TYPE_U8:
598 inst->type = STACK_I8;
599 return;
600 case MONO_TYPE_R4:
601 case MONO_TYPE_R8:
602 inst->type = STACK_R8;
603 return;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
607 goto handle_enum;
608 } else {
609 inst->klass = klass;
610 inst->type = STACK_VTYPE;
611 return;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
616 return;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
619 goto handle_enum;
620 case MONO_TYPE_VAR :
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
624 * really a reference
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
628 return;
629 default:
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
637 static const char
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
649 static const char
650 neg_table [] = {
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
655 static const char
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
667 static const char
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
670 {0},
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
681 static const char
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 static const guint16
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
704 static const guint16
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 static const guint16
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 static const guint16
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 static const guint16
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
728 static const guint16
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
734 static const guint16
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
747 static void
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
751 /* binops */
752 case CEE_ADD:
753 case CEE_SUB:
754 case CEE_MUL:
755 case CEE_DIV:
756 case CEE_REM:
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
760 break;
761 case CEE_DIV_UN:
762 case CEE_REM_UN:
763 case CEE_AND:
764 case CEE_OR:
765 case CEE_XOR:
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
768 break;
769 case CEE_SHL:
770 case CEE_SHR:
771 case CEE_SHR_UN:
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
774 break;
775 case OP_COMPARE:
776 case OP_LCOMPARE:
777 case OP_ICOMPARE:
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
783 else
784 ins->opcode = OP_ICOMPARE;
785 break;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
790 break;
791 case CEE_BEQ:
792 case CEE_BGE:
793 case CEE_BGT:
794 case CEE_BLE:
795 case CEE_BLT:
796 case CEE_BNE_UN:
797 case CEE_BGE_UN:
798 case CEE_BGT_UN:
799 case CEE_BLE_UN:
800 case CEE_BLT_UN:
801 ins->opcode += beqops_op_map [src1->type];
802 break;
803 case OP_CEQ:
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
806 break;
807 case OP_CGT:
808 case OP_CGT_UN:
809 case OP_CLT:
810 case OP_CLT_UN:
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
813 break;
814 /* unops */
815 case CEE_NEG:
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
818 break;
819 case CEE_NOT:
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
822 else
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
825 break;
826 case CEE_CONV_I1:
827 case CEE_CONV_I2:
828 case CEE_CONV_I4:
829 case CEE_CONV_U4:
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
832 break;
833 case CEE_CONV_R_UN:
834 ins->type = STACK_R8;
835 switch (src1->type) {
836 case STACK_I4:
837 case STACK_PTR:
838 ins->opcode = OP_ICONV_TO_R_UN;
839 break;
840 case STACK_I8:
841 ins->opcode = OP_LCONV_TO_R_UN;
842 break;
844 break;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
853 break;
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
858 break;
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
867 break;
868 case CEE_CONV_U:
869 ins->type = STACK_PTR;
870 switch (src1->type) {
871 case STACK_I4:
872 ins->opcode = OP_ICONV_TO_U;
873 break;
874 case STACK_PTR:
875 case STACK_MP:
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
878 #else
879 ins->opcode = OP_MOVE;
880 #endif
881 break;
882 case STACK_I8:
883 ins->opcode = OP_LCONV_TO_U;
884 break;
885 case STACK_R8:
886 ins->opcode = OP_FCONV_TO_U;
887 break;
889 break;
890 case CEE_CONV_I8:
891 case CEE_CONV_U8:
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
894 break;
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
899 break;
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
904 break;
905 case CEE_CONV_R4:
906 case CEE_CONV_R8:
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
909 break;
910 case OP_CKFINITE:
911 ins->type = STACK_R8;
912 break;
913 case CEE_CONV_U2:
914 case CEE_CONV_U1:
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
917 break;
918 case CEE_CONV_I:
919 case CEE_CONV_OVF_I:
920 case CEE_CONV_OVF_U:
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
923 break;
924 case CEE_ADD_OVF:
925 case CEE_ADD_OVF_UN:
926 case CEE_MUL_OVF:
927 case CEE_MUL_OVF_UN:
928 case CEE_SUB_OVF:
929 case CEE_SUB_OVF_UN:
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
934 break;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
937 break;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
945 break;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
948 break;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
952 break;
953 default:
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
955 break;
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
962 static const char
963 ldind_type [] = {
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/*
 * Disabled sanity check: verify the eval-stack types in 'args' are
 * compatible with the signature 'sig'. Returns 1 on success, 0 on mismatch.
 */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1045 * compiling.
1047 MonoInst *
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1052 return NULL;
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1057 #else
1058 return NULL;
1059 #endif
1062 static MonoInst *
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1076 static MonoType*
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 case STACK_MP:
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 default:
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1090 return NULL;
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1097 switch (t->type) {
1098 case MONO_TYPE_I1:
1099 case MONO_TYPE_U1:
1100 case MONO_TYPE_BOOLEAN:
1101 case MONO_TYPE_I2:
1102 case MONO_TYPE_U2:
1103 case MONO_TYPE_CHAR:
1104 case MONO_TYPE_I4:
1105 case MONO_TYPE_U4:
1106 return STACK_I4;
1107 case MONO_TYPE_I:
1108 case MONO_TYPE_U:
1109 case MONO_TYPE_PTR:
1110 case MONO_TYPE_FNPTR:
1111 return STACK_PTR;
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1117 return STACK_OBJ;
1118 case MONO_TYPE_I8:
1119 case MONO_TYPE_U8:
1120 return STACK_I8;
1121 case MONO_TYPE_R4:
1122 case MONO_TYPE_R8:
1123 return STACK_R8;
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1126 return STACK_VTYPE;
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1129 return STACK_VTYPE;
1130 else
1131 return STACK_OBJ;
1132 break;
1133 default:
1134 g_assert_not_reached ();
1137 return -1;
1140 static MonoClass*
1141 array_access_to_klass (int opcode)
1143 switch (opcode) {
1144 case CEE_LDELEM_U1:
1145 return mono_defaults.byte_class;
1146 case CEE_LDELEM_U2:
1147 return mono_defaults.uint16_class;
1148 case CEE_LDELEM_I:
1149 case CEE_STELEM_I:
1150 return mono_defaults.int_class;
1151 case CEE_LDELEM_I1:
1152 case CEE_STELEM_I1:
1153 return mono_defaults.sbyte_class;
1154 case CEE_LDELEM_I2:
1155 case CEE_STELEM_I2:
1156 return mono_defaults.int16_class;
1157 case CEE_LDELEM_I4:
1158 case CEE_STELEM_I4:
1159 return mono_defaults.int32_class;
1160 case CEE_LDELEM_U4:
1161 return mono_defaults.uint32_class;
1162 case CEE_LDELEM_I8:
1163 case CEE_STELEM_I8:
1164 return mono_defaults.int64_class;
1165 case CEE_LDELEM_R4:
1166 case CEE_STELEM_R4:
1167 return mono_defaults.single_class;
1168 case CEE_LDELEM_R8:
1169 case CEE_STELEM_R8:
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1174 default:
1175 g_assert_not_reached ();
1177 return NULL;
1181 * We try to share variables when possible
1183 static MonoInst *
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1186 MonoInst *res;
1187 int pos, vnum;
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1196 case STACK_I4:
1197 case STACK_I8:
1198 case STACK_R8:
1199 case STACK_PTR:
1200 case STACK_MP:
1201 case STACK_OBJ:
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1206 break;
1207 default:
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 return res;
1213 static void
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First exit from this bb: decide which variables receive the spilled stack */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				/* a successor already has an in_stack: reuse its variables */
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors that don't have an in_stack yet */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				/* stack depth mismatch at a join point */
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store each stack item to its spill variable and replace it on the stack */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1358 static void
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1369 else {
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit code which loads into INTF_BIT_REG a nonzero value if the bit for
 * klass->interface_id is set in the interface bitmap located at
 * BASE_REG + OFFSET.
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* the bitmap is compressed: defer the bit test to a JIT icall */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* the interface id is unknown until AOT load time: compute the byte
		 * index (id >> 3) and the bit mask (1 << (id & 7)) at run time */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* id known at JIT time: fold the byte index and the mask into immediates */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1421 static void
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1431 static void
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1441 static void
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 else
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 if (false_target)
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 else
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1459 static void
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1470 static void
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance" test via the supertype table: branch to TRUE_TARGET
 * when the class in KLASS_REG derives from KLASS (supplied statically, or at
 * run time in KLASS_INS), to FALSE_TARGET when the inheritance-depth check
 * fails; falls through when the final equality compare fails.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* the supertypes table may be shorter than klass->idepth: check first */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* load supertypes [klass->idepth - 1] and compare it against klass */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		/* class known only at run time */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
1506 static void
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1512 static void
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 if (true_target)
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 else
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1529 static void
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 if (true_target)
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 else
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1543 static inline void
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 if (klass_inst) {
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 } else {
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1558 static inline void
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1564 static inline void
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 } else {
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 static void
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS (or the
 * run-time class in KLASS_INST), throwing InvalidCastException on failure.
 * For array classes the rank and element class are checked; OBJ_REG (or -1
 * to skip) is used to additionally verify that a rank-1 SZARRAY cast gets a
 * vector (no bounds).  Branches emitted by the helpers for the enum special
 * cases go to OBJECT_IS_NULL.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* ranks must match exactly */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		/* non-array: walk the supertype table */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* the supertypes table may be shorter than klass->idepth: check first */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
1633 static void
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline code that stores VAL (only 0 is supported) into SIZE bytes at
 * DESTREG + OFFSET.  ALIGN is the guaranteed alignment of the destination
 * (0 means the default of 4).  Small aligned sizes use a single immediate
 * store; everything else is unrolled into register stores.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			/* NOTE(review): unreachable while the guard above caps size at 4 —
			 * confirm whether the guard was meant to be size <= SIZEOF_REGISTER */
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* align the offset to 8 bytes, then store in 8-byte chunks */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* store the remainder in progressively smaller chunks */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
/*
 * mini_emit_memcpy:
 * Emit an inline, unrolled copy of SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET.  ALIGN is the guaranteed common alignment of both
 * regions (0 means the default of 4); unaligned regions are copied byte by
 * byte.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* copy in 8-byte chunks while possible */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* copy the remainder in progressively smaller chunks */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
/*
 * ret_type_to_call_opcode:
 * Map the return type of a call to the IR call opcode to use, selecting the
 * *_REG variant when CALLI is set and the *CALLVIRT variant when VIRT is set.
 * Enums and generic instances are unwrapped via the handle_enum loop.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums are handled as their underlying type */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* retry with the generic container's open type */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* nothing can be stored into a void target */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* valuetypes must match exactly */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1951 * signature.
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1957 static int
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
1961 int i;
1963 if (sig->hasthis) {
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1965 return 1;
1966 args++;
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1971 return 1;
1972 continue;
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1976 handle_enum:
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1979 return 1;
1980 continue;
1981 case MONO_TYPE_I1:
1982 case MONO_TYPE_U1:
1983 case MONO_TYPE_BOOLEAN:
1984 case MONO_TYPE_I2:
1985 case MONO_TYPE_U2:
1986 case MONO_TYPE_CHAR:
1987 case MONO_TYPE_I4:
1988 case MONO_TYPE_U4:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1990 return 1;
1991 continue;
1992 case MONO_TYPE_I:
1993 case MONO_TYPE_U:
1994 case MONO_TYPE_PTR:
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1997 return 1;
1998 continue;
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2005 return 1;
2006 continue;
2007 case MONO_TYPE_I8:
2008 case MONO_TYPE_U8:
2009 if (args [i]->type != STACK_I8)
2010 return 1;
2011 continue;
2012 case MONO_TYPE_R4:
2013 case MONO_TYPE_R8:
2014 if (args [i]->type != STACK_R8)
2015 return 1;
2016 continue;
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2020 goto handle_enum;
2022 if (args [i]->type != STACK_VTYPE)
2023 return 1;
2024 continue;
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2027 return 1;
2028 continue;
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2031 goto handle_enum;
2033 default:
2034 g_error ("unknown type 0x%02x in check_call_signature",
2035 simple_type->type);
2038 return 0;
2041 static int
2042 callvirt_to_call (int opcode)
2044 switch (opcode) {
2045 case OP_CALLVIRT:
2046 return OP_CALL;
2047 case OP_VOIDCALLVIRT:
2048 return OP_VOIDCALL;
2049 case OP_FCALLVIRT:
2050 return OP_FCALL;
2051 case OP_VCALLVIRT:
2052 return OP_VCALL;
2053 case OP_LCALLVIRT:
2054 return OP_LCALL;
2055 default:
2056 g_assert_not_reached ();
2059 return -1;
2062 static int
2063 callvirt_to_call_membase (int opcode)
2065 switch (opcode) {
2066 case OP_CALLVIRT:
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2070 case OP_FCALLVIRT:
2071 return OP_FCALL_MEMBASE;
2072 case OP_LCALLVIRT:
2073 return OP_LCALL_MEMBASE;
2074 case OP_VCALLVIRT:
2075 return OP_VCALL_MEMBASE;
2076 default:
2077 g_assert_not_reached ();
2080 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT "method token" to be passed to CALL.  When the
 * architecture has a dedicated IMT register, load IMT_ARG (or the method
 * constant) into a fresh vreg and bind it to that register; otherwise defer
 * to the backend hook.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* Under AOT the method address is resolved through a patch. */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		call->imt_arg_reg = method_reg;
#endif
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2113 static MonoJumpInfo *
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2118 ji->ip.i = ip;
2119 ji->type = type;
2120 ji->data.target = target;
2122 return ji;
2125 inline static MonoCallInst *
2126 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2127 MonoInst **args, int calli, int virtual, int tail)
2129 MonoCallInst *call;
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2131 int i;
2132 #endif
2134 if (tail)
2135 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2136 else
2137 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2139 call->args = args;
2140 call->signature = sig;
2142 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2144 if (tail) {
2145 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2146 call->vret_var = cfg->vret_addr;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2150 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2151 MonoInst *loada;
2153 temp->backend.is_pinvoke = sig->pinvoke;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2164 loada->dreg = alloc_preg (cfg);
2165 loada->inst_p0 = temp;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada->inst_p1 = call;
2168 MONO_ADD_INS (cfg->cbb, loada);
2170 call->inst.dreg = temp->dreg;
2172 call->vret_var = loada;
2173 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2174 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2184 MonoType *t;
2185 MonoInst *in = call->args [i];
2187 if (i >= sig->hasthis)
2188 t = sig->params [i - sig->hasthis];
2189 else
2190 t = &mono_defaults.int_class->byval_arg;
2191 t = mono_type_get_underlying_type (t);
2193 if (!t->byref && t->type == MONO_TYPE_R4) {
2194 MonoInst *iargs [1];
2195 MonoInst *conv;
2197 iargs [0] = in;
2198 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2200 /* The result will be in an int vreg */
2201 call->args [i] = conv;
2205 #endif
2207 #ifdef ENABLE_LLVM
2208 if (COMPILE_LLVM (cfg))
2209 mono_llvm_emit_call (cfg, call);
2210 else
2211 mono_arch_emit_call (cfg, call);
2212 #else
2213 mono_arch_emit_call (cfg, call);
2214 #endif
2216 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2217 cfg->flags |= MONO_CFG_HAS_CALLS;
2219 return call;
2222 inline static MonoInst*
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
2234 static void
2235 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2239 cfg->uses_rgctx_reg = TRUE;
2240 call->rgctx_reg = TRUE;
2241 #ifdef ENABLE_LLVM
2242 call->rgctx_arg_reg = rgctx_reg;
2243 #endif
2244 #else
2245 NOT_IMPLEMENTED;
2246 #endif
2249 inline static MonoInst*
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2252 MonoCallInst *call;
2253 int rgctx_reg = -1;
2255 if (rgctx_arg) {
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2260 if (rgctx_arg)
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
2265 static MonoInst*
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2267 static MonoInst*
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2270 static MonoInst*
2271 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2272 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2274 gboolean might_be_remote;
2275 gboolean virtual = this != NULL;
2276 gboolean enable_for_aot = TRUE;
2277 int context_used;
2278 MonoCallInst *call;
2280 if (method->string_ctor) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
2283 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2284 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2286 sig = ctor_sig;
2289 might_be_remote = this && sig->hasthis &&
2290 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2291 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2293 context_used = mono_method_check_context_used (method);
2294 if (might_be_remote && context_used) {
2295 MonoInst *addr;
2297 g_assert (cfg->generic_sharing_context);
2299 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2301 return mono_emit_calli (cfg, sig, args, addr);
2304 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2306 if (might_be_remote)
2307 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2308 else
2309 call->method = method;
2310 call->inst.flags |= MONO_INST_HAS_METHOD;
2311 call->inst.inst_left = this;
2313 if (virtual) {
2314 int vtable_reg, slot_reg, this_reg;
2316 this_reg = this->dreg;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2319 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2322 /* Make a call to delegate->invoke_impl */
2323 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2324 call->inst.inst_basereg = this_reg;
2325 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2326 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2328 return (MonoInst*)call;
2330 #endif
2332 if ((!cfg->compile_aot || enable_for_aot) &&
2333 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2334 (MONO_METHOD_IS_FINAL (method) &&
2335 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2336 !(method->klass->marshalbyref && context_used)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2349 if (!method->string_ctor)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2352 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2354 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 return (MonoInst*)call;
2359 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2367 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2368 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2370 return (MonoInst*)call;
2373 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2375 vtable_reg = alloc_preg (cfg);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2377 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2378 slot_reg = -1;
2379 #ifdef MONO_ARCH_HAVE_IMT
2380 if (mono_use_imt) {
2381 guint32 imt_slot = mono_method_get_imt_slot (method);
2382 emit_imt_argument (cfg, call, imt_arg);
2383 slot_reg = vtable_reg;
2384 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2386 #endif
2387 if (slot_reg == -1) {
2388 slot_reg = alloc_preg (cfg);
2389 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2390 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2392 } else {
2393 slot_reg = vtable_reg;
2394 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2395 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2396 #ifdef MONO_ARCH_HAVE_IMT
2397 if (imt_arg) {
2398 g_assert (mono_method_signature (method)->generic_param_count);
2399 emit_imt_argument (cfg, call, imt_arg);
2401 #endif
2404 call->inst.sreg1 = slot_reg;
2405 call->virtual = TRUE;
2408 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 return (MonoInst*)call;
2413 static MonoInst*
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2417 int rgctx_reg = 0;
2418 MonoInst *ins;
2419 MonoCallInst *call;
2421 if (vtable_arg) {
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2428 if (vtable_arg)
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
2431 return ins;
2434 MonoInst*
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2440 MonoInst*
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2442 MonoInst **args)
2444 MonoCallInst *call;
2446 g_assert (sig);
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2449 call->fptr = func;
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
2456 MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2461 g_assert (info);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst*
2472 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2473 MonoMethodSignature *sig, MonoInst **args)
2475 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2476 MonoInst *ins;
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2480 * handle it.
2482 if (cfg->abs_patches == NULL)
2483 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2484 g_hash_table_insert (cfg->abs_patches, ji, ji);
2485 ins = mono_emit_native_call (cfg, ji, sig, args);
2486 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2487 return ins;
2490 static MonoInst*
2491 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2493 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2494 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2495 int widen_op = -1;
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
2501 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2502 case OP_LOADI1_MEMBASE:
2503 widen_op = OP_ICONV_TO_I1;
2504 break;
2505 case OP_LOADU1_MEMBASE:
2506 widen_op = OP_ICONV_TO_U1;
2507 break;
2508 case OP_LOADI2_MEMBASE:
2509 widen_op = OP_ICONV_TO_I2;
2510 break;
2511 case OP_LOADU2_MEMBASE:
2512 widen_op = OP_ICONV_TO_U2;
2513 break;
2514 default:
2515 break;
2518 if (widen_op != -1) {
2519 int dreg = alloc_preg (cfg);
2520 MonoInst *widen;
2522 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2523 widen->type = ins->type;
2524 ins = widen;
2529 return ins;
2532 static MonoMethod*
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2538 if (!memcpy_method)
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
2544 #if HAVE_WRITE_BARRIERS
2546 static void
2547 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2549 MonoClassField *field;
2550 gpointer iter = NULL;
2552 while ((field = mono_class_get_fields (klass, &iter))) {
2553 int foffset;
2555 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2556 continue;
2557 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2558 if (mono_type_is_reference (field->type)) {
2559 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2560 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2561 } else {
2562 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2563 MonoClass *field_class = mono_class_from_mono_type (field->type);
2564 if (field_class->has_references)
2565 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
2570 static gboolean
2571 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, int destreg, int doffset, int srcreg, int soffset, int size, int align)
2573 MonoInst *args[1];
2574 int dest_ptr_reg, tmp_reg;
2575 unsigned need_wb = 0;
2577 if (align == 0)
2578 align = 4;
2580 /*types with references can't have alignment smaller than sizeof(void*) */
2581 if (align < SIZEOF_VOID_P)
2582 return FALSE;
2585 * This value cannot be biger than 32 due to the way we calculate the required wb bitmap.
2586 * FIXME tune this value.
2588 if (size > 5 * SIZEOF_VOID_P)
2589 return FALSE;
2591 create_write_barrier_bitmap (klass, &need_wb, 0);
2593 dest_ptr_reg = alloc_preg (cfg);
2594 tmp_reg = alloc_preg (cfg);
2596 /*tmp = dreg + doffset*/
2597 if (doffset) {
2598 NEW_BIALU_IMM (cfg, args [0], OP_PADD_IMM, dest_ptr_reg, destreg, doffset);
2599 MONO_ADD_INS (cfg->cbb, args [0]);
2600 } else {
2601 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dest_ptr_reg, destreg);
2604 while (size >= SIZEOF_VOID_P) {
2605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, soffset);
2606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2608 if (need_wb & 0x1) {
2609 MonoInst *dummy_use;
2611 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2612 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
2614 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2615 dummy_use->sreg1 = dest_ptr_reg;
2616 MONO_ADD_INS (cfg->cbb, dummy_use);
2620 doffset += SIZEOF_VOID_P;
2621 soffset += SIZEOF_VOID_P;
2622 size -= SIZEOF_VOID_P;
2623 need_wb >>= 1;
2625 //tmp += sizeof (void*)
2626 if (size >= SIZEOF_VOID_P) {
2627 NEW_BIALU_IMM (cfg, args [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2628 MONO_ADD_INS (cfg->cbb, args [0]);
2632 /* Those cannot be references since size < sizeof (void*) */
2633 while (size >= 4) {
2634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, soffset);
2635 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, tmp_reg);
2636 doffset += 4;
2637 soffset += 4;
2638 size -= 4;
2641 while (size >= 2) {
2642 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, soffset);
2643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, tmp_reg);
2644 doffset += 2;
2645 soffset += 2;
2646 size -= 2;
2649 while (size >= 1) {
2650 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, soffset);
2651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, tmp_reg);
2652 doffset += 1;
2653 soffset += 1;
2654 size -= 1;
2657 return TRUE;
2659 #endif
2662 * Emit code to copy a valuetype of type @klass whose address is stored in
2663 * @src->dreg to memory whose address is stored at @dest->dreg.
2665 void
2666 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2668 MonoInst *iargs [3];
2669 int n;
2670 guint32 align = 0;
2671 MonoMethod *memcpy_method;
2673 g_assert (klass);
2675 * This check breaks with spilled vars... need to handle it during verification anyway.
2676 * g_assert (klass && klass == src->klass && klass == dest->klass);
2679 if (native)
2680 n = mono_class_native_size (klass, &align);
2681 else
2682 n = mono_class_value_size (klass, &align);
2684 #if HAVE_WRITE_BARRIERS
2685 /* if native is true there should be no references in the struct */
2686 if (klass->has_references && !native) {
2687 /* Avoid barriers when storing to the stack */
2688 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2689 (dest->opcode == OP_LDADDR))) {
2690 int context_used = 0;
2692 iargs [0] = dest;
2693 iargs [1] = src;
2695 if (cfg->generic_sharing_context)
2696 context_used = mono_class_check_context_used (klass);
2697 /*FIXME can we use the intrinsics version when context_used == TRUE? */
2698 if (context_used) {
2699 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2700 } else if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, dest->dreg, 0, src->dreg, 0, n, align)) {
2701 return;
2702 } else {
2703 if (cfg->compile_aot) {
2704 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2705 } else {
2706 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2707 mono_class_compute_gc_descriptor (klass);
2711 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2712 return;
2715 #endif
2717 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2718 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2719 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2720 } else {
2721 iargs [0] = dest;
2722 iargs [1] = src;
2723 EMIT_NEW_ICONST (cfg, iargs [2], n);
2725 memcpy_method = get_memcpy_method ();
2726 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2730 static MonoMethod*
2731 get_memset_method (void)
2733 static MonoMethod *memset_method = NULL;
2734 if (!memset_method) {
2735 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2736 if (!memset_method)
2737 g_error ("Old corlib found. Install a new one");
2739 return memset_method;
2742 void
2743 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2745 MonoInst *iargs [3];
2746 int n;
2747 guint32 align;
2748 MonoMethod *memset_method;
2750 /* FIXME: Optimize this for the case when dest is an LDADDR */
2752 mono_class_init (klass);
2753 n = mono_class_value_size (klass, &align);
2755 if (n <= sizeof (gpointer) * 5) {
2756 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2758 else {
2759 memset_method = get_memset_method ();
2760 iargs [0] = dest;
2761 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2762 EMIT_NEW_ICONST (cfg, iargs [2], n);
2763 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2767 static MonoInst*
2768 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2770 MonoInst *this = NULL;
2772 g_assert (cfg->generic_sharing_context);
2774 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2775 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2776 !method->klass->valuetype)
2777 EMIT_NEW_ARGLOAD (cfg, this, 0);
2779 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2780 MonoInst *mrgctx_loc, *mrgctx_var;
2782 g_assert (!this);
2783 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2785 mrgctx_loc = mono_get_vtable_var (cfg);
2786 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2788 return mrgctx_var;
2789 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2790 MonoInst *vtable_loc, *vtable_var;
2792 g_assert (!this);
2794 vtable_loc = mono_get_vtable_var (cfg);
2795 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2797 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2798 MonoInst *mrgctx_var = vtable_var;
2799 int vtable_reg;
2801 vtable_reg = alloc_preg (cfg);
2802 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2803 vtable_var->type = STACK_PTR;
2806 return vtable_var;
2807 } else {
2808 MonoInst *ins;
2809 int vtable_reg, res_reg;
2811 vtable_reg = alloc_preg (cfg);
2812 res_reg = alloc_preg (cfg);
2813 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2814 return ins;
2818 static MonoJumpInfoRgctxEntry *
2819 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2821 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2822 res->method = method;
2823 res->in_mrgctx = in_mrgctx;
2824 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2825 res->data->type = patch_type;
2826 res->data->data.target = patch_data;
2827 res->info_type = info_type;
2829 return res;
2832 static inline MonoInst*
2833 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2835 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2838 static MonoInst*
2839 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2840 MonoClass *klass, int rgctx_type)
2842 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2843 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2845 return emit_rgctx_fetch (cfg, rgctx, entry);
2849 * emit_get_rgctx_method:
2851 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2852 * normal constants, else emit a load from the rgctx.
2854 static MonoInst*
2855 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2856 MonoMethod *cmethod, int rgctx_type)
2858 if (!context_used) {
2859 MonoInst *ins;
2861 switch (rgctx_type) {
2862 case MONO_RGCTX_INFO_METHOD:
2863 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2864 return ins;
2865 case MONO_RGCTX_INFO_METHOD_RGCTX:
2866 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2867 return ins;
2868 default:
2869 g_assert_not_reached ();
2871 } else {
2872 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2873 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2875 return emit_rgctx_fetch (cfg, rgctx, entry);
2879 static MonoInst*
2880 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2881 MonoClassField *field, int rgctx_type)
2883 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2884 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2886 return emit_rgctx_fetch (cfg, rgctx, entry);
2890 * On return the caller must check @klass for load errors.
2892 static void
2893 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2895 MonoInst *vtable_arg;
2896 MonoCallInst *call;
2897 int context_used = 0;
2899 if (cfg->generic_sharing_context)
2900 context_used = mono_class_check_context_used (klass);
2902 if (context_used) {
2903 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2904 klass, MONO_RGCTX_INFO_VTABLE);
2905 } else {
2906 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2908 if (!vtable)
2909 return;
2910 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2913 if (COMPILE_LLVM (cfg))
2914 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
2915 else
2916 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2917 #ifdef MONO_ARCH_VTABLE_REG
2918 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2919 cfg->uses_vtable_reg = TRUE;
2920 #else
2921 NOT_IMPLEMENTED;
2922 #endif
2926 * On return the caller must check @array_class for load errors
2928 static void
2929 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2931 int vtable_reg = alloc_preg (cfg);
2932 int context_used = 0;
2934 if (cfg->generic_sharing_context)
2935 context_used = mono_class_check_context_used (array_class);
2937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2939 if (cfg->opt & MONO_OPT_SHARED) {
2940 int class_reg = alloc_preg (cfg);
2941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2942 if (cfg->compile_aot) {
2943 int klass_reg = alloc_preg (cfg);
2944 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2945 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2946 } else {
2947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2949 } else if (context_used) {
2950 MonoInst *vtable_ins;
2952 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2953 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2954 } else {
2955 if (cfg->compile_aot) {
2956 int vt_reg;
2957 MonoVTable *vtable;
2959 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2960 return;
2961 vt_reg = alloc_preg (cfg);
2962 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2963 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2964 } else {
2965 MonoVTable *vtable;
2966 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2967 return;
2968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2972 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2975 static void
2976 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2978 if (mini_get_debug_options ()->better_cast_details) {
2979 int to_klass_reg = alloc_preg (cfg);
2980 int vtable_reg = alloc_preg (cfg);
2981 int klass_reg = alloc_preg (cfg);
2982 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2984 if (!tls_get) {
2985 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2986 exit (1);
2989 MONO_ADD_INS (cfg->cbb, tls_get);
2990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2991 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2993 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2994 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2995 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2999 static void
3000 reset_cast_details (MonoCompile *cfg)
3002 /* Reset the variables holding the cast details */
3003 if (mini_get_debug_options ()->better_cast_details) {
3004 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3006 MONO_ADD_INS (cfg->cbb, tls_get);
3007 /* It is enough to reset the from field */
3008 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3013 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3014 * generic code is generated.
3016 static MonoInst*
3017 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3019 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3021 if (context_used) {
3022 MonoInst *rgctx, *addr;
3024 /* FIXME: What if the class is shared? We might not
3025 have to get the address of the method from the
3026 RGCTX. */
3027 addr = emit_get_rgctx_method (cfg, context_used, method,
3028 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3030 rgctx = emit_get_rgctx (cfg, method, context_used);
3032 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3033 } else {
3034 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR unboxing the object in sp [0] to value type KLASS: check that the
 * object's vtable class is compatible (throwing InvalidCastException
 * otherwise) and return an instruction computing the address of the boxed
 * payload, i.e. obj + sizeof (MonoObject).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load: also acts as the null check on the object. */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: the expected element class comes from the RGCTX. */
		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		/* Record cast details so a failed cast produces a useful message. */
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* Compute the address of the value stored inside the box. */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
3088 * Returns NULL and set the cfg exception on error.
3090 static MonoInst*
3091 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3093 MonoInst *iargs [2];
3094 void *alloc_ftn;
3096 if (context_used) {
3097 MonoInst *data;
3098 int rgctx_info;
3099 MonoInst *iargs [2];
3102 FIXME: we cannot get managed_alloc here because we can't get
3103 the class's vtable (because it's not a closed class)
3105 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3106 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3109 if (cfg->opt & MONO_OPT_SHARED)
3110 rgctx_info = MONO_RGCTX_INFO_KLASS;
3111 else
3112 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3113 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3115 if (cfg->opt & MONO_OPT_SHARED) {
3116 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3117 iargs [1] = data;
3118 alloc_ftn = mono_object_new;
3119 } else {
3120 iargs [0] = data;
3121 alloc_ftn = mono_object_new_specific;
3124 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3127 if (cfg->opt & MONO_OPT_SHARED) {
3128 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3129 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3131 alloc_ftn = mono_object_new;
3132 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3133 /* This happens often in argument checking code, eg. throw new FooException... */
3134 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3135 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3136 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3137 } else {
3138 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3139 MonoMethod *managed_alloc = NULL;
3140 gboolean pass_lw;
3142 if (!vtable) {
3143 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3144 cfg->exception_ptr = klass;
3145 return NULL;
3148 #ifndef MONO_CROSS_COMPILE
3149 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3150 #endif
3152 if (managed_alloc) {
3153 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3154 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3156 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3157 if (pass_lw) {
3158 guint32 lw = vtable->klass->instance_size;
3159 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3160 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3161 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3163 else {
3164 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3168 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3172 * Returns NULL and set the cfg exception on error.
3174 static MonoInst*
3175 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3177 MonoInst *alloc, *ins;
3179 if (mono_class_is_nullable (klass)) {
3180 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3182 if (context_used) {
3183 /* FIXME: What if the class is shared? We might not
3184 have to get the method address from the RGCTX. */
3185 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3186 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3187 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3189 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3190 } else {
3191 return mono_emit_method_call (cfg, method, &val, NULL);
3195 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3196 if (!alloc)
3197 return NULL;
3199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3201 return alloc;
// FIXME: This doesn't work yet (class libs tests fail?)
/* TRUE when the isinst/castclass for KLASS is too involved for the inline
 * fast path and must go through an icall: interfaces, arrays, Nullable<T>,
 * MarshalByRef classes, sealed classes, variant generics, and generic type
 * variables. NOTE: the leading TRUE currently forces the icall path for
 * every class (see the FIXME above). */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR implementing the castclass opcode: checks that SRC (an object
 * reference, null allowed) is an instance of KLASS, throwing
 * InvalidCastException otherwise, and returns SRC unchanged.
 * If CONTEXT_USED is non-zero, shared generic code is generated.
 *
 * Returns NULL and sets the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [2];

		klass_inst = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	/* A null reference always passes castclass. */
	NEW_BBLOCK (cfg, is_null_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed non-array class: a single equality check suffices. */
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the isinst opcode: returns SRC if it is an
 * instance of KLASS (or null), and null otherwise. The result register is
 * pre-loaded with the object so the fall-through paths only have to clear
 * it on failure. If CONTEXT_USED is non-zero, shared generic code is
 * generated.
 *
 * Returns NULL and sets the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			MonoInst *args [2];

			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array cast: check rank first, then compare element classes,
			 * with special cases for the enum/object covariance rules. */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (OP_CISINST).
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* A null reference is never an instance. */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* The interface check failed: only a proxy without type info can
		 * still yield the "cannot determine" result. */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Transparent proxies are checked against their remote class. */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (OP_CCASTCLASS).
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* A null reference always passes the cast. */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Non-proxy that failed the interface check: throw. */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* Transparent proxy: check against its remote class instead. */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR constructing a delegate of class KLASS over METHOD with
 * receiver TARGET, inlining the work of mono_delegate_ctor ().
 * If CONTEXT_USED is non-zero, shared generic code is generated.
 *
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE, 0);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			/* The hash is created lazily; method -> code-slot pointer. */
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3654 static MonoInst*
3655 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3657 MonoJitICallInfo *info;
3659 /* Need to register the icall so it gets an icall wrapper */
3660 info = mono_get_array_new_va_icall (rank);
3662 cfg->flags |= MONO_CFG_HAS_VARARGS;
3664 /* mono_array_new_va () needs a vararg calling convention */
3665 cfg->disable_llvm = TRUE;
3667 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3668 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * method, and keep the variable alive until the exit block. Does nothing
 * if there is no got_var or it was already allocated.
 */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
/* Maximum IL code size (bytes) of an inlining candidate; initialized lazily
 * from the MONO_INLINELIMIT env var, defaulting to INLINE_LENGTH_LIMIT. */
static int inline_limit;
/* TRUE once inline_limit has been computed. */
static gboolean inline_limit_inited;
3708 static gboolean
3709 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3711 MonoMethodHeaderSummary header;
3712 MonoVTable *vtable;
3713 #ifdef MONO_ARCH_SOFT_FLOAT
3714 MonoMethodSignature *sig = mono_method_signature (method);
3715 int i;
3716 #endif
3718 if (cfg->generic_sharing_context)
3719 return FALSE;
3721 if (cfg->inline_depth > 10)
3722 return FALSE;
3724 #ifdef MONO_ARCH_HAVE_LMF_OPS
3725 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3726 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3727 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3728 return TRUE;
3729 #endif
3732 if (!mono_method_get_header_summary (method, &header))
3733 return FALSE;
3735 /*runtime, icall and pinvoke are checked by summary call*/
3736 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3737 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3738 (method->klass->marshalbyref) ||
3739 header.has_clauses)
3740 return FALSE;
3742 /* also consider num_locals? */
3743 /* Do the size check early to avoid creating vtables */
3744 if (!inline_limit_inited) {
3745 if (getenv ("MONO_INLINELIMIT"))
3746 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3747 else
3748 inline_limit = INLINE_LENGTH_LIMIT;
3749 inline_limit_inited = TRUE;
3751 if (header.code_size >= inline_limit)
3752 return FALSE;
3755 * if we can initialize the class of the method right away, we do,
3756 * otherwise we don't allow inlining if the class needs initialization,
3757 * since it would mean inserting a call to mono_runtime_class_init()
3758 * inside the inlined code
3760 if (!(cfg->opt & MONO_OPT_SHARED)) {
3761 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3762 if (cfg->run_cctors && method->klass->has_cctor) {
3763 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3764 if (!method->klass->runtime_info)
3765 /* No vtable created yet */
3766 return FALSE;
3767 vtable = mono_class_vtable (cfg->domain, method->klass);
3768 if (!vtable)
3769 return FALSE;
3770 /* This makes so that inline cannot trigger */
3771 /* .cctors: too many apps depend on them */
3772 /* running with a specific order... */
3773 if (! vtable->initialized)
3774 return FALSE;
3775 mono_runtime_class_init (vtable);
3777 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3778 if (!method->klass->runtime_info)
3779 /* No vtable created yet */
3780 return FALSE;
3781 vtable = mono_class_vtable (cfg->domain, method->klass);
3782 if (!vtable)
3783 return FALSE;
3784 if (!vtable->initialized)
3785 return FALSE;
3787 } else {
3789 * If we're compiling for shared code
3790 * the cctor will need to be run at aot method load time, for example,
3791 * or at the end of the compilation of the inlining method.
3793 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3794 return FALSE;
3798 * CAS - do not inline methods with declarative security
3799 * Note: this has to be before any possible return TRUE;
3801 if (mono_method_has_declsec (method))
3802 return FALSE;
3804 #ifdef MONO_ARCH_SOFT_FLOAT
3805 /* FIXME: */
3806 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3807 return FALSE;
3808 for (i = 0; i < sig->param_count; ++i)
3809 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3810 return FALSE;
3811 #endif
3813 return TRUE;
3816 static gboolean
3817 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3819 if (vtable->initialized && !cfg->compile_aot)
3820 return FALSE;
3822 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3823 return FALSE;
3825 if (!mono_class_needs_cctor_run (vtable->klass, method))
3826 return FALSE;
3828 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3829 /* The initialization is already done before the method is called */
3830 return FALSE;
3832 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS. If BCHECK is TRUE a bounds check is
 * emitted first. Returns the address instruction (STACK_PTR).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* Power-of-two element size: a single LEA computes the address. */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	/* Generic path: base + index * size + offsetof (vector). */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS, including per-
 * dimension lower-bound adjustment and range checks against the array's
 * bounds records. Only built when the target has real mul/div opcodes,
 * since it relies on pointer-sized multiplies.
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* First dimension: realidx = index - lower_bound, then an unsigned
	 * compare against the length also catches negative results. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: the second bounds record follows the first. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
3951 static MonoInst*
3952 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3954 int rank;
3955 MonoInst *addr;
3956 MonoMethod *addr_method;
3957 int element_size;
3959 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3961 if (rank == 1)
3962 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3964 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3965 /* emit_ldelema_2 depends on OP_LMUL */
3966 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3967 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3969 #endif
3971 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3972 addr_method = mono_marshal_get_array_address (rank, element_size);
3973 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3975 return addr;
3978 static MonoBreakPolicy
3979 always_insert_breakpoint (MonoMethod *method)
3981 return MONO_BREAK_POLICY_ALWAYS;
3984 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3987 * mono_set_break_policy:
3988 * policy_callback: the new callback function
3990  * Allow embedders to decide whether to actually obey breakpoint instructions
3991 * (both break IL instructions and Debugger.Break () method calls), for example
3992 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3993 * untrusted or semi-trusted code.
3995 * @policy_callback will be called every time a break point instruction needs to
3996 * be inserted with the method argument being the method that calls Debugger.Break()
3997 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3998 * if it wants the breakpoint to not be effective in the given method.
3999 * #MONO_BREAK_POLICY_ALWAYS is the default.
4001 void
4002 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4004 if (policy_callback)
4005 break_policy_func = policy_callback;
4006 else
4007 break_policy_func = always_insert_breakpoint;
4010 static gboolean
4011 should_insert_brekpoint (MonoMethod *method) {
4012 switch (break_policy_func (method)) {
4013 case MONO_BREAK_POLICY_ALWAYS:
4014 return TRUE;
4015 case MONO_BREAK_POLICY_NEVER:
4016 return FALSE;
4017 case MONO_BREAK_POLICY_ON_DBG:
4018 return mono_debug_using_mono_debugger ();
4019 default:
4020 g_warning ("Incorrect value returned from break policy callback");
4021 return FALSE;
4025 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4026 static MonoInst*
4027 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4029 MonoInst *addr, *store, *load;
4030 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4032 /* the bounds check is already done by the callers */
4033 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4034 if (is_set) {
4035 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4036 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4037 } else {
4038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4041 return store;
4044 static MonoInst*
4045 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4047 MonoInst *ins = NULL;
4048 #ifdef MONO_ARCH_SIMD_INTRINSICS
4049 if (cfg->opt & MONO_OPT_SIMD) {
4050 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4051 if (ins)
4052 return ins;
4054 #endif
4056 return ins;
4059 static MonoInst*
4060 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4062 MonoInst *ins = NULL;
4064 static MonoClass *runtime_helpers_class = NULL;
4065 if (! runtime_helpers_class)
4066 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4067 "System.Runtime.CompilerServices", "RuntimeHelpers");
4069 if (cmethod->klass == mono_defaults.string_class) {
4070 if (strcmp (cmethod->name, "get_Chars") == 0) {
4071 int dreg = alloc_ireg (cfg);
4072 int index_reg = alloc_preg (cfg);
4073 int mult_reg = alloc_preg (cfg);
4074 int add_reg = alloc_preg (cfg);
4076 #if SIZEOF_REGISTER == 8
4077 /* The array reg is 64 bits but the index reg is only 32 */
4078 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4079 #else
4080 index_reg = args [1]->dreg;
4081 #endif
4082 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4084 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4085 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4086 add_reg = ins->dreg;
4087 /* Avoid a warning */
4088 mult_reg = 0;
4089 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4090 add_reg, 0);
4091 #else
4092 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4093 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4094 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4095 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4096 #endif
4097 type_from_op (ins, NULL, NULL);
4098 return ins;
4099 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4100 int dreg = alloc_ireg (cfg);
4101 /* Decompose later to allow more optimizations */
4102 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4103 ins->type = STACK_I4;
4104 cfg->cbb->has_array_access = TRUE;
4105 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4107 return ins;
4108 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4109 int mult_reg = alloc_preg (cfg);
4110 int add_reg = alloc_preg (cfg);
4112 /* The corlib functions check for oob already. */
4113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4114 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4116 return cfg->cbb->last_ins;
4117 } else
4118 return NULL;
4119 } else if (cmethod->klass == mono_defaults.object_class) {
4121 if (strcmp (cmethod->name, "GetType") == 0) {
4122 int dreg = alloc_preg (cfg);
4123 int vt_reg = alloc_preg (cfg);
4124 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4125 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4126 type_from_op (ins, NULL, NULL);
4128 return ins;
4129 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
4130 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4131 int dreg = alloc_ireg (cfg);
4132 int t1 = alloc_ireg (cfg);
4134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4135 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4136 ins->type = STACK_I4;
4138 return ins;
4139 #endif
4140 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4141 MONO_INST_NEW (cfg, ins, OP_NOP);
4142 MONO_ADD_INS (cfg->cbb, ins);
4143 return ins;
4144 } else
4145 return NULL;
4146 } else if (cmethod->klass == mono_defaults.array_class) {
4147 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4148 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4149 if (cmethod->name [0] != 'g')
4150 return NULL;
4152 if (strcmp (cmethod->name, "get_Rank") == 0) {
4153 int dreg = alloc_ireg (cfg);
4154 int vtable_reg = alloc_preg (cfg);
4155 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4156 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4157 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4158 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4159 type_from_op (ins, NULL, NULL);
4161 return ins;
4162 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4163 int dreg = alloc_ireg (cfg);
4165 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4166 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4167 type_from_op (ins, NULL, NULL);
4169 return ins;
4170 } else
4171 return NULL;
4172 } else if (cmethod->klass == runtime_helpers_class) {
4174 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4175 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4176 return ins;
4177 } else
4178 return NULL;
4179 } else if (cmethod->klass == mono_defaults.thread_class) {
4180 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4181 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4182 MONO_ADD_INS (cfg->cbb, ins);
4183 return ins;
4184 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4185 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4186 MONO_ADD_INS (cfg->cbb, ins);
4187 return ins;
4189 } else if (cmethod->klass == mono_defaults.monitor_class) {
4190 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4191 if (strcmp (cmethod->name, "Enter") == 0) {
4192 MonoCallInst *call;
4194 if (COMPILE_LLVM (cfg)) {
4196 * Pass the argument normally, the LLVM backend will handle the
4197 * calling convention problems.
4199 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4200 } else {
4201 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4202 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4203 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4204 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4207 return (MonoInst*)call;
4208 } else if (strcmp (cmethod->name, "Exit") == 0) {
4209 MonoCallInst *call;
4211 if (COMPILE_LLVM (cfg)) {
4212 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4213 } else {
4214 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4215 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4216 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4217 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4220 return (MonoInst*)call;
4222 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4223 MonoMethod *fast_method = NULL;
4225 /* Avoid infinite recursion */
4226 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4227 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4228 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4229 return NULL;
4231 if (strcmp (cmethod->name, "Enter") == 0 ||
4232 strcmp (cmethod->name, "Exit") == 0)
4233 fast_method = mono_monitor_get_fast_path (cmethod);
4234 if (!fast_method)
4235 return NULL;
4237 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4238 #endif
4239 } else if (cmethod->klass->image == mono_defaults.corlib &&
4240 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4241 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4242 ins = NULL;
4244 #if SIZEOF_REGISTER == 8
4245 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4246 /* 64 bit reads are already atomic */
4247 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4248 ins->dreg = mono_alloc_preg (cfg);
4249 ins->inst_basereg = args [0]->dreg;
4250 ins->inst_offset = 0;
4251 MONO_ADD_INS (cfg->cbb, ins);
4253 #endif
4255 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4256 if (strcmp (cmethod->name, "Increment") == 0) {
4257 MonoInst *ins_iconst;
4258 guint32 opcode = 0;
4260 if (fsig->params [0]->type == MONO_TYPE_I4)
4261 opcode = OP_ATOMIC_ADD_NEW_I4;
4262 #if SIZEOF_REGISTER == 8
4263 else if (fsig->params [0]->type == MONO_TYPE_I8)
4264 opcode = OP_ATOMIC_ADD_NEW_I8;
4265 #endif
4266 if (opcode) {
4267 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4268 ins_iconst->inst_c0 = 1;
4269 ins_iconst->dreg = mono_alloc_ireg (cfg);
4270 MONO_ADD_INS (cfg->cbb, ins_iconst);
4272 MONO_INST_NEW (cfg, ins, opcode);
4273 ins->dreg = mono_alloc_ireg (cfg);
4274 ins->inst_basereg = args [0]->dreg;
4275 ins->inst_offset = 0;
4276 ins->sreg2 = ins_iconst->dreg;
4277 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4278 MONO_ADD_INS (cfg->cbb, ins);
4280 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4281 MonoInst *ins_iconst;
4282 guint32 opcode = 0;
4284 if (fsig->params [0]->type == MONO_TYPE_I4)
4285 opcode = OP_ATOMIC_ADD_NEW_I4;
4286 #if SIZEOF_REGISTER == 8
4287 else if (fsig->params [0]->type == MONO_TYPE_I8)
4288 opcode = OP_ATOMIC_ADD_NEW_I8;
4289 #endif
4290 if (opcode) {
4291 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4292 ins_iconst->inst_c0 = -1;
4293 ins_iconst->dreg = mono_alloc_ireg (cfg);
4294 MONO_ADD_INS (cfg->cbb, ins_iconst);
4296 MONO_INST_NEW (cfg, ins, opcode);
4297 ins->dreg = mono_alloc_ireg (cfg);
4298 ins->inst_basereg = args [0]->dreg;
4299 ins->inst_offset = 0;
4300 ins->sreg2 = ins_iconst->dreg;
4301 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4302 MONO_ADD_INS (cfg->cbb, ins);
4304 } else if (strcmp (cmethod->name, "Add") == 0) {
4305 guint32 opcode = 0;
4307 if (fsig->params [0]->type == MONO_TYPE_I4)
4308 opcode = OP_ATOMIC_ADD_NEW_I4;
4309 #if SIZEOF_REGISTER == 8
4310 else if (fsig->params [0]->type == MONO_TYPE_I8)
4311 opcode = OP_ATOMIC_ADD_NEW_I8;
4312 #endif
4314 if (opcode) {
4315 MONO_INST_NEW (cfg, ins, opcode);
4316 ins->dreg = mono_alloc_ireg (cfg);
4317 ins->inst_basereg = args [0]->dreg;
4318 ins->inst_offset = 0;
4319 ins->sreg2 = args [1]->dreg;
4320 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4321 MONO_ADD_INS (cfg->cbb, ins);
4324 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4326 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4327 if (strcmp (cmethod->name, "Exchange") == 0) {
4328 guint32 opcode;
4329 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4331 if (fsig->params [0]->type == MONO_TYPE_I4)
4332 opcode = OP_ATOMIC_EXCHANGE_I4;
4333 #if SIZEOF_REGISTER == 8
4334 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4335 (fsig->params [0]->type == MONO_TYPE_I))
4336 opcode = OP_ATOMIC_EXCHANGE_I8;
4337 #else
4338 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4339 opcode = OP_ATOMIC_EXCHANGE_I4;
4340 #endif
4341 else
4342 return NULL;
4344 MONO_INST_NEW (cfg, ins, opcode);
4345 ins->dreg = mono_alloc_ireg (cfg);
4346 ins->inst_basereg = args [0]->dreg;
4347 ins->inst_offset = 0;
4348 ins->sreg2 = args [1]->dreg;
4349 MONO_ADD_INS (cfg->cbb, ins);
4351 switch (fsig->params [0]->type) {
4352 case MONO_TYPE_I4:
4353 ins->type = STACK_I4;
4354 break;
4355 case MONO_TYPE_I8:
4356 case MONO_TYPE_I:
4357 ins->type = STACK_I8;
4358 break;
4359 case MONO_TYPE_OBJECT:
4360 ins->type = STACK_OBJ;
4361 break;
4362 default:
4363 g_assert_not_reached ();
4366 #if HAVE_WRITE_BARRIERS
4367 if (is_ref) {
4368 MonoInst *dummy_use;
4369 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4370 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4371 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4373 #endif
4375 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4377 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4378 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4379 int size = 0;
4380 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4381 if (fsig->params [1]->type == MONO_TYPE_I4)
4382 size = 4;
4383 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4384 size = sizeof (gpointer);
4385 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4386 size = 8;
4387 if (size == 4) {
4388 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4389 ins->dreg = alloc_ireg (cfg);
4390 ins->sreg1 = args [0]->dreg;
4391 ins->sreg2 = args [1]->dreg;
4392 ins->sreg3 = args [2]->dreg;
4393 ins->type = STACK_I4;
4394 MONO_ADD_INS (cfg->cbb, ins);
4395 } else if (size == 8) {
4396 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4397 ins->dreg = alloc_ireg (cfg);
4398 ins->sreg1 = args [0]->dreg;
4399 ins->sreg2 = args [1]->dreg;
4400 ins->sreg3 = args [2]->dreg;
4401 ins->type = STACK_I8;
4402 MONO_ADD_INS (cfg->cbb, ins);
4403 } else {
4404 /* g_assert_not_reached (); */
4406 #if HAVE_WRITE_BARRIERS
4407 if (is_ref) {
4408 MonoInst *dummy_use;
4409 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4410 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4411 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4413 #endif
4415 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4417 if (ins)
4418 return ins;
4419 } else if (cmethod->klass->image == mono_defaults.corlib) {
4420 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4421 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4422 if (should_insert_brekpoint (cfg->method))
4423 MONO_INST_NEW (cfg, ins, OP_BREAK);
4424 else
4425 MONO_INST_NEW (cfg, ins, OP_NOP);
4426 MONO_ADD_INS (cfg->cbb, ins);
4427 return ins;
4429 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4430 && strcmp (cmethod->klass->name, "Environment") == 0) {
4431 #ifdef TARGET_WIN32
4432 EMIT_NEW_ICONST (cfg, ins, 1);
4433 #else
4434 EMIT_NEW_ICONST (cfg, ins, 0);
4435 #endif
4436 return ins;
4438 } else if (cmethod->klass == mono_defaults.math_class) {
4440 * There is general branches code for Min/Max, but it does not work for
4441 * all inputs:
4442 * http://everything2.com/?node_id=1051618
4446 #ifdef MONO_ARCH_SIMD_INTRINSICS
4447 if (cfg->opt & MONO_OPT_SIMD) {
4448 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4449 if (ins)
4450 return ins;
4452 #endif
4454 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4458 * This entry point could be used later for arbitrary method
4459 * redirection.
4461 inline static MonoInst*
4462 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4463 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4465 if (method->klass == mono_defaults.string_class) {
4466 /* managed string allocation support */
4467 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4468 MonoInst *iargs [2];
4469 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4470 MonoMethod *managed_alloc = NULL;
4472 g_assert (vtable); /*Should not fail since it System.String*/
4473 #ifndef MONO_CROSS_COMPILE
4474 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4475 #endif
4476 if (!managed_alloc)
4477 return NULL;
4478 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4479 iargs [1] = args [0];
4480 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4483 return NULL;
4486 static void
4487 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4489 MonoInst *store, *temp;
4490 int i;
4492 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4493 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4496 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4497 * would be different than the MonoInst's used to represent arguments, and
4498 * the ldelema implementation can't deal with that.
4499 * Solution: When ldelema is used on an inline argument, create a var for
4500 * it, emit ldelema on that var, and emit the saving code below in
4501 * inline_method () if needed.
4503 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4504 cfg->args [i] = temp;
4505 /* This uses cfg->args [i] which is set by the preceeding line */
4506 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4507 store->cil_code = sp [0]->cil_code;
4508 sp++;
4512 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4513 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Returns TRUE if CALLED_METHOD may be inlined according to the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (a full-name prefix filter;
 * unset/empty means "no restriction").
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	matches = (strncmp (full_name, limit, strlen (limit)) == 0);
	g_free (full_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Returns TRUE if CALLER_METHOD may inline calls according to the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (a full-name prefix filter;
 * unset/empty means "no restriction").
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	matches = (strncmp (full_name, limit, strlen (limit)) == 0);
	g_free (full_name);

	return matches;
}
#endif
4575 static int
4576 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4577 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4579 MonoInst *ins, *rvar = NULL;
4580 MonoMethodHeader *cheader;
4581 MonoBasicBlock *ebblock, *sbblock;
4582 int i, costs;
4583 MonoMethod *prev_inlined_method;
4584 MonoInst **prev_locals, **prev_args;
4585 MonoType **prev_arg_types;
4586 guint prev_real_offset;
4587 GHashTable *prev_cbb_hash;
4588 MonoBasicBlock **prev_cil_offset_to_bb;
4589 MonoBasicBlock *prev_cbb;
4590 unsigned char* prev_cil_start;
4591 guint32 prev_cil_offset_to_bb_len;
4592 MonoMethod *prev_current_method;
4593 MonoGenericContext *prev_generic_context;
4594 gboolean ret_var_set, prev_ret_var_set;
4596 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4598 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4599 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4600 return 0;
4601 #endif
4602 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4603 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4604 return 0;
4605 #endif
4607 if (cfg->verbose_level > 2)
4608 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4610 if (!cmethod->inline_info) {
4611 mono_jit_stats.inlineable_methods++;
4612 cmethod->inline_info = 1;
4615 /* allocate local variables */
4616 cheader = mono_method_get_header (cmethod);
4618 if (cheader == NULL || mono_loader_get_last_error ()) {
4619 if (cheader)
4620 mono_metadata_free_mh (cheader);
4621 mono_loader_clear_error ();
4622 return 0;
4625 /* allocate space to store the return value */
4626 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4627 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4631 prev_locals = cfg->locals;
4632 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4633 for (i = 0; i < cheader->num_locals; ++i)
4634 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4636 /* allocate start and end blocks */
4637 /* This is needed so if the inline is aborted, we can clean up */
4638 NEW_BBLOCK (cfg, sbblock);
4639 sbblock->real_offset = real_offset;
4641 NEW_BBLOCK (cfg, ebblock);
4642 ebblock->block_num = cfg->num_bblocks++;
4643 ebblock->real_offset = real_offset;
4645 prev_args = cfg->args;
4646 prev_arg_types = cfg->arg_types;
4647 prev_inlined_method = cfg->inlined_method;
4648 cfg->inlined_method = cmethod;
4649 cfg->ret_var_set = FALSE;
4650 cfg->inline_depth ++;
4651 prev_real_offset = cfg->real_offset;
4652 prev_cbb_hash = cfg->cbb_hash;
4653 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4654 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4655 prev_cil_start = cfg->cil_start;
4656 prev_cbb = cfg->cbb;
4657 prev_current_method = cfg->current_method;
4658 prev_generic_context = cfg->generic_context;
4659 prev_ret_var_set = cfg->ret_var_set;
4661 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4663 ret_var_set = cfg->ret_var_set;
4665 cfg->inlined_method = prev_inlined_method;
4666 cfg->real_offset = prev_real_offset;
4667 cfg->cbb_hash = prev_cbb_hash;
4668 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4669 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4670 cfg->cil_start = prev_cil_start;
4671 cfg->locals = prev_locals;
4672 cfg->args = prev_args;
4673 cfg->arg_types = prev_arg_types;
4674 cfg->current_method = prev_current_method;
4675 cfg->generic_context = prev_generic_context;
4676 cfg->ret_var_set = prev_ret_var_set;
4677 cfg->inline_depth --;
4679 if ((costs >= 0 && costs < 60) || inline_allways) {
4680 if (cfg->verbose_level > 2)
4681 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4683 mono_jit_stats.inlined_methods++;
4685 /* always add some code to avoid block split failures */
4686 MONO_INST_NEW (cfg, ins, OP_NOP);
4687 MONO_ADD_INS (prev_cbb, ins);
4689 prev_cbb->next_bb = sbblock;
4690 link_bblock (cfg, prev_cbb, sbblock);
4693 * Get rid of the begin and end bblocks if possible to aid local
4694 * optimizations.
4696 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4698 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4699 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4701 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4702 MonoBasicBlock *prev = ebblock->in_bb [0];
4703 mono_merge_basic_blocks (cfg, prev, ebblock);
4704 cfg->cbb = prev;
4705 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4706 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4707 cfg->cbb = prev_cbb;
4709 } else {
4710 cfg->cbb = ebblock;
4713 if (rvar) {
4715 * If the inlined method contains only a throw, then the ret var is not
4716 * set, so set it to a dummy value.
4718 if (!ret_var_set) {
4719 static double r8_0 = 0.0;
4721 switch (rvar->type) {
4722 case STACK_I4:
4723 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4724 break;
4725 case STACK_I8:
4726 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4727 break;
4728 case STACK_PTR:
4729 case STACK_MP:
4730 case STACK_OBJ:
4731 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4732 break;
4733 case STACK_R8:
4734 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4735 ins->type = STACK_R8;
4736 ins->inst_p0 = (void*)&r8_0;
4737 ins->dreg = rvar->dreg;
4738 MONO_ADD_INS (cfg->cbb, ins);
4739 break;
4740 case STACK_VTYPE:
4741 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4742 break;
4743 default:
4744 g_assert_not_reached ();
4748 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4749 *sp++ = ins;
4751 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4752 return costs + 1;
4753 } else {
4754 if (cfg->verbose_level > 2)
4755 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4756 cfg->exception_type = MONO_EXCEPTION_NONE;
4757 mono_loader_clear_error ();
4759 /* This gets rid of the newly added bblocks */
4760 cfg->cbb = prev_cbb;
4762 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4763 return 0;
4767 * Some of these comments may well be out-of-date.
4768 * Design decisions: we do a single pass over the IL code (and we do bblock
4769 * splitting/merging in the few cases when it's required: a back jump to an IL
4770 * address that was not already seen as bblock starting point).
4771 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4772 * Complex operations are decomposed in simpler ones right away. We need to let the
4773 * arch-specific code peek and poke inside this process somehow (except when the
4774 * optimizations can take advantage of the full semantic info of coarse opcodes).
4775 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4776 * MonoInst->opcode initially is the IL opcode or some simplification of that
4777 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4778 * opcode with value bigger than OP_LAST.
4779 * At this point the IR can be handed over to an interpreter, a dumb code generator
4780 * or to the optimizing code generator that will translate it to SSA form.
4782 * Profiling directed optimizations.
4783 * We may compile by default with few or no optimizations and instrument the code
4784 * or the user may indicate what methods to optimize the most either in a config file
4785 * or through repeated runs where the compiler applies offline the optimizations to
4786 * each method and then decides if it was worth it.
4789 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4790 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4791 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4792 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4793 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4794 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4795 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4796 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4798 /* offset from br.s -> br like opcodes */
4799 #define BIG_BRANCH_OFFSET 13
4801 static gboolean
4802 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4804 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4806 return b == NULL || b == bb;
4809 static int
4810 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4812 unsigned char *ip = start;
4813 unsigned char *target;
4814 int i;
4815 guint cli_addr;
4816 MonoBasicBlock *bblock;
4817 const MonoOpcode *opcode;
4819 while (ip < end) {
4820 cli_addr = ip - start;
4821 i = mono_opcode_value ((const guint8 **)&ip, end);
4822 if (i < 0)
4823 UNVERIFIED;
4824 opcode = &mono_opcodes [i];
4825 switch (opcode->argument) {
4826 case MonoInlineNone:
4827 ip++;
4828 break;
4829 case MonoInlineString:
4830 case MonoInlineType:
4831 case MonoInlineField:
4832 case MonoInlineMethod:
4833 case MonoInlineTok:
4834 case MonoInlineSig:
4835 case MonoShortInlineR:
4836 case MonoInlineI:
4837 ip += 5;
4838 break;
4839 case MonoInlineVar:
4840 ip += 3;
4841 break;
4842 case MonoShortInlineVar:
4843 case MonoShortInlineI:
4844 ip += 2;
4845 break;
4846 case MonoShortInlineBrTarget:
4847 target = start + cli_addr + 2 + (signed char)ip [1];
4848 GET_BBLOCK (cfg, bblock, target);
4849 ip += 2;
4850 if (ip < end)
4851 GET_BBLOCK (cfg, bblock, ip);
4852 break;
4853 case MonoInlineBrTarget:
4854 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4855 GET_BBLOCK (cfg, bblock, target);
4856 ip += 5;
4857 if (ip < end)
4858 GET_BBLOCK (cfg, bblock, ip);
4859 break;
4860 case MonoInlineSwitch: {
4861 guint32 n = read32 (ip + 1);
4862 guint32 j;
4863 ip += 5;
4864 cli_addr += 5 + 4 * n;
4865 target = start + cli_addr;
4866 GET_BBLOCK (cfg, bblock, target);
4868 for (j = 0; j < n; ++j) {
4869 target = start + cli_addr + (gint32)read32 (ip);
4870 GET_BBLOCK (cfg, bblock, target);
4871 ip += 4;
4873 break;
4875 case MonoInlineR:
4876 case MonoInlineI8:
4877 ip += 9;
4878 break;
4879 default:
4880 g_assert_not_reached ();
4883 if (i == CEE_THROW) {
4884 unsigned char *bb_start = ip - 1;
4886 /* Find the start of the bblock containing the throw */
4887 bblock = NULL;
4888 while ((bb_start >= start) && !bblock) {
4889 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4890 bb_start --;
4892 if (bblock)
4893 bblock->out_of_line = 1;
4896 return 0;
4897 unverified:
4898 *pos = ip;
4899 return 1;
4902 static inline MonoMethod *
4903 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4905 MonoMethod *method;
4907 if (m->wrapper_type != MONO_WRAPPER_NONE)
4908 return mono_method_get_wrapper_data (m, token);
4910 method = mono_get_method_full (m->klass->image, token, klass, context);
4912 return method;
4915 static inline MonoMethod *
4916 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4918 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4920 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4921 return NULL;
4923 return method;
4926 static inline MonoClass*
4927 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4929 MonoClass *klass;
4931 if (method->wrapper_type != MONO_WRAPPER_NONE)
4932 klass = mono_method_get_wrapper_data (method, token);
4933 else
4934 klass = mono_class_get_full (method->klass->image, token, context);
4935 if (klass)
4936 mono_class_init (klass);
4937 return klass;
4941 * Returns TRUE if the JIT should abort inlining because "callee"
4942 * is influenced by security attributes.
4944 static
4945 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4947 guint32 result;
4949 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4950 return TRUE;
4953 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4954 if (result == MONO_JIT_SECURITY_OK)
4955 return FALSE;
4957 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4958 /* Generate code to throw a SecurityException before the actual call/link */
4959 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4960 MonoInst *args [2];
4962 NEW_ICONST (cfg, args [0], 4);
4963 NEW_METHODCONST (cfg, args [1], caller);
4964 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4965 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4966 /* don't hide previous results */
4967 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4968 cfg->exception_data = result;
4969 return TRUE;
4972 return FALSE;
4975 static MonoMethod*
4976 throw_exception (void)
4978 static MonoMethod *method = NULL;
4980 if (!method) {
4981 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4982 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4984 g_assert (method);
4985 return method;
4988 static void
4989 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4991 MonoMethod *thrower = throw_exception ();
4992 MonoInst *args [1];
4994 EMIT_NEW_PCONST (cfg, args [0], ex);
4995 mono_emit_method_call (cfg, thrower, args, NULL);
4999 * Return the original method is a wrapper is specified. We can only access
5000 * the custom attributes from the original method.
5002 static MonoMethod*
5003 get_original_method (MonoMethod *method)
5005 if (method->wrapper_type == MONO_WRAPPER_NONE)
5006 return method;
5008 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5009 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5010 return NULL;
5012 /* in other cases we need to find the original method */
5013 return mono_marshal_method_from_wrapper (method);
5016 static void
5017 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5018 MonoBasicBlock *bblock, unsigned char *ip)
5020 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5021 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5022 if (ex)
5023 emit_throw_exception (cfg, ex);
5026 static void
5027 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5028 MonoBasicBlock *bblock, unsigned char *ip)
5030 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5031 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5032 if (ex)
5033 emit_throw_exception (cfg, ex);
5037 * Check that the IL instructions at ip are the array initialization
5038 * sequence and return the pointer to the data and the size.
5040 static const char*
5041 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5044 * newarr[System.Int32]
5045 * dup
5046 * ldtoken field valuetype ...
5047 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5049 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5050 guint32 token = read32 (ip + 7);
5051 guint32 field_token = read32 (ip + 2);
5052 guint32 field_index = field_token & 0xffffff;
5053 guint32 rva;
5054 const char *data_ptr;
5055 int size = 0;
5056 MonoMethod *cmethod;
5057 MonoClass *dummy_class;
5058 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5059 int dummy_align;
5061 if (!field)
5062 return NULL;
5064 *out_field_token = field_token;
5066 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
5067 if (!cmethod)
5068 return NULL;
5069 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5070 return NULL;
5071 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5072 case MONO_TYPE_BOOLEAN:
5073 case MONO_TYPE_I1:
5074 case MONO_TYPE_U1:
5075 size = 1; break;
5076 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5077 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5078 case MONO_TYPE_CHAR:
5079 case MONO_TYPE_I2:
5080 case MONO_TYPE_U2:
5081 size = 2; break;
5082 case MONO_TYPE_I4:
5083 case MONO_TYPE_U4:
5084 case MONO_TYPE_R4:
5085 size = 4; break;
5086 case MONO_TYPE_R8:
5087 #ifdef ARM_FPU_FPA
5088 return NULL; /* stupid ARM FP swapped format */
5089 #endif
5090 case MONO_TYPE_I8:
5091 case MONO_TYPE_U8:
5092 size = 8; break;
5093 #endif
5094 default:
5095 return NULL;
5097 size *= len;
5098 if (size > mono_type_size (field->type, &dummy_align))
5099 return NULL;
5100 *out_size = size;
5101 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5102 if (!method->klass->image->dynamic) {
5103 field_index = read32 (ip + 2) & 0xffffff;
5104 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5105 data_ptr = mono_image_rva_map (method->klass->image, rva);
5106 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5107 /* for aot code we do the lookup on load */
5108 if (aot && data_ptr)
5109 return GUINT_TO_POINTER (rva);
5110 } else {
5111 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5112 g_assert (!aot);
5113 data_ptr = mono_field_get_data (field);
5115 return data_ptr;
5117 return NULL;
5120 static void
5121 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5123 char *method_fname = mono_method_full_name (method, TRUE);
5124 char *method_code;
5125 MonoMethodHeader *header = mono_method_get_header (method);
5127 if (header->code_size == 0)
5128 method_code = g_strdup ("method body is empty.");
5129 else
5130 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5131 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5132 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5133 g_free (method_fname);
5134 g_free (method_code);
5135 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
5138 static void
5139 set_exception_object (MonoCompile *cfg, MonoException *exception)
5141 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5142 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5143 cfg->exception_ptr = exception;
5146 static gboolean
5147 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5149 MonoType *type;
5151 if (cfg->generic_sharing_context)
5152 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5153 else
5154 type = &klass->byval_arg;
5155 return MONO_TYPE_IS_REFERENCE (type);
5158 static void
5159 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5161 MonoInst *ins;
5162 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5163 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5164 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5165 /* Optimize reg-reg moves away */
5167 * Can't optimize other opcodes, since sp[0] might point to
5168 * the last ins of a decomposed opcode.
5170 sp [0]->dreg = (cfg)->locals [n]->dreg;
5171 } else {
5172 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5177 * ldloca inhibits many optimizations so try to get rid of it in common
5178 * cases.
5180 static inline unsigned char *
5181 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5183 int local, token;
5184 MonoClass *klass;
5186 if (size == 1) {
5187 local = ip [1];
5188 ip += 2;
5189 } else {
5190 local = read16 (ip + 2);
5191 ip += 4;
5194 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5195 gboolean skip = FALSE;
5197 /* From the INITOBJ case */
5198 token = read32 (ip + 2);
5199 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5200 CHECK_TYPELOAD (klass);
5201 if (generic_class_is_reference_type (cfg, klass)) {
5202 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5203 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5204 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5205 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5206 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5207 } else {
5208 skip = TRUE;
5211 if (!skip)
5212 return ip + 6;
5214 load_error:
5215 return NULL;
5218 static gboolean
5219 is_exception_class (MonoClass *class)
5221 while (class) {
5222 if (class == mono_defaults.exception_class)
5223 return TRUE;
5224 class = class->parent;
5226 return FALSE;
5230 * mono_method_to_ir:
5232 * Translate the .net IL into linear IR.
5235 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5236 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5237 guint inline_offset, gboolean is_virtual_call)
5239 MonoError error;
5240 MonoInst *ins, **sp, **stack_start;
5241 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5242 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5243 MonoMethod *cmethod, *method_definition;
5244 MonoInst **arg_array;
5245 MonoMethodHeader *header;
5246 MonoImage *image;
5247 guint32 token, ins_flag;
5248 MonoClass *klass;
5249 MonoClass *constrained_call = NULL;
5250 unsigned char *ip, *end, *target, *err_pos;
5251 static double r8_0 = 0.0;
5252 MonoMethodSignature *sig;
5253 MonoGenericContext *generic_context = NULL;
5254 MonoGenericContainer *generic_container = NULL;
5255 MonoType **param_types;
5256 int i, n, start_new_bblock, dreg;
5257 int num_calls = 0, inline_costs = 0;
5258 int breakpoint_id = 0;
5259 guint num_args;
5260 MonoBoolean security, pinvoke;
5261 MonoSecurityManager* secman = NULL;
5262 MonoDeclSecurityActions actions;
5263 GSList *class_inits = NULL;
5264 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5265 int context_used;
5266 gboolean init_locals, seq_points, skip_dead_blocks;
5268 /* serialization and xdomain stuff may need access to private fields and methods */
5269 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5270 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5271 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5272 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5273 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5274 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5276 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5278 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5279 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5280 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5281 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5283 image = method->klass->image;
5284 header = mono_method_get_header (method);
5285 generic_container = mono_method_get_generic_container (method);
5286 sig = mono_method_signature (method);
5287 num_args = sig->hasthis + sig->param_count;
5288 ip = (unsigned char*)header->code;
5289 cfg->cil_start = ip;
5290 end = ip + header->code_size;
5291 mono_jit_stats.cil_code_size += header->code_size;
5292 init_locals = header->init_locals;
5294 seq_points = cfg->gen_seq_points && cfg->method == method;
5297 * Methods without init_locals set could cause asserts in various passes
5298 * (#497220).
5300 init_locals = TRUE;
5302 method_definition = method;
5303 while (method_definition->is_inflated) {
5304 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5305 method_definition = imethod->declaring;
5308 /* SkipVerification is not allowed if core-clr is enabled */
5309 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5310 dont_verify = TRUE;
5311 dont_verify_stloc = TRUE;
5314 if (!dont_verify && mini_method_verify (cfg, method_definition))
5315 goto exception_exit;
5317 if (mono_debug_using_mono_debugger ())
5318 cfg->keep_cil_nops = TRUE;
5320 if (sig->is_inflated)
5321 generic_context = mono_method_get_context (method);
5322 else if (generic_container)
5323 generic_context = &generic_container->context;
5324 cfg->generic_context = generic_context;
5326 if (!cfg->generic_sharing_context)
5327 g_assert (!sig->has_type_parameters);
5329 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5330 g_assert (method->is_inflated);
5331 g_assert (mono_method_get_context (method)->method_inst);
5333 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5334 g_assert (sig->generic_param_count);
5336 if (cfg->method == method) {
5337 cfg->real_offset = 0;
5338 } else {
5339 cfg->real_offset = inline_offset;
5342 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5343 cfg->cil_offset_to_bb_len = header->code_size;
5345 cfg->current_method = method;
5347 if (cfg->verbose_level > 2)
5348 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5350 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5351 if (sig->hasthis)
5352 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5353 for (n = 0; n < sig->param_count; ++n)
5354 param_types [n + sig->hasthis] = sig->params [n];
5355 cfg->arg_types = param_types;
5357 dont_inline = g_list_prepend (dont_inline, method);
5358 if (cfg->method == method) {
5360 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5361 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5363 /* ENTRY BLOCK */
5364 NEW_BBLOCK (cfg, start_bblock);
5365 cfg->bb_entry = start_bblock;
5366 start_bblock->cil_code = NULL;
5367 start_bblock->cil_length = 0;
5369 /* EXIT BLOCK */
5370 NEW_BBLOCK (cfg, end_bblock);
5371 cfg->bb_exit = end_bblock;
5372 end_bblock->cil_code = NULL;
5373 end_bblock->cil_length = 0;
5374 g_assert (cfg->num_bblocks == 2);
5376 arg_array = cfg->args;
5378 if (header->num_clauses) {
5379 cfg->spvars = g_hash_table_new (NULL, NULL);
5380 cfg->exvars = g_hash_table_new (NULL, NULL);
5382 /* handle exception clauses */
5383 for (i = 0; i < header->num_clauses; ++i) {
5384 MonoBasicBlock *try_bb;
5385 MonoExceptionClause *clause = &header->clauses [i];
5386 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5387 try_bb->real_offset = clause->try_offset;
5388 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5389 tblock->real_offset = clause->handler_offset;
5390 tblock->flags |= BB_EXCEPTION_HANDLER;
5392 link_bblock (cfg, try_bb, tblock);
5394 if (*(ip + clause->handler_offset) == CEE_POP)
5395 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5397 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5398 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5399 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5400 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5401 MONO_ADD_INS (tblock, ins);
5403 /* todo: is a fault block unsafe to optimize? */
5404 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5405 tblock->flags |= BB_EXCEPTION_UNSAFE;
5409 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5410 while (p < end) {
5411 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5413 /* catch and filter blocks get the exception object on the stack */
5414 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5415 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5416 MonoInst *dummy_use;
5418 /* mostly like handle_stack_args (), but just sets the input args */
5419 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5420 tblock->in_scount = 1;
5421 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5422 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5425 * Add a dummy use for the exvar so its liveness info will be
5426 * correct.
5428 cfg->cbb = tblock;
5429 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5431 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5432 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5433 tblock->flags |= BB_EXCEPTION_HANDLER;
5434 tblock->real_offset = clause->data.filter_offset;
5435 tblock->in_scount = 1;
5436 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5437 /* The filter block shares the exvar with the handler block */
5438 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5439 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5440 MONO_ADD_INS (tblock, ins);
5444 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5445 clause->data.catch_class &&
5446 cfg->generic_sharing_context &&
5447 mono_class_check_context_used (clause->data.catch_class)) {
5449 * In shared generic code with catch
5450 * clauses containing type variables
5451 * the exception handling code has to
5452 * be able to get to the rgctx.
5453 * Therefore we have to make sure that
5454 * the vtable/mrgctx argument (for
5455 * static or generic methods) or the
5456 * "this" argument (for non-static
5457 * methods) are live.
5459 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5460 mini_method_get_context (method)->method_inst ||
5461 method->klass->valuetype) {
5462 mono_get_vtable_var (cfg);
5463 } else {
5464 MonoInst *dummy_use;
5466 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5470 } else {
5471 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5472 cfg->cbb = start_bblock;
5473 cfg->args = arg_array;
5474 mono_save_args (cfg, sig, inline_args);
5477 /* FIRST CODE BLOCK */
5478 NEW_BBLOCK (cfg, bblock);
5479 bblock->cil_code = ip;
5480 cfg->cbb = bblock;
5481 cfg->ip = ip;
5483 ADD_BBLOCK (cfg, bblock);
5485 if (cfg->method == method) {
5486 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5487 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5488 MONO_INST_NEW (cfg, ins, OP_BREAK);
5489 MONO_ADD_INS (bblock, ins);
5493 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5494 secman = mono_security_manager_get_methods ();
5496 security = (secman && mono_method_has_declsec (method));
5497 /* at this point having security doesn't mean we have any code to generate */
5498 if (security && (cfg->method == method)) {
5499 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5500 * And we do not want to enter the next section (with allocation) if we
5501 * have nothing to generate */
5502 security = mono_declsec_get_demands (method, &actions);
5505 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5506 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5507 if (pinvoke) {
5508 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5509 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5510 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5512 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5513 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5514 pinvoke = FALSE;
5516 if (custom)
5517 mono_custom_attrs_free (custom);
5519 if (pinvoke) {
5520 custom = mono_custom_attrs_from_class (wrapped->klass);
5521 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5522 pinvoke = FALSE;
5524 if (custom)
5525 mono_custom_attrs_free (custom);
5527 } else {
5528 /* not a P/Invoke after all */
5529 pinvoke = FALSE;
5533 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5534 /* we use a separate basic block for the initialization code */
5535 NEW_BBLOCK (cfg, init_localsbb);
5536 cfg->bb_init = init_localsbb;
5537 init_localsbb->real_offset = cfg->real_offset;
5538 start_bblock->next_bb = init_localsbb;
5539 init_localsbb->next_bb = bblock;
5540 link_bblock (cfg, start_bblock, init_localsbb);
5541 link_bblock (cfg, init_localsbb, bblock);
5543 cfg->cbb = init_localsbb;
5544 } else {
5545 start_bblock->next_bb = bblock;
5546 link_bblock (cfg, start_bblock, bblock);
5549 /* at this point we know, if security is TRUE, that some code needs to be generated */
5550 if (security && (cfg->method == method)) {
5551 MonoInst *args [2];
5553 mono_jit_stats.cas_demand_generation++;
5555 if (actions.demand.blob) {
5556 /* Add code for SecurityAction.Demand */
5557 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5558 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5559 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5560 mono_emit_method_call (cfg, secman->demand, args, NULL);
5562 if (actions.noncasdemand.blob) {
5563 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5564 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5565 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5566 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5567 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5568 mono_emit_method_call (cfg, secman->demand, args, NULL);
5570 if (actions.demandchoice.blob) {
5571 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5572 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5573 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5574 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5575 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5579 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5580 if (pinvoke) {
5581 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5584 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5585 /* check if this is native code, e.g. an icall or a p/invoke */
5586 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5587 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5588 if (wrapped) {
5589 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5590 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5592 /* if this ia a native call then it can only be JITted from platform code */
5593 if ((icall || pinvk) && method->klass && method->klass->image) {
5594 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5595 MonoException *ex = icall ? mono_get_exception_security () :
5596 mono_get_exception_method_access ();
5597 emit_throw_exception (cfg, ex);
5604 if (header->code_size == 0)
5605 UNVERIFIED;
5607 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5608 ip = err_pos;
5609 UNVERIFIED;
5612 if (cfg->method == method)
5613 mono_debug_init_method (cfg, bblock, breakpoint_id);
5615 for (n = 0; n < header->num_locals; ++n) {
5616 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5617 UNVERIFIED;
5619 class_inits = NULL;
5621 /* We force the vtable variable here for all shared methods
5622 for the possibility that they might show up in a stack
5623 trace where their exact instantiation is needed. */
5624 if (cfg->generic_sharing_context && method == cfg->method) {
5625 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5626 mini_method_get_context (method)->method_inst ||
5627 method->klass->valuetype) {
5628 mono_get_vtable_var (cfg);
5629 } else {
5630 /* FIXME: Is there a better way to do this?
5631 We need the variable live for the duration
5632 of the whole method. */
5633 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5637 /* add a check for this != NULL to inlined methods */
5638 if (is_virtual_call) {
5639 MonoInst *arg_ins;
5641 NEW_ARGLOAD (cfg, arg_ins, 0);
5642 MONO_ADD_INS (cfg->cbb, arg_ins);
5643 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5646 skip_dead_blocks = !dont_verify;
5647 if (skip_dead_blocks) {
5648 original_bb = bb = mono_basic_block_split (method, &error);
5649 if (!mono_error_ok (&error)) {
5650 mono_error_cleanup (&error);
5651 UNVERIFIED;
5653 g_assert (bb);
5656 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5657 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5659 ins_flag = 0;
5660 start_new_bblock = 0;
5661 cfg->cbb = bblock;
5662 while (ip < end) {
5663 if (cfg->method == method)
5664 cfg->real_offset = ip - header->code;
5665 else
5666 cfg->real_offset = inline_offset;
5667 cfg->ip = ip;
5669 context_used = 0;
5671 if (start_new_bblock) {
5672 bblock->cil_length = ip - bblock->cil_code;
5673 if (start_new_bblock == 2) {
5674 g_assert (ip == tblock->cil_code);
5675 } else {
5676 GET_BBLOCK (cfg, tblock, ip);
5678 bblock->next_bb = tblock;
5679 bblock = tblock;
5680 cfg->cbb = bblock;
5681 start_new_bblock = 0;
5682 for (i = 0; i < bblock->in_scount; ++i) {
5683 if (cfg->verbose_level > 3)
5684 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5685 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5686 *sp++ = ins;
5688 if (class_inits)
5689 g_slist_free (class_inits);
5690 class_inits = NULL;
5691 } else {
5692 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5693 link_bblock (cfg, bblock, tblock);
5694 if (sp != stack_start) {
5695 handle_stack_args (cfg, stack_start, sp - stack_start);
5696 sp = stack_start;
5697 CHECK_UNVERIFIABLE (cfg);
5699 bblock->next_bb = tblock;
5700 bblock = tblock;
5701 cfg->cbb = bblock;
5702 for (i = 0; i < bblock->in_scount; ++i) {
5703 if (cfg->verbose_level > 3)
5704 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5705 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5706 *sp++ = ins;
5708 g_slist_free (class_inits);
5709 class_inits = NULL;
5713 if (skip_dead_blocks) {
5714 int ip_offset = ip - header->code;
5716 if (ip_offset == bb->end)
5717 bb = bb->next;
5719 if (bb->dead) {
5720 int op_size = mono_opcode_size (ip, end);
5721 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5723 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5725 if (ip_offset + op_size == bb->end) {
5726 MONO_INST_NEW (cfg, ins, OP_NOP);
5727 MONO_ADD_INS (bblock, ins);
5728 start_new_bblock = 1;
5731 ip += op_size;
5732 continue;
5736 * Sequence points are points where the debugger can place a breakpoint.
5737 * Currently, we generate these automatically at points where the IL
5738 * stack is empty.
5740 if (seq_points && sp == stack_start) {
5741 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5742 MONO_ADD_INS (cfg->cbb, ins);
5745 bblock->real_offset = cfg->real_offset;
5747 if ((cfg->method == method) && cfg->coverage_info) {
5748 guint32 cil_offset = ip - header->code;
5749 cfg->coverage_info->data [cil_offset].cil_code = ip;
5751 /* TODO: Use an increment here */
5752 #if defined(TARGET_X86)
5753 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5754 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5755 ins->inst_imm = 1;
5756 MONO_ADD_INS (cfg->cbb, ins);
5757 #else
5758 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5759 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5760 #endif
5763 if (cfg->verbose_level > 3)
5764 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5766 switch (*ip) {
5767 case CEE_NOP:
5768 if (cfg->keep_cil_nops)
5769 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5770 else
5771 MONO_INST_NEW (cfg, ins, OP_NOP);
5772 ip++;
5773 MONO_ADD_INS (bblock, ins);
5774 break;
5775 case CEE_BREAK:
5776 if (should_insert_brekpoint (cfg->method))
5777 MONO_INST_NEW (cfg, ins, OP_BREAK);
5778 else
5779 MONO_INST_NEW (cfg, ins, OP_NOP);
5780 ip++;
5781 MONO_ADD_INS (bblock, ins);
5782 break;
5783 case CEE_LDARG_0:
5784 case CEE_LDARG_1:
5785 case CEE_LDARG_2:
5786 case CEE_LDARG_3:
5787 CHECK_STACK_OVF (1);
5788 n = (*ip)-CEE_LDARG_0;
5789 CHECK_ARG (n);
5790 EMIT_NEW_ARGLOAD (cfg, ins, n);
5791 ip++;
5792 *sp++ = ins;
5793 break;
5794 case CEE_LDLOC_0:
5795 case CEE_LDLOC_1:
5796 case CEE_LDLOC_2:
5797 case CEE_LDLOC_3:
5798 CHECK_STACK_OVF (1);
5799 n = (*ip)-CEE_LDLOC_0;
5800 CHECK_LOCAL (n);
5801 EMIT_NEW_LOCLOAD (cfg, ins, n);
5802 ip++;
5803 *sp++ = ins;
5804 break;
5805 case CEE_STLOC_0:
5806 case CEE_STLOC_1:
5807 case CEE_STLOC_2:
5808 case CEE_STLOC_3: {
5809 CHECK_STACK (1);
5810 n = (*ip)-CEE_STLOC_0;
5811 CHECK_LOCAL (n);
5812 --sp;
5813 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5814 UNVERIFIED;
5815 emit_stloc_ir (cfg, sp, header, n);
5816 ++ip;
5817 inline_costs += 1;
5818 break;
5820 case CEE_LDARG_S:
5821 CHECK_OPSIZE (2);
5822 CHECK_STACK_OVF (1);
5823 n = ip [1];
5824 CHECK_ARG (n);
5825 EMIT_NEW_ARGLOAD (cfg, ins, n);
5826 *sp++ = ins;
5827 ip += 2;
5828 break;
5829 case CEE_LDARGA_S:
5830 CHECK_OPSIZE (2);
5831 CHECK_STACK_OVF (1);
5832 n = ip [1];
5833 CHECK_ARG (n);
5834 NEW_ARGLOADA (cfg, ins, n);
5835 MONO_ADD_INS (cfg->cbb, ins);
5836 *sp++ = ins;
5837 ip += 2;
5838 break;
5839 case CEE_STARG_S:
5840 CHECK_OPSIZE (2);
5841 CHECK_STACK (1);
5842 --sp;
5843 n = ip [1];
5844 CHECK_ARG (n);
5845 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5846 UNVERIFIED;
5847 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5848 ip += 2;
5849 break;
5850 case CEE_LDLOC_S:
5851 CHECK_OPSIZE (2);
5852 CHECK_STACK_OVF (1);
5853 n = ip [1];
5854 CHECK_LOCAL (n);
5855 EMIT_NEW_LOCLOAD (cfg, ins, n);
5856 *sp++ = ins;
5857 ip += 2;
5858 break;
5859 case CEE_LDLOCA_S: {
5860 unsigned char *tmp_ip;
5861 CHECK_OPSIZE (2);
5862 CHECK_STACK_OVF (1);
5863 CHECK_LOCAL (ip [1]);
5865 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5866 ip = tmp_ip;
5867 inline_costs += 1;
5868 break;
5871 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5872 *sp++ = ins;
5873 ip += 2;
5874 break;
5876 case CEE_STLOC_S:
5877 CHECK_OPSIZE (2);
5878 CHECK_STACK (1);
5879 --sp;
5880 CHECK_LOCAL (ip [1]);
5881 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5882 UNVERIFIED;
5883 emit_stloc_ir (cfg, sp, header, ip [1]);
5884 ip += 2;
5885 inline_costs += 1;
5886 break;
5887 case CEE_LDNULL:
5888 CHECK_STACK_OVF (1);
5889 EMIT_NEW_PCONST (cfg, ins, NULL);
5890 ins->type = STACK_OBJ;
5891 ++ip;
5892 *sp++ = ins;
5893 break;
5894 case CEE_LDC_I4_M1:
5895 CHECK_STACK_OVF (1);
5896 EMIT_NEW_ICONST (cfg, ins, -1);
5897 ++ip;
5898 *sp++ = ins;
5899 break;
5900 case CEE_LDC_I4_0:
5901 case CEE_LDC_I4_1:
5902 case CEE_LDC_I4_2:
5903 case CEE_LDC_I4_3:
5904 case CEE_LDC_I4_4:
5905 case CEE_LDC_I4_5:
5906 case CEE_LDC_I4_6:
5907 case CEE_LDC_I4_7:
5908 case CEE_LDC_I4_8:
5909 CHECK_STACK_OVF (1);
5910 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5911 ++ip;
5912 *sp++ = ins;
5913 break;
5914 case CEE_LDC_I4_S:
5915 CHECK_OPSIZE (2);
5916 CHECK_STACK_OVF (1);
5917 ++ip;
5918 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5919 ++ip;
5920 *sp++ = ins;
5921 break;
5922 case CEE_LDC_I4:
5923 CHECK_OPSIZE (5);
5924 CHECK_STACK_OVF (1);
5925 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5926 ip += 5;
5927 *sp++ = ins;
5928 break;
5929 case CEE_LDC_I8:
5930 CHECK_OPSIZE (9);
5931 CHECK_STACK_OVF (1);
5932 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5933 ins->type = STACK_I8;
5934 ins->dreg = alloc_dreg (cfg, STACK_I8);
5935 ++ip;
5936 ins->inst_l = (gint64)read64 (ip);
5937 MONO_ADD_INS (bblock, ins);
5938 ip += 8;
5939 *sp++ = ins;
5940 break;
5941 case CEE_LDC_R4: {
5942 float *f;
5943 gboolean use_aotconst = FALSE;
5945 #ifdef TARGET_POWERPC
5946 /* FIXME: Clean this up */
5947 if (cfg->compile_aot)
5948 use_aotconst = TRUE;
5949 #endif
5951 /* FIXME: we should really allocate this only late in the compilation process */
5952 f = mono_domain_alloc (cfg->domain, sizeof (float));
5953 CHECK_OPSIZE (5);
5954 CHECK_STACK_OVF (1);
5956 if (use_aotconst) {
5957 MonoInst *cons;
5958 int dreg;
5960 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5962 dreg = alloc_freg (cfg);
5963 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5964 ins->type = STACK_R8;
5965 } else {
5966 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5967 ins->type = STACK_R8;
5968 ins->dreg = alloc_dreg (cfg, STACK_R8);
5969 ins->inst_p0 = f;
5970 MONO_ADD_INS (bblock, ins);
5972 ++ip;
5973 readr4 (ip, f);
5974 ip += 4;
5975 *sp++ = ins;
5976 break;
5978 case CEE_LDC_R8: {
5979 double *d;
5980 gboolean use_aotconst = FALSE;
5982 #ifdef TARGET_POWERPC
5983 /* FIXME: Clean this up */
5984 if (cfg->compile_aot)
5985 use_aotconst = TRUE;
5986 #endif
5988 /* FIXME: we should really allocate this only late in the compilation process */
5989 d = mono_domain_alloc (cfg->domain, sizeof (double));
5990 CHECK_OPSIZE (9);
5991 CHECK_STACK_OVF (1);
5993 if (use_aotconst) {
5994 MonoInst *cons;
5995 int dreg;
5997 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5999 dreg = alloc_freg (cfg);
6000 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6001 ins->type = STACK_R8;
6002 } else {
6003 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6004 ins->type = STACK_R8;
6005 ins->dreg = alloc_dreg (cfg, STACK_R8);
6006 ins->inst_p0 = d;
6007 MONO_ADD_INS (bblock, ins);
6009 ++ip;
6010 readr8 (ip, d);
6011 ip += 8;
6012 *sp++ = ins;
6013 break;
6015 case CEE_DUP: {
6016 MonoInst *temp, *store;
6017 CHECK_STACK (1);
6018 CHECK_STACK_OVF (1);
6019 sp--;
6020 ins = *sp;
6022 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6023 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6025 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6026 *sp++ = ins;
6028 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6029 *sp++ = ins;
6031 ++ip;
6032 inline_costs += 2;
6033 break;
6035 case CEE_POP:
6036 CHECK_STACK (1);
6037 ip++;
6038 --sp;
6040 #ifdef TARGET_X86
6041 if (sp [0]->type == STACK_R8)
6042 /* we need to pop the value from the x86 FP stack */
6043 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6044 #endif
6045 break;
6046 case CEE_JMP: {
6047 MonoCallInst *call;
6049 INLINE_FAILURE;
6051 CHECK_OPSIZE (5);
6052 if (stack_start != sp)
6053 UNVERIFIED;
6054 token = read32 (ip + 1);
6055 /* FIXME: check the signature matches */
6056 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6058 if (!cmethod)
6059 goto load_error;
6061 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6062 GENERIC_SHARING_FAILURE (CEE_JMP);
6064 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6065 CHECK_CFG_EXCEPTION;
6067 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6069 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6070 int i, n;
6072 /* Handle tail calls similarly to calls */
6073 n = fsig->param_count + fsig->hasthis;
6075 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6076 call->method = cmethod;
6077 call->tail_call = TRUE;
6078 call->signature = mono_method_signature (cmethod);
6079 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6080 call->inst.inst_p0 = cmethod;
6081 for (i = 0; i < n; ++i)
6082 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6084 mono_arch_emit_call (cfg, call);
6085 MONO_ADD_INS (bblock, (MonoInst*)call);
6087 #else
6088 for (i = 0; i < num_args; ++i)
6089 /* Prevent arguments from being optimized away */
6090 arg_array [i]->flags |= MONO_INST_VOLATILE;
6092 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6093 ins = (MonoInst*)call;
6094 ins->inst_p0 = cmethod;
6095 MONO_ADD_INS (bblock, ins);
6096 #endif
6098 ip += 5;
6099 start_new_bblock = 1;
6100 break;
6102 case CEE_CALLI:
6103 case CEE_CALL:
6104 case CEE_CALLVIRT: {
6105 MonoInst *addr = NULL;
6106 MonoMethodSignature *fsig = NULL;
6107 int array_rank = 0;
6108 int virtual = *ip == CEE_CALLVIRT;
6109 int calli = *ip == CEE_CALLI;
6110 gboolean pass_imt_from_rgctx = FALSE;
6111 MonoInst *imt_arg = NULL;
6112 gboolean pass_vtable = FALSE;
6113 gboolean pass_mrgctx = FALSE;
6114 MonoInst *vtable_arg = NULL;
6115 gboolean check_this = FALSE;
6116 gboolean supported_tail_call = FALSE;
6118 CHECK_OPSIZE (5);
6119 token = read32 (ip + 1);
6121 if (calli) {
6122 cmethod = NULL;
6123 CHECK_STACK (1);
6124 --sp;
6125 addr = *sp;
6126 if (method->wrapper_type != MONO_WRAPPER_NONE)
6127 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6128 else
6129 fsig = mono_metadata_parse_signature (image, token);
6131 n = fsig->param_count + fsig->hasthis;
6133 if (method->dynamic && fsig->pinvoke) {
6134 MonoInst *args [3];
6137 * This is a call through a function pointer using a pinvoke
6138 * signature. Have to create a wrapper and call that instead.
6139 * FIXME: This is very slow, need to create a wrapper at JIT time
6140 * instead based on the signature.
6142 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6143 EMIT_NEW_PCONST (cfg, args [1], fsig);
6144 args [2] = addr;
6145 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6147 } else {
6148 MonoMethod *cil_method;
6150 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6151 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6152 cil_method = cmethod;
6153 } else if (constrained_call) {
6154 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6156 * This is needed since get_method_constrained can't find
6157 * the method in klass representing a type var.
6158 * The type var is guaranteed to be a reference type in this
6159 * case.
6161 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6162 cil_method = cmethod;
6163 g_assert (!cmethod->klass->valuetype);
6164 } else {
6165 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6167 } else {
6168 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6169 cil_method = cmethod;
6172 if (!cmethod)
6173 goto load_error;
6174 if (!dont_verify && !cfg->skip_visibility) {
6175 MonoMethod *target_method = cil_method;
6176 if (method->is_inflated) {
6177 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6179 if (!mono_method_can_access_method (method_definition, target_method) &&
6180 !mono_method_can_access_method (method, cil_method))
6181 METHOD_ACCESS_FAILURE;
6184 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6185 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6187 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6188 /* MS.NET seems to silently convert this to a callvirt */
6189 virtual = 1;
6193 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6194 * converts to a callvirt.
6196 * tests/bug-515884.il is an example of this behavior
6198 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6199 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6200 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6201 virtual = 1;
6204 if (!cmethod->klass->inited)
6205 if (!mono_class_init (cmethod->klass))
6206 goto load_error;
6208 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6209 mini_class_is_system_array (cmethod->klass)) {
6210 array_rank = cmethod->klass->rank;
6211 fsig = mono_method_signature (cmethod);
6212 } else {
6213 fsig = mono_method_signature (cmethod);
6215 if (!fsig)
6216 goto load_error;
6218 if (fsig->pinvoke) {
6219 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6220 check_for_pending_exc, FALSE);
6221 fsig = mono_method_signature (wrapper);
6222 } else if (constrained_call) {
6223 fsig = mono_method_signature (cmethod);
6224 } else {
6225 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6229 mono_save_token_info (cfg, image, token, cil_method);
6231 n = fsig->param_count + fsig->hasthis;
6233 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6234 if (check_linkdemand (cfg, method, cmethod))
6235 INLINE_FAILURE;
6236 CHECK_CFG_EXCEPTION;
6239 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6240 g_assert_not_reached ();
6243 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6244 UNVERIFIED;
6246 if (!cfg->generic_sharing_context && cmethod)
6247 g_assert (!mono_method_check_context_used (cmethod));
6249 CHECK_STACK (n);
6251 //g_assert (!virtual || fsig->hasthis);
6253 sp -= n;
6255 if (constrained_call) {
6257 * We have the `constrained.' prefix opcode.
6259 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6261 * The type parameter is instantiated as a valuetype,
6262 * but that type doesn't override the method we're
6263 * calling, so we need to box `this'.
6265 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6266 ins->klass = constrained_call;
6267 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6268 CHECK_CFG_EXCEPTION;
6269 } else if (!constrained_call->valuetype) {
6270 int dreg = alloc_preg (cfg);
6273 * The type parameter is instantiated as a reference
6274 * type. We have a managed pointer on the stack, so
6275 * we need to dereference it here.
6277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6278 ins->type = STACK_OBJ;
6279 sp [0] = ins;
6280 } else if (cmethod->klass->valuetype)
6281 virtual = 0;
6282 constrained_call = NULL;
6285 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6286 UNVERIFIED;
6289 * If the callee is a shared method, then its static cctor
6290 * might not get called after the call was patched.
6292 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6293 emit_generic_class_init (cfg, cmethod->klass);
6294 CHECK_TYPELOAD (cmethod->klass);
6297 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6298 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6299 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6300 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6301 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6304 * Pass vtable iff target method might
6305 * be shared, which means that sharing
6306 * is enabled for its class and its
6307 * context is sharable (and it's not a
6308 * generic method).
6310 if (sharing_enabled && context_sharable &&
6311 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6312 pass_vtable = TRUE;
6315 if (cmethod && mini_method_get_context (cmethod) &&
6316 mini_method_get_context (cmethod)->method_inst) {
6317 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6318 MonoGenericContext *context = mini_method_get_context (cmethod);
6319 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6321 g_assert (!pass_vtable);
6323 if (sharing_enabled && context_sharable)
6324 pass_mrgctx = TRUE;
6327 if (cfg->generic_sharing_context && cmethod) {
6328 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6330 context_used = mono_method_check_context_used (cmethod);
6332 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6333 /* Generic method interface
6334 calls are resolved via a
6335 helper function and don't
6336 need an imt. */
6337 if (!cmethod_context || !cmethod_context->method_inst)
6338 pass_imt_from_rgctx = TRUE;
6342 * If a shared method calls another
6343 * shared method then the caller must
6344 * have a generic sharing context
6345 * because the magic trampoline
6346 * requires it. FIXME: We shouldn't
6347 * have to force the vtable/mrgctx
6348 * variable here. Instead there
6349 * should be a flag in the cfg to
6350 * request a generic sharing context.
6352 if (context_used &&
6353 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6354 mono_get_vtable_var (cfg);
6357 if (pass_vtable) {
6358 if (context_used) {
6359 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6360 } else {
6361 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6363 CHECK_TYPELOAD (cmethod->klass);
6364 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6368 if (pass_mrgctx) {
6369 g_assert (!vtable_arg);
6371 if (!cfg->compile_aot) {
6373 * emit_get_rgctx_method () calls mono_class_vtable () so check
6374 * for type load errors before.
6376 mono_class_setup_vtable (cmethod->klass);
6377 CHECK_TYPELOAD (cmethod->klass);
6380 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6382 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6383 MONO_METHOD_IS_FINAL (cmethod)) {
6384 if (virtual)
6385 check_this = TRUE;
6386 virtual = 0;
6390 if (pass_imt_from_rgctx) {
6391 g_assert (!pass_vtable);
6392 g_assert (cmethod);
6394 imt_arg = emit_get_rgctx_method (cfg, context_used,
6395 cmethod, MONO_RGCTX_INFO_METHOD);
6398 if (check_this)
6399 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6401 /* Calling virtual generic methods */
6402 if (cmethod && virtual &&
6403 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6404 !(MONO_METHOD_IS_FINAL (cmethod) &&
6405 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6406 mono_method_signature (cmethod)->generic_param_count) {
6407 MonoInst *this_temp, *this_arg_temp, *store;
6408 MonoInst *iargs [4];
6410 g_assert (mono_method_signature (cmethod)->is_inflated);
6412 /* Prevent inlining of methods that contain indirect calls */
6413 INLINE_FAILURE;
6415 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6416 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6417 g_assert (!imt_arg);
6418 if (!context_used)
6419 g_assert (cmethod->is_inflated);
6420 imt_arg = emit_get_rgctx_method (cfg, context_used,
6421 cmethod, MONO_RGCTX_INFO_METHOD);
6422 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6423 } else
6424 #endif
6426 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6427 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6428 MONO_ADD_INS (bblock, store);
6430 /* FIXME: This should be a managed pointer */
6431 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6433 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6434 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6435 cmethod, MONO_RGCTX_INFO_METHOD);
6436 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6437 addr = mono_emit_jit_icall (cfg,
6438 mono_helper_compile_generic_method, iargs);
6440 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6442 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6445 if (!MONO_TYPE_IS_VOID (fsig->ret))
6446 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6448 CHECK_CFG_EXCEPTION;
6450 ip += 5;
6451 ins_flag = 0;
6452 break;
6455 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6456 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6457 #else
6458 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6459 #endif
6461 /* Tail prefix */
6462 /* FIXME: runtime generic context pointer for jumps? */
6463 /* FIXME: handle this for generic sharing eventually */
6464 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6465 MonoCallInst *call;
6467 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6468 INLINE_FAILURE;
6470 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6471 /* Handle tail calls similarly to calls */
6472 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6473 #else
6474 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6475 call->tail_call = TRUE;
6476 call->method = cmethod;
6477 call->signature = mono_method_signature (cmethod);
6480 * We implement tail calls by storing the actual arguments into the
6481 * argument variables, then emitting a CEE_JMP.
6483 for (i = 0; i < n; ++i) {
6484 /* Prevent argument from being register allocated */
6485 arg_array [i]->flags |= MONO_INST_VOLATILE;
6486 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6488 #endif
6490 ins = (MonoInst*)call;
6491 ins->inst_p0 = cmethod;
6492 ins->inst_p1 = arg_array [0];
6493 MONO_ADD_INS (bblock, ins);
6494 link_bblock (cfg, bblock, end_bblock);
6495 start_new_bblock = 1;
6497 CHECK_CFG_EXCEPTION;
6499 /* skip CEE_RET as well */
6500 ip += 6;
6501 ins_flag = 0;
6502 break;
6505 /* Conversion to a JIT intrinsic */
6506 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6508 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6509 *sp = ins;
6510 sp++;
6513 CHECK_CFG_EXCEPTION;
6515 ip += 5;
6516 ins_flag = 0;
6517 break;
6520 /* Inlining */
6521 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6522 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6523 mono_method_check_inlining (cfg, cmethod) &&
6524 !g_list_find (dont_inline, cmethod)) {
6525 int costs;
6526 gboolean allways = FALSE;
6528 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6529 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6530 /* Prevent inlining of methods that call wrappers */
6531 INLINE_FAILURE;
6532 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6533 allways = TRUE;
6536 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6537 ip += 5;
6538 cfg->real_offset += 5;
6539 bblock = cfg->cbb;
6541 if (!MONO_TYPE_IS_VOID (fsig->ret))
6542 /* *sp is already set by inline_method */
6543 sp++;
6545 inline_costs += costs;
6546 ins_flag = 0;
6547 break;
6551 inline_costs += 10 * num_calls++;
6553 /* Tail recursion elimination */
6554 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6555 gboolean has_vtargs = FALSE;
6556 int i;
6558 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6559 INLINE_FAILURE;
6561 /* keep it simple */
6562 for (i = fsig->param_count - 1; i >= 0; i--) {
6563 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6564 has_vtargs = TRUE;
6567 if (!has_vtargs) {
6568 for (i = 0; i < n; ++i)
6569 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6570 MONO_INST_NEW (cfg, ins, OP_BR);
6571 MONO_ADD_INS (bblock, ins);
6572 tblock = start_bblock->out_bb [0];
6573 link_bblock (cfg, bblock, tblock);
6574 ins->inst_target_bb = tblock;
6575 start_new_bblock = 1;
6577 /* skip the CEE_RET, too */
6578 if (ip_in_bb (cfg, bblock, ip + 5))
6579 ip += 6;
6580 else
6581 ip += 5;
6583 ins_flag = 0;
6584 break;
6588 /* Generic sharing */
6589 /* FIXME: only do this for generic methods if
6590 they are not shared! */
6591 if (context_used && !imt_arg && !array_rank &&
6592 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6593 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6594 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6595 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6596 INLINE_FAILURE;
6598 g_assert (cfg->generic_sharing_context && cmethod);
6599 g_assert (!addr);
6602 * We are compiling a call to a
6603 * generic method from shared code,
6604 * which means that we have to look up
6605 * the method in the rgctx and do an
6606 * indirect call.
6608 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6611 /* Indirect calls */
6612 if (addr) {
6613 g_assert (!imt_arg);
6615 if (*ip == CEE_CALL)
6616 g_assert (context_used);
6617 else if (*ip == CEE_CALLI)
6618 g_assert (!vtable_arg);
6619 else
6620 /* FIXME: what the hell is this??? */
6621 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6622 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6624 /* Prevent inlining of methods with indirect calls */
6625 INLINE_FAILURE;
6627 if (vtable_arg) {
6628 MonoCallInst *call;
6629 int rgctx_reg = mono_alloc_preg (cfg);
6631 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6632 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6633 call = (MonoCallInst*)ins;
6634 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6635 } else {
6636 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6638 * Instead of emitting an indirect call, emit a direct call
6639 * with the contents of the aotconst as the patch info.
6641 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6642 NULLIFY_INS (addr);
6643 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6644 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6645 NULLIFY_INS (addr);
6646 } else {
6647 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6650 if (!MONO_TYPE_IS_VOID (fsig->ret))
6651 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6653 CHECK_CFG_EXCEPTION;
6655 ip += 5;
6656 ins_flag = 0;
6657 break;
6660 /* Array methods */
6661 if (array_rank) {
6662 MonoInst *addr;
6664 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6665 if (sp [fsig->param_count]->type == STACK_OBJ) {
6666 MonoInst *iargs [2];
6668 iargs [0] = sp [0];
6669 iargs [1] = sp [fsig->param_count];
6671 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6674 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6675 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6676 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6677 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6679 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6681 *sp++ = ins;
6682 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6683 if (!cmethod->klass->element_class->valuetype && !readonly)
6684 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6685 CHECK_TYPELOAD (cmethod->klass);
6687 readonly = FALSE;
6688 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6689 *sp++ = addr;
6690 } else {
6691 g_assert_not_reached ();
6694 CHECK_CFG_EXCEPTION;
6696 ip += 5;
6697 ins_flag = 0;
6698 break;
6701 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6702 if (ins) {
6703 if (!MONO_TYPE_IS_VOID (fsig->ret))
6704 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6706 CHECK_CFG_EXCEPTION;
6708 ip += 5;
6709 ins_flag = 0;
6710 break;
6713 /* Common call */
6714 INLINE_FAILURE;
6715 if (vtable_arg) {
6716 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6717 NULL, vtable_arg);
6718 } else if (imt_arg) {
6719 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6720 } else {
6721 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6724 if (!MONO_TYPE_IS_VOID (fsig->ret))
6725 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6727 CHECK_CFG_EXCEPTION;
6729 ip += 5;
6730 ins_flag = 0;
6731 break;
6733 case CEE_RET:
6734 if (cfg->method != method) {
6735 /* return from inlined method */
6737 * If in_count == 0, that means the ret is unreachable due to
6738 * being preceeded by a throw. In that case, inline_method () will
6739 * handle setting the return value
6740 * (test case: test_0_inline_throw ()).
6742 if (return_var && cfg->cbb->in_count) {
6743 MonoInst *store;
6744 CHECK_STACK (1);
6745 --sp;
6746 //g_assert (returnvar != -1);
6747 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6748 cfg->ret_var_set = TRUE;
6750 } else {
6751 if (cfg->ret) {
6752 MonoType *ret_type = mono_method_signature (method)->ret;
6754 if (seq_points) {
6756 * Place a seq point here too even through the IL stack is not
6757 * empty, so a step over on
6758 * call <FOO>
6759 * ret
6760 * will work correctly.
6762 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6763 MONO_ADD_INS (cfg->cbb, ins);
6766 g_assert (!return_var);
6767 CHECK_STACK (1);
6768 --sp;
6769 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6770 MonoInst *ret_addr;
6772 if (!cfg->vret_addr) {
6773 MonoInst *ins;
6775 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6776 } else {
6777 EMIT_NEW_RETLOADA (cfg, ret_addr);
6779 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6780 ins->klass = mono_class_from_mono_type (ret_type);
6782 } else {
6783 #ifdef MONO_ARCH_SOFT_FLOAT
6784 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6785 MonoInst *iargs [1];
6786 MonoInst *conv;
6788 iargs [0] = *sp;
6789 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6790 mono_arch_emit_setret (cfg, method, conv);
6791 } else {
6792 mono_arch_emit_setret (cfg, method, *sp);
6794 #else
6795 mono_arch_emit_setret (cfg, method, *sp);
6796 #endif
6800 if (sp != stack_start)
6801 UNVERIFIED;
6802 MONO_INST_NEW (cfg, ins, OP_BR);
6803 ip++;
6804 ins->inst_target_bb = end_bblock;
6805 MONO_ADD_INS (bblock, ins);
6806 link_bblock (cfg, bblock, end_bblock);
6807 start_new_bblock = 1;
6808 break;
6809 case CEE_BR_S:
6810 CHECK_OPSIZE (2);
6811 MONO_INST_NEW (cfg, ins, OP_BR);
6812 ip++;
6813 target = ip + 1 + (signed char)(*ip);
6814 ++ip;
6815 GET_BBLOCK (cfg, tblock, target);
6816 link_bblock (cfg, bblock, tblock);
6817 ins->inst_target_bb = tblock;
6818 if (sp != stack_start) {
6819 handle_stack_args (cfg, stack_start, sp - stack_start);
6820 sp = stack_start;
6821 CHECK_UNVERIFIABLE (cfg);
6823 MONO_ADD_INS (bblock, ins);
6824 start_new_bblock = 1;
6825 inline_costs += BRANCH_COST;
6826 break;
6827 case CEE_BEQ_S:
6828 case CEE_BGE_S:
6829 case CEE_BGT_S:
6830 case CEE_BLE_S:
6831 case CEE_BLT_S:
6832 case CEE_BNE_UN_S:
6833 case CEE_BGE_UN_S:
6834 case CEE_BGT_UN_S:
6835 case CEE_BLE_UN_S:
6836 case CEE_BLT_UN_S:
6837 CHECK_OPSIZE (2);
6838 CHECK_STACK (2);
6839 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6840 ip++;
6841 target = ip + 1 + *(signed char*)ip;
6842 ip++;
6844 ADD_BINCOND (NULL);
6846 sp = stack_start;
6847 inline_costs += BRANCH_COST;
6848 break;
6849 case CEE_BR:
6850 CHECK_OPSIZE (5);
6851 MONO_INST_NEW (cfg, ins, OP_BR);
6852 ip++;
6854 target = ip + 4 + (gint32)read32(ip);
6855 ip += 4;
6856 GET_BBLOCK (cfg, tblock, target);
6857 link_bblock (cfg, bblock, tblock);
6858 ins->inst_target_bb = tblock;
6859 if (sp != stack_start) {
6860 handle_stack_args (cfg, stack_start, sp - stack_start);
6861 sp = stack_start;
6862 CHECK_UNVERIFIABLE (cfg);
6865 MONO_ADD_INS (bblock, ins);
6867 start_new_bblock = 1;
6868 inline_costs += BRANCH_COST;
6869 break;
6870 case CEE_BRFALSE_S:
6871 case CEE_BRTRUE_S:
6872 case CEE_BRFALSE:
6873 case CEE_BRTRUE: {
6874 MonoInst *cmp;
6875 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6876 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6877 guint32 opsize = is_short ? 1 : 4;
6879 CHECK_OPSIZE (opsize);
6880 CHECK_STACK (1);
6881 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6882 UNVERIFIED;
6883 ip ++;
6884 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6885 ip += opsize;
6887 sp--;
6889 GET_BBLOCK (cfg, tblock, target);
6890 link_bblock (cfg, bblock, tblock);
6891 GET_BBLOCK (cfg, tblock, ip);
6892 link_bblock (cfg, bblock, tblock);
6894 if (sp != stack_start) {
6895 handle_stack_args (cfg, stack_start, sp - stack_start);
6896 CHECK_UNVERIFIABLE (cfg);
6899 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6900 cmp->sreg1 = sp [0]->dreg;
6901 type_from_op (cmp, sp [0], NULL);
6902 CHECK_TYPE (cmp);
6904 #if SIZEOF_REGISTER == 4
6905 if (cmp->opcode == OP_LCOMPARE_IMM) {
6906 /* Convert it to OP_LCOMPARE */
6907 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6908 ins->type = STACK_I8;
6909 ins->dreg = alloc_dreg (cfg, STACK_I8);
6910 ins->inst_l = 0;
6911 MONO_ADD_INS (bblock, ins);
6912 cmp->opcode = OP_LCOMPARE;
6913 cmp->sreg2 = ins->dreg;
6915 #endif
6916 MONO_ADD_INS (bblock, cmp);
6918 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6919 type_from_op (ins, sp [0], NULL);
6920 MONO_ADD_INS (bblock, ins);
6921 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6922 GET_BBLOCK (cfg, tblock, target);
6923 ins->inst_true_bb = tblock;
6924 GET_BBLOCK (cfg, tblock, ip);
6925 ins->inst_false_bb = tblock;
6926 start_new_bblock = 2;
6928 sp = stack_start;
6929 inline_costs += BRANCH_COST;
6930 break;
6932 case CEE_BEQ:
6933 case CEE_BGE:
6934 case CEE_BGT:
6935 case CEE_BLE:
6936 case CEE_BLT:
6937 case CEE_BNE_UN:
6938 case CEE_BGE_UN:
6939 case CEE_BGT_UN:
6940 case CEE_BLE_UN:
6941 case CEE_BLT_UN:
6942 CHECK_OPSIZE (5);
6943 CHECK_STACK (2);
6944 MONO_INST_NEW (cfg, ins, *ip);
6945 ip++;
6946 target = ip + 4 + (gint32)read32(ip);
6947 ip += 4;
6949 ADD_BINCOND (NULL);
6951 sp = stack_start;
6952 inline_costs += BRANCH_COST;
6953 break;
6954 case CEE_SWITCH: {
6955 MonoInst *src1;
6956 MonoBasicBlock **targets;
6957 MonoBasicBlock *default_bblock;
6958 MonoJumpInfoBBTable *table;
6959 int offset_reg = alloc_preg (cfg);
6960 int target_reg = alloc_preg (cfg);
6961 int table_reg = alloc_preg (cfg);
6962 int sum_reg = alloc_preg (cfg);
6963 gboolean use_op_switch;
6965 CHECK_OPSIZE (5);
6966 CHECK_STACK (1);
6967 n = read32 (ip + 1);
6968 --sp;
6969 src1 = sp [0];
6970 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6971 UNVERIFIED;
6973 ip += 5;
6974 CHECK_OPSIZE (n * sizeof (guint32));
6975 target = ip + n * sizeof (guint32);
6977 GET_BBLOCK (cfg, default_bblock, target);
6979 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6980 for (i = 0; i < n; ++i) {
6981 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6982 targets [i] = tblock;
6983 ip += 4;
6986 if (sp != stack_start) {
6988 * Link the current bb with the targets as well, so handle_stack_args
6989 * will set their in_stack correctly.
6991 link_bblock (cfg, bblock, default_bblock);
6992 for (i = 0; i < n; ++i)
6993 link_bblock (cfg, bblock, targets [i]);
6995 handle_stack_args (cfg, stack_start, sp - stack_start);
6996 sp = stack_start;
6997 CHECK_UNVERIFIABLE (cfg);
7000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7002 bblock = cfg->cbb;
7004 for (i = 0; i < n; ++i)
7005 link_bblock (cfg, bblock, targets [i]);
7007 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7008 table->table = targets;
7009 table->table_size = n;
7011 use_op_switch = FALSE;
7012 #ifdef TARGET_ARM
7013 /* ARM implements SWITCH statements differently */
7014 /* FIXME: Make it use the generic implementation */
7015 if (!cfg->compile_aot)
7016 use_op_switch = TRUE;
7017 #endif
7019 if (COMPILE_LLVM (cfg))
7020 use_op_switch = TRUE;
7022 cfg->cbb->has_jump_table = 1;
7024 if (use_op_switch) {
7025 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7026 ins->sreg1 = src1->dreg;
7027 ins->inst_p0 = table;
7028 ins->inst_many_bb = targets;
7029 ins->klass = GUINT_TO_POINTER (n);
7030 MONO_ADD_INS (cfg->cbb, ins);
7031 } else {
7032 if (sizeof (gpointer) == 8)
7033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7034 else
7035 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7037 #if SIZEOF_REGISTER == 8
7038 /* The upper word might not be zero, and we add it to a 64 bit address later */
7039 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7040 #endif
7042 if (cfg->compile_aot) {
7043 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7044 } else {
7045 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7046 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7047 ins->inst_p0 = table;
7048 ins->dreg = table_reg;
7049 MONO_ADD_INS (cfg->cbb, ins);
7052 /* FIXME: Use load_memindex */
7053 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7055 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7057 start_new_bblock = 1;
7058 inline_costs += (BRANCH_COST * 2);
7059 break;
7061 case CEE_LDIND_I1:
7062 case CEE_LDIND_U1:
7063 case CEE_LDIND_I2:
7064 case CEE_LDIND_U2:
7065 case CEE_LDIND_I4:
7066 case CEE_LDIND_U4:
7067 case CEE_LDIND_I8:
7068 case CEE_LDIND_I:
7069 case CEE_LDIND_R4:
7070 case CEE_LDIND_R8:
7071 case CEE_LDIND_REF:
7072 CHECK_STACK (1);
7073 --sp;
7075 switch (*ip) {
7076 case CEE_LDIND_R4:
7077 case CEE_LDIND_R8:
7078 dreg = alloc_freg (cfg);
7079 break;
7080 case CEE_LDIND_I8:
7081 dreg = alloc_lreg (cfg);
7082 break;
7083 default:
7084 dreg = alloc_preg (cfg);
7087 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7088 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7089 ins->flags |= ins_flag;
7090 ins_flag = 0;
7091 MONO_ADD_INS (bblock, ins);
7092 *sp++ = ins;
7093 ++ip;
7094 break;
7095 case CEE_STIND_REF:
7096 case CEE_STIND_I1:
7097 case CEE_STIND_I2:
7098 case CEE_STIND_I4:
7099 case CEE_STIND_I8:
7100 case CEE_STIND_R4:
7101 case CEE_STIND_R8:
7102 case CEE_STIND_I:
7103 CHECK_STACK (2);
7104 sp -= 2;
7106 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7107 ins->flags |= ins_flag;
7108 ins_flag = 0;
7109 MONO_ADD_INS (bblock, ins);
7111 #if HAVE_WRITE_BARRIERS
7112 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7113 MonoInst *dummy_use;
7114 /* insert call to write barrier */
7115 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7116 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7117 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7119 #endif
7121 inline_costs += 1;
7122 ++ip;
7123 break;
7125 case CEE_MUL:
7126 CHECK_STACK (2);
7128 MONO_INST_NEW (cfg, ins, (*ip));
7129 sp -= 2;
7130 ins->sreg1 = sp [0]->dreg;
7131 ins->sreg2 = sp [1]->dreg;
7132 type_from_op (ins, sp [0], sp [1]);
7133 CHECK_TYPE (ins);
7134 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7136 /* Use the immediate opcodes if possible */
7137 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7138 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7139 if (imm_opcode != -1) {
7140 ins->opcode = imm_opcode;
7141 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7142 ins->sreg2 = -1;
7144 sp [1]->opcode = OP_NOP;
7148 MONO_ADD_INS ((cfg)->cbb, (ins));
7150 *sp++ = mono_decompose_opcode (cfg, ins);
7151 ip++;
7152 break;
7153 case CEE_ADD:
7154 case CEE_SUB:
7155 case CEE_DIV:
7156 case CEE_DIV_UN:
7157 case CEE_REM:
7158 case CEE_REM_UN:
7159 case CEE_AND:
7160 case CEE_OR:
7161 case CEE_XOR:
7162 case CEE_SHL:
7163 case CEE_SHR:
7164 case CEE_SHR_UN:
7165 CHECK_STACK (2);
7167 MONO_INST_NEW (cfg, ins, (*ip));
7168 sp -= 2;
7169 ins->sreg1 = sp [0]->dreg;
7170 ins->sreg2 = sp [1]->dreg;
7171 type_from_op (ins, sp [0], sp [1]);
7172 CHECK_TYPE (ins);
7173 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7174 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7176 /* FIXME: Pass opcode to is_inst_imm */
7178 /* Use the immediate opcodes if possible */
7179 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7180 int imm_opcode;
7182 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7183 if (imm_opcode != -1) {
7184 ins->opcode = imm_opcode;
7185 if (sp [1]->opcode == OP_I8CONST) {
7186 #if SIZEOF_REGISTER == 8
7187 ins->inst_imm = sp [1]->inst_l;
7188 #else
7189 ins->inst_ls_word = sp [1]->inst_ls_word;
7190 ins->inst_ms_word = sp [1]->inst_ms_word;
7191 #endif
7193 else
7194 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7195 ins->sreg2 = -1;
7197 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7198 if (sp [1]->next == NULL)
7199 sp [1]->opcode = OP_NOP;
7202 MONO_ADD_INS ((cfg)->cbb, (ins));
7204 *sp++ = mono_decompose_opcode (cfg, ins);
7205 ip++;
7206 break;
7207 case CEE_NEG:
7208 case CEE_NOT:
7209 case CEE_CONV_I1:
7210 case CEE_CONV_I2:
7211 case CEE_CONV_I4:
7212 case CEE_CONV_R4:
7213 case CEE_CONV_R8:
7214 case CEE_CONV_U4:
7215 case CEE_CONV_I8:
7216 case CEE_CONV_U8:
7217 case CEE_CONV_OVF_I8:
7218 case CEE_CONV_OVF_U8:
7219 case CEE_CONV_R_UN:
7220 CHECK_STACK (1);
7222 /* Special case this earlier so we have long constants in the IR */
7223 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7224 int data = sp [-1]->inst_c0;
7225 sp [-1]->opcode = OP_I8CONST;
7226 sp [-1]->type = STACK_I8;
7227 #if SIZEOF_REGISTER == 8
7228 if ((*ip) == CEE_CONV_U8)
7229 sp [-1]->inst_c0 = (guint32)data;
7230 else
7231 sp [-1]->inst_c0 = data;
7232 #else
7233 sp [-1]->inst_ls_word = data;
7234 if ((*ip) == CEE_CONV_U8)
7235 sp [-1]->inst_ms_word = 0;
7236 else
7237 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7238 #endif
7239 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7241 else {
7242 ADD_UNOP (*ip);
7244 ip++;
7245 break;
7246 case CEE_CONV_OVF_I4:
7247 case CEE_CONV_OVF_I1:
7248 case CEE_CONV_OVF_I2:
7249 case CEE_CONV_OVF_I:
7250 case CEE_CONV_OVF_U:
7251 CHECK_STACK (1);
7253 if (sp [-1]->type == STACK_R8) {
7254 ADD_UNOP (CEE_CONV_OVF_I8);
7255 ADD_UNOP (*ip);
7256 } else {
7257 ADD_UNOP (*ip);
7259 ip++;
7260 break;
7261 case CEE_CONV_OVF_U1:
7262 case CEE_CONV_OVF_U2:
7263 case CEE_CONV_OVF_U4:
7264 CHECK_STACK (1);
7266 if (sp [-1]->type == STACK_R8) {
7267 ADD_UNOP (CEE_CONV_OVF_U8);
7268 ADD_UNOP (*ip);
7269 } else {
7270 ADD_UNOP (*ip);
7272 ip++;
7273 break;
7274 case CEE_CONV_OVF_I1_UN:
7275 case CEE_CONV_OVF_I2_UN:
7276 case CEE_CONV_OVF_I4_UN:
7277 case CEE_CONV_OVF_I8_UN:
7278 case CEE_CONV_OVF_U1_UN:
7279 case CEE_CONV_OVF_U2_UN:
7280 case CEE_CONV_OVF_U4_UN:
7281 case CEE_CONV_OVF_U8_UN:
7282 case CEE_CONV_OVF_I_UN:
7283 case CEE_CONV_OVF_U_UN:
7284 case CEE_CONV_U2:
7285 case CEE_CONV_U1:
7286 case CEE_CONV_I:
7287 case CEE_CONV_U:
7288 CHECK_STACK (1);
7289 ADD_UNOP (*ip);
7290 CHECK_CFG_EXCEPTION;
7291 ip++;
7292 break;
7293 case CEE_ADD_OVF:
7294 case CEE_ADD_OVF_UN:
7295 case CEE_MUL_OVF:
7296 case CEE_MUL_OVF_UN:
7297 case CEE_SUB_OVF:
7298 case CEE_SUB_OVF_UN:
7299 CHECK_STACK (2);
7300 ADD_BINOP (*ip);
7301 ip++;
7302 break;
7303 case CEE_CPOBJ:
7304 CHECK_OPSIZE (5);
7305 CHECK_STACK (2);
7306 token = read32 (ip + 1);
7307 klass = mini_get_class (method, token, generic_context);
7308 CHECK_TYPELOAD (klass);
7309 sp -= 2;
7310 if (generic_class_is_reference_type (cfg, klass)) {
7311 MonoInst *store, *load;
7312 int dreg = alloc_preg (cfg);
7314 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7315 load->flags |= ins_flag;
7316 MONO_ADD_INS (cfg->cbb, load);
7318 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7319 store->flags |= ins_flag;
7320 MONO_ADD_INS (cfg->cbb, store);
7322 #if HAVE_WRITE_BARRIERS
7323 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7324 MonoInst *dummy_use;
7325 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7326 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7327 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7329 #endif
7330 } else {
7331 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7333 ins_flag = 0;
7334 ip += 5;
7335 break;
7336 case CEE_LDOBJ: {
7337 int loc_index = -1;
7338 int stloc_len = 0;
7340 CHECK_OPSIZE (5);
7341 CHECK_STACK (1);
7342 --sp;
7343 token = read32 (ip + 1);
7344 klass = mini_get_class (method, token, generic_context);
7345 CHECK_TYPELOAD (klass);
7347 /* Optimize the common ldobj+stloc combination */
7348 switch (ip [5]) {
7349 case CEE_STLOC_S:
7350 loc_index = ip [6];
7351 stloc_len = 2;
7352 break;
7353 case CEE_STLOC_0:
7354 case CEE_STLOC_1:
7355 case CEE_STLOC_2:
7356 case CEE_STLOC_3:
7357 loc_index = ip [5] - CEE_STLOC_0;
7358 stloc_len = 1;
7359 break;
7360 default:
7361 break;
7364 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7365 CHECK_LOCAL (loc_index);
7367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7368 ins->dreg = cfg->locals [loc_index]->dreg;
7369 ip += 5;
7370 ip += stloc_len;
7371 break;
7374 /* Optimize the ldobj+stobj combination */
7375 /* The reference case ends up being a load+store anyway */
7376 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7377 CHECK_STACK (1);
7379 sp --;
7381 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7383 ip += 5 + 5;
7384 ins_flag = 0;
7385 break;
7388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7389 *sp++ = ins;
7391 ip += 5;
7392 ins_flag = 0;
7393 inline_costs += 1;
7394 break;
7396 case CEE_LDSTR:
7397 CHECK_STACK_OVF (1);
7398 CHECK_OPSIZE (5);
7399 n = read32 (ip + 1);
7401 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7402 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7403 ins->type = STACK_OBJ;
7404 *sp = ins;
7406 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7407 MonoInst *iargs [1];
7409 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7410 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7411 } else {
7412 if (cfg->opt & MONO_OPT_SHARED) {
7413 MonoInst *iargs [3];
7415 if (cfg->compile_aot) {
7416 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7418 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7419 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7420 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7421 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7422 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7423 } else {
7424 if (bblock->out_of_line) {
7425 MonoInst *iargs [2];
7427 if (image == mono_defaults.corlib) {
7429 * Avoid relocations in AOT and save some space by using a
7430 * version of helper_ldstr specialized to mscorlib.
7432 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7433 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7434 } else {
7435 /* Avoid creating the string object */
7436 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7437 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7438 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7441 else
7442 if (cfg->compile_aot) {
7443 NEW_LDSTRCONST (cfg, ins, image, n);
7444 *sp = ins;
7445 MONO_ADD_INS (bblock, ins);
7447 else {
7448 NEW_PCONST (cfg, ins, NULL);
7449 ins->type = STACK_OBJ;
7450 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7451 *sp = ins;
7452 MONO_ADD_INS (bblock, ins);
7457 sp++;
7458 ip += 5;
7459 break;
7460 case CEE_NEWOBJ: {
7461 MonoInst *iargs [2];
7462 MonoMethodSignature *fsig;
7463 MonoInst this_ins;
7464 MonoInst *alloc;
7465 MonoInst *vtable_arg = NULL;
7467 CHECK_OPSIZE (5);
7468 token = read32 (ip + 1);
7469 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7470 if (!cmethod)
7471 goto load_error;
7472 fsig = mono_method_get_signature (cmethod, image, token);
7473 if (!fsig)
7474 goto load_error;
7476 mono_save_token_info (cfg, image, token, cmethod);
7478 if (!mono_class_init (cmethod->klass))
7479 goto load_error;
7481 if (cfg->generic_sharing_context)
7482 context_used = mono_method_check_context_used (cmethod);
7484 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7485 if (check_linkdemand (cfg, method, cmethod))
7486 INLINE_FAILURE;
7487 CHECK_CFG_EXCEPTION;
7488 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7489 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7492 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7493 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7494 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7495 mono_class_vtable (cfg->domain, cmethod->klass);
7496 CHECK_TYPELOAD (cmethod->klass);
7498 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7499 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7500 } else {
7501 if (context_used) {
7502 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7503 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7504 } else {
7505 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7507 CHECK_TYPELOAD (cmethod->klass);
7508 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7513 n = fsig->param_count;
7514 CHECK_STACK (n);
7517 * Generate smaller code for the common newobj <exception> instruction in
7518 * argument checking code.
7520 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7521 is_exception_class (cmethod->klass) && n <= 2 &&
7522 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7523 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7524 MonoInst *iargs [3];
7526 g_assert (!vtable_arg);
7528 sp -= n;
7530 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7531 switch (n) {
7532 case 0:
7533 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7534 break;
7535 case 1:
7536 iargs [1] = sp [0];
7537 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7538 break;
7539 case 2:
7540 iargs [1] = sp [0];
7541 iargs [2] = sp [1];
7542 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7543 break;
7544 default:
7545 g_assert_not_reached ();
7548 ip += 5;
7549 inline_costs += 5;
7550 break;
7553 /* move the args to allow room for 'this' in the first position */
7554 while (n--) {
7555 --sp;
7556 sp [1] = sp [0];
7559 /* check_call_signature () requires sp[0] to be set */
7560 this_ins.type = STACK_OBJ;
7561 sp [0] = &this_ins;
7562 if (check_call_signature (cfg, fsig, sp))
7563 UNVERIFIED;
7565 iargs [0] = NULL;
7567 if (mini_class_is_system_array (cmethod->klass)) {
7568 g_assert (!vtable_arg);
7570 *sp = emit_get_rgctx_method (cfg, context_used,
7571 cmethod, MONO_RGCTX_INFO_METHOD);
7573 /* Avoid varargs in the common case */
7574 if (fsig->param_count == 1)
7575 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7576 else if (fsig->param_count == 2)
7577 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7578 else if (fsig->param_count == 3)
7579 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7580 else
7581 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7582 } else if (cmethod->string_ctor) {
7583 g_assert (!context_used);
7584 g_assert (!vtable_arg);
7585 /* we simply pass a null pointer */
7586 EMIT_NEW_PCONST (cfg, *sp, NULL);
7587 /* now call the string ctor */
7588 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7589 } else {
7590 MonoInst* callvirt_this_arg = NULL;
7592 if (cmethod->klass->valuetype) {
7593 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7594 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7595 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7597 alloc = NULL;
7600 * The code generated by mini_emit_virtual_call () expects
7601 * iargs [0] to be a boxed instance, but luckily the vcall
7602 * will be transformed into a normal call there.
7604 } else if (context_used) {
7605 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7606 *sp = alloc;
7607 } else {
7608 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7610 CHECK_TYPELOAD (cmethod->klass);
7613 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7614 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7615 * As a workaround, we call class cctors before allocating objects.
7617 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7618 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7619 if (cfg->verbose_level > 2)
7620 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7621 class_inits = g_slist_prepend (class_inits, vtable);
7624 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7625 *sp = alloc;
7627 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7629 if (alloc)
7630 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7632 /* Now call the actual ctor */
7633 /* Avoid virtual calls to ctors if possible */
7634 if (cmethod->klass->marshalbyref)
7635 callvirt_this_arg = sp [0];
7638 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7639 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7640 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7641 *sp = ins;
7642 sp++;
7645 CHECK_CFG_EXCEPTION;
7646 } else
7650 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7651 mono_method_check_inlining (cfg, cmethod) &&
7652 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7653 !g_list_find (dont_inline, cmethod)) {
7654 int costs;
7656 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7657 cfg->real_offset += 5;
7658 bblock = cfg->cbb;
7660 inline_costs += costs - 5;
7661 } else {
7662 INLINE_FAILURE;
7663 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7665 } else if (context_used &&
7666 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7667 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7668 MonoInst *cmethod_addr;
7670 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7671 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7673 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7674 } else {
7675 INLINE_FAILURE;
7676 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7677 callvirt_this_arg, NULL, vtable_arg);
7681 if (alloc == NULL) {
7682 /* Valuetype */
7683 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7684 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7685 *sp++= ins;
7687 else
7688 *sp++ = alloc;
7690 ip += 5;
7691 inline_costs += 5;
7692 break;
7694 case CEE_CASTCLASS:
7695 CHECK_STACK (1);
7696 --sp;
7697 CHECK_OPSIZE (5);
7698 token = read32 (ip + 1);
7699 klass = mini_get_class (method, token, generic_context);
7700 CHECK_TYPELOAD (klass);
7701 if (sp [0]->type != STACK_OBJ)
7702 UNVERIFIED;
7704 if (cfg->generic_sharing_context)
7705 context_used = mono_class_check_context_used (klass);
7707 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7708 MonoInst *args [2];
7710 /* obj */
7711 args [0] = *sp;
7713 /* klass */
7714 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7716 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7717 *sp ++ = ins;
7718 ip += 5;
7719 inline_costs += 2;
7720 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7721 MonoMethod *mono_castclass;
7722 MonoInst *iargs [1];
7723 int costs;
7725 mono_castclass = mono_marshal_get_castclass (klass);
7726 iargs [0] = sp [0];
7728 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7729 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7730 g_assert (costs > 0);
7732 ip += 5;
7733 cfg->real_offset += 5;
7734 bblock = cfg->cbb;
7736 *sp++ = iargs [0];
7738 inline_costs += costs;
7740 else {
7741 ins = handle_castclass (cfg, klass, *sp, context_used);
7742 CHECK_CFG_EXCEPTION;
7743 bblock = cfg->cbb;
7744 *sp ++ = ins;
7745 ip += 5;
7747 break;
7748 case CEE_ISINST: {
7749 CHECK_STACK (1);
7750 --sp;
7751 CHECK_OPSIZE (5);
7752 token = read32 (ip + 1);
7753 klass = mini_get_class (method, token, generic_context);
7754 CHECK_TYPELOAD (klass);
7755 if (sp [0]->type != STACK_OBJ)
7756 UNVERIFIED;
7758 if (cfg->generic_sharing_context)
7759 context_used = mono_class_check_context_used (klass);
7761 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7762 MonoInst *args [2];
7764 /* obj */
7765 args [0] = *sp;
7767 /* klass */
7768 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7770 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7771 sp++;
7772 ip += 5;
7773 inline_costs += 2;
7774 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7775 MonoMethod *mono_isinst;
7776 MonoInst *iargs [1];
7777 int costs;
7779 mono_isinst = mono_marshal_get_isinst (klass);
7780 iargs [0] = sp [0];
7782 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7783 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7784 g_assert (costs > 0);
7786 ip += 5;
7787 cfg->real_offset += 5;
7788 bblock = cfg->cbb;
7790 *sp++= iargs [0];
7792 inline_costs += costs;
7794 else {
7795 ins = handle_isinst (cfg, klass, *sp, context_used);
7796 CHECK_CFG_EXCEPTION;
7797 bblock = cfg->cbb;
7798 *sp ++ = ins;
7799 ip += 5;
7801 break;
7803 case CEE_UNBOX_ANY: {
7804 CHECK_STACK (1);
7805 --sp;
7806 CHECK_OPSIZE (5);
7807 token = read32 (ip + 1);
7808 klass = mini_get_class (method, token, generic_context);
7809 CHECK_TYPELOAD (klass);
7811 mono_save_token_info (cfg, image, token, klass);
7813 if (cfg->generic_sharing_context)
7814 context_used = mono_class_check_context_used (klass);
7816 if (generic_class_is_reference_type (cfg, klass)) {
7817 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7818 if (context_used) {
7819 MonoInst *iargs [2];
7821 /* obj */
7822 iargs [0] = *sp;
7823 /* klass */
7824 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7825 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7826 *sp ++ = ins;
7827 ip += 5;
7828 inline_costs += 2;
7829 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7830 MonoMethod *mono_castclass;
7831 MonoInst *iargs [1];
7832 int costs;
7834 mono_castclass = mono_marshal_get_castclass (klass);
7835 iargs [0] = sp [0];
7837 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7838 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7840 g_assert (costs > 0);
7842 ip += 5;
7843 cfg->real_offset += 5;
7844 bblock = cfg->cbb;
7846 *sp++ = iargs [0];
7847 inline_costs += costs;
7848 } else {
7849 ins = handle_castclass (cfg, klass, *sp, 0);
7850 CHECK_CFG_EXCEPTION;
7851 bblock = cfg->cbb;
7852 *sp ++ = ins;
7853 ip += 5;
7855 break;
7858 if (mono_class_is_nullable (klass)) {
7859 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7860 *sp++= ins;
7861 ip += 5;
7862 break;
7865 /* UNBOX */
7866 ins = handle_unbox (cfg, klass, sp, context_used);
7867 *sp = ins;
7869 ip += 5;
7871 /* LDOBJ */
7872 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7873 *sp++ = ins;
7875 inline_costs += 2;
7876 break;
7878 case CEE_BOX: {
7879 MonoInst *val;
7881 CHECK_STACK (1);
7882 --sp;
7883 val = *sp;
7884 CHECK_OPSIZE (5);
7885 token = read32 (ip + 1);
7886 klass = mini_get_class (method, token, generic_context);
7887 CHECK_TYPELOAD (klass);
7889 mono_save_token_info (cfg, image, token, klass);
7891 if (cfg->generic_sharing_context)
7892 context_used = mono_class_check_context_used (klass);
7894 if (generic_class_is_reference_type (cfg, klass)) {
7895 *sp++ = val;
7896 ip += 5;
7897 break;
7900 if (klass == mono_defaults.void_class)
7901 UNVERIFIED;
7902 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7903 UNVERIFIED;
7904 /* frequent check in generic code: box (struct), brtrue */
7905 if (!mono_class_is_nullable (klass) &&
7906 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7907 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7908 ip += 5;
7909 MONO_INST_NEW (cfg, ins, OP_BR);
7910 if (*ip == CEE_BRTRUE_S) {
7911 CHECK_OPSIZE (2);
7912 ip++;
7913 target = ip + 1 + (signed char)(*ip);
7914 ip++;
7915 } else {
7916 CHECK_OPSIZE (5);
7917 ip++;
7918 target = ip + 4 + (gint)(read32 (ip));
7919 ip += 4;
7921 GET_BBLOCK (cfg, tblock, target);
7922 link_bblock (cfg, bblock, tblock);
7923 ins->inst_target_bb = tblock;
7924 GET_BBLOCK (cfg, tblock, ip);
7926 * This leads to some inconsistency, since the two bblocks are
7927 * not really connected, but it is needed for handling stack
7928 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7929 * FIXME: This should only be needed if sp != stack_start, but that
7930 * doesn't work for some reason (test failure in mcs/tests on x86).
7932 link_bblock (cfg, bblock, tblock);
7933 if (sp != stack_start) {
7934 handle_stack_args (cfg, stack_start, sp - stack_start);
7935 sp = stack_start;
7936 CHECK_UNVERIFIABLE (cfg);
7938 MONO_ADD_INS (bblock, ins);
7939 start_new_bblock = 1;
7940 break;
7943 *sp++ = handle_box (cfg, val, klass, context_used);
7945 CHECK_CFG_EXCEPTION;
7946 ip += 5;
7947 inline_costs += 1;
7948 break;
7950 case CEE_UNBOX: {
7951 CHECK_STACK (1);
7952 --sp;
7953 CHECK_OPSIZE (5);
7954 token = read32 (ip + 1);
7955 klass = mini_get_class (method, token, generic_context);
7956 CHECK_TYPELOAD (klass);
7958 mono_save_token_info (cfg, image, token, klass);
7960 if (cfg->generic_sharing_context)
7961 context_used = mono_class_check_context_used (klass);
7963 if (mono_class_is_nullable (klass)) {
7964 MonoInst *val;
7966 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7967 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7969 *sp++= ins;
7970 } else {
7971 ins = handle_unbox (cfg, klass, sp, context_used);
7972 *sp++ = ins;
7974 ip += 5;
7975 inline_costs += 2;
7976 break;
7978 case CEE_LDFLD:
7979 case CEE_LDFLDA:
7980 case CEE_STFLD: {
7981 MonoClassField *field;
7982 int costs;
7983 guint foffset;
7985 if (*ip == CEE_STFLD) {
7986 CHECK_STACK (2);
7987 sp -= 2;
7988 } else {
7989 CHECK_STACK (1);
7990 --sp;
7992 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7993 UNVERIFIED;
7994 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7995 UNVERIFIED;
7996 CHECK_OPSIZE (5);
7997 token = read32 (ip + 1);
7998 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7999 field = mono_method_get_wrapper_data (method, token);
8000 klass = field->parent;
8002 else {
8003 field = mono_field_from_token (image, token, &klass, generic_context);
8005 if (!field)
8006 goto load_error;
8007 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8008 FIELD_ACCESS_FAILURE;
8009 mono_class_init (klass);
8011 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8012 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8013 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8014 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8017 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8018 if (*ip == CEE_STFLD) {
8019 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8020 UNVERIFIED;
8021 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8022 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8023 MonoInst *iargs [5];
8025 iargs [0] = sp [0];
8026 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8027 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8028 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8029 field->offset);
8030 iargs [4] = sp [1];
8032 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8033 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8034 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8035 g_assert (costs > 0);
8037 cfg->real_offset += 5;
8038 bblock = cfg->cbb;
8040 inline_costs += costs;
8041 } else {
8042 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8044 } else {
8045 MonoInst *store;
8047 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8049 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8050 store->flags |= MONO_INST_FAULT;
8052 #if HAVE_WRITE_BARRIERS
8053 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8054 /* insert call to write barrier */
8055 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8056 MonoInst *iargs [2], *dummy_use;
8057 int dreg;
8059 dreg = alloc_preg (cfg);
8060 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8061 iargs [1] = sp [1];
8062 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8064 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8066 #endif
8068 store->flags |= ins_flag;
8070 ins_flag = 0;
8071 ip += 5;
8072 break;
8075 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8076 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8077 MonoInst *iargs [4];
8079 iargs [0] = sp [0];
8080 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8081 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8082 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8083 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8084 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8085 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8086 bblock = cfg->cbb;
8087 g_assert (costs > 0);
8089 cfg->real_offset += 5;
8091 *sp++ = iargs [0];
8093 inline_costs += costs;
8094 } else {
8095 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8096 *sp++ = ins;
8098 } else {
8099 if (sp [0]->type == STACK_VTYPE) {
8100 MonoInst *var;
8102 /* Have to compute the address of the variable */
8104 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8105 if (!var)
8106 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8107 else
8108 g_assert (var->klass == klass);
8110 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8111 sp [0] = ins;
8114 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8116 if (*ip == CEE_LDFLDA) {
8117 dreg = alloc_preg (cfg);
8119 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8120 ins->klass = mono_class_from_mono_type (field->type);
8121 ins->type = STACK_MP;
8122 *sp++ = ins;
8123 } else {
8124 MonoInst *load;
8126 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8127 load->flags |= ins_flag;
8128 load->flags |= MONO_INST_FAULT;
8129 *sp++ = load;
8132 ins_flag = 0;
8133 ip += 5;
8134 break;
8136 case CEE_LDSFLD:
8137 case CEE_LDSFLDA:
8138 case CEE_STSFLD: {
8139 MonoClassField *field;
8140 gpointer addr = NULL;
8141 gboolean is_special_static;
8143 CHECK_OPSIZE (5);
8144 token = read32 (ip + 1);
8146 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8147 field = mono_method_get_wrapper_data (method, token);
8148 klass = field->parent;
8150 else
8151 field = mono_field_from_token (image, token, &klass, generic_context);
8152 if (!field)
8153 goto load_error;
8154 mono_class_init (klass);
8155 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8156 FIELD_ACCESS_FAILURE;
8158 /* if the class is Critical then transparent code cannot access it's fields */
8159 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8160 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8163 * We can only support shared generic static
8164 * field access on architectures where the
8165 * trampoline code has been extended to handle
8166 * the generic class init.
8168 #ifndef MONO_ARCH_VTABLE_REG
8169 GENERIC_SHARING_FAILURE (*ip);
8170 #endif
8172 if (cfg->generic_sharing_context)
8173 context_used = mono_class_check_context_used (klass);
8175 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8177 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8178 * to be called here.
8180 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8181 mono_class_vtable (cfg->domain, klass);
8182 CHECK_TYPELOAD (klass);
8184 mono_domain_lock (cfg->domain);
8185 if (cfg->domain->special_static_fields)
8186 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8187 mono_domain_unlock (cfg->domain);
8189 is_special_static = mono_class_field_is_special_static (field);
8191 /* Generate IR to compute the field address */
8192 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8194 * Fast access to TLS data
8195 * Inline version of get_thread_static_data () in
8196 * threads.c.
8198 guint32 offset;
8199 int idx, static_data_reg, array_reg, dreg;
8200 MonoInst *thread_ins;
8202 // offset &= 0x7fffffff;
8203 // idx = (offset >> 24) - 1;
8204 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8206 thread_ins = mono_get_thread_intrinsic (cfg);
8207 MONO_ADD_INS (cfg->cbb, thread_ins);
8208 static_data_reg = alloc_ireg (cfg);
8209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8211 if (cfg->compile_aot) {
8212 int offset_reg, offset2_reg, idx_reg;
8214 /* For TLS variables, this will return the TLS offset */
8215 EMIT_NEW_SFLDACONST (cfg, ins, field);
8216 offset_reg = ins->dreg;
8217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8218 idx_reg = alloc_ireg (cfg);
8219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8222 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8223 array_reg = alloc_ireg (cfg);
8224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8225 offset2_reg = alloc_ireg (cfg);
8226 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8227 dreg = alloc_ireg (cfg);
8228 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8229 } else {
8230 offset = (gsize)addr & 0x7fffffff;
8231 idx = (offset >> 24) - 1;
8233 array_reg = alloc_ireg (cfg);
8234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8235 dreg = alloc_ireg (cfg);
8236 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8238 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8239 (cfg->compile_aot && is_special_static) ||
8240 (context_used && is_special_static)) {
8241 MonoInst *iargs [2];
8243 g_assert (field->parent);
8244 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8245 if (context_used) {
8246 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8247 field, MONO_RGCTX_INFO_CLASS_FIELD);
8248 } else {
8249 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8251 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8252 } else if (context_used) {
8253 MonoInst *static_data;
8256 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8257 method->klass->name_space, method->klass->name, method->name,
8258 depth, field->offset);
8261 if (mono_class_needs_cctor_run (klass, method))
8262 emit_generic_class_init (cfg, klass);
8265 * The pointer we're computing here is
8267 * super_info.static_data + field->offset
8269 static_data = emit_get_rgctx_klass (cfg, context_used,
8270 klass, MONO_RGCTX_INFO_STATIC_DATA);
8272 if (field->offset == 0) {
8273 ins = static_data;
8274 } else {
8275 int addr_reg = mono_alloc_preg (cfg);
8276 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8278 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8279 MonoInst *iargs [2];
8281 g_assert (field->parent);
8282 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8283 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8284 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8285 } else {
8286 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8288 CHECK_TYPELOAD (klass);
8289 if (!addr) {
8290 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8291 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8292 if (cfg->verbose_level > 2)
8293 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8294 class_inits = g_slist_prepend (class_inits, vtable);
8295 } else {
8296 if (cfg->run_cctors) {
8297 MonoException *ex;
8298 /* This makes so that inline cannot trigger */
8299 /* .cctors: too many apps depend on them */
8300 /* running with a specific order... */
8301 if (! vtable->initialized)
8302 INLINE_FAILURE;
8303 ex = mono_runtime_class_init_full (vtable, FALSE);
8304 if (ex) {
8305 set_exception_object (cfg, ex);
8306 goto exception_exit;
8310 addr = (char*)vtable->data + field->offset;
8312 if (cfg->compile_aot)
8313 EMIT_NEW_SFLDACONST (cfg, ins, field);
8314 else
8315 EMIT_NEW_PCONST (cfg, ins, addr);
8316 } else {
8317 MonoInst *iargs [1];
8318 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8319 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8323 /* Generate IR to do the actual load/store operation */
8325 if (*ip == CEE_LDSFLDA) {
8326 ins->klass = mono_class_from_mono_type (field->type);
8327 ins->type = STACK_PTR;
8328 *sp++ = ins;
8329 } else if (*ip == CEE_STSFLD) {
8330 MonoInst *store;
8331 CHECK_STACK (1);
8332 sp--;
8334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8335 store->flags |= ins_flag;
8336 } else {
8337 gboolean is_const = FALSE;
8338 MonoVTable *vtable = NULL;
8340 if (!context_used) {
8341 vtable = mono_class_vtable (cfg->domain, klass);
8342 CHECK_TYPELOAD (klass);
8344 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8345 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8346 gpointer addr = (char*)vtable->data + field->offset;
8347 int ro_type = field->type->type;
8348 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8349 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8351 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8352 is_const = TRUE;
8353 switch (ro_type) {
8354 case MONO_TYPE_BOOLEAN:
8355 case MONO_TYPE_U1:
8356 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8357 sp++;
8358 break;
8359 case MONO_TYPE_I1:
8360 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8361 sp++;
8362 break;
8363 case MONO_TYPE_CHAR:
8364 case MONO_TYPE_U2:
8365 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8366 sp++;
8367 break;
8368 case MONO_TYPE_I2:
8369 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8370 sp++;
8371 break;
8372 break;
8373 case MONO_TYPE_I4:
8374 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8375 sp++;
8376 break;
8377 case MONO_TYPE_U4:
8378 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8379 sp++;
8380 break;
8381 case MONO_TYPE_I:
8382 case MONO_TYPE_U:
8383 case MONO_TYPE_PTR:
8384 case MONO_TYPE_FNPTR:
8385 #ifndef HAVE_MOVING_COLLECTOR
8386 case MONO_TYPE_STRING:
8387 case MONO_TYPE_OBJECT:
8388 case MONO_TYPE_CLASS:
8389 case MONO_TYPE_SZARRAY:
8390 case MONO_TYPE_ARRAY:
8391 #endif
8392 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8393 type_to_eval_stack_type ((cfg), field->type, *sp);
8394 sp++;
8395 break;
8396 case MONO_TYPE_I8:
8397 case MONO_TYPE_U8:
8398 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8399 sp++;
8400 break;
8401 case MONO_TYPE_R4:
8402 case MONO_TYPE_R8:
8403 case MONO_TYPE_VALUETYPE:
8404 default:
8405 is_const = FALSE;
8406 break;
8410 if (!is_const) {
8411 MonoInst *load;
8413 CHECK_STACK_OVF (1);
8415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8416 load->flags |= ins_flag;
8417 ins_flag = 0;
8418 *sp++ = load;
8421 ins_flag = 0;
8422 ip += 5;
8423 break;
8425 case CEE_STOBJ:
8426 CHECK_STACK (2);
8427 sp -= 2;
8428 CHECK_OPSIZE (5);
8429 token = read32 (ip + 1);
8430 klass = mini_get_class (method, token, generic_context);
8431 CHECK_TYPELOAD (klass);
8432 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8433 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8434 #if HAVE_WRITE_BARRIERS
8435 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8436 generic_class_is_reference_type (cfg, klass)) {
8437 MonoInst *dummy_use;
8438 /* insert call to write barrier */
8439 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8440 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8441 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8443 #endif
8444 ins_flag = 0;
8445 ip += 5;
8446 inline_costs += 1;
8447 break;
8450 * Array opcodes
8452 case CEE_NEWARR: {
8453 MonoInst *len_ins;
8454 const char *data_ptr;
8455 int data_size = 0;
8456 guint32 field_token;
8458 CHECK_STACK (1);
8459 --sp;
8461 CHECK_OPSIZE (5);
8462 token = read32 (ip + 1);
8464 klass = mini_get_class (method, token, generic_context);
8465 CHECK_TYPELOAD (klass);
8467 if (cfg->generic_sharing_context)
8468 context_used = mono_class_check_context_used (klass);
8470 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8471 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8472 ins->sreg1 = sp [0]->dreg;
8473 ins->type = STACK_I4;
8474 ins->dreg = alloc_ireg (cfg);
8475 MONO_ADD_INS (cfg->cbb, ins);
8476 *sp = mono_decompose_opcode (cfg, ins);
8479 if (context_used) {
8480 MonoInst *args [3];
8481 MonoClass *array_class = mono_array_class_get (klass, 1);
8482 /* FIXME: we cannot get a managed
8483 allocator because we can't get the
8484 open generic class's vtable. We
8485 have the same problem in
8486 handle_alloc(). This
8487 needs to be solved so that we can
8488 have managed allocs of shared
8489 generic classes. */
8491 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8492 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8494 MonoMethod *managed_alloc = NULL;
8496 /* FIXME: Decompose later to help abcrem */
8498 /* vtable */
8499 args [0] = emit_get_rgctx_klass (cfg, context_used,
8500 array_class, MONO_RGCTX_INFO_VTABLE);
8501 /* array len */
8502 args [1] = sp [0];
8504 if (managed_alloc)
8505 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8506 else
8507 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8508 } else {
8509 if (cfg->opt & MONO_OPT_SHARED) {
8510 /* Decompose now to avoid problems with references to the domainvar */
8511 MonoInst *iargs [3];
8513 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8514 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8515 iargs [2] = sp [0];
8517 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8518 } else {
8519 /* Decompose later since it is needed by abcrem */
8520 MonoClass *array_type = mono_array_class_get (klass, 1);
8521 mono_class_vtable (cfg->domain, array_type);
8522 CHECK_TYPELOAD (array_type);
8524 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8525 ins->dreg = alloc_preg (cfg);
8526 ins->sreg1 = sp [0]->dreg;
8527 ins->inst_newa_class = klass;
8528 ins->type = STACK_OBJ;
8529 ins->klass = klass;
8530 MONO_ADD_INS (cfg->cbb, ins);
8531 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8532 cfg->cbb->has_array_access = TRUE;
8534 /* Needed so mono_emit_load_get_addr () gets called */
8535 mono_get_got_var (cfg);
8539 len_ins = sp [0];
8540 ip += 5;
8541 *sp++ = ins;
8542 inline_costs += 1;
8545 * we inline/optimize the initialization sequence if possible.
8546 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8547 * for small sizes open code the memcpy
8548 * ensure the rva field is big enough
8550 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8551 MonoMethod *memcpy_method = get_memcpy_method ();
8552 MonoInst *iargs [3];
8553 int add_reg = alloc_preg (cfg);
8555 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8556 if (cfg->compile_aot) {
8557 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8558 } else {
8559 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8561 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8562 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8563 ip += 11;
8566 break;
8568 case CEE_LDLEN:
8569 CHECK_STACK (1);
8570 --sp;
8571 if (sp [0]->type != STACK_OBJ)
8572 UNVERIFIED;
8574 dreg = alloc_preg (cfg);
8575 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8576 ins->dreg = alloc_preg (cfg);
8577 ins->sreg1 = sp [0]->dreg;
8578 ins->type = STACK_I4;
8579 MONO_ADD_INS (cfg->cbb, ins);
8580 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8581 cfg->cbb->has_array_access = TRUE;
8582 ip ++;
8583 *sp++ = ins;
8584 break;
8585 case CEE_LDELEMA:
8586 CHECK_STACK (2);
8587 sp -= 2;
8588 CHECK_OPSIZE (5);
8589 if (sp [0]->type != STACK_OBJ)
8590 UNVERIFIED;
8592 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8594 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8595 CHECK_TYPELOAD (klass);
8596 /* we need to make sure that this array is exactly the type it needs
8597 * to be for correctness. the wrappers are lax with their usage
8598 * so we need to ignore them here
8600 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8601 MonoClass *array_class = mono_array_class_get (klass, 1);
8602 mini_emit_check_array_type (cfg, sp [0], array_class);
8603 CHECK_TYPELOAD (array_class);
8606 readonly = FALSE;
8607 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8608 *sp++ = ins;
8609 ip += 5;
8610 break;
8611 case CEE_LDELEM:
8612 case CEE_LDELEM_I1:
8613 case CEE_LDELEM_U1:
8614 case CEE_LDELEM_I2:
8615 case CEE_LDELEM_U2:
8616 case CEE_LDELEM_I4:
8617 case CEE_LDELEM_U4:
8618 case CEE_LDELEM_I8:
8619 case CEE_LDELEM_I:
8620 case CEE_LDELEM_R4:
8621 case CEE_LDELEM_R8:
8622 case CEE_LDELEM_REF: {
8623 MonoInst *addr;
8625 CHECK_STACK (2);
8626 sp -= 2;
8628 if (*ip == CEE_LDELEM) {
8629 CHECK_OPSIZE (5);
8630 token = read32 (ip + 1);
8631 klass = mini_get_class (method, token, generic_context);
8632 CHECK_TYPELOAD (klass);
8633 mono_class_init (klass);
8635 else
8636 klass = array_access_to_klass (*ip);
8638 if (sp [0]->type != STACK_OBJ)
8639 UNVERIFIED;
8641 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8643 if (sp [1]->opcode == OP_ICONST) {
8644 int array_reg = sp [0]->dreg;
8645 int index_reg = sp [1]->dreg;
8646 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8648 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8650 } else {
8651 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8654 *sp++ = ins;
8655 if (*ip == CEE_LDELEM)
8656 ip += 5;
8657 else
8658 ++ip;
8659 break;
8661 case CEE_STELEM_I:
8662 case CEE_STELEM_I1:
8663 case CEE_STELEM_I2:
8664 case CEE_STELEM_I4:
8665 case CEE_STELEM_I8:
8666 case CEE_STELEM_R4:
8667 case CEE_STELEM_R8:
8668 case CEE_STELEM_REF:
8669 case CEE_STELEM: {
8670 MonoInst *addr;
8672 CHECK_STACK (3);
8673 sp -= 3;
8675 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8677 if (*ip == CEE_STELEM) {
8678 CHECK_OPSIZE (5);
8679 token = read32 (ip + 1);
8680 klass = mini_get_class (method, token, generic_context);
8681 CHECK_TYPELOAD (klass);
8682 mono_class_init (klass);
8684 else
8685 klass = array_access_to_klass (*ip);
8687 if (sp [0]->type != STACK_OBJ)
8688 UNVERIFIED;
8690 /* storing a NULL doesn't need any of the complex checks in stelemref */
8691 if (generic_class_is_reference_type (cfg, klass) &&
8692 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8693 MonoMethod* helper = mono_marshal_get_stelemref ();
8694 MonoInst *iargs [3];
8696 if (sp [0]->type != STACK_OBJ)
8697 UNVERIFIED;
8698 if (sp [2]->type != STACK_OBJ)
8699 UNVERIFIED;
8701 iargs [2] = sp [2];
8702 iargs [1] = sp [1];
8703 iargs [0] = sp [0];
8705 mono_emit_method_call (cfg, helper, iargs, NULL);
8706 } else {
8707 if (sp [1]->opcode == OP_ICONST) {
8708 int array_reg = sp [0]->dreg;
8709 int index_reg = sp [1]->dreg;
8710 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8712 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8714 } else {
8715 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8720 if (*ip == CEE_STELEM)
8721 ip += 5;
8722 else
8723 ++ip;
8724 inline_costs += 1;
8725 break;
8727 case CEE_CKFINITE: {
8728 CHECK_STACK (1);
8729 --sp;
8731 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8732 ins->sreg1 = sp [0]->dreg;
8733 ins->dreg = alloc_freg (cfg);
8734 ins->type = STACK_R8;
8735 MONO_ADD_INS (bblock, ins);
8737 *sp++ = mono_decompose_opcode (cfg, ins);
8739 ++ip;
8740 break;
8742 case CEE_REFANYVAL: {
8743 MonoInst *src_var, *src;
8745 int klass_reg = alloc_preg (cfg);
8746 int dreg = alloc_preg (cfg);
8748 CHECK_STACK (1);
8749 MONO_INST_NEW (cfg, ins, *ip);
8750 --sp;
8751 CHECK_OPSIZE (5);
8752 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8753 CHECK_TYPELOAD (klass);
8754 mono_class_init (klass);
8756 if (cfg->generic_sharing_context)
8757 context_used = mono_class_check_context_used (klass);
8759 // FIXME:
8760 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8761 if (!src_var)
8762 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8763 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8766 if (context_used) {
8767 MonoInst *klass_ins;
8769 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8770 klass, MONO_RGCTX_INFO_KLASS);
8772 // FIXME:
8773 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8774 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8775 } else {
8776 mini_emit_class_check (cfg, klass_reg, klass);
8778 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8779 ins->type = STACK_MP;
8780 *sp++ = ins;
8781 ip += 5;
8782 break;
8784 case CEE_MKREFANY: {
8785 MonoInst *loc, *addr;
8787 CHECK_STACK (1);
8788 MONO_INST_NEW (cfg, ins, *ip);
8789 --sp;
8790 CHECK_OPSIZE (5);
8791 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8792 CHECK_TYPELOAD (klass);
8793 mono_class_init (klass);
8795 if (cfg->generic_sharing_context)
8796 context_used = mono_class_check_context_used (klass);
8798 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8799 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8801 if (context_used) {
8802 MonoInst *const_ins;
8803 int type_reg = alloc_preg (cfg);
8805 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8809 } else if (cfg->compile_aot) {
8810 int const_reg = alloc_preg (cfg);
8811 int type_reg = alloc_preg (cfg);
8813 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8815 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8817 } else {
8818 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8823 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8824 ins->type = STACK_VTYPE;
8825 ins->klass = mono_defaults.typed_reference_class;
8826 *sp++ = ins;
8827 ip += 5;
8828 break;
8830 case CEE_LDTOKEN: {
8831 gpointer handle;
8832 MonoClass *handle_class;
8834 CHECK_STACK_OVF (1);
8836 CHECK_OPSIZE (5);
8837 n = read32 (ip + 1);
8839 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8840 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8841 handle = mono_method_get_wrapper_data (method, n);
8842 handle_class = mono_method_get_wrapper_data (method, n + 1);
8843 if (handle_class == mono_defaults.typehandle_class)
8844 handle = &((MonoClass*)handle)->byval_arg;
8846 else {
8847 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8849 if (!handle)
8850 goto load_error;
8851 mono_class_init (handle_class);
8852 if (cfg->generic_sharing_context) {
8853 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8854 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8855 /* This case handles ldtoken
8856 of an open type, like for
8857 typeof(Gen<>). */
8858 context_used = 0;
8859 } else if (handle_class == mono_defaults.typehandle_class) {
8860 /* If we get a MONO_TYPE_CLASS
8861 then we need to provide the
8862 open type, not an
8863 instantiation of it. */
8864 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8865 context_used = 0;
8866 else
8867 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8868 } else if (handle_class == mono_defaults.fieldhandle_class)
8869 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8870 else if (handle_class == mono_defaults.methodhandle_class)
8871 context_used = mono_method_check_context_used (handle);
8872 else
8873 g_assert_not_reached ();
8876 if ((cfg->opt & MONO_OPT_SHARED) &&
8877 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8878 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8879 MonoInst *addr, *vtvar, *iargs [3];
8880 int method_context_used;
8882 if (cfg->generic_sharing_context)
8883 method_context_used = mono_method_check_context_used (method);
8884 else
8885 method_context_used = 0;
8887 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8889 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8890 EMIT_NEW_ICONST (cfg, iargs [1], n);
8891 if (method_context_used) {
8892 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8893 method, MONO_RGCTX_INFO_METHOD);
8894 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8895 } else {
8896 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8897 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8899 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8903 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8904 } else {
8905 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8906 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8907 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8908 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8909 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8910 MonoClass *tclass = mono_class_from_mono_type (handle);
8912 mono_class_init (tclass);
8913 if (context_used) {
8914 ins = emit_get_rgctx_klass (cfg, context_used,
8915 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8916 } else if (cfg->compile_aot) {
8917 if (method->wrapper_type) {
8918 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8919 /* Special case for static synchronized wrappers */
8920 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8921 } else {
8922 /* FIXME: n is not a normal token */
8923 cfg->disable_aot = TRUE;
8924 EMIT_NEW_PCONST (cfg, ins, NULL);
8926 } else {
8927 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8929 } else {
8930 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8932 ins->type = STACK_OBJ;
8933 ins->klass = cmethod->klass;
8934 ip += 5;
8935 } else {
8936 MonoInst *addr, *vtvar;
8938 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8940 if (context_used) {
8941 if (handle_class == mono_defaults.typehandle_class) {
8942 ins = emit_get_rgctx_klass (cfg, context_used,
8943 mono_class_from_mono_type (handle),
8944 MONO_RGCTX_INFO_TYPE);
8945 } else if (handle_class == mono_defaults.methodhandle_class) {
8946 ins = emit_get_rgctx_method (cfg, context_used,
8947 handle, MONO_RGCTX_INFO_METHOD);
8948 } else if (handle_class == mono_defaults.fieldhandle_class) {
8949 ins = emit_get_rgctx_field (cfg, context_used,
8950 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8951 } else {
8952 g_assert_not_reached ();
8954 } else if (cfg->compile_aot) {
8955 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8956 } else {
8957 EMIT_NEW_PCONST (cfg, ins, handle);
8959 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8961 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8965 *sp++ = ins;
8966 ip += 5;
8967 break;
8969 case CEE_THROW:
8970 CHECK_STACK (1);
8971 MONO_INST_NEW (cfg, ins, OP_THROW);
8972 --sp;
8973 ins->sreg1 = sp [0]->dreg;
8974 ip++;
8975 bblock->out_of_line = TRUE;
8976 MONO_ADD_INS (bblock, ins);
8977 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8978 MONO_ADD_INS (bblock, ins);
8979 sp = stack_start;
8981 link_bblock (cfg, bblock, end_bblock);
8982 start_new_bblock = 1;
8983 break;
8984 case CEE_ENDFINALLY:
8985 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8986 MONO_ADD_INS (bblock, ins);
8987 ip++;
8988 start_new_bblock = 1;
8991 * Control will leave the method so empty the stack, otherwise
8992 * the next basic block will start with a nonempty stack.
8994 while (sp != stack_start) {
8995 sp--;
8997 break;
8998 case CEE_LEAVE:
8999 case CEE_LEAVE_S: {
9000 GList *handlers;
9002 if (*ip == CEE_LEAVE) {
9003 CHECK_OPSIZE (5);
9004 target = ip + 5 + (gint32)read32(ip + 1);
9005 } else {
9006 CHECK_OPSIZE (2);
9007 target = ip + 2 + (signed char)(ip [1]);
9010 /* empty the stack */
9011 while (sp != stack_start) {
9012 sp--;
9016 * If this leave statement is in a catch block, check for a
9017 * pending exception, and rethrow it if necessary.
9018 * We avoid doing this in runtime invoke wrappers, since those are called
9019 * by native code which excepts the wrapper to catch all exceptions.
9021 for (i = 0; i < header->num_clauses; ++i) {
9022 MonoExceptionClause *clause = &header->clauses [i];
9025 * Use <= in the final comparison to handle clauses with multiple
9026 * leave statements, like in bug #78024.
9027 * The ordering of the exception clauses guarantees that we find the
9028 * innermost clause.
9030 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9031 MonoInst *exc_ins;
9032 MonoBasicBlock *dont_throw;
9035 MonoInst *load;
9037 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9040 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9042 NEW_BBLOCK (cfg, dont_throw);
9045 * Currently, we allways rethrow the abort exception, despite the
9046 * fact that this is not correct. See thread6.cs for an example.
9047 * But propagating the abort exception is more important than
9048 * getting the sematics right.
9050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9052 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9054 MONO_START_BB (cfg, dont_throw);
9055 bblock = cfg->cbb;
9059 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9060 GList *tmp;
9061 MonoExceptionClause *clause;
9063 for (tmp = handlers; tmp; tmp = tmp->next) {
9064 clause = tmp->data;
9065 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9066 g_assert (tblock);
9067 link_bblock (cfg, bblock, tblock);
9068 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9069 ins->inst_target_bb = tblock;
9070 ins->inst_eh_block = clause;
9071 MONO_ADD_INS (bblock, ins);
9072 bblock->has_call_handler = 1;
9073 if (COMPILE_LLVM (cfg)) {
9074 MonoBasicBlock *target_bb;
9077 * Link the finally bblock with the target, since it will
9078 * conceptually branch there.
9079 * FIXME: Have to link the bblock containing the endfinally.
9081 GET_BBLOCK (cfg, target_bb, target);
9082 link_bblock (cfg, tblock, target_bb);
9085 g_list_free (handlers);
9088 MONO_INST_NEW (cfg, ins, OP_BR);
9089 MONO_ADD_INS (bblock, ins);
9090 GET_BBLOCK (cfg, tblock, target);
9091 link_bblock (cfg, bblock, tblock);
9092 ins->inst_target_bb = tblock;
9093 start_new_bblock = 1;
9095 if (*ip == CEE_LEAVE)
9096 ip += 5;
9097 else
9098 ip += 2;
9100 break;
9104 * Mono specific opcodes
9106 case MONO_CUSTOM_PREFIX: {
9108 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9110 CHECK_OPSIZE (2);
9111 switch (ip [1]) {
9112 case CEE_MONO_ICALL: {
9113 gpointer func;
9114 MonoJitICallInfo *info;
9116 token = read32 (ip + 2);
9117 func = mono_method_get_wrapper_data (method, token);
9118 info = mono_find_jit_icall_by_addr (func);
9119 g_assert (info);
9121 CHECK_STACK (info->sig->param_count);
9122 sp -= info->sig->param_count;
9124 ins = mono_emit_jit_icall (cfg, info->func, sp);
9125 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9126 *sp++ = ins;
9128 ip += 6;
9129 inline_costs += 10 * num_calls++;
9131 break;
9133 case CEE_MONO_LDPTR: {
9134 gpointer ptr;
9136 CHECK_STACK_OVF (1);
9137 CHECK_OPSIZE (6);
9138 token = read32 (ip + 2);
9140 ptr = mono_method_get_wrapper_data (method, token);
9141 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9142 MonoJitICallInfo *callinfo;
9143 const char *icall_name;
9145 icall_name = method->name + strlen ("__icall_wrapper_");
9146 g_assert (icall_name);
9147 callinfo = mono_find_jit_icall_by_name (icall_name);
9148 g_assert (callinfo);
9150 if (ptr == callinfo->func) {
9151 /* Will be transformed into an AOTCONST later */
9152 EMIT_NEW_PCONST (cfg, ins, ptr);
9153 *sp++ = ins;
9154 ip += 6;
9155 break;
9158 /* FIXME: Generalize this */
9159 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9160 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9161 *sp++ = ins;
9162 ip += 6;
9163 break;
9165 EMIT_NEW_PCONST (cfg, ins, ptr);
9166 *sp++ = ins;
9167 ip += 6;
9168 inline_costs += 10 * num_calls++;
9169 /* Can't embed random pointers into AOT code */
9170 cfg->disable_aot = 1;
9171 break;
9173 case CEE_MONO_ICALL_ADDR: {
9174 MonoMethod *cmethod;
9175 gpointer ptr;
9177 CHECK_STACK_OVF (1);
9178 CHECK_OPSIZE (6);
9179 token = read32 (ip + 2);
9181 cmethod = mono_method_get_wrapper_data (method, token);
9183 if (cfg->compile_aot) {
9184 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9185 } else {
9186 ptr = mono_lookup_internal_call (cmethod);
9187 g_assert (ptr);
9188 EMIT_NEW_PCONST (cfg, ins, ptr);
9190 *sp++ = ins;
9191 ip += 6;
9192 break;
9194 case CEE_MONO_VTADDR: {
9195 MonoInst *src_var, *src;
9197 CHECK_STACK (1);
9198 --sp;
9200 // FIXME:
9201 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9202 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9203 *sp++ = src;
9204 ip += 2;
9205 break;
9207 case CEE_MONO_NEWOBJ: {
9208 MonoInst *iargs [2];
9210 CHECK_STACK_OVF (1);
9211 CHECK_OPSIZE (6);
9212 token = read32 (ip + 2);
9213 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9214 mono_class_init (klass);
9215 NEW_DOMAINCONST (cfg, iargs [0]);
9216 MONO_ADD_INS (cfg->cbb, iargs [0]);
9217 NEW_CLASSCONST (cfg, iargs [1], klass);
9218 MONO_ADD_INS (cfg->cbb, iargs [1]);
9219 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9220 ip += 6;
9221 inline_costs += 10 * num_calls++;
9222 break;
9224 case CEE_MONO_OBJADDR:
9225 CHECK_STACK (1);
9226 --sp;
9227 MONO_INST_NEW (cfg, ins, OP_MOVE);
9228 ins->dreg = alloc_preg (cfg);
9229 ins->sreg1 = sp [0]->dreg;
9230 ins->type = STACK_MP;
9231 MONO_ADD_INS (cfg->cbb, ins);
9232 *sp++ = ins;
9233 ip += 2;
9234 break;
9235 case CEE_MONO_LDNATIVEOBJ:
9237 * Similar to LDOBJ, but instead load the unmanaged
9238 * representation of the vtype to the stack.
9240 CHECK_STACK (1);
9241 CHECK_OPSIZE (6);
9242 --sp;
9243 token = read32 (ip + 2);
9244 klass = mono_method_get_wrapper_data (method, token);
9245 g_assert (klass->valuetype);
9246 mono_class_init (klass);
9249 MonoInst *src, *dest, *temp;
9251 src = sp [0];
9252 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9253 temp->backend.is_pinvoke = 1;
9254 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9255 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9257 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9258 dest->type = STACK_VTYPE;
9259 dest->klass = klass;
9261 *sp ++ = dest;
9262 ip += 6;
9264 break;
9265 case CEE_MONO_RETOBJ: {
9267 * Same as RET, but return the native representation of a vtype
9268 * to the caller.
9270 g_assert (cfg->ret);
9271 g_assert (mono_method_signature (method)->pinvoke);
9272 CHECK_STACK (1);
9273 --sp;
9275 CHECK_OPSIZE (6);
9276 token = read32 (ip + 2);
9277 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9279 if (!cfg->vret_addr) {
9280 g_assert (cfg->ret_var_is_local);
9282 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9283 } else {
9284 EMIT_NEW_RETLOADA (cfg, ins);
9286 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9288 if (sp != stack_start)
9289 UNVERIFIED;
9291 MONO_INST_NEW (cfg, ins, OP_BR);
9292 ins->inst_target_bb = end_bblock;
9293 MONO_ADD_INS (bblock, ins);
9294 link_bblock (cfg, bblock, end_bblock);
9295 start_new_bblock = 1;
9296 ip += 6;
9297 break;
9299 case CEE_MONO_CISINST:
9300 case CEE_MONO_CCASTCLASS: {
9301 int token;
9302 CHECK_STACK (1);
9303 --sp;
9304 CHECK_OPSIZE (6);
9305 token = read32 (ip + 2);
9306 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9307 if (ip [1] == CEE_MONO_CISINST)
9308 ins = handle_cisinst (cfg, klass, sp [0]);
9309 else
9310 ins = handle_ccastclass (cfg, klass, sp [0]);
9311 bblock = cfg->cbb;
9312 *sp++ = ins;
9313 ip += 6;
9314 break;
9316 case CEE_MONO_SAVE_LMF:
9317 case CEE_MONO_RESTORE_LMF:
9318 #ifdef MONO_ARCH_HAVE_LMF_OPS
9319 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9320 MONO_ADD_INS (bblock, ins);
9321 cfg->need_lmf_area = TRUE;
9322 #endif
9323 ip += 2;
9324 break;
9325 case CEE_MONO_CLASSCONST:
9326 CHECK_STACK_OVF (1);
9327 CHECK_OPSIZE (6);
9328 token = read32 (ip + 2);
9329 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9330 *sp++ = ins;
9331 ip += 6;
9332 inline_costs += 10 * num_calls++;
9333 break;
9334 case CEE_MONO_NOT_TAKEN:
9335 bblock->out_of_line = TRUE;
9336 ip += 2;
9337 break;
9338 case CEE_MONO_TLS:
9339 CHECK_STACK_OVF (1);
9340 CHECK_OPSIZE (6);
9341 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9342 ins->dreg = alloc_preg (cfg);
9343 ins->inst_offset = (gint32)read32 (ip + 2);
9344 ins->type = STACK_PTR;
9345 MONO_ADD_INS (bblock, ins);
9346 *sp++ = ins;
9347 ip += 6;
9348 break;
9349 case CEE_MONO_DYN_CALL: {
9350 MonoCallInst *call;
9352 /* It would be easier to call a trampoline, but that would put an
9353 * extra frame on the stack, confusing exception handling. So
9354 * implement it inline using an opcode for now.
9357 if (!cfg->dyn_call_var) {
9358 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9359 /* prevent it from being register allocated */
9360 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9363 /* Has to use a call inst since it local regalloc expects it */
9364 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9365 ins = (MonoInst*)call;
9366 sp -= 2;
9367 ins->sreg1 = sp [0]->dreg;
9368 ins->sreg2 = sp [1]->dreg;
9369 MONO_ADD_INS (bblock, ins);
9371 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9372 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9373 #endif
9375 ip += 2;
9376 inline_costs += 10 * num_calls++;
9378 break;
9380 default:
9381 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9382 break;
9384 break;
9387 case CEE_PREFIX1: {
9388 CHECK_OPSIZE (2);
9389 switch (ip [1]) {
9390 case CEE_ARGLIST: {
9391 /* somewhat similar to LDTOKEN */
9392 MonoInst *addr, *vtvar;
9393 CHECK_STACK_OVF (1);
9394 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9396 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9397 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9399 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9400 ins->type = STACK_VTYPE;
9401 ins->klass = mono_defaults.argumenthandle_class;
9402 *sp++ = ins;
9403 ip += 2;
9404 break;
9406 case CEE_CEQ:
9407 case CEE_CGT:
9408 case CEE_CGT_UN:
9409 case CEE_CLT:
9410 case CEE_CLT_UN: {
9411 MonoInst *cmp;
9412 CHECK_STACK (2);
9414 * The following transforms:
9415 * CEE_CEQ into OP_CEQ
9416 * CEE_CGT into OP_CGT
9417 * CEE_CGT_UN into OP_CGT_UN
9418 * CEE_CLT into OP_CLT
9419 * CEE_CLT_UN into OP_CLT_UN
9421 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9423 MONO_INST_NEW (cfg, ins, cmp->opcode);
9424 sp -= 2;
9425 cmp->sreg1 = sp [0]->dreg;
9426 cmp->sreg2 = sp [1]->dreg;
9427 type_from_op (cmp, sp [0], sp [1]);
9428 CHECK_TYPE (cmp);
9429 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9430 cmp->opcode = OP_LCOMPARE;
9431 else if (sp [0]->type == STACK_R8)
9432 cmp->opcode = OP_FCOMPARE;
9433 else
9434 cmp->opcode = OP_ICOMPARE;
9435 MONO_ADD_INS (bblock, cmp);
9436 ins->type = STACK_I4;
9437 ins->dreg = alloc_dreg (cfg, ins->type);
9438 type_from_op (ins, sp [0], sp [1]);
9440 if (cmp->opcode == OP_FCOMPARE) {
9442 * The backends expect the fceq opcodes to do the
9443 * comparison too.
9445 cmp->opcode = OP_NOP;
9446 ins->sreg1 = cmp->sreg1;
9447 ins->sreg2 = cmp->sreg2;
9449 MONO_ADD_INS (bblock, ins);
9450 *sp++ = ins;
9451 ip += 2;
9452 break;
9454 case CEE_LDFTN: {
9455 MonoInst *argconst;
9456 MonoMethod *cil_method;
9457 gboolean needs_static_rgctx_invoke;
9459 CHECK_STACK_OVF (1);
9460 CHECK_OPSIZE (6);
9461 n = read32 (ip + 2);
9462 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9463 if (!cmethod)
9464 goto load_error;
9465 mono_class_init (cmethod->klass);
9467 mono_save_token_info (cfg, image, n, cmethod);
9469 if (cfg->generic_sharing_context)
9470 context_used = mono_method_check_context_used (cmethod);
9472 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9474 cil_method = cmethod;
9475 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9476 METHOD_ACCESS_FAILURE;
9478 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9479 if (check_linkdemand (cfg, method, cmethod))
9480 INLINE_FAILURE;
9481 CHECK_CFG_EXCEPTION;
9482 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9483 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9487 * Optimize the common case of ldftn+delegate creation
9489 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9490 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9491 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9492 MonoMethod *invoke;
9493 int invoke_context_used = 0;
9495 invoke = mono_get_delegate_invoke (ctor_method->klass);
9496 if (!invoke || !mono_method_signature (invoke))
9497 goto load_error;
9499 if (cfg->generic_sharing_context)
9500 invoke_context_used = mono_method_check_context_used (invoke);
9502 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9503 /* FIXME: SGEN support */
9504 if (invoke_context_used == 0) {
9505 MonoInst *target_ins;
9507 ip += 6;
9508 if (cfg->verbose_level > 3)
9509 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9510 target_ins = sp [-1];
9511 sp --;
9512 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9513 CHECK_CFG_EXCEPTION;
9514 ip += 5;
9515 sp ++;
9516 break;
9518 #endif
9522 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9523 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9524 *sp++ = ins;
9526 ip += 6;
9527 inline_costs += 10 * num_calls++;
9528 break;
9530 case CEE_LDVIRTFTN: {
9531 MonoInst *args [2];
9533 CHECK_STACK (1);
9534 CHECK_OPSIZE (6);
9535 n = read32 (ip + 2);
9536 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9537 if (!cmethod)
9538 goto load_error;
9539 mono_class_init (cmethod->klass);
9541 if (cfg->generic_sharing_context)
9542 context_used = mono_method_check_context_used (cmethod);
9544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9545 if (check_linkdemand (cfg, method, cmethod))
9546 INLINE_FAILURE;
9547 CHECK_CFG_EXCEPTION;
9548 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9549 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9552 --sp;
9553 args [0] = *sp;
9555 args [1] = emit_get_rgctx_method (cfg, context_used,
9556 cmethod, MONO_RGCTX_INFO_METHOD);
9558 if (context_used)
9559 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9560 else
9561 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9563 ip += 6;
9564 inline_costs += 10 * num_calls++;
9565 break;
9567 case CEE_LDARG:
9568 CHECK_STACK_OVF (1);
9569 CHECK_OPSIZE (4);
9570 n = read16 (ip + 2);
9571 CHECK_ARG (n);
9572 EMIT_NEW_ARGLOAD (cfg, ins, n);
9573 *sp++ = ins;
9574 ip += 4;
9575 break;
9576 case CEE_LDARGA:
9577 CHECK_STACK_OVF (1);
9578 CHECK_OPSIZE (4);
9579 n = read16 (ip + 2);
9580 CHECK_ARG (n);
9581 NEW_ARGLOADA (cfg, ins, n);
9582 MONO_ADD_INS (cfg->cbb, ins);
9583 *sp++ = ins;
9584 ip += 4;
9585 break;
9586 case CEE_STARG:
9587 CHECK_STACK (1);
9588 --sp;
9589 CHECK_OPSIZE (4);
9590 n = read16 (ip + 2);
9591 CHECK_ARG (n);
9592 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9593 UNVERIFIED;
9594 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9595 ip += 4;
9596 break;
9597 case CEE_LDLOC:
9598 CHECK_STACK_OVF (1);
9599 CHECK_OPSIZE (4);
9600 n = read16 (ip + 2);
9601 CHECK_LOCAL (n);
9602 EMIT_NEW_LOCLOAD (cfg, ins, n);
9603 *sp++ = ins;
9604 ip += 4;
9605 break;
9606 case CEE_LDLOCA: {
9607 unsigned char *tmp_ip;
9608 CHECK_STACK_OVF (1);
9609 CHECK_OPSIZE (4);
9610 n = read16 (ip + 2);
9611 CHECK_LOCAL (n);
9613 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9614 ip = tmp_ip;
9615 inline_costs += 1;
9616 break;
9619 EMIT_NEW_LOCLOADA (cfg, ins, n);
9620 *sp++ = ins;
9621 ip += 4;
9622 break;
9624 case CEE_STLOC:
9625 CHECK_STACK (1);
9626 --sp;
9627 CHECK_OPSIZE (4);
9628 n = read16 (ip + 2);
9629 CHECK_LOCAL (n);
9630 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9631 UNVERIFIED;
9632 emit_stloc_ir (cfg, sp, header, n);
9633 ip += 4;
9634 inline_costs += 1;
9635 break;
9636 case CEE_LOCALLOC:
9637 CHECK_STACK (1);
9638 --sp;
9639 if (sp != stack_start)
9640 UNVERIFIED;
9641 if (cfg->method != method)
9643 * Inlining this into a loop in a parent could lead to
9644 * stack overflows which is different behavior than the
9645 * non-inlined case, thus disable inlining in this case.
9647 goto inline_failure;
9649 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9650 ins->dreg = alloc_preg (cfg);
9651 ins->sreg1 = sp [0]->dreg;
9652 ins->type = STACK_PTR;
9653 MONO_ADD_INS (cfg->cbb, ins);
9655 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9656 if (init_locals)
9657 ins->flags |= MONO_INST_INIT;
9659 *sp++ = ins;
9660 ip += 2;
9661 break;
9662 case CEE_ENDFILTER: {
9663 MonoExceptionClause *clause, *nearest;
9664 int cc, nearest_num;
9666 CHECK_STACK (1);
9667 --sp;
9668 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9669 UNVERIFIED;
9670 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9671 ins->sreg1 = (*sp)->dreg;
9672 MONO_ADD_INS (bblock, ins);
9673 start_new_bblock = 1;
9674 ip += 2;
9676 nearest = NULL;
9677 nearest_num = 0;
9678 for (cc = 0; cc < header->num_clauses; ++cc) {
9679 clause = &header->clauses [cc];
9680 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9681 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9682 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9683 nearest = clause;
9684 nearest_num = cc;
9687 g_assert (nearest);
9688 if ((ip - header->code) != nearest->handler_offset)
9689 UNVERIFIED;
9691 break;
9693 case CEE_UNALIGNED_:
9694 ins_flag |= MONO_INST_UNALIGNED;
9695 /* FIXME: record alignment? we can assume 1 for now */
9696 CHECK_OPSIZE (3);
9697 ip += 3;
9698 break;
9699 case CEE_VOLATILE_:
9700 ins_flag |= MONO_INST_VOLATILE;
9701 ip += 2;
9702 break;
9703 case CEE_TAIL_:
9704 ins_flag |= MONO_INST_TAILCALL;
9705 cfg->flags |= MONO_CFG_HAS_TAIL;
9706 /* Can't inline tail calls at this time */
9707 inline_costs += 100000;
9708 ip += 2;
9709 break;
9710 case CEE_INITOBJ:
9711 CHECK_STACK (1);
9712 --sp;
9713 CHECK_OPSIZE (6);
9714 token = read32 (ip + 2);
9715 klass = mini_get_class (method, token, generic_context);
9716 CHECK_TYPELOAD (klass);
9717 if (generic_class_is_reference_type (cfg, klass))
9718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9719 else
9720 mini_emit_initobj (cfg, *sp, NULL, klass);
9721 ip += 6;
9722 inline_costs += 1;
9723 break;
9724 case CEE_CONSTRAINED_:
9725 CHECK_OPSIZE (6);
9726 token = read32 (ip + 2);
9727 if (method->wrapper_type != MONO_WRAPPER_NONE)
9728 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9729 else
9730 constrained_call = mono_class_get_full (image, token, generic_context);
9731 CHECK_TYPELOAD (constrained_call);
9732 ip += 6;
9733 break;
9734 case CEE_CPBLK:
9735 case CEE_INITBLK: {
9736 MonoInst *iargs [3];
9737 CHECK_STACK (3);
9738 sp -= 3;
9740 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9741 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9742 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9743 /* emit_memset only works when val == 0 */
9744 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9745 } else {
9746 iargs [0] = sp [0];
9747 iargs [1] = sp [1];
9748 iargs [2] = sp [2];
9749 if (ip [1] == CEE_CPBLK) {
9750 MonoMethod *memcpy_method = get_memcpy_method ();
9751 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9752 } else {
9753 MonoMethod *memset_method = get_memset_method ();
9754 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9757 ip += 2;
9758 inline_costs += 1;
9759 break;
9761 case CEE_NO_:
9762 CHECK_OPSIZE (3);
9763 if (ip [2] & 0x1)
9764 ins_flag |= MONO_INST_NOTYPECHECK;
9765 if (ip [2] & 0x2)
9766 ins_flag |= MONO_INST_NORANGECHECK;
9767 /* we ignore the no-nullcheck for now since we
9768 * really do it explicitly only when doing callvirt->call
9770 ip += 3;
9771 break;
9772 case CEE_RETHROW: {
9773 MonoInst *load;
9774 int handler_offset = -1;
9776 for (i = 0; i < header->num_clauses; ++i) {
9777 MonoExceptionClause *clause = &header->clauses [i];
9778 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9779 handler_offset = clause->handler_offset;
9780 break;
9784 bblock->flags |= BB_EXCEPTION_UNSAFE;
9786 g_assert (handler_offset != -1);
9788 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9789 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9790 ins->sreg1 = load->dreg;
9791 MONO_ADD_INS (bblock, ins);
9793 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9794 MONO_ADD_INS (bblock, ins);
9796 sp = stack_start;
9797 link_bblock (cfg, bblock, end_bblock);
9798 start_new_bblock = 1;
9799 ip += 2;
9800 break;
9802 case CEE_SIZEOF: {
9803 guint32 align;
9804 int ialign;
9806 CHECK_STACK_OVF (1);
9807 CHECK_OPSIZE (6);
9808 token = read32 (ip + 2);
9809 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9810 MonoType *type = mono_type_create_from_typespec (image, token);
9811 token = mono_type_size (type, &ialign);
9812 } else {
9813 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9814 CHECK_TYPELOAD (klass);
9815 mono_class_init (klass);
9816 token = mono_class_value_size (klass, &align);
9818 EMIT_NEW_ICONST (cfg, ins, token);
9819 *sp++= ins;
9820 ip += 6;
9821 break;
9823 case CEE_REFANYTYPE: {
9824 MonoInst *src_var, *src;
9826 CHECK_STACK (1);
9827 --sp;
9829 // FIXME:
9830 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9831 if (!src_var)
9832 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9833 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9835 *sp++ = ins;
9836 ip += 2;
9837 break;
9839 case CEE_READONLY_:
9840 readonly = TRUE;
9841 ip += 2;
9842 break;
9844 case CEE_UNUSED56:
9845 case CEE_UNUSED57:
9846 case CEE_UNUSED70:
9847 case CEE_UNUSED:
9848 case CEE_UNUSED99:
9849 UNVERIFIED;
9851 default:
9852 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9853 UNVERIFIED;
9855 break;
9857 case CEE_UNUSED58:
9858 case CEE_UNUSED1:
9859 UNVERIFIED;
9861 default:
9862 g_warning ("opcode 0x%02x not handled", *ip);
9863 UNVERIFIED;
9866 if (start_new_bblock != 1)
9867 UNVERIFIED;
9869 bblock->cil_length = ip - bblock->cil_code;
9870 bblock->next_bb = end_bblock;
9872 if (cfg->method == method && cfg->domainvar) {
9873 MonoInst *store;
9874 MonoInst *get_domain;
9876 cfg->cbb = init_localsbb;
9878 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9879 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9881 else {
9882 get_domain->dreg = alloc_preg (cfg);
9883 MONO_ADD_INS (cfg->cbb, get_domain);
9885 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9886 MONO_ADD_INS (cfg->cbb, store);
9889 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9890 if (cfg->compile_aot)
9891 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9892 mono_get_got_var (cfg);
9893 #endif
9895 if (cfg->method == method && cfg->got_var)
9896 mono_emit_load_got_addr (cfg);
9898 if (init_locals) {
9899 MonoInst *store;
9901 cfg->cbb = init_localsbb;
9902 cfg->ip = NULL;
9903 for (i = 0; i < header->num_locals; ++i) {
9904 MonoType *ptype = header->locals [i];
9905 int t = ptype->type;
9906 dreg = cfg->locals [i]->dreg;
9908 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9909 t = mono_class_enum_basetype (ptype->data.klass)->type;
9910 if (ptype->byref) {
9911 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9912 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9913 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9914 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9915 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9916 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9917 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9918 ins->type = STACK_R8;
9919 ins->inst_p0 = (void*)&r8_0;
9920 ins->dreg = alloc_dreg (cfg, STACK_R8);
9921 MONO_ADD_INS (init_localsbb, ins);
9922 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9923 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9924 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9925 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9926 } else {
9927 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9932 if (cfg->init_ref_vars && cfg->method == method) {
9933 /* Emit initialization for ref vars */
9934 // FIXME: Avoid duplication initialization for IL locals.
9935 for (i = 0; i < cfg->num_varinfo; ++i) {
9936 MonoInst *ins = cfg->varinfo [i];
9938 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9939 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9943 /* Add a sequence point for method entry/exit events */
9944 if (seq_points) {
9945 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9946 MONO_ADD_INS (init_localsbb, ins);
9947 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9948 MONO_ADD_INS (cfg->bb_exit, ins);
9951 cfg->ip = NULL;
9953 if (cfg->method == method) {
9954 MonoBasicBlock *bb;
9955 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9956 bb->region = mono_find_block_region (cfg, bb->real_offset);
9957 if (cfg->spvars)
9958 mono_create_spvar_for_region (cfg, bb->region);
9959 if (cfg->verbose_level > 2)
9960 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9964 g_slist_free (class_inits);
9965 dont_inline = g_list_remove (dont_inline, method);
9967 if (inline_costs < 0) {
9968 char *mname;
9970 /* Method is too large */
9971 mname = mono_method_full_name (method, TRUE);
9972 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9973 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9974 g_free (mname);
9975 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9976 mono_basic_block_free (original_bb);
9977 return -1;
9980 if ((cfg->verbose_level > 2) && (cfg->method == method))
9981 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9983 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9984 mono_basic_block_free (original_bb);
9985 return inline_costs;
9987 exception_exit:
9988 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9989 goto cleanup;
9991 inline_failure:
9992 goto cleanup;
9994 load_error:
9995 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9996 goto cleanup;
9998 unverified:
9999 set_exception_type_from_invalid_il (cfg, method, ip);
10000 goto cleanup;
10002 cleanup:
10003 g_slist_free (class_inits);
10004 mono_basic_block_free (original_bb);
10005 dont_inline = g_list_remove (dont_inline, method);
10006 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10007 return -1;
10010 static int
10011 store_membase_reg_to_store_membase_imm (int opcode)
10013 switch (opcode) {
10014 case OP_STORE_MEMBASE_REG:
10015 return OP_STORE_MEMBASE_IMM;
10016 case OP_STOREI1_MEMBASE_REG:
10017 return OP_STOREI1_MEMBASE_IMM;
10018 case OP_STOREI2_MEMBASE_REG:
10019 return OP_STOREI2_MEMBASE_IMM;
10020 case OP_STOREI4_MEMBASE_REG:
10021 return OP_STOREI4_MEMBASE_IMM;
10022 case OP_STOREI8_MEMBASE_REG:
10023 return OP_STOREI8_MEMBASE_IMM;
10024 default:
10025 g_assert_not_reached ();
10028 return -1;
10031 #endif /* DISABLE_JIT */
10034 mono_op_to_op_imm (int opcode)
10036 switch (opcode) {
10037 case OP_IADD:
10038 return OP_IADD_IMM;
10039 case OP_ISUB:
10040 return OP_ISUB_IMM;
10041 case OP_IDIV:
10042 return OP_IDIV_IMM;
10043 case OP_IDIV_UN:
10044 return OP_IDIV_UN_IMM;
10045 case OP_IREM:
10046 return OP_IREM_IMM;
10047 case OP_IREM_UN:
10048 return OP_IREM_UN_IMM;
10049 case OP_IMUL:
10050 return OP_IMUL_IMM;
10051 case OP_IAND:
10052 return OP_IAND_IMM;
10053 case OP_IOR:
10054 return OP_IOR_IMM;
10055 case OP_IXOR:
10056 return OP_IXOR_IMM;
10057 case OP_ISHL:
10058 return OP_ISHL_IMM;
10059 case OP_ISHR:
10060 return OP_ISHR_IMM;
10061 case OP_ISHR_UN:
10062 return OP_ISHR_UN_IMM;
10064 case OP_LADD:
10065 return OP_LADD_IMM;
10066 case OP_LSUB:
10067 return OP_LSUB_IMM;
10068 case OP_LAND:
10069 return OP_LAND_IMM;
10070 case OP_LOR:
10071 return OP_LOR_IMM;
10072 case OP_LXOR:
10073 return OP_LXOR_IMM;
10074 case OP_LSHL:
10075 return OP_LSHL_IMM;
10076 case OP_LSHR:
10077 return OP_LSHR_IMM;
10078 case OP_LSHR_UN:
10079 return OP_LSHR_UN_IMM;
10081 case OP_COMPARE:
10082 return OP_COMPARE_IMM;
10083 case OP_ICOMPARE:
10084 return OP_ICOMPARE_IMM;
10085 case OP_LCOMPARE:
10086 return OP_LCOMPARE_IMM;
10088 case OP_STORE_MEMBASE_REG:
10089 return OP_STORE_MEMBASE_IMM;
10090 case OP_STOREI1_MEMBASE_REG:
10091 return OP_STOREI1_MEMBASE_IMM;
10092 case OP_STOREI2_MEMBASE_REG:
10093 return OP_STOREI2_MEMBASE_IMM;
10094 case OP_STOREI4_MEMBASE_REG:
10095 return OP_STOREI4_MEMBASE_IMM;
10097 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10098 case OP_X86_PUSH:
10099 return OP_X86_PUSH_IMM;
10100 case OP_X86_COMPARE_MEMBASE_REG:
10101 return OP_X86_COMPARE_MEMBASE_IMM;
10102 #endif
10103 #if defined(TARGET_AMD64)
10104 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10105 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10106 #endif
10107 case OP_VOIDCALL_REG:
10108 return OP_VOIDCALL;
10109 case OP_CALL_REG:
10110 return OP_CALL;
10111 case OP_LCALL_REG:
10112 return OP_LCALL;
10113 case OP_FCALL_REG:
10114 return OP_FCALL;
10115 case OP_LOCALLOC:
10116 return OP_LOCALLOC_IMM;
10119 return -1;
10122 static int
10123 ldind_to_load_membase (int opcode)
10125 switch (opcode) {
10126 case CEE_LDIND_I1:
10127 return OP_LOADI1_MEMBASE;
10128 case CEE_LDIND_U1:
10129 return OP_LOADU1_MEMBASE;
10130 case CEE_LDIND_I2:
10131 return OP_LOADI2_MEMBASE;
10132 case CEE_LDIND_U2:
10133 return OP_LOADU2_MEMBASE;
10134 case CEE_LDIND_I4:
10135 return OP_LOADI4_MEMBASE;
10136 case CEE_LDIND_U4:
10137 return OP_LOADU4_MEMBASE;
10138 case CEE_LDIND_I:
10139 return OP_LOAD_MEMBASE;
10140 case CEE_LDIND_REF:
10141 return OP_LOAD_MEMBASE;
10142 case CEE_LDIND_I8:
10143 return OP_LOADI8_MEMBASE;
10144 case CEE_LDIND_R4:
10145 return OP_LOADR4_MEMBASE;
10146 case CEE_LDIND_R8:
10147 return OP_LOADR8_MEMBASE;
10148 default:
10149 g_assert_not_reached ();
10152 return -1;
10155 static int
10156 stind_to_store_membase (int opcode)
10158 switch (opcode) {
10159 case CEE_STIND_I1:
10160 return OP_STOREI1_MEMBASE_REG;
10161 case CEE_STIND_I2:
10162 return OP_STOREI2_MEMBASE_REG;
10163 case CEE_STIND_I4:
10164 return OP_STOREI4_MEMBASE_REG;
10165 case CEE_STIND_I:
10166 case CEE_STIND_REF:
10167 return OP_STORE_MEMBASE_REG;
10168 case CEE_STIND_I8:
10169 return OP_STOREI8_MEMBASE_REG;
10170 case CEE_STIND_R4:
10171 return OP_STORER4_MEMBASE_REG;
10172 case CEE_STIND_R8:
10173 return OP_STORER8_MEMBASE_REG;
10174 default:
10175 g_assert_not_reached ();
10178 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_*_MEM variant of the OP_*_MEMBASE load opcode OPCODE, or
 * -1 if there is none. Only x86/amd64 define these opcodes; on every other
 * target this always returns -1.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* the 64 bit load only exists on 64 bit targets */
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the opcode obtained by fusing OPCODE with a following store
 * described by STORE_OPCODE, i.e. an x86/amd64 read-modify-write form which
 * performs the ALU operation directly on the memory operand.
 * Returns -1 if the combination cannot be fused.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* On x86 only 32 bit / pointer sized stores can be fused */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move followed by a store back to the same location is a no-op */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* On amd64, 32 bit, 64 bit and pointer sized stores can all be fused */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move followed by a store back to the same location is a no-op */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return the opcode obtained by fusing the setcc opcode OPCODE with a
 * following byte sized store described by STORE_OPCODE, i.e. an instruction
 * which stores the flag value directly into memory.
 * Returns -1 if the combination cannot be fused.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/*
		 * Explicit break: this used to fall through into the OP_CNE case,
		 * which was only harmless because both guards test the same
		 * condition; with diverging guards an ICEQ could have been
		 * rewritten into a SETNE.
		 */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the opcode obtained by fusing a load described by LOAD_OPCODE into
 * the first source operand of OPCODE, i.e. an instruction which reads its
 * first operand directly from memory.
 * Returns -1 if the combination cannot be fused.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only 32 bit / pointer sized loads can be fused on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return the opcode obtained by fusing a load described by LOAD_OPCODE into
 * the second source operand of OPCODE, i.e. an instruction which reads its
 * second operand directly from memory.
 * Returns -1 if the combination cannot be fused.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only 32 bit / pointer sized loads can be fused on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/*
	 * The arithmetic cases below used to fall through when their guard
	 * failed, so e.g. an OP_ISUB whose operand was loaded by an i8 load
	 * could reach the OP_LADD case and be rewritten into a 64 bit ADD.
	 * Explicit breaks make a non-matching load size simply disable the
	 * fusion (return -1), which is always safe: the caller then emits a
	 * separate load followed by the original opcode.
	 */
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		break;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		break;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		break;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		break;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		break;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		break;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		break;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		break;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		break;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which are
 * emulated in software on this architecture, since converting those to
 * their immediate form would not be a win.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit platforms */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated on this architecture */
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
10475 #ifndef DISABLE_JIT
10478 * mono_handle_global_vregs:
10480 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10481 * for them.
void
mono_handle_global_vregs (MonoCompile *cfg)
{
	/* Maps vreg -> (block_num + 1) of the single bb using it, or -1 if used in several bbs */
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/*
	 * NOTE(review): 'sizeof (gint32*)' looks like it should be
	 * 'sizeof (gint32)' (the array elements are gint32), and the '+ 1' is
	 * applied to the byte count rather than the element count. The
	 * pointer-sized element over-allocates, which masks both issues —
	 * confirm before changing.
	 */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* Only decomposed (machine level) opcodes are expected here */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Examine the dreg (regindex 0) and the up to three sregs of the ins */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;
				}

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);
					}

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
				}
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hardware registers stay local */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						switch (regtype) {
						case 'i':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();
						}
					}

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;
				}
			}
		}
	}

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
				/*
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;
							}
						}

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;
					}

					if (spilled)
						break;
				}
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				/* Demote: the variable becomes a plain local vreg */
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			}
			break;
		}
	}

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
					var1->inst_c0 = pos;
				}
#endif
			}
			pos ++;
		}
	}
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
}
10710 * mono_spill_global_vars:
10712 * Generate spill code for variables which are not allocated to registers,
10713 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10714 * code is generated which could be optimized by the local optimization passes.
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;       /* vreg -> lvreg currently holding its value, 0 if none */
	guint32 *lvregs;              /* vregs with an active lvreg mapping, for fast clearing */
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];     /* spec regtype char -> STACK_* type */
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Give the two component vregs the stack slots of the two halves */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			}
			default:
				break;
			}
		}
	}
#endif

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: lvregs are not valid across bblocks */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					/* NOTE(review): var->inst_left is vtaddr, so this tests the same inst */
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					/* The address is basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			/* IR level opcodes should have been decomposed by now */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				/* Swap dreg/sreg2 so the basereg is processed as a source below */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hard register: just substitute it */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft-float doubles are stored as two int words */
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					/* Redirect the definition into a fresh lvreg */
					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* 32 bit platforms: store the two component halves separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							/* Constant definition -> store immediate */
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* Move into the var -> turn the move itself into the store */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							/* Swap back to store form (see the store swap above) */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}
			}

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hard register: just substitute it */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;
						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							/* Load directly into the move's destination */
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* 32 bit platforms: load the two component halves separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				/* Deferred from the DREG section: record the lvreg mapping now */
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: calls clobber the lvregs */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
11213 * FIXME:
11214 * - use 'iadd' instead of 'int_add'
11215 * - handling ovf opcodes: decompose in method_to_ir.
11216 * - unify iregs/fregs
11217 * -> partly done, the missing parts are:
11218 * - a more complete unification would involve unifying the hregs as well, so
11219 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11220 * would no longer map to the machine hregs, so the code generators would need to
11221 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11222 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11223 * fp/non-fp branches speeds it up by about 15%.
11224 * - use sext/zext opcodes instead of shifts
11225 * - add OP_ICALL
11226 * - get rid of TEMPLOADs if possible and use vregs instead
11227 * - clean up usage of OP_P/OP_ opcodes
11228 * - cleanup usage of DUMMY_USE
11229 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11230 * stack
11231 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11232 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11233 * - make sure handle_stack_args () is called before the branch is emitted
11234 * - when the new IR is done, get rid of all unused stuff
11235 * - COMPARE/BEQ as separate instructions or unify them ?
11236 * - keeping them separate allows specialized compare instructions like
11237 * compare_imm, compare_membase
11238 * - most back ends unify fp compare+branch, fp compare+ceq
11239 * - integrate mono_save_args into inline_method
11240  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11241 * - handle long shift opts on 32 bit platforms somehow: they require
11242 * 3 sregs (2 for arg1 and 1 for arg2)
11243 * - make byref a 'normal' type.
11244 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11245 * variable if needed.
11246 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11247 * like inline_method.
11248 * - remove inlining restrictions
11249 * - fix LNEG and enable cfold of INEG
11250 * - generalize x86 optimizations like ldelema as a peephole optimization
11251 * - add store_mem_imm for amd64
11252 * - optimize the loading of the interruption flag in the managed->native wrappers
11253 * - avoid special handling of OP_NOP in passes
11254 * - move code inserting instructions into one function/macro.
11255 * - try a coalescing phase after liveness analysis
11256 * - add float -> vreg conversion + local optimizations on !x86
11257 * - figure out how to handle decomposed branches during optimizations, ie.
11258 * compare+branch, op_jump_table+op_br etc.
11259 * - promote RuntimeXHandles to vregs
11260 * - vtype cleanups:
11261 * - add a NEW_VARLOADA_VREG macro
11262 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11263 * accessing vtype fields.
11264 * - get rid of I8CONST on 64 bit platforms
11265 * - dealing with the increase in code size due to branches created during opcode
11266 * decomposition:
11267 * - use extended basic blocks
11268 * - all parts of the JIT
11269 * - handle_global_vregs () && local regalloc
11270 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11271 * - sources of increase in code size:
11272 * - vtypes
11273 * - long compares
11274 * - isinst and castclass
11275 * - lvregs not allocated to global registers even if used multiple times
11276 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11277 * meaningful.
11278 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11279 * - add all micro optimizations from the old JIT
11280 * - put tree optimizations into the deadce pass
11281 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11282 * specific function.
11283 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11284 * fcompare + branchCC.
11285 * - create a helper function for allocating a stack slot, taking into account
11286 * MONO_CFG_HAS_SPILLUP.
11287 * - merge r68207.
11288 * - merge the ia64 switch changes.
11289 * - optimize mono_regstate2_alloc_int/float.
11290 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11291 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11292 * parts of the tree could be separated by other instructions, killing the tree
11293 * arguments, or stores killing loads etc. Also, should we fold loads into other
11294 * instructions if the result of the load is used multiple times ?
11295 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11296 * - LAST MERGE: 108395.
11297 * - when returning vtypes in registers, generate IR and append it to the end of the
11298 * last bb instead of doing it in the epilog.
11299 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11304 NOTES
11305 -----
11307 - When to decompose opcodes:
11308 - earlier: this makes some optimizations hard to implement, since the low level IR
11309     no longer contains the necessary information. But it is easier to do.
11310 - later: harder to implement, enables more optimizations.
11311 - Branches inside bblocks:
11312 - created when decomposing complex opcodes.
11313 - branches to another bblock: harmless, but not tracked by the branch
11314 optimizations, so need to branch to a label at the start of the bblock.
11315 - branches to inside the same bblock: very problematic, trips up the local
11316     reg allocator. Can be fixed by splitting the current bblock, but that is a
11317 complex operation, since some local vregs can become global vregs etc.
11318 - Local/global vregs:
11319 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11320 local register allocator.
11321 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11322 structure, created by mono_create_var (). Assigned to hregs or the stack by
11323 the global register allocator.
11324 - When to do optimizations like alu->alu_imm:
11325 - earlier -> saves work later on since the IR will be smaller/simpler
11326 - later -> can work on more instructions
11327 - Handling of valuetypes:
11328 - When a vtype is pushed on the stack, a new temporary is created, an
11329 instruction computing its address (LDADDR) is emitted and pushed on
11330 the stack. Need to optimize cases when the vtype is used immediately as in
11331 argument passing, stloc etc.
11332 - Instead of the to_end stuff in the old JIT, simply call the function handling
11333 the values on the stack before emitting the last instruction of the bb.
11336 #endif /* DISABLE_JIT */