2010-06-17 Zoltan Varga <vargaz@gmail.com>
[mono.git] / mono / mini / method-to-ir.c
blob7db0f9b086a413cea38c5c7a8a4410574f4a8c67
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
55 #include "mini.h"
56 #include "trace.h"
58 #include "ir-emit.h"
60 #include "jit-icalls.h"
61 #include "jit.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
68 goto inline_failure;\
69 } while (0)
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
72 goto exception_exit;\
73 } while (0)
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
82 } while (0)
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
91 } while (0)
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
98 } \
99 } while (0)
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
119 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
121 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
124 * Instruction metadata
126 #ifdef MINI_OP
127 #undef MINI_OP
128 #endif
129 #ifdef MINI_OP3
130 #undef MINI_OP3
131 #endif
132 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
133 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #define NONE ' '
135 #define IREG 'i'
136 #define FREG 'f'
137 #define VREG 'v'
138 #define XREG 'x'
139 #if SIZEOF_REGISTER == 8
140 #define LREG IREG
141 #else
142 #define LREG 'l'
143 #endif
144 /* keep in sync with the enum in mini.h */
145 const char
146 ins_info[] = {
147 #include "mini-ops.h"
149 #undef MINI_OP
150 #undef MINI_OP3
152 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
153 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
155 * This should contain the index of the last sreg + 1. This is not the same
156 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
158 const gint8 ins_sreg_counts[] = {
159 #include "mini-ops.h"
161 #undef MINI_OP
162 #undef MINI_OP3
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
166 (vi)->reg = -1; \
167 (vi)->idx = (id); \
168 } while (0)
170 void
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
178 guint32
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
184 guint32
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
190 guint32
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
196 guint32
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
202 guint
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
205 if (type->byref)
206 return OP_MOVE;
208 handle_enum:
209 switch (type->type) {
210 case MONO_TYPE_I1:
211 case MONO_TYPE_U1:
212 case MONO_TYPE_BOOLEAN:
213 return OP_MOVE;
214 case MONO_TYPE_I2:
215 case MONO_TYPE_U2:
216 case MONO_TYPE_CHAR:
217 return OP_MOVE;
218 case MONO_TYPE_I4:
219 case MONO_TYPE_U4:
220 return OP_MOVE;
221 case MONO_TYPE_I:
222 case MONO_TYPE_U:
223 case MONO_TYPE_PTR:
224 case MONO_TYPE_FNPTR:
225 return OP_MOVE;
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
231 return OP_MOVE;
232 case MONO_TYPE_I8:
233 case MONO_TYPE_U8:
234 #if SIZEOF_REGISTER == 8
235 return OP_MOVE;
236 #else
237 return OP_LMOVE;
238 #endif
239 case MONO_TYPE_R4:
240 return OP_FMOVE;
241 case MONO_TYPE_R8:
242 return OP_FMOVE;
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
246 goto handle_enum;
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
249 return OP_XMOVE;
250 return OP_VMOVE;
251 case MONO_TYPE_TYPEDBYREF:
252 return OP_VMOVE;
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
255 goto handle_enum;
256 case MONO_TYPE_VAR:
257 case MONO_TYPE_MVAR:
258 g_assert (cfg->generic_sharing_context);
259 return OP_MOVE;
260 default:
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
263 return -1;
266 void
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
269 int i;
270 MonoInst *tree;
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 printf (", OUT: ");
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 printf (" ]\n");
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
285 * file.
287 #ifndef DISABLE_JIT
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if (!(tblock)) { \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
299 } while (0)
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 } while (0)
311 #endif
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 MonoInst *widen; \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
322 } while (0)
323 #else
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
325 #endif
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
329 sp -= 2; \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
333 CHECK_TYPE (ins); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
339 } while (0)
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
343 sp--; \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
346 CHECK_TYPE (ins); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
350 } while (0)
352 #define ADD_BINCOND(next_block) do { \
353 MonoInst *cmp; \
354 sp -= 2; \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
359 CHECK_TYPE (cmp); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
369 } else { \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
381 } while (0)
383 /* *
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
390 static void
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
394 int i, found;
396 #if 0
397 if (from->cil_code) {
398 if (to->cil_code)
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 else
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
402 } else {
403 if (to->cil_code)
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 else
406 printf ("edge from entry to exit\n");
408 #endif
410 found = FALSE;
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
413 found = TRUE;
414 break;
417 if (!found) {
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
422 newa [i] = to;
423 from->out_count++;
424 from->out_bb = newa;
427 found = FALSE;
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
430 found = TRUE;
431 break;
434 if (!found) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
439 newa [i] = from;
440 to->in_count++;
441 to->in_bb = newa;
445 void
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
457 * Returns:
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
465 static int
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethodHeader *header = cfg->header;
469 MonoExceptionClause *clause;
470 int i;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 else
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
491 return -1;
494 static GList*
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
499 int i;
500 GList *res = NULL;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type)
507 res = g_list_append (res, clause);
510 return res;
513 static void
514 mono_create_spvar_for_region (MonoCompile *cfg, int region)
516 MonoInst *var;
518 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
519 if (var)
520 return;
522 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
523 /* prevent it from being register allocated */
524 var->flags |= MONO_INST_INDIRECT;
526 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
529 MonoInst *
530 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
532 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 static MonoInst*
536 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
538 MonoInst *var;
540 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
541 if (var)
542 return var;
544 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
545 /* prevent it from being register allocated */
546 var->flags |= MONO_INST_INDIRECT;
548 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
550 return var;
554 * Returns the type used in the eval stack when @type is loaded.
555 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 void
558 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
560 MonoClass *klass;
562 inst->klass = klass = mono_class_from_mono_type (type);
563 if (type->byref) {
564 inst->type = STACK_MP;
565 return;
568 handle_enum:
569 switch (type->type) {
570 case MONO_TYPE_VOID:
571 inst->type = STACK_INV;
572 return;
573 case MONO_TYPE_I1:
574 case MONO_TYPE_U1:
575 case MONO_TYPE_BOOLEAN:
576 case MONO_TYPE_I2:
577 case MONO_TYPE_U2:
578 case MONO_TYPE_CHAR:
579 case MONO_TYPE_I4:
580 case MONO_TYPE_U4:
581 inst->type = STACK_I4;
582 return;
583 case MONO_TYPE_I:
584 case MONO_TYPE_U:
585 case MONO_TYPE_PTR:
586 case MONO_TYPE_FNPTR:
587 inst->type = STACK_PTR;
588 return;
589 case MONO_TYPE_CLASS:
590 case MONO_TYPE_STRING:
591 case MONO_TYPE_OBJECT:
592 case MONO_TYPE_SZARRAY:
593 case MONO_TYPE_ARRAY:
594 inst->type = STACK_OBJ;
595 return;
596 case MONO_TYPE_I8:
597 case MONO_TYPE_U8:
598 inst->type = STACK_I8;
599 return;
600 case MONO_TYPE_R4:
601 case MONO_TYPE_R8:
602 inst->type = STACK_R8;
603 return;
604 case MONO_TYPE_VALUETYPE:
605 if (type->data.klass->enumtype) {
606 type = mono_class_enum_basetype (type->data.klass);
607 goto handle_enum;
608 } else {
609 inst->klass = klass;
610 inst->type = STACK_VTYPE;
611 return;
613 case MONO_TYPE_TYPEDBYREF:
614 inst->klass = mono_defaults.typed_reference_class;
615 inst->type = STACK_VTYPE;
616 return;
617 case MONO_TYPE_GENERICINST:
618 type = &type->data.generic_class->container_class->byval_arg;
619 goto handle_enum;
620 case MONO_TYPE_VAR :
621 case MONO_TYPE_MVAR :
622 /* FIXME: all the arguments must be references for now,
623 * later look inside cfg and see if the arg num is
624 * really a reference
626 g_assert (cfg->generic_sharing_context);
627 inst->type = STACK_OBJ;
628 return;
629 default:
630 g_error ("unknown type 0x%02x in eval stack type", type->type);
635 * The following tables are used to quickly validate the IL code in type_from_op ().
637 static const char
638 bin_num_table [STACK_MAX] [STACK_MAX] = {
639 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
649 static const char
650 neg_table [] = {
651 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
654 /* reduce the size of this table */
655 static const char
656 bin_int_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
667 static const char
668 bin_comp_table [STACK_MAX] [STACK_MAX] = {
669 /* Inv i L p F & O vt */
670 {0},
671 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
672 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
673 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
674 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
675 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
676 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
677 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
680 /* reduce the size of this table */
681 static const char
682 shift_table [STACK_MAX] [STACK_MAX] = {
683 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
694 * Tables to map from the non-specific opcode to the matching
695 * type-specific opcode.
697 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 static const guint16
699 binops_op_map [STACK_MAX] = {
700 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
703 /* handles from CEE_NEG to CEE_CONV_U8 */
704 static const guint16
705 unops_op_map [STACK_MAX] = {
706 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
709 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 static const guint16
711 ovfops_op_map [STACK_MAX] = {
712 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
715 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 static const guint16
717 ovf2ops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
721 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 static const guint16
723 ovf3ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
727 /* handles from CEE_BEQ to CEE_BLT_UN */
728 static const guint16
729 beqops_op_map [STACK_MAX] = {
730 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
733 /* handles from CEE_CEQ to CEE_CLT_UN */
734 static const guint16
735 ceqops_op_map [STACK_MAX] = {
736 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
740 * Sets ins->type (the type on the eval stack) according to the
741 * type of the opcode and the arguments to it.
742 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
744 * FIXME: this function sets ins->type unconditionally in some cases, but
745 * it should set it to invalid for some types (a conv.x on an object)
747 static void
748 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
750 switch (ins->opcode) {
751 /* binops */
752 case CEE_ADD:
753 case CEE_SUB:
754 case CEE_MUL:
755 case CEE_DIV:
756 case CEE_REM:
757 /* FIXME: check unverifiable args for STACK_MP */
758 ins->type = bin_num_table [src1->type] [src2->type];
759 ins->opcode += binops_op_map [ins->type];
760 break;
761 case CEE_DIV_UN:
762 case CEE_REM_UN:
763 case CEE_AND:
764 case CEE_OR:
765 case CEE_XOR:
766 ins->type = bin_int_table [src1->type] [src2->type];
767 ins->opcode += binops_op_map [ins->type];
768 break;
769 case CEE_SHL:
770 case CEE_SHR:
771 case CEE_SHR_UN:
772 ins->type = shift_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
774 break;
775 case OP_COMPARE:
776 case OP_LCOMPARE:
777 case OP_ICOMPARE:
778 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
779 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
780 ins->opcode = OP_LCOMPARE;
781 else if (src1->type == STACK_R8)
782 ins->opcode = OP_FCOMPARE;
783 else
784 ins->opcode = OP_ICOMPARE;
785 break;
786 case OP_ICOMPARE_IMM:
787 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
788 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
789 ins->opcode = OP_LCOMPARE_IMM;
790 break;
791 case CEE_BEQ:
792 case CEE_BGE:
793 case CEE_BGT:
794 case CEE_BLE:
795 case CEE_BLT:
796 case CEE_BNE_UN:
797 case CEE_BGE_UN:
798 case CEE_BGT_UN:
799 case CEE_BLE_UN:
800 case CEE_BLT_UN:
801 ins->opcode += beqops_op_map [src1->type];
802 break;
803 case OP_CEQ:
804 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
805 ins->opcode += ceqops_op_map [src1->type];
806 break;
807 case OP_CGT:
808 case OP_CGT_UN:
809 case OP_CLT:
810 case OP_CLT_UN:
811 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
813 break;
814 /* unops */
815 case CEE_NEG:
816 ins->type = neg_table [src1->type];
817 ins->opcode += unops_op_map [ins->type];
818 break;
819 case CEE_NOT:
820 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
821 ins->type = src1->type;
822 else
823 ins->type = STACK_INV;
824 ins->opcode += unops_op_map [ins->type];
825 break;
826 case CEE_CONV_I1:
827 case CEE_CONV_I2:
828 case CEE_CONV_I4:
829 case CEE_CONV_U4:
830 ins->type = STACK_I4;
831 ins->opcode += unops_op_map [src1->type];
832 break;
833 case CEE_CONV_R_UN:
834 ins->type = STACK_R8;
835 switch (src1->type) {
836 case STACK_I4:
837 case STACK_PTR:
838 ins->opcode = OP_ICONV_TO_R_UN;
839 break;
840 case STACK_I8:
841 ins->opcode = OP_LCONV_TO_R_UN;
842 break;
844 break;
845 case CEE_CONV_OVF_I1:
846 case CEE_CONV_OVF_U1:
847 case CEE_CONV_OVF_I2:
848 case CEE_CONV_OVF_U2:
849 case CEE_CONV_OVF_I4:
850 case CEE_CONV_OVF_U4:
851 ins->type = STACK_I4;
852 ins->opcode += ovf3ops_op_map [src1->type];
853 break;
854 case CEE_CONV_OVF_I_UN:
855 case CEE_CONV_OVF_U_UN:
856 ins->type = STACK_PTR;
857 ins->opcode += ovf2ops_op_map [src1->type];
858 break;
859 case CEE_CONV_OVF_I1_UN:
860 case CEE_CONV_OVF_I2_UN:
861 case CEE_CONV_OVF_I4_UN:
862 case CEE_CONV_OVF_U1_UN:
863 case CEE_CONV_OVF_U2_UN:
864 case CEE_CONV_OVF_U4_UN:
865 ins->type = STACK_I4;
866 ins->opcode += ovf2ops_op_map [src1->type];
867 break;
868 case CEE_CONV_U:
869 ins->type = STACK_PTR;
870 switch (src1->type) {
871 case STACK_I4:
872 ins->opcode = OP_ICONV_TO_U;
873 break;
874 case STACK_PTR:
875 case STACK_MP:
876 #if SIZEOF_REGISTER == 8
877 ins->opcode = OP_LCONV_TO_U;
878 #else
879 ins->opcode = OP_MOVE;
880 #endif
881 break;
882 case STACK_I8:
883 ins->opcode = OP_LCONV_TO_U;
884 break;
885 case STACK_R8:
886 ins->opcode = OP_FCONV_TO_U;
887 break;
889 break;
890 case CEE_CONV_I8:
891 case CEE_CONV_U8:
892 ins->type = STACK_I8;
893 ins->opcode += unops_op_map [src1->type];
894 break;
895 case CEE_CONV_OVF_I8:
896 case CEE_CONV_OVF_U8:
897 ins->type = STACK_I8;
898 ins->opcode += ovf3ops_op_map [src1->type];
899 break;
900 case CEE_CONV_OVF_U8_UN:
901 case CEE_CONV_OVF_I8_UN:
902 ins->type = STACK_I8;
903 ins->opcode += ovf2ops_op_map [src1->type];
904 break;
905 case CEE_CONV_R4:
906 case CEE_CONV_R8:
907 ins->type = STACK_R8;
908 ins->opcode += unops_op_map [src1->type];
909 break;
910 case OP_CKFINITE:
911 ins->type = STACK_R8;
912 break;
913 case CEE_CONV_U2:
914 case CEE_CONV_U1:
915 ins->type = STACK_I4;
916 ins->opcode += ovfops_op_map [src1->type];
917 break;
918 case CEE_CONV_I:
919 case CEE_CONV_OVF_I:
920 case CEE_CONV_OVF_U:
921 ins->type = STACK_PTR;
922 ins->opcode += ovfops_op_map [src1->type];
923 break;
924 case CEE_ADD_OVF:
925 case CEE_ADD_OVF_UN:
926 case CEE_MUL_OVF:
927 case CEE_MUL_OVF_UN:
928 case CEE_SUB_OVF:
929 case CEE_SUB_OVF_UN:
930 ins->type = bin_num_table [src1->type] [src2->type];
931 ins->opcode += ovfops_op_map [src1->type];
932 if (ins->type == STACK_R8)
933 ins->type = STACK_INV;
934 break;
935 case OP_LOAD_MEMBASE:
936 ins->type = STACK_PTR;
937 break;
938 case OP_LOADI1_MEMBASE:
939 case OP_LOADU1_MEMBASE:
940 case OP_LOADI2_MEMBASE:
941 case OP_LOADU2_MEMBASE:
942 case OP_LOADI4_MEMBASE:
943 case OP_LOADU4_MEMBASE:
944 ins->type = STACK_PTR;
945 break;
946 case OP_LOADI8_MEMBASE:
947 ins->type = STACK_I8;
948 break;
949 case OP_LOADR4_MEMBASE:
950 case OP_LOADR8_MEMBASE:
951 ins->type = STACK_R8;
952 break;
953 default:
954 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
955 break;
958 if (ins->type == STACK_MP)
959 ins->klass = mono_defaults.object_class;
962 static const char
963 ldind_type [] = {
964 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
967 #if 0
969 static const char
970 param_table [STACK_MAX] [STACK_MAX] = {
971 {0},
974 static int
975 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
976 int i;
978 if (sig->hasthis) {
979 switch (args->type) {
980 case STACK_I4:
981 case STACK_I8:
982 case STACK_R8:
983 case STACK_VTYPE:
984 case STACK_INV:
985 return 0;
987 args++;
989 for (i = 0; i < sig->param_count; ++i) {
990 switch (args [i].type) {
991 case STACK_INV:
992 return 0;
993 case STACK_MP:
994 if (!sig->params [i]->byref)
995 return 0;
996 continue;
997 case STACK_OBJ:
998 if (sig->params [i]->byref)
999 return 0;
1000 switch (sig->params [i]->type) {
1001 case MONO_TYPE_CLASS:
1002 case MONO_TYPE_STRING:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_SZARRAY:
1005 case MONO_TYPE_ARRAY:
1006 break;
1007 default:
1008 return 0;
1010 continue;
1011 case STACK_R8:
1012 if (sig->params [i]->byref)
1013 return 0;
1014 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1015 return 0;
1016 continue;
1017 case STACK_PTR:
1018 case STACK_I4:
1019 case STACK_I8:
1020 case STACK_VTYPE:
1021 break;
1023 /*if (!param_table [args [i].type] [sig->params [i]->type])
1024 return 0;*/
1026 return 1;
1028 #endif
1031 * When we need a pointer to the current domain many times in a method, we
1032 * call mono_domain_get() once and we store the result in a local variable.
1033 * This function returns the variable that represents the MonoDomain*.
1035 inline static MonoInst *
1036 mono_get_domainvar (MonoCompile *cfg)
1038 if (!cfg->domainvar)
1039 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1040 return cfg->domainvar;
1044 * The got_var contains the address of the Global Offset Table when AOT
1045 * compiling.
1047 MonoInst *
1048 mono_get_got_var (MonoCompile *cfg)
1050 #ifdef MONO_ARCH_NEED_GOT_VAR
1051 if (!cfg->compile_aot)
1052 return NULL;
1053 if (!cfg->got_var) {
1054 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1056 return cfg->got_var;
1057 #else
1058 return NULL;
1059 #endif
1062 static MonoInst *
1063 mono_get_vtable_var (MonoCompile *cfg)
1065 g_assert (cfg->generic_sharing_context);
1067 if (!cfg->rgctx_var) {
1068 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 /* force the var to be stack allocated */
1070 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1073 return cfg->rgctx_var;
1076 static MonoType*
1077 type_from_stack_type (MonoInst *ins) {
1078 switch (ins->type) {
1079 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1080 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1081 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1082 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 case STACK_MP:
1084 return &ins->klass->this_arg;
1085 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1086 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 default:
1088 g_error ("stack type %d to monotype not handled\n", ins->type);
1090 return NULL;
1093 static G_GNUC_UNUSED int
1094 type_to_stack_type (MonoType *t)
1096 t = mono_type_get_underlying_type (t);
1097 switch (t->type) {
1098 case MONO_TYPE_I1:
1099 case MONO_TYPE_U1:
1100 case MONO_TYPE_BOOLEAN:
1101 case MONO_TYPE_I2:
1102 case MONO_TYPE_U2:
1103 case MONO_TYPE_CHAR:
1104 case MONO_TYPE_I4:
1105 case MONO_TYPE_U4:
1106 return STACK_I4;
1107 case MONO_TYPE_I:
1108 case MONO_TYPE_U:
1109 case MONO_TYPE_PTR:
1110 case MONO_TYPE_FNPTR:
1111 return STACK_PTR;
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1117 return STACK_OBJ;
1118 case MONO_TYPE_I8:
1119 case MONO_TYPE_U8:
1120 return STACK_I8;
1121 case MONO_TYPE_R4:
1122 case MONO_TYPE_R8:
1123 return STACK_R8;
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1126 return STACK_VTYPE;
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1129 return STACK_VTYPE;
1130 else
1131 return STACK_OBJ;
1132 break;
1133 default:
1134 g_assert_not_reached ();
1137 return -1;
1140 static MonoClass*
1141 array_access_to_klass (int opcode)
1143 switch (opcode) {
1144 case CEE_LDELEM_U1:
1145 return mono_defaults.byte_class;
1146 case CEE_LDELEM_U2:
1147 return mono_defaults.uint16_class;
1148 case CEE_LDELEM_I:
1149 case CEE_STELEM_I:
1150 return mono_defaults.int_class;
1151 case CEE_LDELEM_I1:
1152 case CEE_STELEM_I1:
1153 return mono_defaults.sbyte_class;
1154 case CEE_LDELEM_I2:
1155 case CEE_STELEM_I2:
1156 return mono_defaults.int16_class;
1157 case CEE_LDELEM_I4:
1158 case CEE_STELEM_I4:
1159 return mono_defaults.int32_class;
1160 case CEE_LDELEM_U4:
1161 return mono_defaults.uint32_class;
1162 case CEE_LDELEM_I8:
1163 case CEE_STELEM_I8:
1164 return mono_defaults.int64_class;
1165 case CEE_LDELEM_R4:
1166 case CEE_STELEM_R4:
1167 return mono_defaults.single_class;
1168 case CEE_LDELEM_R8:
1169 case CEE_STELEM_R8:
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1174 default:
1175 g_assert_not_reached ();
1177 return NULL;
1181 * We try to share variables when possible
1183 static MonoInst *
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1186 MonoInst *res;
1187 int pos, vnum;
1189 /* inlining can result in deeper stacks */
1190 if (slot >= cfg->header->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
1196 case STACK_I4:
1197 case STACK_I8:
1198 case STACK_R8:
1199 case STACK_PTR:
1200 case STACK_MP:
1201 case STACK_OBJ:
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1206 break;
1207 default:
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 return res;
1213 static void
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
1241 static void
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 int i, bindex;
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1248 gboolean found;
1250 if (!count)
1251 return;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
1257 found = FALSE;
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 continue;
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1265 found = TRUE;
1266 bb->out_stack = outb->in_stack;
1267 break;
1270 //printf ("\n");
1271 if (!found) {
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1280 * slot as well.
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1284 * example.
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 else
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 continue;
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1302 return;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
1311 cfg->cbb = bb;
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1323 * in_stacks.
1326 found = TRUE;
1327 bindex = 0;
1328 while (found) {
1329 /* Find a bblock which has a different in_stack */
1330 found = FALSE;
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1335 bindex++;
1336 continue;
1338 if (outb->in_stack != locals) {
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1347 found = TRUE;
1348 break;
1350 bindex ++;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
1358 static void
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 if (cfg->compile_aot) {
1362 int ioffset_reg = alloc_preg (cfg);
1363 int iid_reg = alloc_preg (cfg);
1365 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1369 else {
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1374 static void
1375 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1377 int ibitmap_reg = alloc_preg (cfg);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1379 MonoInst *args [2];
1380 MonoInst *res, *ins;
1381 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1382 MONO_ADD_INS (cfg->cbb, ins);
1383 args [0] = ins;
1384 if (cfg->compile_aot)
1385 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 else
1387 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1388 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1389 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1390 #else
1391 int ibitmap_byte_reg = alloc_preg (cfg);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1395 if (cfg->compile_aot) {
1396 int iid_reg = alloc_preg (cfg);
1397 int shifted_iid_reg = alloc_preg (cfg);
1398 int ibitmap_byte_address_reg = alloc_preg (cfg);
1399 int masked_iid_reg = alloc_preg (cfg);
1400 int iid_one_bit_reg = alloc_preg (cfg);
1401 int iid_bit_reg = alloc_preg (cfg);
1402 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1407 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1409 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1410 } else {
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1414 #endif
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
1421 static void
1422 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1424 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
1431 static void
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1434 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1438 * Emit code which checks whenever the interface id of @klass is smaller than
1439 * than the value given by max_iid_reg.
1441 static void
1442 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1443 MonoBasicBlock *false_target)
1445 if (cfg->compile_aot) {
1446 int iid_reg = alloc_preg (cfg);
1447 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1448 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 else
1451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 if (false_target)
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 else
1455 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
1459 static void
1460 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 int max_iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1466 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1469 /* Same as above, but obtains max_iid from a klass */
1470 static void
1471 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 int max_iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1477 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1480 static void
1481 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1483 int idepth_reg = alloc_preg (cfg);
1484 int stypes_reg = alloc_preg (cfg);
1485 int stype = alloc_preg (cfg);
1487 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1494 if (klass_ins) {
1495 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1496 } else if (cfg->compile_aot) {
1497 int const_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 } else {
1501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1506 static void
1507 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1509 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1512 static void
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 if (true_target)
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 else
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1529 static void
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 if (true_target)
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 else
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1543 static inline void
1544 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 if (klass_inst) {
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1548 } else if (cfg->compile_aot) {
1549 int const_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 } else {
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1555 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1558 static inline void
1559 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1561 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1564 static inline void
1565 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1567 if (cfg->compile_aot) {
1568 int const_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1570 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 } else {
1572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 static void
1578 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1580 static void
1581 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 if (klass->rank) {
1584 int rank_reg = alloc_preg (cfg);
1585 int eclass_reg = alloc_preg (cfg);
1587 g_assert (!klass_inst);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1590 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1593 if (klass->cast_class == mono_defaults.object_class) {
1594 int parent_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1596 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1597 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1598 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1599 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1600 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1601 } else if (klass->cast_class == mono_defaults.enum_class) {
1602 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1603 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1604 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 } else {
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1610 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1617 } else {
1618 int idepth_reg = alloc_preg (cfg);
1619 int stypes_reg = alloc_preg (cfg);
1620 int stype = alloc_preg (cfg);
1622 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1625 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1629 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1633 static void
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1636 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1639 static void
1640 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1642 int val_reg;
1644 g_assert (val == 0);
1646 if (align == 0)
1647 align = 4;
1649 if ((size <= 4) && (size <= align)) {
1650 switch (size) {
1651 case 1:
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1653 return;
1654 case 2:
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1656 return;
1657 case 4:
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 return;
1660 #if SIZEOF_REGISTER == 8
1661 case 8:
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1663 return;
1664 #endif
1668 val_reg = alloc_preg (cfg);
1670 if (SIZEOF_REGISTER == 8)
1671 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 else
1673 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1675 if (align < 4) {
1676 /* This could be optimized further if neccesary */
1677 while (size >= 1) {
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1679 offset += 1;
1680 size -= 1;
1682 return;
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER == 8) {
1687 if (offset % 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1689 offset += 4;
1690 size -= 4;
1692 while (size >= 8) {
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1694 offset += 8;
1695 size -= 8;
1698 #endif
1700 while (size >= 4) {
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1702 offset += 4;
1703 size -= 4;
1705 while (size >= 2) {
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1707 offset += 2;
1708 size -= 2;
1710 while (size >= 1) {
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1712 offset += 1;
1713 size -= 1;
1717 void
1718 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1720 int cur_reg;
1722 if (align == 0)
1723 align = 4;
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size < 10000);
1728 if (align < 4) {
1729 /* This could be optimized further if neccesary */
1730 while (size >= 1) {
1731 cur_reg = alloc_preg (cfg);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1734 doffset += 1;
1735 soffset += 1;
1736 size -= 1;
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER == 8) {
1742 while (size >= 8) {
1743 cur_reg = alloc_preg (cfg);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1746 doffset += 8;
1747 soffset += 8;
1748 size -= 8;
1751 #endif
1753 while (size >= 4) {
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1757 doffset += 4;
1758 soffset += 4;
1759 size -= 4;
1761 while (size >= 2) {
1762 cur_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1765 doffset += 2;
1766 soffset += 2;
1767 size -= 2;
1769 while (size >= 1) {
1770 cur_reg = alloc_preg (cfg);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1773 doffset += 1;
1774 soffset += 1;
1775 size -= 1;
1779 static int
1780 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1782 if (type->byref)
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1785 handle_enum:
1786 type = mini_get_basic_type_from_generic (gsctx, type);
1787 switch (type->type) {
1788 case MONO_TYPE_VOID:
1789 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1790 case MONO_TYPE_I1:
1791 case MONO_TYPE_U1:
1792 case MONO_TYPE_BOOLEAN:
1793 case MONO_TYPE_I2:
1794 case MONO_TYPE_U2:
1795 case MONO_TYPE_CHAR:
1796 case MONO_TYPE_I4:
1797 case MONO_TYPE_U4:
1798 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1799 case MONO_TYPE_I:
1800 case MONO_TYPE_U:
1801 case MONO_TYPE_PTR:
1802 case MONO_TYPE_FNPTR:
1803 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 case MONO_TYPE_CLASS:
1805 case MONO_TYPE_STRING:
1806 case MONO_TYPE_OBJECT:
1807 case MONO_TYPE_SZARRAY:
1808 case MONO_TYPE_ARRAY:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1810 case MONO_TYPE_I8:
1811 case MONO_TYPE_U8:
1812 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1813 case MONO_TYPE_R4:
1814 case MONO_TYPE_R8:
1815 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1816 case MONO_TYPE_VALUETYPE:
1817 if (type->data.klass->enumtype) {
1818 type = mono_class_enum_basetype (type->data.klass);
1819 goto handle_enum;
1820 } else
1821 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1822 case MONO_TYPE_TYPEDBYREF:
1823 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1824 case MONO_TYPE_GENERICINST:
1825 type = &type->data.generic_class->container_class->byval_arg;
1826 goto handle_enum;
1827 default:
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1830 return -1;
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1840 * validity checks.
1842 * Returns: non-0 value if arg can't be stored on a target.
1844 static int
1845 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1847 MonoType *simple_type;
1848 MonoClass *klass;
1850 if (target->byref) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg->type == STACK_MP)
1853 return arg->klass != mono_class_from_mono_type (target);
1854 if (arg->type == STACK_PTR)
1855 return 0;
1856 return 1;
1859 simple_type = mono_type_get_underlying_type (target);
1860 switch (simple_type->type) {
1861 case MONO_TYPE_VOID:
1862 return 1;
1863 case MONO_TYPE_I1:
1864 case MONO_TYPE_U1:
1865 case MONO_TYPE_BOOLEAN:
1866 case MONO_TYPE_I2:
1867 case MONO_TYPE_U2:
1868 case MONO_TYPE_CHAR:
1869 case MONO_TYPE_I4:
1870 case MONO_TYPE_U4:
1871 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1872 return 1;
1873 return 0;
1874 case MONO_TYPE_PTR:
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1877 return 1;
1878 return 0;
1879 case MONO_TYPE_I:
1880 case MONO_TYPE_U:
1881 case MONO_TYPE_FNPTR:
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1883 return 1;
1884 return 0;
1885 case MONO_TYPE_CLASS:
1886 case MONO_TYPE_STRING:
1887 case MONO_TYPE_OBJECT:
1888 case MONO_TYPE_SZARRAY:
1889 case MONO_TYPE_ARRAY:
1890 if (arg->type != STACK_OBJ)
1891 return 1;
1892 /* FIXME: check type compatibility */
1893 return 0;
1894 case MONO_TYPE_I8:
1895 case MONO_TYPE_U8:
1896 if (arg->type != STACK_I8)
1897 return 1;
1898 return 0;
1899 case MONO_TYPE_R4:
1900 case MONO_TYPE_R8:
1901 if (arg->type != STACK_R8)
1902 return 1;
1903 return 0;
1904 case MONO_TYPE_VALUETYPE:
1905 if (arg->type != STACK_VTYPE)
1906 return 1;
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1909 return 1;
1910 return 0;
1911 case MONO_TYPE_TYPEDBYREF:
1912 if (arg->type != STACK_VTYPE)
1913 return 1;
1914 klass = mono_class_from_mono_type (simple_type);
1915 if (klass != arg->klass)
1916 return 1;
1917 return 0;
1918 case MONO_TYPE_GENERICINST:
1919 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1920 if (arg->type != STACK_VTYPE)
1921 return 1;
1922 klass = mono_class_from_mono_type (simple_type);
1923 if (klass != arg->klass)
1924 return 1;
1925 return 0;
1926 } else {
1927 if (arg->type != STACK_OBJ)
1928 return 1;
1929 /* FIXME: check type compatibility */
1930 return 0;
1932 case MONO_TYPE_VAR:
1933 case MONO_TYPE_MVAR:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg->generic_sharing_context);
1939 if (arg->type != STACK_OBJ)
1940 return 1;
1941 return 0;
1942 default:
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1945 return 1;
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1951 * signature.
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1957 static int
1958 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1960 MonoType *simple_type;
1961 int i;
1963 if (sig->hasthis) {
1964 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1965 return 1;
1966 args++;
1968 for (i = 0; i < sig->param_count; ++i) {
1969 if (sig->params [i]->byref) {
1970 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1971 return 1;
1972 continue;
1974 simple_type = sig->params [i];
1975 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1976 handle_enum:
1977 switch (simple_type->type) {
1978 case MONO_TYPE_VOID:
1979 return 1;
1980 continue;
1981 case MONO_TYPE_I1:
1982 case MONO_TYPE_U1:
1983 case MONO_TYPE_BOOLEAN:
1984 case MONO_TYPE_I2:
1985 case MONO_TYPE_U2:
1986 case MONO_TYPE_CHAR:
1987 case MONO_TYPE_I4:
1988 case MONO_TYPE_U4:
1989 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1990 return 1;
1991 continue;
1992 case MONO_TYPE_I:
1993 case MONO_TYPE_U:
1994 case MONO_TYPE_PTR:
1995 case MONO_TYPE_FNPTR:
1996 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1997 return 1;
1998 continue;
1999 case MONO_TYPE_CLASS:
2000 case MONO_TYPE_STRING:
2001 case MONO_TYPE_OBJECT:
2002 case MONO_TYPE_SZARRAY:
2003 case MONO_TYPE_ARRAY:
2004 if (args [i]->type != STACK_OBJ)
2005 return 1;
2006 continue;
2007 case MONO_TYPE_I8:
2008 case MONO_TYPE_U8:
2009 if (args [i]->type != STACK_I8)
2010 return 1;
2011 continue;
2012 case MONO_TYPE_R4:
2013 case MONO_TYPE_R8:
2014 if (args [i]->type != STACK_R8)
2015 return 1;
2016 continue;
2017 case MONO_TYPE_VALUETYPE:
2018 if (simple_type->data.klass->enumtype) {
2019 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2020 goto handle_enum;
2022 if (args [i]->type != STACK_VTYPE)
2023 return 1;
2024 continue;
2025 case MONO_TYPE_TYPEDBYREF:
2026 if (args [i]->type != STACK_VTYPE)
2027 return 1;
2028 continue;
2029 case MONO_TYPE_GENERICINST:
2030 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2031 goto handle_enum;
2033 default:
2034 g_error ("unknown type 0x%02x in check_call_signature",
2035 simple_type->type);
2038 return 0;
2041 static int
2042 callvirt_to_call (int opcode)
2044 switch (opcode) {
2045 case OP_CALLVIRT:
2046 return OP_CALL;
2047 case OP_VOIDCALLVIRT:
2048 return OP_VOIDCALL;
2049 case OP_FCALLVIRT:
2050 return OP_FCALL;
2051 case OP_VCALLVIRT:
2052 return OP_VCALL;
2053 case OP_LCALLVIRT:
2054 return OP_LCALL;
2055 default:
2056 g_assert_not_reached ();
2059 return -1;
2062 static int
2063 callvirt_to_call_membase (int opcode)
2065 switch (opcode) {
2066 case OP_CALLVIRT:
2067 return OP_CALL_MEMBASE;
2068 case OP_VOIDCALLVIRT:
2069 return OP_VOIDCALL_MEMBASE;
2070 case OP_FCALLVIRT:
2071 return OP_FCALL_MEMBASE;
2072 case OP_LCALLVIRT:
2073 return OP_LCALL_MEMBASE;
2074 case OP_VCALLVIRT:
2075 return OP_VCALL_MEMBASE;
2076 default:
2077 g_assert_not_reached ();
2080 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit IR to place the IMT argument (the method identifying the interface
 * slot) into the architecture's IMT register before CALL. If IMT_ARG is
 * non-NULL its value is used; otherwise a constant for call->method is
 * emitted (as an AOT patch when compiling AOT). On architectures without a
 * dedicated IMT register, the arch backend handles it.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* The method address is not known at compile time: emit a patchable constant */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		call->imt_arg_reg = method_reg;
#endif
	/* Pin method_reg to the fixed hardware IMT register for the call */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2113 static MonoJumpInfo *
2114 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2116 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2118 ji->ip.i = ip;
2119 ji->type = type;
2120 ji->data.target = target;
2122 return ji;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS
 * and lower the argument passing via the arch backend (or LLVM). CALLI,
 * VIRTUAL and TAIL select the call flavour. The returned call instruction is
 * NOT yet added to the current basic block; callers do that after filling in
 * the target.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
		     MonoInst **args, int calli, int virtual, int tail)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			/* tail calls returning a vtype reuse the caller's vret address */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-argument area and remember this method makes calls */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2222 inline static MonoInst*
2223 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2225 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2227 call->inst.sreg1 = addr->dreg;
2229 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2231 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 *
 *   Arrange for RGCTX_REG (already holding RGCTX_ARG's value) to be passed in
 * the architecture's dedicated RGCTX register for CALL. Only available on
 * architectures defining MONO_ARCH_RGCTX_REG.
 */
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
	call->rgctx_arg_reg = rgctx_reg;
#endif
#else
	NOT_IMPLEMENTED;
#endif
}
2249 inline static MonoInst*
2250 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2252 MonoCallInst *call;
2253 int rgctx_reg = -1;
2255 if (rgctx_arg) {
2256 rgctx_reg = mono_alloc_preg (cfg);
2257 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2259 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2260 if (rgctx_arg)
2261 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2262 return (MonoInst*)call;
2265 static MonoInst*
2266 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2267 static MonoInst*
2268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. THIS, when
 * non-NULL, makes the call virtual; IMT_ARG, when non-NULL, is passed as the
 * IMT argument for generic virtual/interface calls. Handles: string ctors
 * (whose real return type is string), transparent-proxy remoting wrappers,
 * the fast delegate Invoke path, devirtualization of non-virtual and
 * final/sealed methods, interface dispatch via IMT or the interface vtable,
 * and plain vtable dispatch. Returns the call instruction.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
			    MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	/* A non-virtual call on a MarshalByRef (or object) receiver may hit a proxy */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);

	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		/* gshared methods can't have remoting wrappers: call indirectly via the rgctx */
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
		    (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
		     (MONO_METHOD_IS_FINAL (method) &&
		      method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
		    !(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		/* True virtual dispatch: load the slot address from the vtable */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			if (mono_use_imt) {
				/* IMT slots live at negative offsets from the vtable */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* Non-IMT interface dispatch: index into the interface's vtable section */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				/* only generic virtual methods carry an IMT argument here */
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
2413 static MonoInst*
2414 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2415 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2417 int rgctx_reg = 0;
2418 MonoInst *ins;
2419 MonoCallInst *call;
2421 if (vtable_arg) {
2422 rgctx_reg = mono_alloc_preg (cfg);
2423 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2425 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2427 call = (MonoCallInst*)ins;
2428 if (vtable_arg)
2429 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
2431 return ins;
2434 MonoInst*
2435 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2437 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2440 MonoInst*
2441 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2442 MonoInst **args)
2444 MonoCallInst *call;
2446 g_assert (sig);
2448 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2449 call->fptr = func;
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
2456 MonoInst*
2457 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2459 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2461 g_assert (info);
2463 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 * The target is resolved at patch time via cfg->abs_patches.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
		    MonoMethodSignature *sig, MonoInst **args)
{
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	MonoInst *ins;

	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	/* abs_patches maps the jump-info pointer to itself (identity hash/equal) */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
	return ins;
}
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG to a full register
 * width when the return type is a small integer and the callee is native
 * (pinvoke) or compiled by LLVM. Returns the (possibly new) result ins.
 */
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
			int widen_op = -1;

			/*
			 * Native code might return non register sized integers
			 * without initializing the upper bits.
			 */
			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
			case OP_LOADI1_MEMBASE:
				widen_op = OP_ICONV_TO_I1;
				break;
			case OP_LOADU1_MEMBASE:
				widen_op = OP_ICONV_TO_U1;
				break;
			case OP_LOADI2_MEMBASE:
				widen_op = OP_ICONV_TO_I2;
				break;
			case OP_LOADU2_MEMBASE:
				widen_op = OP_ICONV_TO_U2;
				break;
			default:
				break;
			}

			if (widen_op != -1) {
				int dreg = alloc_preg (cfg);
				MonoInst *widen;

				/* sign/zero-extend the call result into a fresh vreg */
				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
				widen->type = ins->type;
				ins = widen;
			}
		}
	}

	return ins;
}
2532 static MonoMethod*
2533 get_memcpy_method (void)
2535 static MonoMethod *memcpy_method = NULL;
2536 if (!memcpy_method) {
2537 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2538 if (!memcpy_method)
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method;
#if HAVE_WRITE_BARRIERS

/*
 * create_write_barrier_bitmap:
 *
 *   Build into WB_BITMAP a bitmap with one bit per pointer-sized word of
 * KLASS (starting at word OFFSET / SIZEOF_VOID_P) set for every word that
 * holds a reference and therefore needs a write barrier. Recurses into
 * embedded value types that contain references.
 */
static void
create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		/* valuetype field offsets include the (absent) object header: strip it */
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mono_type_is_reference (field->type)) {
			/* references must be pointer-aligned for the per-word bitmap to work */
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
		} else {
			/*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
		}
	}
}
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of class KLASS.
 * IARGS[0]/IARGS[1] hold the destination/source addresses. Returns TRUE if
 * code was emitted, FALSE if the caller must fall back to another copy
 * strategy (bad alignment or size too large for the word bitmap).
 */
static gboolean
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)
		return FALSE;

	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (klass, &need_wb, 0);

	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);

		/* large copy: hand dest/src/size/bitmap to the GC icall instead of unrolling */
		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
		return TRUE;
	}

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	/* Copy word by word, invoking the write barrier on each reference word */
	while (size >= SIZEOF_VOID_P) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1) {
			MonoInst *dummy_use;

			MonoMethod *write_barrier = mono_gc_get_write_barrier ();
			mono_emit_method_call (cfg, write_barrier, &iargs [0], NULL);

			/* keep dest_ptr_reg alive across the barrier call */
			MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
			dummy_use->sreg1 = dest_ptr_reg;
			MONO_ADD_INS (cfg->cbb, dummy_use);
		}

		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}

#endif
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg. When @native
 * is TRUE, the native (marshalled) size/layout is used. Chooses between an
 * inline copy, a write-barrier-aware copy and a call to string::memcpy.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [4];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

#if HAVE_WRITE_BARRIERS
	/* if native is true there should be no references in the struct */
	if (klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
		      (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
				return;
			} else if (context_used) {
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					/* the icall needs the GC descriptor computed up front */
					mono_class_compute_gc_descriptor (klass);
				}
			}

			/* fallback: GC-aware copy icall with (dest, src, klass) */
			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
			return;
		}
	}
#endif

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
2734 static MonoMethod*
2735 get_memset_method (void)
2737 static MonoMethod *memset_method = NULL;
2738 if (!memset_method) {
2739 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2740 if (!memset_method)
2741 g_error ("Old corlib found. Install a new one");
2743 return memset_method;
2746 void
2747 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2749 MonoInst *iargs [3];
2750 int n;
2751 guint32 align;
2752 MonoMethod *memset_method;
2754 /* FIXME: Optimize this for the case when dest is an LDADDR */
2756 mono_class_init (klass);
2757 n = mono_class_value_size (klass, &align);
2759 if (n <= sizeof (gpointer) * 5) {
2760 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2762 else {
2763 memset_method = get_memset_method ();
2764 iargs [0] = dest;
2765 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2766 EMIT_NEW_ICONST (cfg, iargs [2], n);
2767 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on
 * how the method is shared, the context is either the method's MRGCTX
 * argument, the vtable argument (possibly indirected through the MRGCTX),
 * or the vtable read from the `this' object.
 */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this = NULL;

	g_assert (cfg->generic_sharing_context);

	/* instance methods of reference types carry the context in `this' */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_ARGLOAD (cfg, this, 0);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		/* the MRGCTX was stored into the vtable var in the prolog */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* the var actually holds an MRGCTX: load the class vtable out of it */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg, res_reg;

		/* NOTE(review): res_reg appears unused — presumably left over; confirm */
		vtable_reg = alloc_preg (cfg);
		res_reg = alloc_preg (cfg);
		/* load the vtable pointer out of the `this' object */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
2822 static MonoJumpInfoRgctxEntry *
2823 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2825 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2826 res->method = method;
2827 res->in_mrgctx = in_mrgctx;
2828 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2829 res->data->type = patch_type;
2830 res->data->data.target = patch_data;
2831 res->info_type = info_type;
2833 return res;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline to load the slot
 * described by ENTRY from the runtime generic context RGCTX.
 */
static inline MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
2842 static MonoInst*
2843 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2844 MonoClass *klass, int rgctx_type)
2846 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2847 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2849 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
		       MonoMethod *cmethod, int rgctx_type)
{
	if (!context_used) {
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		default:
			g_assert_not_reached ();
		}
	} else {
		/* shared code: fetch the value lazily from the runtime generic context */
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
	}
}
2883 static MonoInst*
2884 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2885 MonoClassField *field, int rgctx_type)
2887 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2888 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2890 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing its
 * vtable in the architecture's vtable register.
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		/* shared code: the vtable is fetched from the rgctx */
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
						   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* a NULL vtable indicates a load error recorded on klass */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (COMPILE_LLVM (cfg))
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
	else
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * mini_emit_check_array_type:
 *
 *   Emit code that compares the vtable (or class) of the object in OBJ
 * against ARRAY_CLASS and throws ArrayTypeMismatchException on mismatch.
 * On return the caller must check @array_class for load errors.
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* shared (domain-neutral) code: vtables differ per domain, compare classes instead */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* gshared code: fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			/* a NULL vtable indicates a load error recorded on array_class */
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
}
2979 static void
2980 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2982 if (mini_get_debug_options ()->better_cast_details) {
2983 int to_klass_reg = alloc_preg (cfg);
2984 int vtable_reg = alloc_preg (cfg);
2985 int klass_reg = alloc_preg (cfg);
2986 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2988 if (!tls_get) {
2989 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2990 exit (1);
2993 MONO_ADD_INS (cfg->cbb, tls_get);
2994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2995 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2997 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2998 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2999 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3003 static void
3004 reset_cast_details (MonoCompile *cfg)
3006 /* Reset the variables holding the cast details */
3007 if (mini_get_debug_options ()->better_cast_details) {
3008 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3010 MONO_ADD_INS (cfg->cbb, tls_get);
3011 /* It is enough to reset the from field */
3012 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	/* Nullable unboxing is implemented by calling Nullable<T>::Unbox */
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
					      MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

		rgctx = emit_get_rgctx (cfg, method, context_used);

		/* indirect call through the code address fetched from the rgctx */
		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		return mono_emit_method_call (cfg, method, &val, NULL);
	}
}
/*
 * handle_unbox:
 *
 *   Emit code to unbox the object on top of the stack (SP[0]) to a
 * valuetype of class KLASS: check the object's rank is 0 and its element
 * class matches, then return the address of the unboxed data (object
 * address + sizeof (MonoObject)) as a STACK_MP instruction.
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* faulting load: also performs the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* shared code: compare against the element class fetched from the rgctx */
		element_class = emit_get_rgctx_klass (cfg, context_used,
						      klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* the unboxed data starts right after the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
3092 * Returns NULL and set the cfg exception on error.
3094 static MonoInst*
3095 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3097 MonoInst *iargs [2];
3098 void *alloc_ftn;
3100 if (context_used) {
3101 MonoInst *data;
3102 int rgctx_info;
3103 MonoInst *iargs [2];
3106 FIXME: we cannot get managed_alloc here because we can't get
3107 the class's vtable (because it's not a closed class)
3109 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3110 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3113 if (cfg->opt & MONO_OPT_SHARED)
3114 rgctx_info = MONO_RGCTX_INFO_KLASS;
3115 else
3116 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3117 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3119 if (cfg->opt & MONO_OPT_SHARED) {
3120 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3121 iargs [1] = data;
3122 alloc_ftn = mono_object_new;
3123 } else {
3124 iargs [0] = data;
3125 alloc_ftn = mono_object_new_specific;
3128 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3131 if (cfg->opt & MONO_OPT_SHARED) {
3132 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3133 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3135 alloc_ftn = mono_object_new;
3136 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3137 /* This happens often in argument checking code, eg. throw new FooException... */
3138 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3139 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3140 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3141 } else {
3142 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3143 MonoMethod *managed_alloc = NULL;
3144 gboolean pass_lw;
3146 if (!vtable) {
3147 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3148 cfg->exception_ptr = klass;
3149 return NULL;
3152 #ifndef MONO_CROSS_COMPILE
3153 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3154 #endif
3156 if (managed_alloc) {
3157 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3158 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3160 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3161 if (pass_lw) {
3162 guint32 lw = vtable->klass->instance_size;
3163 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3164 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3165 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3167 else {
3168 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3172 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3176 * Returns NULL and set the cfg exception on error.
3178 static MonoInst*
3179 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3181 MonoInst *alloc, *ins;
3183 if (mono_class_is_nullable (klass)) {
3184 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3186 if (context_used) {
3187 /* FIXME: What if the class is shared? We might not
3188 have to get the method address from the RGCTX. */
3189 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3190 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3191 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3193 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3194 } else {
3195 return mono_emit_method_call (cfg, method, &val, NULL);
3199 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3200 if (!alloc)
3201 return NULL;
3203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3205 return alloc;
// FIXME: This doesn't work yet (class libs tests fail?)
/* TRUE when an isinst/castclass against KLASS cannot be decided by the simple
 * inline vtable checks and must go through the generic icall instead.
 * NOTE: the leading TRUE || currently forces the icall path for every class
 * (see the FIXME above). */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3212 * Returns NULL and set the cfg exception on error.
3214 static MonoInst*
3215 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3217 MonoBasicBlock *is_null_bb;
3218 int obj_reg = src->dreg;
3219 int vtable_reg = alloc_preg (cfg);
3220 MonoInst *klass_inst = NULL;
3222 if (context_used) {
3223 MonoInst *args [2];
3225 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3226 klass, MONO_RGCTX_INFO_KLASS);
3228 if (is_complex_isinst (klass)) {
3229 /* Complex case, handle by an icall */
3231 /* obj */
3232 args [0] = src;
3234 /* klass */
3235 args [1] = klass_inst;
3237 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3238 } else {
3239 /* Simple case, handled by the code below */
3243 NEW_BBLOCK (cfg, is_null_bb);
3245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3246 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3248 save_cast_details (cfg, klass, obj_reg);
3250 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3252 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3253 } else {
3254 int klass_reg = alloc_preg (cfg);
3256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3258 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3259 /* the remoting code is broken, access the class for now */
3260 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3261 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3262 if (!vt) {
3263 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3264 cfg->exception_ptr = klass;
3265 return NULL;
3267 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3268 } else {
3269 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3270 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3272 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3273 } else {
3274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3275 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3279 MONO_START_BB (cfg, is_null_bb);
3281 reset_cast_details (cfg);
3283 return src;
3287 * Returns NULL and set the cfg exception on error.
3289 static MonoInst*
3290 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3292 MonoInst *ins;
3293 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3294 int obj_reg = src->dreg;
3295 int vtable_reg = alloc_preg (cfg);
3296 int res_reg = alloc_preg (cfg);
3297 MonoInst *klass_inst = NULL;
3299 if (context_used) {
3300 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3302 if (is_complex_isinst (klass)) {
3303 MonoInst *args [2];
3305 /* Complex case, handle by an icall */
3307 /* obj */
3308 args [0] = src;
3310 /* klass */
3311 args [1] = klass_inst;
3313 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3314 } else {
3315 /* Simple case, the code below can handle it */
3319 NEW_BBLOCK (cfg, is_null_bb);
3320 NEW_BBLOCK (cfg, false_bb);
3321 NEW_BBLOCK (cfg, end_bb);
3323 /* Do the assignment at the beginning, so the other assignment can be if converted */
3324 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3325 ins->type = STACK_OBJ;
3326 ins->klass = klass;
3328 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3329 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3331 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3333 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3334 g_assert (!context_used);
3335 /* the is_null_bb target simply copies the input register to the output */
3336 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3337 } else {
3338 int klass_reg = alloc_preg (cfg);
3340 if (klass->rank) {
3341 int rank_reg = alloc_preg (cfg);
3342 int eclass_reg = alloc_preg (cfg);
3344 g_assert (!context_used);
3345 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3346 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3347 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3350 if (klass->cast_class == mono_defaults.object_class) {
3351 int parent_reg = alloc_preg (cfg);
3352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3353 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3354 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3355 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3356 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3357 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3358 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3360 } else if (klass->cast_class == mono_defaults.enum_class) {
3361 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3363 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3364 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3365 } else {
3366 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3367 /* Check that the object is a vector too */
3368 int bounds_reg = alloc_preg (cfg);
3369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3371 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3374 /* the is_null_bb target simply copies the input register to the output */
3375 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3377 } else if (mono_class_is_nullable (klass)) {
3378 g_assert (!context_used);
3379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3380 /* the is_null_bb target simply copies the input register to the output */
3381 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3382 } else {
3383 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3384 g_assert (!context_used);
3385 /* the remoting code is broken, access the class for now */
3386 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3387 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3388 if (!vt) {
3389 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3390 cfg->exception_ptr = klass;
3391 return NULL;
3393 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3394 } else {
3395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3400 } else {
3401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3402 /* the is_null_bb target simply copies the input register to the output */
3403 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3408 MONO_START_BB (cfg, false_bb);
3410 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3411 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3413 MONO_START_BB (cfg, is_null_bb);
3415 MONO_START_BB (cfg, end_bb);
3417 return ins;
3420 static MonoInst*
3421 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3423 /* This opcode takes as input an object reference and a class, and returns:
3424 0) if the object is an instance of the class,
3425 1) if the object is not instance of the class,
3426 2) if the object is a proxy whose type cannot be determined */
3428 MonoInst *ins;
3429 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3430 int obj_reg = src->dreg;
3431 int dreg = alloc_ireg (cfg);
3432 int tmp_reg;
3433 int klass_reg = alloc_preg (cfg);
3435 NEW_BBLOCK (cfg, true_bb);
3436 NEW_BBLOCK (cfg, false_bb);
3437 NEW_BBLOCK (cfg, false2_bb);
3438 NEW_BBLOCK (cfg, end_bb);
3439 NEW_BBLOCK (cfg, no_proxy_bb);
3441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3442 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3444 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3445 NEW_BBLOCK (cfg, interface_fail_bb);
3447 tmp_reg = alloc_preg (cfg);
3448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3449 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3450 MONO_START_BB (cfg, interface_fail_bb);
3451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3453 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3455 tmp_reg = alloc_preg (cfg);
3456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3458 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3459 } else {
3460 tmp_reg = alloc_preg (cfg);
3461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3464 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3465 tmp_reg = alloc_preg (cfg);
3466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3469 tmp_reg = alloc_preg (cfg);
3470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3472 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3474 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3477 MONO_START_BB (cfg, no_proxy_bb);
3479 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3482 MONO_START_BB (cfg, false_bb);
3484 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3487 MONO_START_BB (cfg, false2_bb);
3489 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3492 MONO_START_BB (cfg, true_bb);
3494 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3496 MONO_START_BB (cfg, end_bb);
3498 /* FIXME: */
3499 MONO_INST_NEW (cfg, ins, OP_ICONST);
3500 ins->dreg = dreg;
3501 ins->type = STACK_I4;
3503 return ins;
3506 static MonoInst*
3507 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3509 /* This opcode takes as input an object reference and a class, and returns:
3510 0) if the object is an instance of the class,
3511 1) if the object is a proxy whose type cannot be determined
3512 an InvalidCastException exception is thrown otherwhise*/
3514 MonoInst *ins;
3515 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3516 int obj_reg = src->dreg;
3517 int dreg = alloc_ireg (cfg);
3518 int tmp_reg = alloc_preg (cfg);
3519 int klass_reg = alloc_preg (cfg);
3521 NEW_BBLOCK (cfg, end_bb);
3522 NEW_BBLOCK (cfg, ok_result_bb);
3524 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3525 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3527 save_cast_details (cfg, klass, obj_reg);
3529 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3530 NEW_BBLOCK (cfg, interface_fail_bb);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3533 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3534 MONO_START_BB (cfg, interface_fail_bb);
3535 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3537 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3539 tmp_reg = alloc_preg (cfg);
3540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3542 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3544 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3547 } else {
3548 NEW_BBLOCK (cfg, no_proxy_bb);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3552 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3554 tmp_reg = alloc_preg (cfg);
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3558 tmp_reg = alloc_preg (cfg);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3563 NEW_BBLOCK (cfg, fail_1_bb);
3565 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3567 MONO_START_BB (cfg, fail_1_bb);
3569 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3572 MONO_START_BB (cfg, no_proxy_bb);
3574 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3577 MONO_START_BB (cfg, ok_result_bb);
3579 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3581 MONO_START_BB (cfg, end_bb);
3583 /* FIXME: */
3584 MONO_INST_NEW (cfg, ins, OP_ICONST);
3585 ins->dreg = dreg;
3586 ins->type = STACK_I4;
3588 return ins;
3592 * Returns NULL and set the cfg exception on error.
3594 static G_GNUC_UNUSED MonoInst*
3595 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3597 gpointer *trampoline;
3598 MonoInst *obj, *method_ins, *tramp_ins;
3599 MonoDomain *domain;
3600 guint8 **code_slot;
3602 obj = handle_alloc (cfg, klass, FALSE, 0);
3603 if (!obj)
3604 return NULL;
3606 /* Inline the contents of mono_delegate_ctor */
3608 /* Set target field */
3609 /* Optimize away setting of NULL target */
3610 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3611 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3613 /* Set method field */
3614 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3618 * To avoid looking up the compiled code belonging to the target method
3619 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3620 * store it, and we fill it after the method has been compiled.
3622 if (!cfg->compile_aot && !method->dynamic) {
3623 MonoInst *code_slot_ins;
3625 if (context_used) {
3626 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3627 } else {
3628 domain = mono_domain_get ();
3629 mono_domain_lock (domain);
3630 if (!domain_jit_info (domain)->method_code_hash)
3631 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3632 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3633 if (!code_slot) {
3634 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3635 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3637 mono_domain_unlock (domain);
3639 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3641 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3644 /* Set invoke_impl field */
3645 if (cfg->compile_aot) {
3646 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3647 } else {
3648 trampoline = mono_create_delegate_trampoline (klass);
3649 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3651 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3653 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3655 return obj;
3658 static MonoInst*
3659 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3661 MonoJitICallInfo *info;
3663 /* Need to register the icall so it gets an icall wrapper */
3664 info = mono_get_array_new_va_icall (rank);
3666 cfg->flags |= MONO_CFG_HAS_VARARGS;
3668 /* mono_array_new_va () needs a vararg calling convention */
3669 cfg->disable_llvm = TRUE;
3671 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3672 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3675 static void
3676 mono_emit_load_got_addr (MonoCompile *cfg)
3678 MonoInst *getaddr, *dummy_use;
3680 if (!cfg->got_var || cfg->got_var_allocated)
3681 return;
3683 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3684 getaddr->dreg = cfg->got_var->dreg;
3686 /* Add it to the start of the first bblock */
3687 if (cfg->bb_entry->code) {
3688 getaddr->next = cfg->bb_entry->code;
3689 cfg->bb_entry->code = getaddr;
3691 else
3692 MONO_ADD_INS (cfg->bb_entry, getaddr);
3694 cfg->got_var_allocated = TRUE;
3697 * Add a dummy use to keep the got_var alive, since real uses might
3698 * only be generated by the back ends.
3699 * Add it to end_bblock, so the variable's lifetime covers the whole
3700 * method.
3701 * It would be better to make the usage of the got var explicit in all
3702 * cases when the backend needs it (i.e. calls, throw etc.), so this
3703 * wouldn't be needed.
3705 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3706 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3709 static int inline_limit;
3710 static gboolean inline_limit_inited;
3712 static gboolean
3713 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3715 MonoMethodHeaderSummary header;
3716 MonoVTable *vtable;
3717 #ifdef MONO_ARCH_SOFT_FLOAT
3718 MonoMethodSignature *sig = mono_method_signature (method);
3719 int i;
3720 #endif
3722 if (cfg->generic_sharing_context)
3723 return FALSE;
3725 if (cfg->inline_depth > 10)
3726 return FALSE;
3728 #ifdef MONO_ARCH_HAVE_LMF_OPS
3729 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3730 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3731 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3732 return TRUE;
3733 #endif
3736 if (!mono_method_get_header_summary (method, &header))
3737 return FALSE;
3739 /*runtime, icall and pinvoke are checked by summary call*/
3740 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3741 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3742 (method->klass->marshalbyref) ||
3743 header.has_clauses)
3744 return FALSE;
3746 /* also consider num_locals? */
3747 /* Do the size check early to avoid creating vtables */
3748 if (!inline_limit_inited) {
3749 if (getenv ("MONO_INLINELIMIT"))
3750 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3751 else
3752 inline_limit = INLINE_LENGTH_LIMIT;
3753 inline_limit_inited = TRUE;
3755 if (header.code_size >= inline_limit)
3756 return FALSE;
3759 * if we can initialize the class of the method right away, we do,
3760 * otherwise we don't allow inlining if the class needs initialization,
3761 * since it would mean inserting a call to mono_runtime_class_init()
3762 * inside the inlined code
3764 if (!(cfg->opt & MONO_OPT_SHARED)) {
3765 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3766 if (cfg->run_cctors && method->klass->has_cctor) {
3767 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3768 if (!method->klass->runtime_info)
3769 /* No vtable created yet */
3770 return FALSE;
3771 vtable = mono_class_vtable (cfg->domain, method->klass);
3772 if (!vtable)
3773 return FALSE;
3774 /* This makes so that inline cannot trigger */
3775 /* .cctors: too many apps depend on them */
3776 /* running with a specific order... */
3777 if (! vtable->initialized)
3778 return FALSE;
3779 mono_runtime_class_init (vtable);
3781 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3782 if (!method->klass->runtime_info)
3783 /* No vtable created yet */
3784 return FALSE;
3785 vtable = mono_class_vtable (cfg->domain, method->klass);
3786 if (!vtable)
3787 return FALSE;
3788 if (!vtable->initialized)
3789 return FALSE;
3791 } else {
3793 * If we're compiling for shared code
3794 * the cctor will need to be run at aot method load time, for example,
3795 * or at the end of the compilation of the inlining method.
3797 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3798 return FALSE;
3802 * CAS - do not inline methods with declarative security
3803 * Note: this has to be before any possible return TRUE;
3805 if (mono_method_has_declsec (method))
3806 return FALSE;
3808 #ifdef MONO_ARCH_SOFT_FLOAT
3809 /* FIXME: */
3810 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3811 return FALSE;
3812 for (i = 0; i < sig->param_count; ++i)
3813 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3814 return FALSE;
3815 #endif
3817 return TRUE;
3820 static gboolean
3821 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3823 if (vtable->initialized && !cfg->compile_aot)
3824 return FALSE;
3826 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3827 return FALSE;
3829 if (!mono_class_needs_cctor_run (vtable->klass, method))
3830 return FALSE;
3832 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3833 /* The initialization is already done before the method is called */
3834 return FALSE;
3836 return TRUE;
3839 static MonoInst*
3840 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3842 MonoInst *ins;
3843 guint32 size;
3844 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3846 mono_class_init (klass);
3847 size = mono_class_array_element_size (klass);
3849 mult_reg = alloc_preg (cfg);
3850 array_reg = arr->dreg;
3851 index_reg = index->dreg;
3853 #if SIZEOF_REGISTER == 8
3854 /* The array reg is 64 bits but the index reg is only 32 */
3855 if (COMPILE_LLVM (cfg)) {
3856 /* Not needed */
3857 index2_reg = index_reg;
3858 } else {
3859 index2_reg = alloc_preg (cfg);
3860 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3862 #else
3863 if (index->type == STACK_I8) {
3864 index2_reg = alloc_preg (cfg);
3865 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3866 } else {
3867 index2_reg = index_reg;
3869 #endif
3871 if (bcheck)
3872 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3874 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3875 if (size == 1 || size == 2 || size == 4 || size == 8) {
3876 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3878 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3879 ins->type = STACK_PTR;
3881 return ins;
3883 #endif
3885 add_reg = alloc_preg (cfg);
3887 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3888 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3889 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3890 ins->type = STACK_PTR;
3891 MONO_ADD_INS (cfg->cbb, ins);
3893 return ins;
3896 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR which computes the address of an element of a two dimensional
 * array. ARR is the array object, INDEX_INS1/INDEX_INS2 are the two indexes
 * and KLASS is the element class. Each index is checked against the
 * corresponding MonoArrayBounds entry, raising IndexOutOfRangeException on
 * failure. Returns a STACK_MP instruction holding the element address.
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* dim 0: realidx1 = index1 - lower_bound; throw if length <= realidx1 (unsigned) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	/* the unsigned compare also catches a negative realidx1 */
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* dim 1: same check against the second MonoArrayBounds entry */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * length1 + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
3953 #endif
3955 static MonoInst*
3956 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3958 int rank;
3959 MonoInst *addr;
3960 MonoMethod *addr_method;
3961 int element_size;
3963 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3965 if (rank == 1)
3966 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3968 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3969 /* emit_ldelema_2 depends on OP_LMUL */
3970 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3971 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3973 #endif
3975 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3976 addr_method = mono_marshal_get_array_address (rank, element_size);
3977 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3979 return addr;
/* Default break policy: honour every breakpoint request. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}
3988 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3991 * mono_set_break_policy:
3992 * policy_callback: the new callback function
3994 * Allow embedders to decide wherther to actually obey breakpoint instructions
3995 * (both break IL instructions and Debugger.Break () method calls), for example
3996 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3997 * untrusted or semi-trusted code.
3999 * @policy_callback will be called every time a break point instruction needs to
4000 * be inserted with the method argument being the method that calls Debugger.Break()
4001 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4002 * if it wants the breakpoint to not be effective in the given method.
4003 * #MONO_BREAK_POLICY_ALWAYS is the default.
4005 void
4006 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4008 if (policy_callback)
4009 break_policy_func = policy_callback;
4010 else
4011 break_policy_func = always_insert_breakpoint;
4014 static gboolean
4015 should_insert_brekpoint (MonoMethod *method) {
4016 switch (break_policy_func (method)) {
4017 case MONO_BREAK_POLICY_ALWAYS:
4018 return TRUE;
4019 case MONO_BREAK_POLICY_NEVER:
4020 return FALSE;
4021 case MONO_BREAK_POLICY_ON_DBG:
4022 return mono_debug_using_mono_debugger ();
4023 default:
4024 g_warning ("Incorrect value returned from break policy callback");
4025 return FALSE;
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 * args [0] is the array, args [1] the index, args [2] a pointer to the value.
 * With IS_SET, *args [2] is copied into the array element, otherwise the
 * element is copied out through the args [2] pointer. Returns the final
 * store instruction.
 */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	/* (FALSE here presumably suppresses the bounds check -- callee is defined earlier in the file) */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		/* *element = *args [2] */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	} else {
		/* *args [2] = *element */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
4048 static MonoInst*
4049 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4051 MonoInst *ins = NULL;
4052 #ifdef MONO_ARCH_SIMD_INTRINSICS
4053 if (cfg->opt & MONO_OPT_SIMD) {
4054 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4055 if (ins)
4056 return ins;
4058 #endif
4060 return ins;
/*
 * mini_emit_inst_for_method:
 *
 * Emit inline IR for well known methods (intrinsics): String.get_Chars/
 * get_Length/InternalSetChar, Object.GetType/InternalGetHashCode/.ctor,
 * Array Get/SetGenericValueImpl/get_Rank/get_Length,
 * RuntimeHelpers.get_OffsetToStringData, Thread.SpinWait_nop/MemoryBarrier,
 * the Monitor Enter/Exit fast paths, the Interlocked family,
 * Debugger.Break and Environment.get_IsRunningOnWindows.
 * Returns the instruction producing the result, or NULL if CMETHOD is not
 * handled; finally falls back to mono_arch_emit_inst_for_method () for
 * arch-specific intrinsics.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			/* addr = str + index * 2 + offsetof (MonoString, chars), folded into one lea */
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_preg (cfg);
			int vt_reg = alloc_preg (cfg);
			/* obj->vtable->type */
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			/* hash the object address; only valid with a non-moving GC */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor () is empty */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if (strcmp (cmethod->name, "Enter") == 0 ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				/* atomic add of the constant 1 */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				/* atomic add of the constant -1 */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				/* a reference was stored into args [0]; notify the GC */
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
			}
#endif
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
			}
#endif
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
4462 * This entry point could be used later for arbitrary method
4463 * redirection.
4465 inline static MonoInst*
4466 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4467 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4469 if (method->klass == mono_defaults.string_class) {
4470 /* managed string allocation support */
4471 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4472 MonoInst *iargs [2];
4473 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4474 MonoMethod *managed_alloc = NULL;
4476 g_assert (vtable); /*Should not fail since it System.String*/
4477 #ifndef MONO_CROSS_COMPILE
4478 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4479 #endif
4480 if (!managed_alloc)
4481 return NULL;
4482 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4483 iargs [1] = args [0];
4484 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4487 return NULL;
/*
 * mono_save_args:
 *
 * Create a local variable for each argument of SIG (plus the implicit this
 * pointer, when present), install them as cfg->args and store the stack
 * values SP into them. Used when setting up the arguments of an inlined
 * method body.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* for the this argument the static type comes from the stack entry */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
4516 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4517 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 * Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. Returns TRUE when the variable is unset or empty.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	/* Cache the environment lookup; an unset variable means "no limit". */
	if (limit == NULL) {
		char *env = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
		limit = env ? env : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 * Debugging aid: only allow inlining into callers whose full name starts with
 * the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable. Returns TRUE when the variable is unset or empty.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	/* Cache the environment lookup; an unset variable means "no limit". */
	if (limit == NULL) {
		char *env = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		limit = env ? env : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
/*
 * inline_method:
 *
 * Try to inline CMETHOD at the current position (cfg->cbb). SP points to the
 * arguments already on the evaluation stack; on success the return value, if
 * any, is pushed onto *SP. Returns the cost of the inlined code plus one, or
 * 0 when the method could not (or should not) be inlined, in which case the
 * caller emits a regular call.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		if (cheader)
			mono_metadata_free_mh (cheader);
		mono_loader_clear_error ();
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* save the per-method compilation state which mono_method_to_ir overwrites */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	ret_var_set = cfg->ret_var_set;

	/* restore the caller's compilation state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* costs < 60 is the inline budget; negative costs mean conversion failed */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			/* push the return value */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
4771 * Some of these comments may well be out-of-date.
4772 * Design decisions: we do a single pass over the IL code (and we do bblock
4773 * splitting/merging in the few cases when it's required: a back jump to an IL
4774 * address that was not already seen as bblock starting point).
4775 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4776 * Complex operations are decomposed in simpler ones right away. We need to let the
4777 * arch-specific code peek and poke inside this process somehow (except when the
4778 * optimizations can take advantage of the full semantic info of coarse opcodes).
4779 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4780 * MonoInst->opcode initially is the IL opcode or some simplification of that
4781 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4782 * opcode with value bigger than OP_LAST.
4783 * At this point the IR can be handed over to an interpreter, a dumb code generator
4784 * or to the optimizing code generator that will translate it to SSA form.
4786 * Profiling directed optimizations.
4787 * We may compile by default with few or no optimizations and instrument the code
4788 * or the user may indicate what methods to optimize the most either in a config file
4789 * or through repeated runs where the compiler applies offline the optimizations to
4790 * each method and then decides if it was worth it.
4793 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4794 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4795 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4796 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4797 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4798 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4799 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4800 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4802 /* offset from br.s -> br like opcodes */
4803 #define BIG_BRANCH_OFFSET 13
4805 static gboolean
4806 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4808 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4810 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 * Do a single pass over the IL between START and END, registering a basic
 * block (through GET_BBLOCK) at every branch target and at the instruction
 * following each branch. The bblock containing a CEE_THROW is marked
 * out-of-line. Returns 0 on success; on invalid IL, stores the failing ip
 * in *POS and returns 1.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* advance ip past the operand; ECMA-335 operand sizes */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 8 bit signed branch displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 32 bit signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* switch targets are relative to the end of the whole switch instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
4906 static inline MonoMethod *
4907 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4909 MonoMethod *method;
4911 if (m->wrapper_type != MONO_WRAPPER_NONE)
4912 return mono_method_get_wrapper_data (m, token);
4914 method = mono_get_method_full (m->klass->image, token, klass, context);
4916 return method;
4919 static inline MonoMethod *
4920 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4922 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4924 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4925 return NULL;
4927 return method;
4930 static inline MonoClass*
4931 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4933 MonoClass *klass;
4935 if (method->wrapper_type != MONO_WRAPPER_NONE)
4936 klass = mono_method_get_wrapper_data (method, token);
4937 else
4938 klass = mono_class_get_full (method->klass->image, token, context);
4939 if (klass)
4940 mono_class_init (klass);
4941 return klass;
4945 * Returns TRUE if the JIT should abort inlining because "callee"
4946 * is influenced by security attributes.
4948 static
4949 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4951 guint32 result;
4953 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4954 return TRUE;
4957 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4958 if (result == MONO_JIT_SECURITY_OK)
4959 return FALSE;
4961 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4962 /* Generate code to throw a SecurityException before the actual call/link */
4963 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4964 MonoInst *args [2];
4966 NEW_ICONST (cfg, args [0], 4);
4967 NEW_METHODCONST (cfg, args [1], caller);
4968 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4969 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4970 /* don't hide previous results */
4971 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4972 cfg->exception_data = result;
4973 return TRUE;
4976 return FALSE;
4979 static MonoMethod*
4980 throw_exception (void)
4982 static MonoMethod *method = NULL;
4984 if (!method) {
4985 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4986 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4988 g_assert (method);
4989 return method;
4992 static void
4993 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4995 MonoMethod *thrower = throw_exception ();
4996 MonoInst *args [1];
4998 EMIT_NEW_PCONST (cfg, args [0], ex);
4999 mono_emit_method_call (cfg, thrower, args, NULL);
5003 * Return the original method is a wrapper is specified. We can only access
5004 * the custom attributes from the original method.
5006 static MonoMethod*
5007 get_original_method (MonoMethod *method)
5009 if (method->wrapper_type == MONO_WRAPPER_NONE)
5010 return method;
5012 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5013 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5014 return NULL;
5016 /* in other cases we need to find the original method */
5017 return mono_marshal_method_from_wrapper (method);
5020 static void
5021 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5022 MonoBasicBlock *bblock, unsigned char *ip)
5024 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5025 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5026 if (ex)
5027 emit_throw_exception (cfg, ex);
5030 static void
5031 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5032 MonoBasicBlock *bblock, unsigned char *ip)
5034 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5035 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5036 if (ex)
5037 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *
 * On a match, *OUT_SIZE receives the byte size of the initializer and
 * *OUT_FIELD_TOKEN the token of the RVA field holding the data; the
 * return value points at the raw data (or, for AOT on a non-dynamic
 * image, encodes the RVA itself via GUINT_TO_POINTER).  Returns NULL
 * when the pattern does not match or cannot be optimized.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] == 0x4 checks that the ldtoken operand is a field token
	 * (0x04 table byte — NOTE(review): confirm against MONO_TOKEN_FIELD_DEF). */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* Only optimize calls to the real RuntimeHelpers.InitializeArray. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		/* deliberate fallthrough: R8 shares the 8-byte case when not on FPA */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* the initializer field must be at least as large as the array data */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5124 static void
5125 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5127 char *method_fname = mono_method_full_name (method, TRUE);
5128 char *method_code;
5129 MonoMethodHeader *header = mono_method_get_header (method);
5131 if (header->code_size == 0)
5132 method_code = g_strdup ("method body is empty.");
5133 else
5134 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5135 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5136 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5137 g_free (method_fname);
5138 g_free (method_code);
5139 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object to be thrown for this
 * compilation.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	/* exception_ptr will hold a managed object, so register it as a GC
	 * root before storing into it. */
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
5150 static gboolean
5151 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5153 MonoType *type;
5155 if (cfg->generic_sharing_context)
5156 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5157 else
5158 type = &klass->byval_arg;
5159 return MONO_TYPE_IS_REFERENCE (type);
5162 static void
5163 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5165 MonoInst *ins;
5166 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5167 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5168 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5169 /* Optimize reg-reg moves away */
5171 * Can't optimize other opcodes, since sp[0] might point to
5172 * the last ins of a decomposed opcode.
5174 sp [0]->dreg = (cfg)->locals [n]->dreg;
5175 } else {
5176 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 *
 * Recognizes "ldloca(.s) N; initobj T" and replaces it with a direct
 * zero/NULL store into local N.  Returns the ip past the consumed
 * sequence on success, NULL if the pattern does not apply (or if the
 * type fails to load — CHECK_TYPELOAD jumps to load_error).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* SIZE selects the ldloca encoding: 1 = ldloca.s (1-byte index),
	 * otherwise ldloca with a 2-byte index after the 0xFE prefix. */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		/* initobj on a reference type stores NULL; on a struct it zeroes. */
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
	/* no match: fall through to the shared failure return */
load_error:
	return NULL;
}
5222 static gboolean
5223 is_exception_class (MonoClass *class)
5225 while (class) {
5226 if (class == mono_defaults.exception_class)
5227 return TRUE;
5228 class = class->parent;
5230 return FALSE;
5234 * mono_method_to_ir:
5236 * Translate the .net IL into linear IR.
5239 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5240 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5241 guint inline_offset, gboolean is_virtual_call)
5243 MonoError error;
5244 MonoInst *ins, **sp, **stack_start;
5245 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5246 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5247 MonoMethod *cmethod, *method_definition;
5248 MonoInst **arg_array;
5249 MonoMethodHeader *header;
5250 MonoImage *image;
5251 guint32 token, ins_flag;
5252 MonoClass *klass;
5253 MonoClass *constrained_call = NULL;
5254 unsigned char *ip, *end, *target, *err_pos;
5255 static double r8_0 = 0.0;
5256 MonoMethodSignature *sig;
5257 MonoGenericContext *generic_context = NULL;
5258 MonoGenericContainer *generic_container = NULL;
5259 MonoType **param_types;
5260 int i, n, start_new_bblock, dreg;
5261 int num_calls = 0, inline_costs = 0;
5262 int breakpoint_id = 0;
5263 guint num_args;
5264 MonoBoolean security, pinvoke;
5265 MonoSecurityManager* secman = NULL;
5266 MonoDeclSecurityActions actions;
5267 GSList *class_inits = NULL;
5268 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5269 int context_used;
5270 gboolean init_locals, seq_points, skip_dead_blocks;
5272 /* serialization and xdomain stuff may need access to private fields and methods */
5273 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5274 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5275 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5276 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5277 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5278 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5280 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5282 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5283 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5284 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5285 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5287 image = method->klass->image;
5288 header = mono_method_get_header (method);
5289 if (!header) {
5290 MonoLoaderError *error;
5292 if ((error = mono_loader_get_last_error ())) {
5293 cfg->exception_type = error->exception_type;
5294 } else {
5295 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5296 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5298 goto exception_exit;
5300 generic_container = mono_method_get_generic_container (method);
5301 sig = mono_method_signature (method);
5302 num_args = sig->hasthis + sig->param_count;
5303 ip = (unsigned char*)header->code;
5304 cfg->cil_start = ip;
5305 end = ip + header->code_size;
5306 mono_jit_stats.cil_code_size += header->code_size;
5307 init_locals = header->init_locals;
5309 seq_points = cfg->gen_seq_points && cfg->method == method;
5312 * Methods without init_locals set could cause asserts in various passes
5313 * (#497220).
5315 init_locals = TRUE;
5317 method_definition = method;
5318 while (method_definition->is_inflated) {
5319 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5320 method_definition = imethod->declaring;
5323 /* SkipVerification is not allowed if core-clr is enabled */
5324 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5325 dont_verify = TRUE;
5326 dont_verify_stloc = TRUE;
5329 if (!dont_verify && mini_method_verify (cfg, method_definition))
5330 goto exception_exit;
5332 if (mono_debug_using_mono_debugger ())
5333 cfg->keep_cil_nops = TRUE;
5335 if (sig->is_inflated)
5336 generic_context = mono_method_get_context (method);
5337 else if (generic_container)
5338 generic_context = &generic_container->context;
5339 cfg->generic_context = generic_context;
5341 if (!cfg->generic_sharing_context)
5342 g_assert (!sig->has_type_parameters);
5344 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5345 g_assert (method->is_inflated);
5346 g_assert (mono_method_get_context (method)->method_inst);
5348 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5349 g_assert (sig->generic_param_count);
5351 if (cfg->method == method) {
5352 cfg->real_offset = 0;
5353 } else {
5354 cfg->real_offset = inline_offset;
5357 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5358 cfg->cil_offset_to_bb_len = header->code_size;
5360 cfg->current_method = method;
5362 if (cfg->verbose_level > 2)
5363 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5365 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5366 if (sig->hasthis)
5367 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5368 for (n = 0; n < sig->param_count; ++n)
5369 param_types [n + sig->hasthis] = sig->params [n];
5370 cfg->arg_types = param_types;
5372 dont_inline = g_list_prepend (dont_inline, method);
5373 if (cfg->method == method) {
5375 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5376 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5378 /* ENTRY BLOCK */
5379 NEW_BBLOCK (cfg, start_bblock);
5380 cfg->bb_entry = start_bblock;
5381 start_bblock->cil_code = NULL;
5382 start_bblock->cil_length = 0;
5384 /* EXIT BLOCK */
5385 NEW_BBLOCK (cfg, end_bblock);
5386 cfg->bb_exit = end_bblock;
5387 end_bblock->cil_code = NULL;
5388 end_bblock->cil_length = 0;
5389 g_assert (cfg->num_bblocks == 2);
5391 arg_array = cfg->args;
5393 if (header->num_clauses) {
5394 cfg->spvars = g_hash_table_new (NULL, NULL);
5395 cfg->exvars = g_hash_table_new (NULL, NULL);
5397 /* handle exception clauses */
5398 for (i = 0; i < header->num_clauses; ++i) {
5399 MonoBasicBlock *try_bb;
5400 MonoExceptionClause *clause = &header->clauses [i];
5401 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5402 try_bb->real_offset = clause->try_offset;
5403 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5404 tblock->real_offset = clause->handler_offset;
5405 tblock->flags |= BB_EXCEPTION_HANDLER;
5407 link_bblock (cfg, try_bb, tblock);
5409 if (*(ip + clause->handler_offset) == CEE_POP)
5410 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5412 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5413 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5414 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5415 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5416 MONO_ADD_INS (tblock, ins);
5418 /* todo: is a fault block unsafe to optimize? */
5419 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5420 tblock->flags |= BB_EXCEPTION_UNSAFE;
5424 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5425 while (p < end) {
5426 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5428 /* catch and filter blocks get the exception object on the stack */
5429 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5430 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5431 MonoInst *dummy_use;
5433 /* mostly like handle_stack_args (), but just sets the input args */
5434 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5435 tblock->in_scount = 1;
5436 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5437 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5440 * Add a dummy use for the exvar so its liveness info will be
5441 * correct.
5443 cfg->cbb = tblock;
5444 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5446 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5447 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5448 tblock->flags |= BB_EXCEPTION_HANDLER;
5449 tblock->real_offset = clause->data.filter_offset;
5450 tblock->in_scount = 1;
5451 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5452 /* The filter block shares the exvar with the handler block */
5453 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5454 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5455 MONO_ADD_INS (tblock, ins);
5459 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5460 clause->data.catch_class &&
5461 cfg->generic_sharing_context &&
5462 mono_class_check_context_used (clause->data.catch_class)) {
5464 * In shared generic code with catch
5465 * clauses containing type variables
5466 * the exception handling code has to
5467 * be able to get to the rgctx.
5468 * Therefore we have to make sure that
5469 * the vtable/mrgctx argument (for
5470 * static or generic methods) or the
5471 * "this" argument (for non-static
5472 * methods) are live.
5474 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5475 mini_method_get_context (method)->method_inst ||
5476 method->klass->valuetype) {
5477 mono_get_vtable_var (cfg);
5478 } else {
5479 MonoInst *dummy_use;
5481 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5485 } else {
5486 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5487 cfg->cbb = start_bblock;
5488 cfg->args = arg_array;
5489 mono_save_args (cfg, sig, inline_args);
5492 /* FIRST CODE BLOCK */
5493 NEW_BBLOCK (cfg, bblock);
5494 bblock->cil_code = ip;
5495 cfg->cbb = bblock;
5496 cfg->ip = ip;
5498 ADD_BBLOCK (cfg, bblock);
5500 if (cfg->method == method) {
5501 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5502 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5503 MONO_INST_NEW (cfg, ins, OP_BREAK);
5504 MONO_ADD_INS (bblock, ins);
5508 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5509 secman = mono_security_manager_get_methods ();
5511 security = (secman && mono_method_has_declsec (method));
5512 /* at this point having security doesn't mean we have any code to generate */
5513 if (security && (cfg->method == method)) {
5514 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5515 * And we do not want to enter the next section (with allocation) if we
5516 * have nothing to generate */
5517 security = mono_declsec_get_demands (method, &actions);
5520 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5521 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5522 if (pinvoke) {
5523 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5524 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5525 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5527 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5528 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5529 pinvoke = FALSE;
5531 if (custom)
5532 mono_custom_attrs_free (custom);
5534 if (pinvoke) {
5535 custom = mono_custom_attrs_from_class (wrapped->klass);
5536 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5537 pinvoke = FALSE;
5539 if (custom)
5540 mono_custom_attrs_free (custom);
5542 } else {
5543 /* not a P/Invoke after all */
5544 pinvoke = FALSE;
5548 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5549 /* we use a separate basic block for the initialization code */
5550 NEW_BBLOCK (cfg, init_localsbb);
5551 cfg->bb_init = init_localsbb;
5552 init_localsbb->real_offset = cfg->real_offset;
5553 start_bblock->next_bb = init_localsbb;
5554 init_localsbb->next_bb = bblock;
5555 link_bblock (cfg, start_bblock, init_localsbb);
5556 link_bblock (cfg, init_localsbb, bblock);
5558 cfg->cbb = init_localsbb;
5559 } else {
5560 start_bblock->next_bb = bblock;
5561 link_bblock (cfg, start_bblock, bblock);
5564 /* at this point we know, if security is TRUE, that some code needs to be generated */
5565 if (security && (cfg->method == method)) {
5566 MonoInst *args [2];
5568 mono_jit_stats.cas_demand_generation++;
5570 if (actions.demand.blob) {
5571 /* Add code for SecurityAction.Demand */
5572 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5573 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5574 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5575 mono_emit_method_call (cfg, secman->demand, args, NULL);
5577 if (actions.noncasdemand.blob) {
5578 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5579 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5580 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5581 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5582 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5583 mono_emit_method_call (cfg, secman->demand, args, NULL);
5585 if (actions.demandchoice.blob) {
5586 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5587 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5588 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5589 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5590 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5594 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5595 if (pinvoke) {
5596 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5599 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5600 /* check if this is native code, e.g. an icall or a p/invoke */
5601 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5602 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5603 if (wrapped) {
5604 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5605 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5607 /* if this ia a native call then it can only be JITted from platform code */
5608 if ((icall || pinvk) && method->klass && method->klass->image) {
5609 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5610 MonoException *ex = icall ? mono_get_exception_security () :
5611 mono_get_exception_method_access ();
5612 emit_throw_exception (cfg, ex);
5619 if (header->code_size == 0)
5620 UNVERIFIED;
5622 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5623 ip = err_pos;
5624 UNVERIFIED;
5627 if (cfg->method == method)
5628 mono_debug_init_method (cfg, bblock, breakpoint_id);
5630 for (n = 0; n < header->num_locals; ++n) {
5631 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5632 UNVERIFIED;
5634 class_inits = NULL;
5636 /* We force the vtable variable here for all shared methods
5637 for the possibility that they might show up in a stack
5638 trace where their exact instantiation is needed. */
5639 if (cfg->generic_sharing_context && method == cfg->method) {
5640 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5641 mini_method_get_context (method)->method_inst ||
5642 method->klass->valuetype) {
5643 mono_get_vtable_var (cfg);
5644 } else {
5645 /* FIXME: Is there a better way to do this?
5646 We need the variable live for the duration
5647 of the whole method. */
5648 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5652 /* add a check for this != NULL to inlined methods */
5653 if (is_virtual_call) {
5654 MonoInst *arg_ins;
5656 NEW_ARGLOAD (cfg, arg_ins, 0);
5657 MONO_ADD_INS (cfg->cbb, arg_ins);
5658 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5661 skip_dead_blocks = !dont_verify;
5662 if (skip_dead_blocks) {
5663 original_bb = bb = mono_basic_block_split (method, &error);
5664 if (!mono_error_ok (&error)) {
5665 mono_error_cleanup (&error);
5666 UNVERIFIED;
5668 g_assert (bb);
5671 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5672 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5674 ins_flag = 0;
5675 start_new_bblock = 0;
5676 cfg->cbb = bblock;
5677 while (ip < end) {
5678 if (cfg->method == method)
5679 cfg->real_offset = ip - header->code;
5680 else
5681 cfg->real_offset = inline_offset;
5682 cfg->ip = ip;
5684 context_used = 0;
5686 if (start_new_bblock) {
5687 bblock->cil_length = ip - bblock->cil_code;
5688 if (start_new_bblock == 2) {
5689 g_assert (ip == tblock->cil_code);
5690 } else {
5691 GET_BBLOCK (cfg, tblock, ip);
5693 bblock->next_bb = tblock;
5694 bblock = tblock;
5695 cfg->cbb = bblock;
5696 start_new_bblock = 0;
5697 for (i = 0; i < bblock->in_scount; ++i) {
5698 if (cfg->verbose_level > 3)
5699 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5700 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5701 *sp++ = ins;
5703 if (class_inits)
5704 g_slist_free (class_inits);
5705 class_inits = NULL;
5706 } else {
5707 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5708 link_bblock (cfg, bblock, tblock);
5709 if (sp != stack_start) {
5710 handle_stack_args (cfg, stack_start, sp - stack_start);
5711 sp = stack_start;
5712 CHECK_UNVERIFIABLE (cfg);
5714 bblock->next_bb = tblock;
5715 bblock = tblock;
5716 cfg->cbb = bblock;
5717 for (i = 0; i < bblock->in_scount; ++i) {
5718 if (cfg->verbose_level > 3)
5719 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5720 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5721 *sp++ = ins;
5723 g_slist_free (class_inits);
5724 class_inits = NULL;
5728 if (skip_dead_blocks) {
5729 int ip_offset = ip - header->code;
5731 if (ip_offset == bb->end)
5732 bb = bb->next;
5734 if (bb->dead) {
5735 int op_size = mono_opcode_size (ip, end);
5736 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5738 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5740 if (ip_offset + op_size == bb->end) {
5741 MONO_INST_NEW (cfg, ins, OP_NOP);
5742 MONO_ADD_INS (bblock, ins);
5743 start_new_bblock = 1;
5746 ip += op_size;
5747 continue;
5751 * Sequence points are points where the debugger can place a breakpoint.
5752 * Currently, we generate these automatically at points where the IL
5753 * stack is empty.
5755 if (seq_points && sp == stack_start) {
5756 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5757 MONO_ADD_INS (cfg->cbb, ins);
5760 bblock->real_offset = cfg->real_offset;
5762 if ((cfg->method == method) && cfg->coverage_info) {
5763 guint32 cil_offset = ip - header->code;
5764 cfg->coverage_info->data [cil_offset].cil_code = ip;
5766 /* TODO: Use an increment here */
5767 #if defined(TARGET_X86)
5768 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5769 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5770 ins->inst_imm = 1;
5771 MONO_ADD_INS (cfg->cbb, ins);
5772 #else
5773 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5774 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5775 #endif
5778 if (cfg->verbose_level > 3)
5779 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5781 switch (*ip) {
5782 case CEE_NOP:
5783 if (cfg->keep_cil_nops)
5784 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5785 else
5786 MONO_INST_NEW (cfg, ins, OP_NOP);
5787 ip++;
5788 MONO_ADD_INS (bblock, ins);
5789 break;
5790 case CEE_BREAK:
5791 if (should_insert_brekpoint (cfg->method))
5792 MONO_INST_NEW (cfg, ins, OP_BREAK);
5793 else
5794 MONO_INST_NEW (cfg, ins, OP_NOP);
5795 ip++;
5796 MONO_ADD_INS (bblock, ins);
5797 break;
5798 case CEE_LDARG_0:
5799 case CEE_LDARG_1:
5800 case CEE_LDARG_2:
5801 case CEE_LDARG_3:
5802 CHECK_STACK_OVF (1);
5803 n = (*ip)-CEE_LDARG_0;
5804 CHECK_ARG (n);
5805 EMIT_NEW_ARGLOAD (cfg, ins, n);
5806 ip++;
5807 *sp++ = ins;
5808 break;
5809 case CEE_LDLOC_0:
5810 case CEE_LDLOC_1:
5811 case CEE_LDLOC_2:
5812 case CEE_LDLOC_3:
5813 CHECK_STACK_OVF (1);
5814 n = (*ip)-CEE_LDLOC_0;
5815 CHECK_LOCAL (n);
5816 EMIT_NEW_LOCLOAD (cfg, ins, n);
5817 ip++;
5818 *sp++ = ins;
5819 break;
5820 case CEE_STLOC_0:
5821 case CEE_STLOC_1:
5822 case CEE_STLOC_2:
5823 case CEE_STLOC_3: {
5824 CHECK_STACK (1);
5825 n = (*ip)-CEE_STLOC_0;
5826 CHECK_LOCAL (n);
5827 --sp;
5828 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5829 UNVERIFIED;
5830 emit_stloc_ir (cfg, sp, header, n);
5831 ++ip;
5832 inline_costs += 1;
5833 break;
5835 case CEE_LDARG_S:
5836 CHECK_OPSIZE (2);
5837 CHECK_STACK_OVF (1);
5838 n = ip [1];
5839 CHECK_ARG (n);
5840 EMIT_NEW_ARGLOAD (cfg, ins, n);
5841 *sp++ = ins;
5842 ip += 2;
5843 break;
5844 case CEE_LDARGA_S:
5845 CHECK_OPSIZE (2);
5846 CHECK_STACK_OVF (1);
5847 n = ip [1];
5848 CHECK_ARG (n);
5849 NEW_ARGLOADA (cfg, ins, n);
5850 MONO_ADD_INS (cfg->cbb, ins);
5851 *sp++ = ins;
5852 ip += 2;
5853 break;
5854 case CEE_STARG_S:
5855 CHECK_OPSIZE (2);
5856 CHECK_STACK (1);
5857 --sp;
5858 n = ip [1];
5859 CHECK_ARG (n);
5860 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5861 UNVERIFIED;
5862 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5863 ip += 2;
5864 break;
5865 case CEE_LDLOC_S:
5866 CHECK_OPSIZE (2);
5867 CHECK_STACK_OVF (1);
5868 n = ip [1];
5869 CHECK_LOCAL (n);
5870 EMIT_NEW_LOCLOAD (cfg, ins, n);
5871 *sp++ = ins;
5872 ip += 2;
5873 break;
5874 case CEE_LDLOCA_S: {
5875 unsigned char *tmp_ip;
5876 CHECK_OPSIZE (2);
5877 CHECK_STACK_OVF (1);
5878 CHECK_LOCAL (ip [1]);
5880 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5881 ip = tmp_ip;
5882 inline_costs += 1;
5883 break;
5886 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5887 *sp++ = ins;
5888 ip += 2;
5889 break;
5891 case CEE_STLOC_S:
5892 CHECK_OPSIZE (2);
5893 CHECK_STACK (1);
5894 --sp;
5895 CHECK_LOCAL (ip [1]);
5896 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5897 UNVERIFIED;
5898 emit_stloc_ir (cfg, sp, header, ip [1]);
5899 ip += 2;
5900 inline_costs += 1;
5901 break;
5902 case CEE_LDNULL:
5903 CHECK_STACK_OVF (1);
5904 EMIT_NEW_PCONST (cfg, ins, NULL);
5905 ins->type = STACK_OBJ;
5906 ++ip;
5907 *sp++ = ins;
5908 break;
5909 case CEE_LDC_I4_M1:
5910 CHECK_STACK_OVF (1);
5911 EMIT_NEW_ICONST (cfg, ins, -1);
5912 ++ip;
5913 *sp++ = ins;
5914 break;
5915 case CEE_LDC_I4_0:
5916 case CEE_LDC_I4_1:
5917 case CEE_LDC_I4_2:
5918 case CEE_LDC_I4_3:
5919 case CEE_LDC_I4_4:
5920 case CEE_LDC_I4_5:
5921 case CEE_LDC_I4_6:
5922 case CEE_LDC_I4_7:
5923 case CEE_LDC_I4_8:
5924 CHECK_STACK_OVF (1);
5925 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5926 ++ip;
5927 *sp++ = ins;
5928 break;
5929 case CEE_LDC_I4_S:
5930 CHECK_OPSIZE (2);
5931 CHECK_STACK_OVF (1);
5932 ++ip;
5933 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5934 ++ip;
5935 *sp++ = ins;
5936 break;
5937 case CEE_LDC_I4:
5938 CHECK_OPSIZE (5);
5939 CHECK_STACK_OVF (1);
5940 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5941 ip += 5;
5942 *sp++ = ins;
5943 break;
5944 case CEE_LDC_I8:
5945 CHECK_OPSIZE (9);
5946 CHECK_STACK_OVF (1);
5947 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5948 ins->type = STACK_I8;
5949 ins->dreg = alloc_dreg (cfg, STACK_I8);
5950 ++ip;
5951 ins->inst_l = (gint64)read64 (ip);
5952 MONO_ADD_INS (bblock, ins);
5953 ip += 8;
5954 *sp++ = ins;
5955 break;
5956 case CEE_LDC_R4: {
5957 float *f;
5958 gboolean use_aotconst = FALSE;
5960 #ifdef TARGET_POWERPC
5961 /* FIXME: Clean this up */
5962 if (cfg->compile_aot)
5963 use_aotconst = TRUE;
5964 #endif
5966 /* FIXME: we should really allocate this only late in the compilation process */
5967 f = mono_domain_alloc (cfg->domain, sizeof (float));
5968 CHECK_OPSIZE (5);
5969 CHECK_STACK_OVF (1);
5971 if (use_aotconst) {
5972 MonoInst *cons;
5973 int dreg;
5975 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5977 dreg = alloc_freg (cfg);
5978 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5979 ins->type = STACK_R8;
5980 } else {
5981 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5982 ins->type = STACK_R8;
5983 ins->dreg = alloc_dreg (cfg, STACK_R8);
5984 ins->inst_p0 = f;
5985 MONO_ADD_INS (bblock, ins);
5987 ++ip;
5988 readr4 (ip, f);
5989 ip += 4;
5990 *sp++ = ins;
5991 break;
5993 case CEE_LDC_R8: {
5994 double *d;
5995 gboolean use_aotconst = FALSE;
5997 #ifdef TARGET_POWERPC
5998 /* FIXME: Clean this up */
5999 if (cfg->compile_aot)
6000 use_aotconst = TRUE;
6001 #endif
6003 /* FIXME: we should really allocate this only late in the compilation process */
6004 d = mono_domain_alloc (cfg->domain, sizeof (double));
6005 CHECK_OPSIZE (9);
6006 CHECK_STACK_OVF (1);
6008 if (use_aotconst) {
6009 MonoInst *cons;
6010 int dreg;
6012 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6014 dreg = alloc_freg (cfg);
6015 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6016 ins->type = STACK_R8;
6017 } else {
6018 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6019 ins->type = STACK_R8;
6020 ins->dreg = alloc_dreg (cfg, STACK_R8);
6021 ins->inst_p0 = d;
6022 MONO_ADD_INS (bblock, ins);
6024 ++ip;
6025 readr8 (ip, d);
6026 ip += 8;
6027 *sp++ = ins;
6028 break;
6030 case CEE_DUP: {
6031 MonoInst *temp, *store;
6032 CHECK_STACK (1);
6033 CHECK_STACK_OVF (1);
6034 sp--;
6035 ins = *sp;
6037 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6038 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6040 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6041 *sp++ = ins;
6043 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6044 *sp++ = ins;
6046 ++ip;
6047 inline_costs += 2;
6048 break;
6050 case CEE_POP:
6051 CHECK_STACK (1);
6052 ip++;
6053 --sp;
6055 #ifdef TARGET_X86
6056 if (sp [0]->type == STACK_R8)
6057 /* we need to pop the value from the x86 FP stack */
6058 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6059 #endif
6060 break;
6061 case CEE_JMP: {
6062 MonoCallInst *call;
6064 INLINE_FAILURE;
6066 CHECK_OPSIZE (5);
6067 if (stack_start != sp)
6068 UNVERIFIED;
6069 token = read32 (ip + 1);
6070 /* FIXME: check the signature matches */
6071 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6073 if (!cmethod)
6074 goto load_error;
6076 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6077 GENERIC_SHARING_FAILURE (CEE_JMP);
6079 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6080 CHECK_CFG_EXCEPTION;
6082 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6084 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6085 int i, n;
6087 /* Handle tail calls similarly to calls */
6088 n = fsig->param_count + fsig->hasthis;
6090 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6091 call->method = cmethod;
6092 call->tail_call = TRUE;
6093 call->signature = mono_method_signature (cmethod);
6094 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6095 call->inst.inst_p0 = cmethod;
6096 for (i = 0; i < n; ++i)
6097 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6099 mono_arch_emit_call (cfg, call);
6100 MONO_ADD_INS (bblock, (MonoInst*)call);
6102 #else
6103 for (i = 0; i < num_args; ++i)
6104 /* Prevent arguments from being optimized away */
6105 arg_array [i]->flags |= MONO_INST_VOLATILE;
6107 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6108 ins = (MonoInst*)call;
6109 ins->inst_p0 = cmethod;
6110 MONO_ADD_INS (bblock, ins);
6111 #endif
6113 ip += 5;
6114 start_new_bblock = 1;
6115 break;
6117 case CEE_CALLI:
6118 case CEE_CALL:
6119 case CEE_CALLVIRT: {
6120 MonoInst *addr = NULL;
6121 MonoMethodSignature *fsig = NULL;
6122 int array_rank = 0;
6123 int virtual = *ip == CEE_CALLVIRT;
6124 int calli = *ip == CEE_CALLI;
6125 gboolean pass_imt_from_rgctx = FALSE;
6126 MonoInst *imt_arg = NULL;
6127 gboolean pass_vtable = FALSE;
6128 gboolean pass_mrgctx = FALSE;
6129 MonoInst *vtable_arg = NULL;
6130 gboolean check_this = FALSE;
6131 gboolean supported_tail_call = FALSE;
6133 CHECK_OPSIZE (5);
6134 token = read32 (ip + 1);
6136 if (calli) {
6137 cmethod = NULL;
6138 CHECK_STACK (1);
6139 --sp;
6140 addr = *sp;
6141 if (method->wrapper_type != MONO_WRAPPER_NONE)
6142 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6143 else
6144 fsig = mono_metadata_parse_signature (image, token);
6146 n = fsig->param_count + fsig->hasthis;
6148 if (method->dynamic && fsig->pinvoke) {
6149 MonoInst *args [3];
6152 * This is a call through a function pointer using a pinvoke
6153 * signature. Have to create a wrapper and call that instead.
6154 * FIXME: This is very slow, need to create a wrapper at JIT time
6155 * instead based on the signature.
6157 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6158 EMIT_NEW_PCONST (cfg, args [1], fsig);
6159 args [2] = addr;
6160 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6162 } else {
6163 MonoMethod *cil_method;
6165 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6166 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6167 cil_method = cmethod;
6168 } else if (constrained_call) {
6169 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6171 * This is needed since get_method_constrained can't find
6172 * the method in klass representing a type var.
6173 * The type var is guaranteed to be a reference type in this
6174 * case.
6176 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6177 cil_method = cmethod;
6178 g_assert (!cmethod->klass->valuetype);
6179 } else {
6180 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6182 } else {
6183 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6184 cil_method = cmethod;
6187 if (!cmethod)
6188 goto load_error;
6189 if (!dont_verify && !cfg->skip_visibility) {
6190 MonoMethod *target_method = cil_method;
6191 if (method->is_inflated) {
6192 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6194 if (!mono_method_can_access_method (method_definition, target_method) &&
6195 !mono_method_can_access_method (method, cil_method))
6196 METHOD_ACCESS_FAILURE;
6199 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6200 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6202 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6203 /* MS.NET seems to silently convert this to a callvirt */
6204 virtual = 1;
6208 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6209 * converts to a callvirt.
6211 * tests/bug-515884.il is an example of this behavior
6213 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6214 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6215 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6216 virtual = 1;
6219 if (!cmethod->klass->inited)
6220 if (!mono_class_init (cmethod->klass))
6221 goto load_error;
6223 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6224 mini_class_is_system_array (cmethod->klass)) {
6225 array_rank = cmethod->klass->rank;
6226 fsig = mono_method_signature (cmethod);
6227 } else {
6228 fsig = mono_method_signature (cmethod);
6230 if (!fsig)
6231 goto load_error;
6233 if (fsig->pinvoke) {
6234 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6235 check_for_pending_exc, FALSE);
6236 fsig = mono_method_signature (wrapper);
6237 } else if (constrained_call) {
6238 fsig = mono_method_signature (cmethod);
6239 } else {
6240 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6244 mono_save_token_info (cfg, image, token, cil_method);
6246 n = fsig->param_count + fsig->hasthis;
6248 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6249 if (check_linkdemand (cfg, method, cmethod))
6250 INLINE_FAILURE;
6251 CHECK_CFG_EXCEPTION;
6254 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6255 g_assert_not_reached ();
6258 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6259 UNVERIFIED;
6261 if (!cfg->generic_sharing_context && cmethod)
6262 g_assert (!mono_method_check_context_used (cmethod));
6264 CHECK_STACK (n);
6266 //g_assert (!virtual || fsig->hasthis);
6268 sp -= n;
6270 if (constrained_call) {
6272 * We have the `constrained.' prefix opcode.
6274 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6276 * The type parameter is instantiated as a valuetype,
6277 * but that type doesn't override the method we're
6278 * calling, so we need to box `this'.
6280 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6281 ins->klass = constrained_call;
6282 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6283 CHECK_CFG_EXCEPTION;
6284 } else if (!constrained_call->valuetype) {
6285 int dreg = alloc_preg (cfg);
6288 * The type parameter is instantiated as a reference
6289 * type. We have a managed pointer on the stack, so
6290 * we need to dereference it here.
6292 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6293 ins->type = STACK_OBJ;
6294 sp [0] = ins;
6295 } else if (cmethod->klass->valuetype)
6296 virtual = 0;
6297 constrained_call = NULL;
6300 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6301 UNVERIFIED;
6304 * If the callee is a shared method, then its static cctor
6305 * might not get called after the call was patched.
6307 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6308 emit_generic_class_init (cfg, cmethod->klass);
6309 CHECK_TYPELOAD (cmethod->klass);
6312 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6313 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6314 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6315 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6316 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6319 * Pass vtable iff target method might
6320 * be shared, which means that sharing
6321 * is enabled for its class and its
6322 * context is sharable (and it's not a
6323 * generic method).
6325 if (sharing_enabled && context_sharable &&
6326 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6327 pass_vtable = TRUE;
6330 if (cmethod && mini_method_get_context (cmethod) &&
6331 mini_method_get_context (cmethod)->method_inst) {
6332 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6333 MonoGenericContext *context = mini_method_get_context (cmethod);
6334 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6336 g_assert (!pass_vtable);
6338 if (sharing_enabled && context_sharable)
6339 pass_mrgctx = TRUE;
6342 if (cfg->generic_sharing_context && cmethod) {
6343 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6345 context_used = mono_method_check_context_used (cmethod);
6347 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6348 /* Generic method interface
6349 calls are resolved via a
6350 helper function and don't
6351 need an imt. */
6352 if (!cmethod_context || !cmethod_context->method_inst)
6353 pass_imt_from_rgctx = TRUE;
6357 * If a shared method calls another
6358 * shared method then the caller must
6359 * have a generic sharing context
6360 * because the magic trampoline
6361 * requires it. FIXME: We shouldn't
6362 * have to force the vtable/mrgctx
6363 * variable here. Instead there
6364 * should be a flag in the cfg to
6365 * request a generic sharing context.
6367 if (context_used &&
6368 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6369 mono_get_vtable_var (cfg);
6372 if (pass_vtable) {
6373 if (context_used) {
6374 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6375 } else {
6376 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6378 CHECK_TYPELOAD (cmethod->klass);
6379 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6383 if (pass_mrgctx) {
6384 g_assert (!vtable_arg);
6386 if (!cfg->compile_aot) {
6388 * emit_get_rgctx_method () calls mono_class_vtable () so check
6389 * for type load errors before.
6391 mono_class_setup_vtable (cmethod->klass);
6392 CHECK_TYPELOAD (cmethod->klass);
6395 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6397 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6398 MONO_METHOD_IS_FINAL (cmethod)) {
6399 if (virtual)
6400 check_this = TRUE;
6401 virtual = 0;
6405 if (pass_imt_from_rgctx) {
6406 g_assert (!pass_vtable);
6407 g_assert (cmethod);
6409 imt_arg = emit_get_rgctx_method (cfg, context_used,
6410 cmethod, MONO_RGCTX_INFO_METHOD);
6413 if (check_this)
6414 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6416 /* Calling virtual generic methods */
6417 if (cmethod && virtual &&
6418 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6419 !(MONO_METHOD_IS_FINAL (cmethod) &&
6420 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6421 mono_method_signature (cmethod)->generic_param_count) {
6422 MonoInst *this_temp, *this_arg_temp, *store;
6423 MonoInst *iargs [4];
6425 g_assert (mono_method_signature (cmethod)->is_inflated);
6427 /* Prevent inlining of methods that contain indirect calls */
6428 INLINE_FAILURE;
6430 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6431 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6432 g_assert (!imt_arg);
6433 if (!context_used)
6434 g_assert (cmethod->is_inflated);
6435 imt_arg = emit_get_rgctx_method (cfg, context_used,
6436 cmethod, MONO_RGCTX_INFO_METHOD);
6437 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6438 } else
6439 #endif
6441 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6442 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6443 MONO_ADD_INS (bblock, store);
6445 /* FIXME: This should be a managed pointer */
6446 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6448 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6449 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6450 cmethod, MONO_RGCTX_INFO_METHOD);
6451 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6452 addr = mono_emit_jit_icall (cfg,
6453 mono_helper_compile_generic_method, iargs);
6455 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6457 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6460 if (!MONO_TYPE_IS_VOID (fsig->ret))
6461 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6463 CHECK_CFG_EXCEPTION;
6465 ip += 5;
6466 ins_flag = 0;
6467 break;
6470 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6471 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6472 #else
6473 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6474 #endif
6476 /* Tail prefix */
6477 /* FIXME: runtime generic context pointer for jumps? */
6478 /* FIXME: handle this for generic sharing eventually */
6479 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6480 MonoCallInst *call;
6482 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6483 INLINE_FAILURE;
6485 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6486 /* Handle tail calls similarly to calls */
6487 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6488 #else
6489 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6490 call->tail_call = TRUE;
6491 call->method = cmethod;
6492 call->signature = mono_method_signature (cmethod);
6495 * We implement tail calls by storing the actual arguments into the
6496 * argument variables, then emitting a CEE_JMP.
6498 for (i = 0; i < n; ++i) {
6499 /* Prevent argument from being register allocated */
6500 arg_array [i]->flags |= MONO_INST_VOLATILE;
6501 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6503 #endif
6505 ins = (MonoInst*)call;
6506 ins->inst_p0 = cmethod;
6507 ins->inst_p1 = arg_array [0];
6508 MONO_ADD_INS (bblock, ins);
6509 link_bblock (cfg, bblock, end_bblock);
6510 start_new_bblock = 1;
6512 CHECK_CFG_EXCEPTION;
6514 /* skip CEE_RET as well */
6515 ip += 6;
6516 ins_flag = 0;
6517 break;
6520 /* Conversion to a JIT intrinsic */
6521 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6522 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6523 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6524 *sp = ins;
6525 sp++;
6528 CHECK_CFG_EXCEPTION;
6530 ip += 5;
6531 ins_flag = 0;
6532 break;
6535 /* Inlining */
6536 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6537 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6538 mono_method_check_inlining (cfg, cmethod) &&
6539 !g_list_find (dont_inline, cmethod)) {
6540 int costs;
6541 gboolean allways = FALSE;
6543 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6544 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6545 /* Prevent inlining of methods that call wrappers */
6546 INLINE_FAILURE;
6547 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6548 allways = TRUE;
6551 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6552 ip += 5;
6553 cfg->real_offset += 5;
6554 bblock = cfg->cbb;
6556 if (!MONO_TYPE_IS_VOID (fsig->ret))
6557 /* *sp is already set by inline_method */
6558 sp++;
6560 inline_costs += costs;
6561 ins_flag = 0;
6562 break;
6566 inline_costs += 10 * num_calls++;
6568 /* Tail recursion elimination */
6569 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6570 gboolean has_vtargs = FALSE;
6571 int i;
6573 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6574 INLINE_FAILURE;
6576 /* keep it simple */
6577 for (i = fsig->param_count - 1; i >= 0; i--) {
6578 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6579 has_vtargs = TRUE;
6582 if (!has_vtargs) {
6583 for (i = 0; i < n; ++i)
6584 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6585 MONO_INST_NEW (cfg, ins, OP_BR);
6586 MONO_ADD_INS (bblock, ins);
6587 tblock = start_bblock->out_bb [0];
6588 link_bblock (cfg, bblock, tblock);
6589 ins->inst_target_bb = tblock;
6590 start_new_bblock = 1;
6592 /* skip the CEE_RET, too */
6593 if (ip_in_bb (cfg, bblock, ip + 5))
6594 ip += 6;
6595 else
6596 ip += 5;
6598 ins_flag = 0;
6599 break;
6603 /* Generic sharing */
6604 /* FIXME: only do this for generic methods if
6605 they are not shared! */
6606 if (context_used && !imt_arg && !array_rank &&
6607 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6608 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6609 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6610 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6611 INLINE_FAILURE;
6613 g_assert (cfg->generic_sharing_context && cmethod);
6614 g_assert (!addr);
6617 * We are compiling a call to a
6618 * generic method from shared code,
6619 * which means that we have to look up
6620 * the method in the rgctx and do an
6621 * indirect call.
6623 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6626 /* Indirect calls */
6627 if (addr) {
6628 g_assert (!imt_arg);
6630 if (*ip == CEE_CALL)
6631 g_assert (context_used);
6632 else if (*ip == CEE_CALLI)
6633 g_assert (!vtable_arg);
6634 else
6635 /* FIXME: what the hell is this??? */
6636 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6637 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6639 /* Prevent inlining of methods with indirect calls */
6640 INLINE_FAILURE;
6642 if (vtable_arg) {
6643 MonoCallInst *call;
6644 int rgctx_reg = mono_alloc_preg (cfg);
6646 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6647 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6648 call = (MonoCallInst*)ins;
6649 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6650 } else {
6651 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6653 * Instead of emitting an indirect call, emit a direct call
6654 * with the contents of the aotconst as the patch info.
6656 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6657 NULLIFY_INS (addr);
6658 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6659 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6660 NULLIFY_INS (addr);
6661 } else {
6662 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6665 if (!MONO_TYPE_IS_VOID (fsig->ret))
6666 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6668 CHECK_CFG_EXCEPTION;
6670 ip += 5;
6671 ins_flag = 0;
6672 break;
6675 /* Array methods */
6676 if (array_rank) {
6677 MonoInst *addr;
6679 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6680 if (sp [fsig->param_count]->type == STACK_OBJ) {
6681 MonoInst *iargs [2];
6683 iargs [0] = sp [0];
6684 iargs [1] = sp [fsig->param_count];
6686 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6689 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6690 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6691 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6692 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6694 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6696 *sp++ = ins;
6697 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6698 if (!cmethod->klass->element_class->valuetype && !readonly)
6699 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6700 CHECK_TYPELOAD (cmethod->klass);
6702 readonly = FALSE;
6703 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6704 *sp++ = addr;
6705 } else {
6706 g_assert_not_reached ();
6709 CHECK_CFG_EXCEPTION;
6711 ip += 5;
6712 ins_flag = 0;
6713 break;
6716 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6717 if (ins) {
6718 if (!MONO_TYPE_IS_VOID (fsig->ret))
6719 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6721 CHECK_CFG_EXCEPTION;
6723 ip += 5;
6724 ins_flag = 0;
6725 break;
6728 /* Common call */
6729 INLINE_FAILURE;
6730 if (vtable_arg) {
6731 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6732 NULL, vtable_arg);
6733 } else if (imt_arg) {
6734 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6735 } else {
6736 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6739 if (!MONO_TYPE_IS_VOID (fsig->ret))
6740 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6742 CHECK_CFG_EXCEPTION;
6744 ip += 5;
6745 ins_flag = 0;
6746 break;
6748 case CEE_RET:
6749 if (cfg->method != method) {
6750 /* return from inlined method */
6752 * If in_count == 0, that means the ret is unreachable due to
6753 * being preceded by a throw. In that case, inline_method () will
6754 * handle setting the return value
6755 * (test case: test_0_inline_throw ()).
6757 if (return_var && cfg->cbb->in_count) {
6758 MonoInst *store;
6759 CHECK_STACK (1);
6760 --sp;
6761 //g_assert (returnvar != -1);
6762 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6763 cfg->ret_var_set = TRUE;
6765 } else {
6766 if (cfg->ret) {
6767 MonoType *ret_type = mono_method_signature (method)->ret;
6769 if (seq_points) {
6771 * Place a seq point here too even though the IL stack is not
6772 * empty, so a step over on
6773 * call <FOO>
6774 * ret
6775 * will work correctly.
6777 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6778 MONO_ADD_INS (cfg->cbb, ins);
6781 g_assert (!return_var);
6782 CHECK_STACK (1);
6783 --sp;
6784 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6785 MonoInst *ret_addr;
6787 if (!cfg->vret_addr) {
6788 MonoInst *ins;
6790 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6791 } else {
6792 EMIT_NEW_RETLOADA (cfg, ret_addr);
6794 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6795 ins->klass = mono_class_from_mono_type (ret_type);
6797 } else {
6798 #ifdef MONO_ARCH_SOFT_FLOAT
6799 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6800 MonoInst *iargs [1];
6801 MonoInst *conv;
6803 iargs [0] = *sp;
6804 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6805 mono_arch_emit_setret (cfg, method, conv);
6806 } else {
6807 mono_arch_emit_setret (cfg, method, *sp);
6809 #else
6810 mono_arch_emit_setret (cfg, method, *sp);
6811 #endif
6815 if (sp != stack_start)
6816 UNVERIFIED;
6817 MONO_INST_NEW (cfg, ins, OP_BR);
6818 ip++;
6819 ins->inst_target_bb = end_bblock;
6820 MONO_ADD_INS (bblock, ins);
6821 link_bblock (cfg, bblock, end_bblock);
6822 start_new_bblock = 1;
6823 break;
6824 case CEE_BR_S:
6825 CHECK_OPSIZE (2);
6826 MONO_INST_NEW (cfg, ins, OP_BR);
6827 ip++;
6828 target = ip + 1 + (signed char)(*ip);
6829 ++ip;
6830 GET_BBLOCK (cfg, tblock, target);
6831 link_bblock (cfg, bblock, tblock);
6832 ins->inst_target_bb = tblock;
6833 if (sp != stack_start) {
6834 handle_stack_args (cfg, stack_start, sp - stack_start);
6835 sp = stack_start;
6836 CHECK_UNVERIFIABLE (cfg);
6838 MONO_ADD_INS (bblock, ins);
6839 start_new_bblock = 1;
6840 inline_costs += BRANCH_COST;
6841 break;
6842 case CEE_BEQ_S:
6843 case CEE_BGE_S:
6844 case CEE_BGT_S:
6845 case CEE_BLE_S:
6846 case CEE_BLT_S:
6847 case CEE_BNE_UN_S:
6848 case CEE_BGE_UN_S:
6849 case CEE_BGT_UN_S:
6850 case CEE_BLE_UN_S:
6851 case CEE_BLT_UN_S:
6852 CHECK_OPSIZE (2);
6853 CHECK_STACK (2);
6854 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6855 ip++;
6856 target = ip + 1 + *(signed char*)ip;
6857 ip++;
6859 ADD_BINCOND (NULL);
6861 sp = stack_start;
6862 inline_costs += BRANCH_COST;
6863 break;
6864 case CEE_BR:
6865 CHECK_OPSIZE (5);
6866 MONO_INST_NEW (cfg, ins, OP_BR);
6867 ip++;
6869 target = ip + 4 + (gint32)read32(ip);
6870 ip += 4;
6871 GET_BBLOCK (cfg, tblock, target);
6872 link_bblock (cfg, bblock, tblock);
6873 ins->inst_target_bb = tblock;
6874 if (sp != stack_start) {
6875 handle_stack_args (cfg, stack_start, sp - stack_start);
6876 sp = stack_start;
6877 CHECK_UNVERIFIABLE (cfg);
6880 MONO_ADD_INS (bblock, ins);
6882 start_new_bblock = 1;
6883 inline_costs += BRANCH_COST;
6884 break;
6885 case CEE_BRFALSE_S:
6886 case CEE_BRTRUE_S:
6887 case CEE_BRFALSE:
6888 case CEE_BRTRUE: {
6889 MonoInst *cmp;
6890 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6891 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6892 guint32 opsize = is_short ? 1 : 4;
6894 CHECK_OPSIZE (opsize);
6895 CHECK_STACK (1);
6896 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6897 UNVERIFIED;
6898 ip ++;
6899 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6900 ip += opsize;
6902 sp--;
6904 GET_BBLOCK (cfg, tblock, target);
6905 link_bblock (cfg, bblock, tblock);
6906 GET_BBLOCK (cfg, tblock, ip);
6907 link_bblock (cfg, bblock, tblock);
6909 if (sp != stack_start) {
6910 handle_stack_args (cfg, stack_start, sp - stack_start);
6911 CHECK_UNVERIFIABLE (cfg);
6914 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6915 cmp->sreg1 = sp [0]->dreg;
6916 type_from_op (cmp, sp [0], NULL);
6917 CHECK_TYPE (cmp);
6919 #if SIZEOF_REGISTER == 4
6920 if (cmp->opcode == OP_LCOMPARE_IMM) {
6921 /* Convert it to OP_LCOMPARE */
6922 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6923 ins->type = STACK_I8;
6924 ins->dreg = alloc_dreg (cfg, STACK_I8);
6925 ins->inst_l = 0;
6926 MONO_ADD_INS (bblock, ins);
6927 cmp->opcode = OP_LCOMPARE;
6928 cmp->sreg2 = ins->dreg;
6930 #endif
6931 MONO_ADD_INS (bblock, cmp);
6933 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6934 type_from_op (ins, sp [0], NULL);
6935 MONO_ADD_INS (bblock, ins);
6936 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6937 GET_BBLOCK (cfg, tblock, target);
6938 ins->inst_true_bb = tblock;
6939 GET_BBLOCK (cfg, tblock, ip);
6940 ins->inst_false_bb = tblock;
6941 start_new_bblock = 2;
6943 sp = stack_start;
6944 inline_costs += BRANCH_COST;
6945 break;
6947 case CEE_BEQ:
6948 case CEE_BGE:
6949 case CEE_BGT:
6950 case CEE_BLE:
6951 case CEE_BLT:
6952 case CEE_BNE_UN:
6953 case CEE_BGE_UN:
6954 case CEE_BGT_UN:
6955 case CEE_BLE_UN:
6956 case CEE_BLT_UN:
6957 CHECK_OPSIZE (5);
6958 CHECK_STACK (2);
6959 MONO_INST_NEW (cfg, ins, *ip);
6960 ip++;
6961 target = ip + 4 + (gint32)read32(ip);
6962 ip += 4;
6964 ADD_BINCOND (NULL);
6966 sp = stack_start;
6967 inline_costs += BRANCH_COST;
6968 break;
6969 case CEE_SWITCH: {
6970 MonoInst *src1;
6971 MonoBasicBlock **targets;
6972 MonoBasicBlock *default_bblock;
6973 MonoJumpInfoBBTable *table;
6974 int offset_reg = alloc_preg (cfg);
6975 int target_reg = alloc_preg (cfg);
6976 int table_reg = alloc_preg (cfg);
6977 int sum_reg = alloc_preg (cfg);
6978 gboolean use_op_switch;
6980 CHECK_OPSIZE (5);
6981 CHECK_STACK (1);
6982 n = read32 (ip + 1);
6983 --sp;
6984 src1 = sp [0];
6985 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6986 UNVERIFIED;
6988 ip += 5;
6989 CHECK_OPSIZE (n * sizeof (guint32));
6990 target = ip + n * sizeof (guint32);
6992 GET_BBLOCK (cfg, default_bblock, target);
6994 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6995 for (i = 0; i < n; ++i) {
6996 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6997 targets [i] = tblock;
6998 ip += 4;
7001 if (sp != stack_start) {
7003 * Link the current bb with the targets as well, so handle_stack_args
7004 * will set their in_stack correctly.
7006 link_bblock (cfg, bblock, default_bblock);
7007 for (i = 0; i < n; ++i)
7008 link_bblock (cfg, bblock, targets [i]);
7010 handle_stack_args (cfg, stack_start, sp - stack_start);
7011 sp = stack_start;
7012 CHECK_UNVERIFIABLE (cfg);
7015 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7016 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7017 bblock = cfg->cbb;
7019 for (i = 0; i < n; ++i)
7020 link_bblock (cfg, bblock, targets [i]);
7022 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7023 table->table = targets;
7024 table->table_size = n;
7026 use_op_switch = FALSE;
7027 #ifdef TARGET_ARM
7028 /* ARM implements SWITCH statements differently */
7029 /* FIXME: Make it use the generic implementation */
7030 if (!cfg->compile_aot)
7031 use_op_switch = TRUE;
7032 #endif
7034 if (COMPILE_LLVM (cfg))
7035 use_op_switch = TRUE;
7037 cfg->cbb->has_jump_table = 1;
7039 if (use_op_switch) {
7040 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7041 ins->sreg1 = src1->dreg;
7042 ins->inst_p0 = table;
7043 ins->inst_many_bb = targets;
7044 ins->klass = GUINT_TO_POINTER (n);
7045 MONO_ADD_INS (cfg->cbb, ins);
7046 } else {
7047 if (sizeof (gpointer) == 8)
7048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7049 else
7050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7052 #if SIZEOF_REGISTER == 8
7053 /* The upper word might not be zero, and we add it to a 64 bit address later */
7054 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7055 #endif
7057 if (cfg->compile_aot) {
7058 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7059 } else {
7060 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7061 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7062 ins->inst_p0 = table;
7063 ins->dreg = table_reg;
7064 MONO_ADD_INS (cfg->cbb, ins);
7067 /* FIXME: Use load_memindex */
7068 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7070 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7072 start_new_bblock = 1;
7073 inline_costs += (BRANCH_COST * 2);
7074 break;
7076 case CEE_LDIND_I1:
7077 case CEE_LDIND_U1:
7078 case CEE_LDIND_I2:
7079 case CEE_LDIND_U2:
7080 case CEE_LDIND_I4:
7081 case CEE_LDIND_U4:
7082 case CEE_LDIND_I8:
7083 case CEE_LDIND_I:
7084 case CEE_LDIND_R4:
7085 case CEE_LDIND_R8:
7086 case CEE_LDIND_REF:
7087 CHECK_STACK (1);
7088 --sp;
7090 switch (*ip) {
7091 case CEE_LDIND_R4:
7092 case CEE_LDIND_R8:
7093 dreg = alloc_freg (cfg);
7094 break;
7095 case CEE_LDIND_I8:
7096 dreg = alloc_lreg (cfg);
7097 break;
7098 default:
7099 dreg = alloc_preg (cfg);
7102 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7103 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7104 ins->flags |= ins_flag;
7105 ins_flag = 0;
7106 MONO_ADD_INS (bblock, ins);
7107 *sp++ = ins;
7108 ++ip;
7109 break;
7110 case CEE_STIND_REF:
7111 case CEE_STIND_I1:
7112 case CEE_STIND_I2:
7113 case CEE_STIND_I4:
7114 case CEE_STIND_I8:
7115 case CEE_STIND_R4:
7116 case CEE_STIND_R8:
7117 case CEE_STIND_I:
7118 CHECK_STACK (2);
7119 sp -= 2;
7121 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7122 ins->flags |= ins_flag;
7123 ins_flag = 0;
7124 MONO_ADD_INS (bblock, ins);
7126 #if HAVE_WRITE_BARRIERS
7127 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7128 MonoInst *dummy_use;
7129 /* insert call to write barrier */
7130 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7131 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7132 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7134 #endif
7136 inline_costs += 1;
7137 ++ip;
7138 break;
7140 case CEE_MUL:
7141 CHECK_STACK (2);
7143 MONO_INST_NEW (cfg, ins, (*ip));
7144 sp -= 2;
7145 ins->sreg1 = sp [0]->dreg;
7146 ins->sreg2 = sp [1]->dreg;
7147 type_from_op (ins, sp [0], sp [1]);
7148 CHECK_TYPE (ins);
7149 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7151 /* Use the immediate opcodes if possible */
7152 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7153 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7154 if (imm_opcode != -1) {
7155 ins->opcode = imm_opcode;
7156 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7157 ins->sreg2 = -1;
7159 sp [1]->opcode = OP_NOP;
7163 MONO_ADD_INS ((cfg)->cbb, (ins));
7165 *sp++ = mono_decompose_opcode (cfg, ins);
7166 ip++;
7167 break;
7168 case CEE_ADD:
7169 case CEE_SUB:
7170 case CEE_DIV:
7171 case CEE_DIV_UN:
7172 case CEE_REM:
7173 case CEE_REM_UN:
7174 case CEE_AND:
7175 case CEE_OR:
7176 case CEE_XOR:
7177 case CEE_SHL:
7178 case CEE_SHR:
7179 case CEE_SHR_UN:
7180 CHECK_STACK (2);
7182 MONO_INST_NEW (cfg, ins, (*ip));
7183 sp -= 2;
7184 ins->sreg1 = sp [0]->dreg;
7185 ins->sreg2 = sp [1]->dreg;
7186 type_from_op (ins, sp [0], sp [1]);
7187 CHECK_TYPE (ins);
7188 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7189 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7191 /* FIXME: Pass opcode to is_inst_imm */
7193 /* Use the immediate opcodes if possible */
7194 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7195 int imm_opcode;
7197 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7198 if (imm_opcode != -1) {
7199 ins->opcode = imm_opcode;
7200 if (sp [1]->opcode == OP_I8CONST) {
7201 #if SIZEOF_REGISTER == 8
7202 ins->inst_imm = sp [1]->inst_l;
7203 #else
7204 ins->inst_ls_word = sp [1]->inst_ls_word;
7205 ins->inst_ms_word = sp [1]->inst_ms_word;
7206 #endif
7208 else
7209 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7210 ins->sreg2 = -1;
7212 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7213 if (sp [1]->next == NULL)
7214 sp [1]->opcode = OP_NOP;
7217 MONO_ADD_INS ((cfg)->cbb, (ins));
7219 *sp++ = mono_decompose_opcode (cfg, ins);
7220 ip++;
7221 break;
7222 case CEE_NEG:
7223 case CEE_NOT:
7224 case CEE_CONV_I1:
7225 case CEE_CONV_I2:
7226 case CEE_CONV_I4:
7227 case CEE_CONV_R4:
7228 case CEE_CONV_R8:
7229 case CEE_CONV_U4:
7230 case CEE_CONV_I8:
7231 case CEE_CONV_U8:
7232 case CEE_CONV_OVF_I8:
7233 case CEE_CONV_OVF_U8:
7234 case CEE_CONV_R_UN:
7235 CHECK_STACK (1);
7237 /* Special case this earlier so we have long constants in the IR */
7238 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7239 int data = sp [-1]->inst_c0;
7240 sp [-1]->opcode = OP_I8CONST;
7241 sp [-1]->type = STACK_I8;
7242 #if SIZEOF_REGISTER == 8
7243 if ((*ip) == CEE_CONV_U8)
7244 sp [-1]->inst_c0 = (guint32)data;
7245 else
7246 sp [-1]->inst_c0 = data;
7247 #else
7248 sp [-1]->inst_ls_word = data;
7249 if ((*ip) == CEE_CONV_U8)
7250 sp [-1]->inst_ms_word = 0;
7251 else
7252 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7253 #endif
7254 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7256 else {
7257 ADD_UNOP (*ip);
7259 ip++;
7260 break;
7261 case CEE_CONV_OVF_I4:
7262 case CEE_CONV_OVF_I1:
7263 case CEE_CONV_OVF_I2:
7264 case CEE_CONV_OVF_I:
7265 case CEE_CONV_OVF_U:
7266 CHECK_STACK (1);
7268 if (sp [-1]->type == STACK_R8) {
7269 ADD_UNOP (CEE_CONV_OVF_I8);
7270 ADD_UNOP (*ip);
7271 } else {
7272 ADD_UNOP (*ip);
7274 ip++;
7275 break;
7276 case CEE_CONV_OVF_U1:
7277 case CEE_CONV_OVF_U2:
7278 case CEE_CONV_OVF_U4:
7279 CHECK_STACK (1);
7281 if (sp [-1]->type == STACK_R8) {
7282 ADD_UNOP (CEE_CONV_OVF_U8);
7283 ADD_UNOP (*ip);
7284 } else {
7285 ADD_UNOP (*ip);
7287 ip++;
7288 break;
7289 case CEE_CONV_OVF_I1_UN:
7290 case CEE_CONV_OVF_I2_UN:
7291 case CEE_CONV_OVF_I4_UN:
7292 case CEE_CONV_OVF_I8_UN:
7293 case CEE_CONV_OVF_U1_UN:
7294 case CEE_CONV_OVF_U2_UN:
7295 case CEE_CONV_OVF_U4_UN:
7296 case CEE_CONV_OVF_U8_UN:
7297 case CEE_CONV_OVF_I_UN:
7298 case CEE_CONV_OVF_U_UN:
7299 case CEE_CONV_U2:
7300 case CEE_CONV_U1:
7301 case CEE_CONV_I:
7302 case CEE_CONV_U:
7303 CHECK_STACK (1);
7304 ADD_UNOP (*ip);
7305 CHECK_CFG_EXCEPTION;
7306 ip++;
7307 break;
7308 case CEE_ADD_OVF:
7309 case CEE_ADD_OVF_UN:
7310 case CEE_MUL_OVF:
7311 case CEE_MUL_OVF_UN:
7312 case CEE_SUB_OVF:
7313 case CEE_SUB_OVF_UN:
7314 CHECK_STACK (2);
7315 ADD_BINOP (*ip);
7316 ip++;
7317 break;
7318 case CEE_CPOBJ:
7319 CHECK_OPSIZE (5);
7320 CHECK_STACK (2);
7321 token = read32 (ip + 1);
7322 klass = mini_get_class (method, token, generic_context);
7323 CHECK_TYPELOAD (klass);
7324 sp -= 2;
7325 if (generic_class_is_reference_type (cfg, klass)) {
7326 MonoInst *store, *load;
7327 int dreg = alloc_preg (cfg);
7329 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7330 load->flags |= ins_flag;
7331 MONO_ADD_INS (cfg->cbb, load);
7333 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7334 store->flags |= ins_flag;
7335 MONO_ADD_INS (cfg->cbb, store);
7337 #if HAVE_WRITE_BARRIERS
7338 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7339 MonoInst *dummy_use;
7340 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7341 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7342 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7344 #endif
7345 } else {
7346 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7348 ins_flag = 0;
7349 ip += 5;
7350 break;
7351 case CEE_LDOBJ: {
7352 int loc_index = -1;
7353 int stloc_len = 0;
7355 CHECK_OPSIZE (5);
7356 CHECK_STACK (1);
7357 --sp;
7358 token = read32 (ip + 1);
7359 klass = mini_get_class (method, token, generic_context);
7360 CHECK_TYPELOAD (klass);
7362 /* Optimize the common ldobj+stloc combination */
7363 switch (ip [5]) {
7364 case CEE_STLOC_S:
7365 loc_index = ip [6];
7366 stloc_len = 2;
7367 break;
7368 case CEE_STLOC_0:
7369 case CEE_STLOC_1:
7370 case CEE_STLOC_2:
7371 case CEE_STLOC_3:
7372 loc_index = ip [5] - CEE_STLOC_0;
7373 stloc_len = 1;
7374 break;
7375 default:
7376 break;
7379 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7380 CHECK_LOCAL (loc_index);
7382 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7383 ins->dreg = cfg->locals [loc_index]->dreg;
7384 ip += 5;
7385 ip += stloc_len;
7386 break;
7389 /* Optimize the ldobj+stobj combination */
7390 /* The reference case ends up being a load+store anyway */
7391 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7392 CHECK_STACK (1);
7394 sp --;
7396 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7398 ip += 5 + 5;
7399 ins_flag = 0;
7400 break;
7403 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7404 *sp++ = ins;
7406 ip += 5;
7407 ins_flag = 0;
7408 inline_costs += 1;
7409 break;
7411 case CEE_LDSTR:
7412 CHECK_STACK_OVF (1);
7413 CHECK_OPSIZE (5);
7414 n = read32 (ip + 1);
7416 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7417 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7418 ins->type = STACK_OBJ;
7419 *sp = ins;
7421 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7422 MonoInst *iargs [1];
7424 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7425 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7426 } else {
7427 if (cfg->opt & MONO_OPT_SHARED) {
7428 MonoInst *iargs [3];
7430 if (cfg->compile_aot) {
7431 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7433 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7434 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7435 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7436 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7437 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7438 } else {
7439 if (bblock->out_of_line) {
7440 MonoInst *iargs [2];
7442 if (image == mono_defaults.corlib) {
7444 * Avoid relocations in AOT and save some space by using a
7445 * version of helper_ldstr specialized to mscorlib.
7447 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7448 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7449 } else {
7450 /* Avoid creating the string object */
7451 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7452 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7453 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7456 else
7457 if (cfg->compile_aot) {
7458 NEW_LDSTRCONST (cfg, ins, image, n);
7459 *sp = ins;
7460 MONO_ADD_INS (bblock, ins);
7462 else {
7463 NEW_PCONST (cfg, ins, NULL);
7464 ins->type = STACK_OBJ;
7465 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7466 *sp = ins;
7467 MONO_ADD_INS (bblock, ins);
7472 sp++;
7473 ip += 5;
7474 break;
7475 case CEE_NEWOBJ: {
7476 MonoInst *iargs [2];
7477 MonoMethodSignature *fsig;
7478 MonoInst this_ins;
7479 MonoInst *alloc;
7480 MonoInst *vtable_arg = NULL;
7482 CHECK_OPSIZE (5);
7483 token = read32 (ip + 1);
7484 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7485 if (!cmethod)
7486 goto load_error;
7487 fsig = mono_method_get_signature (cmethod, image, token);
7488 if (!fsig)
7489 goto load_error;
7491 mono_save_token_info (cfg, image, token, cmethod);
7493 if (!mono_class_init (cmethod->klass))
7494 goto load_error;
7496 if (cfg->generic_sharing_context)
7497 context_used = mono_method_check_context_used (cmethod);
7499 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7500 if (check_linkdemand (cfg, method, cmethod))
7501 INLINE_FAILURE;
7502 CHECK_CFG_EXCEPTION;
7503 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7504 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7507 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7508 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7509 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7510 mono_class_vtable (cfg->domain, cmethod->klass);
7511 CHECK_TYPELOAD (cmethod->klass);
7513 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7514 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7515 } else {
7516 if (context_used) {
7517 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7518 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7519 } else {
7520 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7522 CHECK_TYPELOAD (cmethod->klass);
7523 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7528 n = fsig->param_count;
7529 CHECK_STACK (n);
7532 * Generate smaller code for the common newobj <exception> instruction in
7533 * argument checking code.
7535 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7536 is_exception_class (cmethod->klass) && n <= 2 &&
7537 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7538 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7539 MonoInst *iargs [3];
7541 g_assert (!vtable_arg);
7543 sp -= n;
7545 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7546 switch (n) {
7547 case 0:
7548 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7549 break;
7550 case 1:
7551 iargs [1] = sp [0];
7552 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7553 break;
7554 case 2:
7555 iargs [1] = sp [0];
7556 iargs [2] = sp [1];
7557 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7558 break;
7559 default:
7560 g_assert_not_reached ();
7563 ip += 5;
7564 inline_costs += 5;
7565 break;
7568 /* move the args to allow room for 'this' in the first position */
7569 while (n--) {
7570 --sp;
7571 sp [1] = sp [0];
7574 /* check_call_signature () requires sp[0] to be set */
7575 this_ins.type = STACK_OBJ;
7576 sp [0] = &this_ins;
7577 if (check_call_signature (cfg, fsig, sp))
7578 UNVERIFIED;
7580 iargs [0] = NULL;
7582 if (mini_class_is_system_array (cmethod->klass)) {
7583 g_assert (!vtable_arg);
7585 *sp = emit_get_rgctx_method (cfg, context_used,
7586 cmethod, MONO_RGCTX_INFO_METHOD);
7588 /* Avoid varargs in the common case */
7589 if (fsig->param_count == 1)
7590 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7591 else if (fsig->param_count == 2)
7592 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7593 else if (fsig->param_count == 3)
7594 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7595 else
7596 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7597 } else if (cmethod->string_ctor) {
7598 g_assert (!context_used);
7599 g_assert (!vtable_arg);
7600 /* we simply pass a null pointer */
7601 EMIT_NEW_PCONST (cfg, *sp, NULL);
7602 /* now call the string ctor */
7603 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7604 } else {
7605 MonoInst* callvirt_this_arg = NULL;
7607 if (cmethod->klass->valuetype) {
7608 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7609 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7610 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7612 alloc = NULL;
7615 * The code generated by mini_emit_virtual_call () expects
7616 * iargs [0] to be a boxed instance, but luckily the vcall
7617 * will be transformed into a normal call there.
7619 } else if (context_used) {
7620 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7621 *sp = alloc;
7622 } else {
7623 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7625 CHECK_TYPELOAD (cmethod->klass);
7628 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7629 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7630 * As a workaround, we call class cctors before allocating objects.
7632 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7633 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7634 if (cfg->verbose_level > 2)
7635 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7636 class_inits = g_slist_prepend (class_inits, vtable);
7639 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7640 *sp = alloc;
7642 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7644 if (alloc)
7645 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7647 /* Now call the actual ctor */
7648 /* Avoid virtual calls to ctors if possible */
7649 if (cmethod->klass->marshalbyref)
7650 callvirt_this_arg = sp [0];
7653 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7654 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7655 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7656 *sp = ins;
7657 sp++;
7660 CHECK_CFG_EXCEPTION;
7661 } else
7665 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7666 mono_method_check_inlining (cfg, cmethod) &&
7667 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7668 !g_list_find (dont_inline, cmethod)) {
7669 int costs;
7671 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7672 cfg->real_offset += 5;
7673 bblock = cfg->cbb;
7675 inline_costs += costs - 5;
7676 } else {
7677 INLINE_FAILURE;
7678 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7680 } else if (context_used &&
7681 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7682 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7683 MonoInst *cmethod_addr;
7685 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7686 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7688 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7689 } else {
7690 INLINE_FAILURE;
7691 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7692 callvirt_this_arg, NULL, vtable_arg);
7696 if (alloc == NULL) {
7697 /* Valuetype */
7698 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7699 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7700 *sp++= ins;
7702 else
7703 *sp++ = alloc;
7705 ip += 5;
7706 inline_costs += 5;
7707 break;
7709 case CEE_CASTCLASS:
7710 CHECK_STACK (1);
7711 --sp;
7712 CHECK_OPSIZE (5);
7713 token = read32 (ip + 1);
7714 klass = mini_get_class (method, token, generic_context);
7715 CHECK_TYPELOAD (klass);
7716 if (sp [0]->type != STACK_OBJ)
7717 UNVERIFIED;
7719 if (cfg->generic_sharing_context)
7720 context_used = mono_class_check_context_used (klass);
7722 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7723 MonoInst *args [2];
7725 /* obj */
7726 args [0] = *sp;
7728 /* klass */
7729 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7731 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7732 *sp ++ = ins;
7733 ip += 5;
7734 inline_costs += 2;
7735 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7736 MonoMethod *mono_castclass;
7737 MonoInst *iargs [1];
7738 int costs;
7740 mono_castclass = mono_marshal_get_castclass (klass);
7741 iargs [0] = sp [0];
7743 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7744 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7745 g_assert (costs > 0);
7747 ip += 5;
7748 cfg->real_offset += 5;
7749 bblock = cfg->cbb;
7751 *sp++ = iargs [0];
7753 inline_costs += costs;
7755 else {
7756 ins = handle_castclass (cfg, klass, *sp, context_used);
7757 CHECK_CFG_EXCEPTION;
7758 bblock = cfg->cbb;
7759 *sp ++ = ins;
7760 ip += 5;
7762 break;
7763 case CEE_ISINST: {
7764 CHECK_STACK (1);
7765 --sp;
7766 CHECK_OPSIZE (5);
7767 token = read32 (ip + 1);
7768 klass = mini_get_class (method, token, generic_context);
7769 CHECK_TYPELOAD (klass);
7770 if (sp [0]->type != STACK_OBJ)
7771 UNVERIFIED;
7773 if (cfg->generic_sharing_context)
7774 context_used = mono_class_check_context_used (klass);
7776 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7777 MonoInst *args [2];
7779 /* obj */
7780 args [0] = *sp;
7782 /* klass */
7783 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7785 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7786 sp++;
7787 ip += 5;
7788 inline_costs += 2;
7789 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7790 MonoMethod *mono_isinst;
7791 MonoInst *iargs [1];
7792 int costs;
7794 mono_isinst = mono_marshal_get_isinst (klass);
7795 iargs [0] = sp [0];
7797 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7798 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7799 g_assert (costs > 0);
7801 ip += 5;
7802 cfg->real_offset += 5;
7803 bblock = cfg->cbb;
7805 *sp++= iargs [0];
7807 inline_costs += costs;
7809 else {
7810 ins = handle_isinst (cfg, klass, *sp, context_used);
7811 CHECK_CFG_EXCEPTION;
7812 bblock = cfg->cbb;
7813 *sp ++ = ins;
7814 ip += 5;
7816 break;
7818 case CEE_UNBOX_ANY: {
7819 CHECK_STACK (1);
7820 --sp;
7821 CHECK_OPSIZE (5);
7822 token = read32 (ip + 1);
7823 klass = mini_get_class (method, token, generic_context);
7824 CHECK_TYPELOAD (klass);
7826 mono_save_token_info (cfg, image, token, klass);
7828 if (cfg->generic_sharing_context)
7829 context_used = mono_class_check_context_used (klass);
7831 if (generic_class_is_reference_type (cfg, klass)) {
7832 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7833 if (context_used) {
7834 MonoInst *iargs [2];
7836 /* obj */
7837 iargs [0] = *sp;
7838 /* klass */
7839 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7840 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7841 *sp ++ = ins;
7842 ip += 5;
7843 inline_costs += 2;
7844 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7845 MonoMethod *mono_castclass;
7846 MonoInst *iargs [1];
7847 int costs;
7849 mono_castclass = mono_marshal_get_castclass (klass);
7850 iargs [0] = sp [0];
7852 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7853 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7855 g_assert (costs > 0);
7857 ip += 5;
7858 cfg->real_offset += 5;
7859 bblock = cfg->cbb;
7861 *sp++ = iargs [0];
7862 inline_costs += costs;
7863 } else {
7864 ins = handle_castclass (cfg, klass, *sp, 0);
7865 CHECK_CFG_EXCEPTION;
7866 bblock = cfg->cbb;
7867 *sp ++ = ins;
7868 ip += 5;
7870 break;
7873 if (mono_class_is_nullable (klass)) {
7874 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7875 *sp++= ins;
7876 ip += 5;
7877 break;
7880 /* UNBOX */
7881 ins = handle_unbox (cfg, klass, sp, context_used);
7882 *sp = ins;
7884 ip += 5;
7886 /* LDOBJ */
7887 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7888 *sp++ = ins;
7890 inline_costs += 2;
7891 break;
7893 case CEE_BOX: {
7894 MonoInst *val;
7896 CHECK_STACK (1);
7897 --sp;
7898 val = *sp;
7899 CHECK_OPSIZE (5);
7900 token = read32 (ip + 1);
7901 klass = mini_get_class (method, token, generic_context);
7902 CHECK_TYPELOAD (klass);
7904 mono_save_token_info (cfg, image, token, klass);
7906 if (cfg->generic_sharing_context)
7907 context_used = mono_class_check_context_used (klass);
7909 if (generic_class_is_reference_type (cfg, klass)) {
7910 *sp++ = val;
7911 ip += 5;
7912 break;
7915 if (klass == mono_defaults.void_class)
7916 UNVERIFIED;
7917 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7918 UNVERIFIED;
7919 /* frequent check in generic code: box (struct), brtrue */
7920 if (!mono_class_is_nullable (klass) &&
7921 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7922 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7923 ip += 5;
7924 MONO_INST_NEW (cfg, ins, OP_BR);
7925 if (*ip == CEE_BRTRUE_S) {
7926 CHECK_OPSIZE (2);
7927 ip++;
7928 target = ip + 1 + (signed char)(*ip);
7929 ip++;
7930 } else {
7931 CHECK_OPSIZE (5);
7932 ip++;
7933 target = ip + 4 + (gint)(read32 (ip));
7934 ip += 4;
7936 GET_BBLOCK (cfg, tblock, target);
7937 link_bblock (cfg, bblock, tblock);
7938 ins->inst_target_bb = tblock;
7939 GET_BBLOCK (cfg, tblock, ip);
7941 * This leads to some inconsistency, since the two bblocks are
7942 * not really connected, but it is needed for handling stack
7943 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7944 * FIXME: This should only be needed if sp != stack_start, but that
7945 * doesn't work for some reason (test failure in mcs/tests on x86).
7947 link_bblock (cfg, bblock, tblock);
7948 if (sp != stack_start) {
7949 handle_stack_args (cfg, stack_start, sp - stack_start);
7950 sp = stack_start;
7951 CHECK_UNVERIFIABLE (cfg);
7953 MONO_ADD_INS (bblock, ins);
7954 start_new_bblock = 1;
7955 break;
7958 *sp++ = handle_box (cfg, val, klass, context_used);
7960 CHECK_CFG_EXCEPTION;
7961 ip += 5;
7962 inline_costs += 1;
7963 break;
7965 case CEE_UNBOX: {
7966 CHECK_STACK (1);
7967 --sp;
7968 CHECK_OPSIZE (5);
7969 token = read32 (ip + 1);
7970 klass = mini_get_class (method, token, generic_context);
7971 CHECK_TYPELOAD (klass);
7973 mono_save_token_info (cfg, image, token, klass);
7975 if (cfg->generic_sharing_context)
7976 context_used = mono_class_check_context_used (klass);
7978 if (mono_class_is_nullable (klass)) {
7979 MonoInst *val;
7981 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7982 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7984 *sp++= ins;
7985 } else {
7986 ins = handle_unbox (cfg, klass, sp, context_used);
7987 *sp++ = ins;
7989 ip += 5;
7990 inline_costs += 2;
7991 break;
7993 case CEE_LDFLD:
7994 case CEE_LDFLDA:
7995 case CEE_STFLD: {
7996 MonoClassField *field;
7997 int costs;
7998 guint foffset;
8000 if (*ip == CEE_STFLD) {
8001 CHECK_STACK (2);
8002 sp -= 2;
8003 } else {
8004 CHECK_STACK (1);
8005 --sp;
8007 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8008 UNVERIFIED;
8009 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8010 UNVERIFIED;
8011 CHECK_OPSIZE (5);
8012 token = read32 (ip + 1);
8013 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8014 field = mono_method_get_wrapper_data (method, token);
8015 klass = field->parent;
8017 else {
8018 field = mono_field_from_token (image, token, &klass, generic_context);
8020 if (!field)
8021 goto load_error;
8022 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8023 FIELD_ACCESS_FAILURE;
8024 mono_class_init (klass);
8026 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8027 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8028 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8029 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8032 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8033 if (*ip == CEE_STFLD) {
8034 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8035 UNVERIFIED;
8036 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8037 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8038 MonoInst *iargs [5];
8040 iargs [0] = sp [0];
8041 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8042 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8043 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8044 field->offset);
8045 iargs [4] = sp [1];
8047 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8048 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8049 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8050 g_assert (costs > 0);
8052 cfg->real_offset += 5;
8053 bblock = cfg->cbb;
8055 inline_costs += costs;
8056 } else {
8057 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8059 } else {
8060 MonoInst *store;
8062 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8064 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8065 store->flags |= MONO_INST_FAULT;
8067 #if HAVE_WRITE_BARRIERS
8068 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8069 /* insert call to write barrier */
8070 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8071 MonoInst *iargs [2], *dummy_use;
8072 int dreg;
8074 dreg = alloc_preg (cfg);
8075 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8076 iargs [1] = sp [1];
8077 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8079 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8081 #endif
8083 store->flags |= ins_flag;
8085 ins_flag = 0;
8086 ip += 5;
8087 break;
8090 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8091 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8092 MonoInst *iargs [4];
8094 iargs [0] = sp [0];
8095 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8096 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8097 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8098 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8099 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8100 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8101 bblock = cfg->cbb;
8102 g_assert (costs > 0);
8104 cfg->real_offset += 5;
8106 *sp++ = iargs [0];
8108 inline_costs += costs;
8109 } else {
8110 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8111 *sp++ = ins;
8113 } else {
8114 if (sp [0]->type == STACK_VTYPE) {
8115 MonoInst *var;
8117 /* Have to compute the address of the variable */
8119 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8120 if (!var)
8121 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8122 else
8123 g_assert (var->klass == klass);
8125 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8126 sp [0] = ins;
8129 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8131 if (*ip == CEE_LDFLDA) {
8132 dreg = alloc_preg (cfg);
8134 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8135 ins->klass = mono_class_from_mono_type (field->type);
8136 ins->type = STACK_MP;
8137 *sp++ = ins;
8138 } else {
8139 MonoInst *load;
8141 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8142 load->flags |= ins_flag;
8143 load->flags |= MONO_INST_FAULT;
8144 *sp++ = load;
8147 ins_flag = 0;
8148 ip += 5;
8149 break;
8151 case CEE_LDSFLD:
8152 case CEE_LDSFLDA:
8153 case CEE_STSFLD: {
8154 MonoClassField *field;
8155 gpointer addr = NULL;
8156 gboolean is_special_static;
8158 CHECK_OPSIZE (5);
8159 token = read32 (ip + 1);
8161 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8162 field = mono_method_get_wrapper_data (method, token);
8163 klass = field->parent;
8165 else
8166 field = mono_field_from_token (image, token, &klass, generic_context);
8167 if (!field)
8168 goto load_error;
8169 mono_class_init (klass);
8170 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8171 FIELD_ACCESS_FAILURE;
8173 /* if the class is Critical then transparent code cannot access it's fields */
8174 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8175 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8178 * We can only support shared generic static
8179 * field access on architectures where the
8180 * trampoline code has been extended to handle
8181 * the generic class init.
8183 #ifndef MONO_ARCH_VTABLE_REG
8184 GENERIC_SHARING_FAILURE (*ip);
8185 #endif
8187 if (cfg->generic_sharing_context)
8188 context_used = mono_class_check_context_used (klass);
8190 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8192 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8193 * to be called here.
8195 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8196 mono_class_vtable (cfg->domain, klass);
8197 CHECK_TYPELOAD (klass);
8199 mono_domain_lock (cfg->domain);
8200 if (cfg->domain->special_static_fields)
8201 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8202 mono_domain_unlock (cfg->domain);
8204 is_special_static = mono_class_field_is_special_static (field);
8206 /* Generate IR to compute the field address */
8207 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8209 * Fast access to TLS data
8210 * Inline version of get_thread_static_data () in
8211 * threads.c.
8213 guint32 offset;
8214 int idx, static_data_reg, array_reg, dreg;
8215 MonoInst *thread_ins;
8217 // offset &= 0x7fffffff;
8218 // idx = (offset >> 24) - 1;
8219 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8221 thread_ins = mono_get_thread_intrinsic (cfg);
8222 MONO_ADD_INS (cfg->cbb, thread_ins);
8223 static_data_reg = alloc_ireg (cfg);
8224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8226 if (cfg->compile_aot) {
8227 int offset_reg, offset2_reg, idx_reg;
8229 /* For TLS variables, this will return the TLS offset */
8230 EMIT_NEW_SFLDACONST (cfg, ins, field);
8231 offset_reg = ins->dreg;
8232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8233 idx_reg = alloc_ireg (cfg);
8234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8235 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8236 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8237 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8238 array_reg = alloc_ireg (cfg);
8239 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8240 offset2_reg = alloc_ireg (cfg);
8241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8242 dreg = alloc_ireg (cfg);
8243 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8244 } else {
8245 offset = (gsize)addr & 0x7fffffff;
8246 idx = (offset >> 24) - 1;
8248 array_reg = alloc_ireg (cfg);
8249 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8250 dreg = alloc_ireg (cfg);
8251 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8253 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8254 (cfg->compile_aot && is_special_static) ||
8255 (context_used && is_special_static)) {
8256 MonoInst *iargs [2];
8258 g_assert (field->parent);
8259 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8260 if (context_used) {
8261 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8262 field, MONO_RGCTX_INFO_CLASS_FIELD);
8263 } else {
8264 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8266 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8267 } else if (context_used) {
8268 MonoInst *static_data;
8271 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8272 method->klass->name_space, method->klass->name, method->name,
8273 depth, field->offset);
8276 if (mono_class_needs_cctor_run (klass, method))
8277 emit_generic_class_init (cfg, klass);
8280 * The pointer we're computing here is
8282 * super_info.static_data + field->offset
8284 static_data = emit_get_rgctx_klass (cfg, context_used,
8285 klass, MONO_RGCTX_INFO_STATIC_DATA);
8287 if (field->offset == 0) {
8288 ins = static_data;
8289 } else {
8290 int addr_reg = mono_alloc_preg (cfg);
8291 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8293 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8294 MonoInst *iargs [2];
8296 g_assert (field->parent);
8297 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8298 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8299 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8300 } else {
8301 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8303 CHECK_TYPELOAD (klass);
8304 if (!addr) {
8305 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8306 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8307 if (cfg->verbose_level > 2)
8308 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8309 class_inits = g_slist_prepend (class_inits, vtable);
8310 } else {
8311 if (cfg->run_cctors) {
8312 MonoException *ex;
8313 /* This makes so that inline cannot trigger */
8314 /* .cctors: too many apps depend on them */
8315 /* running with a specific order... */
8316 if (! vtable->initialized)
8317 INLINE_FAILURE;
8318 ex = mono_runtime_class_init_full (vtable, FALSE);
8319 if (ex) {
8320 set_exception_object (cfg, ex);
8321 goto exception_exit;
8325 addr = (char*)vtable->data + field->offset;
8327 if (cfg->compile_aot)
8328 EMIT_NEW_SFLDACONST (cfg, ins, field);
8329 else
8330 EMIT_NEW_PCONST (cfg, ins, addr);
8331 } else {
8332 MonoInst *iargs [1];
8333 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8334 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8338 /* Generate IR to do the actual load/store operation */
8340 if (*ip == CEE_LDSFLDA) {
8341 ins->klass = mono_class_from_mono_type (field->type);
8342 ins->type = STACK_PTR;
8343 *sp++ = ins;
8344 } else if (*ip == CEE_STSFLD) {
8345 MonoInst *store;
8346 CHECK_STACK (1);
8347 sp--;
8349 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8350 store->flags |= ins_flag;
8351 } else {
8352 gboolean is_const = FALSE;
8353 MonoVTable *vtable = NULL;
8355 if (!context_used) {
8356 vtable = mono_class_vtable (cfg->domain, klass);
8357 CHECK_TYPELOAD (klass);
8359 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8360 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8361 gpointer addr = (char*)vtable->data + field->offset;
8362 int ro_type = field->type->type;
8363 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8364 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8366 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8367 is_const = TRUE;
8368 switch (ro_type) {
8369 case MONO_TYPE_BOOLEAN:
8370 case MONO_TYPE_U1:
8371 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8372 sp++;
8373 break;
8374 case MONO_TYPE_I1:
8375 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8376 sp++;
8377 break;
8378 case MONO_TYPE_CHAR:
8379 case MONO_TYPE_U2:
8380 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8381 sp++;
8382 break;
8383 case MONO_TYPE_I2:
8384 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8385 sp++;
8386 break;
8387 break;
8388 case MONO_TYPE_I4:
8389 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8390 sp++;
8391 break;
8392 case MONO_TYPE_U4:
8393 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8394 sp++;
8395 break;
8396 case MONO_TYPE_I:
8397 case MONO_TYPE_U:
8398 case MONO_TYPE_PTR:
8399 case MONO_TYPE_FNPTR:
8400 #ifndef HAVE_MOVING_COLLECTOR
8401 case MONO_TYPE_STRING:
8402 case MONO_TYPE_OBJECT:
8403 case MONO_TYPE_CLASS:
8404 case MONO_TYPE_SZARRAY:
8405 case MONO_TYPE_ARRAY:
8406 #endif
8407 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8408 type_to_eval_stack_type ((cfg), field->type, *sp);
8409 sp++;
8410 break;
8411 case MONO_TYPE_I8:
8412 case MONO_TYPE_U8:
8413 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8414 sp++;
8415 break;
8416 case MONO_TYPE_R4:
8417 case MONO_TYPE_R8:
8418 case MONO_TYPE_VALUETYPE:
8419 default:
8420 is_const = FALSE;
8421 break;
8425 if (!is_const) {
8426 MonoInst *load;
8428 CHECK_STACK_OVF (1);
8430 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8431 load->flags |= ins_flag;
8432 ins_flag = 0;
8433 *sp++ = load;
8436 ins_flag = 0;
8437 ip += 5;
8438 break;
8440 case CEE_STOBJ:
8441 CHECK_STACK (2);
8442 sp -= 2;
8443 CHECK_OPSIZE (5);
8444 token = read32 (ip + 1);
8445 klass = mini_get_class (method, token, generic_context);
8446 CHECK_TYPELOAD (klass);
8447 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8448 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8449 #if HAVE_WRITE_BARRIERS
8450 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8451 generic_class_is_reference_type (cfg, klass)) {
8452 MonoInst *dummy_use;
8453 /* insert call to write barrier */
8454 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8455 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8456 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8458 #endif
8459 ins_flag = 0;
8460 ip += 5;
8461 inline_costs += 1;
8462 break;
8465 * Array opcodes
8467 case CEE_NEWARR: {
8468 MonoInst *len_ins;
8469 const char *data_ptr;
8470 int data_size = 0;
8471 guint32 field_token;
8473 CHECK_STACK (1);
8474 --sp;
8476 CHECK_OPSIZE (5);
8477 token = read32 (ip + 1);
8479 klass = mini_get_class (method, token, generic_context);
8480 CHECK_TYPELOAD (klass);
8482 if (cfg->generic_sharing_context)
8483 context_used = mono_class_check_context_used (klass);
8485 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8486 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8487 ins->sreg1 = sp [0]->dreg;
8488 ins->type = STACK_I4;
8489 ins->dreg = alloc_ireg (cfg);
8490 MONO_ADD_INS (cfg->cbb, ins);
8491 *sp = mono_decompose_opcode (cfg, ins);
8494 if (context_used) {
8495 MonoInst *args [3];
8496 MonoClass *array_class = mono_array_class_get (klass, 1);
8497 /* FIXME: we cannot get a managed
8498 allocator because we can't get the
8499 open generic class's vtable. We
8500 have the same problem in
8501 handle_alloc(). This
8502 needs to be solved so that we can
8503 have managed allocs of shared
8504 generic classes. */
8506 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8507 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8509 MonoMethod *managed_alloc = NULL;
8511 /* FIXME: Decompose later to help abcrem */
8513 /* vtable */
8514 args [0] = emit_get_rgctx_klass (cfg, context_used,
8515 array_class, MONO_RGCTX_INFO_VTABLE);
8516 /* array len */
8517 args [1] = sp [0];
8519 if (managed_alloc)
8520 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8521 else
8522 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8523 } else {
8524 if (cfg->opt & MONO_OPT_SHARED) {
8525 /* Decompose now to avoid problems with references to the domainvar */
8526 MonoInst *iargs [3];
8528 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8529 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8530 iargs [2] = sp [0];
8532 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8533 } else {
8534 /* Decompose later since it is needed by abcrem */
8535 MonoClass *array_type = mono_array_class_get (klass, 1);
8536 mono_class_vtable (cfg->domain, array_type);
8537 CHECK_TYPELOAD (array_type);
8539 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8540 ins->dreg = alloc_preg (cfg);
8541 ins->sreg1 = sp [0]->dreg;
8542 ins->inst_newa_class = klass;
8543 ins->type = STACK_OBJ;
8544 ins->klass = klass;
8545 MONO_ADD_INS (cfg->cbb, ins);
8546 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8547 cfg->cbb->has_array_access = TRUE;
8549 /* Needed so mono_emit_load_get_addr () gets called */
8550 mono_get_got_var (cfg);
8554 len_ins = sp [0];
8555 ip += 5;
8556 *sp++ = ins;
8557 inline_costs += 1;
8560 * we inline/optimize the initialization sequence if possible.
8561 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8562 * for small sizes open code the memcpy
8563 * ensure the rva field is big enough
8565 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8566 MonoMethod *memcpy_method = get_memcpy_method ();
8567 MonoInst *iargs [3];
8568 int add_reg = alloc_preg (cfg);
8570 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8571 if (cfg->compile_aot) {
8572 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8573 } else {
8574 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8576 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8577 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8578 ip += 11;
8581 break;
8583 case CEE_LDLEN:
8584 CHECK_STACK (1);
8585 --sp;
8586 if (sp [0]->type != STACK_OBJ)
8587 UNVERIFIED;
8589 dreg = alloc_preg (cfg);
8590 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8591 ins->dreg = alloc_preg (cfg);
8592 ins->sreg1 = sp [0]->dreg;
8593 ins->type = STACK_I4;
8594 MONO_ADD_INS (cfg->cbb, ins);
8595 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8596 cfg->cbb->has_array_access = TRUE;
8597 ip ++;
8598 *sp++ = ins;
8599 break;
8600 case CEE_LDELEMA:
8601 CHECK_STACK (2);
8602 sp -= 2;
8603 CHECK_OPSIZE (5);
8604 if (sp [0]->type != STACK_OBJ)
8605 UNVERIFIED;
8607 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8609 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8610 CHECK_TYPELOAD (klass);
8611 /* we need to make sure that this array is exactly the type it needs
8612 * to be for correctness. the wrappers are lax with their usage
8613 * so we need to ignore them here
8615 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8616 MonoClass *array_class = mono_array_class_get (klass, 1);
8617 mini_emit_check_array_type (cfg, sp [0], array_class);
8618 CHECK_TYPELOAD (array_class);
8621 readonly = FALSE;
8622 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8623 *sp++ = ins;
8624 ip += 5;
8625 break;
8626 case CEE_LDELEM:
8627 case CEE_LDELEM_I1:
8628 case CEE_LDELEM_U1:
8629 case CEE_LDELEM_I2:
8630 case CEE_LDELEM_U2:
8631 case CEE_LDELEM_I4:
8632 case CEE_LDELEM_U4:
8633 case CEE_LDELEM_I8:
8634 case CEE_LDELEM_I:
8635 case CEE_LDELEM_R4:
8636 case CEE_LDELEM_R8:
8637 case CEE_LDELEM_REF: {
8638 MonoInst *addr;
8640 CHECK_STACK (2);
8641 sp -= 2;
8643 if (*ip == CEE_LDELEM) {
8644 CHECK_OPSIZE (5);
8645 token = read32 (ip + 1);
8646 klass = mini_get_class (method, token, generic_context);
8647 CHECK_TYPELOAD (klass);
8648 mono_class_init (klass);
8650 else
8651 klass = array_access_to_klass (*ip);
8653 if (sp [0]->type != STACK_OBJ)
8654 UNVERIFIED;
8656 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8658 if (sp [1]->opcode == OP_ICONST) {
8659 int array_reg = sp [0]->dreg;
8660 int index_reg = sp [1]->dreg;
8661 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8663 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8665 } else {
8666 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8667 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8669 *sp++ = ins;
8670 if (*ip == CEE_LDELEM)
8671 ip += 5;
8672 else
8673 ++ip;
8674 break;
8676 case CEE_STELEM_I:
8677 case CEE_STELEM_I1:
8678 case CEE_STELEM_I2:
8679 case CEE_STELEM_I4:
8680 case CEE_STELEM_I8:
8681 case CEE_STELEM_R4:
8682 case CEE_STELEM_R8:
8683 case CEE_STELEM_REF:
8684 case CEE_STELEM: {
8685 MonoInst *addr;
8687 CHECK_STACK (3);
8688 sp -= 3;
8690 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8692 if (*ip == CEE_STELEM) {
8693 CHECK_OPSIZE (5);
8694 token = read32 (ip + 1);
8695 klass = mini_get_class (method, token, generic_context);
8696 CHECK_TYPELOAD (klass);
8697 mono_class_init (klass);
8699 else
8700 klass = array_access_to_klass (*ip);
8702 if (sp [0]->type != STACK_OBJ)
8703 UNVERIFIED;
8705 /* storing a NULL doesn't need any of the complex checks in stelemref */
8706 if (generic_class_is_reference_type (cfg, klass) &&
8707 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8708 MonoMethod* helper = mono_marshal_get_stelemref ();
8709 MonoInst *iargs [3];
8711 if (sp [0]->type != STACK_OBJ)
8712 UNVERIFIED;
8713 if (sp [2]->type != STACK_OBJ)
8714 UNVERIFIED;
8716 iargs [2] = sp [2];
8717 iargs [1] = sp [1];
8718 iargs [0] = sp [0];
8720 mono_emit_method_call (cfg, helper, iargs, NULL);
8721 } else {
8722 if (sp [1]->opcode == OP_ICONST) {
8723 int array_reg = sp [0]->dreg;
8724 int index_reg = sp [1]->dreg;
8725 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8727 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8728 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8729 } else {
8730 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8731 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8735 if (*ip == CEE_STELEM)
8736 ip += 5;
8737 else
8738 ++ip;
8739 inline_costs += 1;
8740 break;
8742 case CEE_CKFINITE: {
8743 CHECK_STACK (1);
8744 --sp;
8746 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8747 ins->sreg1 = sp [0]->dreg;
8748 ins->dreg = alloc_freg (cfg);
8749 ins->type = STACK_R8;
8750 MONO_ADD_INS (bblock, ins);
8752 *sp++ = mono_decompose_opcode (cfg, ins);
8754 ++ip;
8755 break;
8757 case CEE_REFANYVAL: {
8758 MonoInst *src_var, *src;
8760 int klass_reg = alloc_preg (cfg);
8761 int dreg = alloc_preg (cfg);
8763 CHECK_STACK (1);
8764 MONO_INST_NEW (cfg, ins, *ip);
8765 --sp;
8766 CHECK_OPSIZE (5);
8767 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8768 CHECK_TYPELOAD (klass);
8769 mono_class_init (klass);
8771 if (cfg->generic_sharing_context)
8772 context_used = mono_class_check_context_used (klass);
8774 // FIXME:
8775 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8776 if (!src_var)
8777 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8778 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8781 if (context_used) {
8782 MonoInst *klass_ins;
8784 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8785 klass, MONO_RGCTX_INFO_KLASS);
8787 // FIXME:
8788 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8789 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8790 } else {
8791 mini_emit_class_check (cfg, klass_reg, klass);
8793 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8794 ins->type = STACK_MP;
8795 *sp++ = ins;
8796 ip += 5;
8797 break;
8799 case CEE_MKREFANY: {
8800 MonoInst *loc, *addr;
8802 CHECK_STACK (1);
8803 MONO_INST_NEW (cfg, ins, *ip);
8804 --sp;
8805 CHECK_OPSIZE (5);
8806 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8807 CHECK_TYPELOAD (klass);
8808 mono_class_init (klass);
8810 if (cfg->generic_sharing_context)
8811 context_used = mono_class_check_context_used (klass);
8813 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8814 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8816 if (context_used) {
8817 MonoInst *const_ins;
8818 int type_reg = alloc_preg (cfg);
8820 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8824 } else if (cfg->compile_aot) {
8825 int const_reg = alloc_preg (cfg);
8826 int type_reg = alloc_preg (cfg);
8828 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8831 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8832 } else {
8833 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8834 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8836 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8838 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8839 ins->type = STACK_VTYPE;
8840 ins->klass = mono_defaults.typed_reference_class;
8841 *sp++ = ins;
8842 ip += 5;
8843 break;
8845 case CEE_LDTOKEN: {
8846 gpointer handle;
8847 MonoClass *handle_class;
8849 CHECK_STACK_OVF (1);
8851 CHECK_OPSIZE (5);
8852 n = read32 (ip + 1);
8854 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8855 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8856 handle = mono_method_get_wrapper_data (method, n);
8857 handle_class = mono_method_get_wrapper_data (method, n + 1);
8858 if (handle_class == mono_defaults.typehandle_class)
8859 handle = &((MonoClass*)handle)->byval_arg;
8861 else {
8862 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8864 if (!handle)
8865 goto load_error;
8866 mono_class_init (handle_class);
8867 if (cfg->generic_sharing_context) {
8868 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8869 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8870 /* This case handles ldtoken
8871 of an open type, like for
8872 typeof(Gen<>). */
8873 context_used = 0;
8874 } else if (handle_class == mono_defaults.typehandle_class) {
8875 /* If we get a MONO_TYPE_CLASS
8876 then we need to provide the
8877 open type, not an
8878 instantiation of it. */
8879 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8880 context_used = 0;
8881 else
8882 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8883 } else if (handle_class == mono_defaults.fieldhandle_class)
8884 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8885 else if (handle_class == mono_defaults.methodhandle_class)
8886 context_used = mono_method_check_context_used (handle);
8887 else
8888 g_assert_not_reached ();
8891 if ((cfg->opt & MONO_OPT_SHARED) &&
8892 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8893 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8894 MonoInst *addr, *vtvar, *iargs [3];
8895 int method_context_used;
8897 if (cfg->generic_sharing_context)
8898 method_context_used = mono_method_check_context_used (method);
8899 else
8900 method_context_used = 0;
8902 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8904 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8905 EMIT_NEW_ICONST (cfg, iargs [1], n);
8906 if (method_context_used) {
8907 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8908 method, MONO_RGCTX_INFO_METHOD);
8909 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8910 } else {
8911 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8912 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8914 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8918 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8919 } else {
8920 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8921 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8922 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8923 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8924 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8925 MonoClass *tclass = mono_class_from_mono_type (handle);
8927 mono_class_init (tclass);
8928 if (context_used) {
8929 ins = emit_get_rgctx_klass (cfg, context_used,
8930 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8931 } else if (cfg->compile_aot) {
8932 if (method->wrapper_type) {
8933 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8934 /* Special case for static synchronized wrappers */
8935 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8936 } else {
8937 /* FIXME: n is not a normal token */
8938 cfg->disable_aot = TRUE;
8939 EMIT_NEW_PCONST (cfg, ins, NULL);
8941 } else {
8942 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8944 } else {
8945 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8947 ins->type = STACK_OBJ;
8948 ins->klass = cmethod->klass;
8949 ip += 5;
8950 } else {
8951 MonoInst *addr, *vtvar;
8953 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8955 if (context_used) {
8956 if (handle_class == mono_defaults.typehandle_class) {
8957 ins = emit_get_rgctx_klass (cfg, context_used,
8958 mono_class_from_mono_type (handle),
8959 MONO_RGCTX_INFO_TYPE);
8960 } else if (handle_class == mono_defaults.methodhandle_class) {
8961 ins = emit_get_rgctx_method (cfg, context_used,
8962 handle, MONO_RGCTX_INFO_METHOD);
8963 } else if (handle_class == mono_defaults.fieldhandle_class) {
8964 ins = emit_get_rgctx_field (cfg, context_used,
8965 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8966 } else {
8967 g_assert_not_reached ();
8969 } else if (cfg->compile_aot) {
8970 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8971 } else {
8972 EMIT_NEW_PCONST (cfg, ins, handle);
8974 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8975 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8976 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8980 *sp++ = ins;
8981 ip += 5;
8982 break;
8984 case CEE_THROW:
8985 CHECK_STACK (1);
8986 MONO_INST_NEW (cfg, ins, OP_THROW);
8987 --sp;
8988 ins->sreg1 = sp [0]->dreg;
8989 ip++;
8990 bblock->out_of_line = TRUE;
8991 MONO_ADD_INS (bblock, ins);
8992 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8993 MONO_ADD_INS (bblock, ins);
8994 sp = stack_start;
8996 link_bblock (cfg, bblock, end_bblock);
8997 start_new_bblock = 1;
8998 break;
8999 case CEE_ENDFINALLY:
9000 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9001 MONO_ADD_INS (bblock, ins);
9002 ip++;
9003 start_new_bblock = 1;
9006 * Control will leave the method so empty the stack, otherwise
9007 * the next basic block will start with a nonempty stack.
9009 while (sp != stack_start) {
9010 sp--;
9012 break;
9013 case CEE_LEAVE:
9014 case CEE_LEAVE_S: {
9015 GList *handlers;
9017 if (*ip == CEE_LEAVE) {
9018 CHECK_OPSIZE (5);
9019 target = ip + 5 + (gint32)read32(ip + 1);
9020 } else {
9021 CHECK_OPSIZE (2);
9022 target = ip + 2 + (signed char)(ip [1]);
9025 /* empty the stack */
9026 while (sp != stack_start) {
9027 sp--;
9031 * If this leave statement is in a catch block, check for a
9032 * pending exception, and rethrow it if necessary.
9033 * We avoid doing this in runtime invoke wrappers, since those are called
9034 * by native code which excepts the wrapper to catch all exceptions.
9036 for (i = 0; i < header->num_clauses; ++i) {
9037 MonoExceptionClause *clause = &header->clauses [i];
9040 * Use <= in the final comparison to handle clauses with multiple
9041 * leave statements, like in bug #78024.
9042 * The ordering of the exception clauses guarantees that we find the
9043 * innermost clause.
9045 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9046 MonoInst *exc_ins;
9047 MonoBasicBlock *dont_throw;
9050 MonoInst *load;
9052 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9055 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9057 NEW_BBLOCK (cfg, dont_throw);
9060 * Currently, we allways rethrow the abort exception, despite the
9061 * fact that this is not correct. See thread6.cs for an example.
9062 * But propagating the abort exception is more important than
9063 * getting the sematics right.
9065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9067 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9069 MONO_START_BB (cfg, dont_throw);
9070 bblock = cfg->cbb;
9074 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9075 GList *tmp;
9076 MonoExceptionClause *clause;
9078 for (tmp = handlers; tmp; tmp = tmp->next) {
9079 clause = tmp->data;
9080 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9081 g_assert (tblock);
9082 link_bblock (cfg, bblock, tblock);
9083 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9084 ins->inst_target_bb = tblock;
9085 ins->inst_eh_block = clause;
9086 MONO_ADD_INS (bblock, ins);
9087 bblock->has_call_handler = 1;
9088 if (COMPILE_LLVM (cfg)) {
9089 MonoBasicBlock *target_bb;
9092 * Link the finally bblock with the target, since it will
9093 * conceptually branch there.
9094 * FIXME: Have to link the bblock containing the endfinally.
9096 GET_BBLOCK (cfg, target_bb, target);
9097 link_bblock (cfg, tblock, target_bb);
9100 g_list_free (handlers);
9103 MONO_INST_NEW (cfg, ins, OP_BR);
9104 MONO_ADD_INS (bblock, ins);
9105 GET_BBLOCK (cfg, tblock, target);
9106 link_bblock (cfg, bblock, tblock);
9107 ins->inst_target_bb = tblock;
9108 start_new_bblock = 1;
9110 if (*ip == CEE_LEAVE)
9111 ip += 5;
9112 else
9113 ip += 2;
9115 break;
9119 * Mono specific opcodes
9121 case MONO_CUSTOM_PREFIX: {
9123 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9125 CHECK_OPSIZE (2);
9126 switch (ip [1]) {
9127 case CEE_MONO_ICALL: {
9128 gpointer func;
9129 MonoJitICallInfo *info;
9131 token = read32 (ip + 2);
9132 func = mono_method_get_wrapper_data (method, token);
9133 info = mono_find_jit_icall_by_addr (func);
9134 g_assert (info);
9136 CHECK_STACK (info->sig->param_count);
9137 sp -= info->sig->param_count;
9139 ins = mono_emit_jit_icall (cfg, info->func, sp);
9140 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9141 *sp++ = ins;
9143 ip += 6;
9144 inline_costs += 10 * num_calls++;
9146 break;
9148 case CEE_MONO_LDPTR: {
9149 gpointer ptr;
9151 CHECK_STACK_OVF (1);
9152 CHECK_OPSIZE (6);
9153 token = read32 (ip + 2);
9155 ptr = mono_method_get_wrapper_data (method, token);
9156 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9157 MonoJitICallInfo *callinfo;
9158 const char *icall_name;
9160 icall_name = method->name + strlen ("__icall_wrapper_");
9161 g_assert (icall_name);
9162 callinfo = mono_find_jit_icall_by_name (icall_name);
9163 g_assert (callinfo);
9165 if (ptr == callinfo->func) {
9166 /* Will be transformed into an AOTCONST later */
9167 EMIT_NEW_PCONST (cfg, ins, ptr);
9168 *sp++ = ins;
9169 ip += 6;
9170 break;
9173 /* FIXME: Generalize this */
9174 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9175 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9176 *sp++ = ins;
9177 ip += 6;
9178 break;
9180 EMIT_NEW_PCONST (cfg, ins, ptr);
9181 *sp++ = ins;
9182 ip += 6;
9183 inline_costs += 10 * num_calls++;
9184 /* Can't embed random pointers into AOT code */
9185 cfg->disable_aot = 1;
9186 break;
9188 case CEE_MONO_ICALL_ADDR: {
9189 MonoMethod *cmethod;
9190 gpointer ptr;
9192 CHECK_STACK_OVF (1);
9193 CHECK_OPSIZE (6);
9194 token = read32 (ip + 2);
9196 cmethod = mono_method_get_wrapper_data (method, token);
9198 if (cfg->compile_aot) {
9199 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9200 } else {
9201 ptr = mono_lookup_internal_call (cmethod);
9202 g_assert (ptr);
9203 EMIT_NEW_PCONST (cfg, ins, ptr);
9205 *sp++ = ins;
9206 ip += 6;
9207 break;
9209 case CEE_MONO_VTADDR: {
9210 MonoInst *src_var, *src;
9212 CHECK_STACK (1);
9213 --sp;
9215 // FIXME:
9216 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9217 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9218 *sp++ = src;
9219 ip += 2;
9220 break;
9222 case CEE_MONO_NEWOBJ: {
9223 MonoInst *iargs [2];
9225 CHECK_STACK_OVF (1);
9226 CHECK_OPSIZE (6);
9227 token = read32 (ip + 2);
9228 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9229 mono_class_init (klass);
9230 NEW_DOMAINCONST (cfg, iargs [0]);
9231 MONO_ADD_INS (cfg->cbb, iargs [0]);
9232 NEW_CLASSCONST (cfg, iargs [1], klass);
9233 MONO_ADD_INS (cfg->cbb, iargs [1]);
9234 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9235 ip += 6;
9236 inline_costs += 10 * num_calls++;
9237 break;
9239 case CEE_MONO_OBJADDR:
9240 CHECK_STACK (1);
9241 --sp;
9242 MONO_INST_NEW (cfg, ins, OP_MOVE);
9243 ins->dreg = alloc_preg (cfg);
9244 ins->sreg1 = sp [0]->dreg;
9245 ins->type = STACK_MP;
9246 MONO_ADD_INS (cfg->cbb, ins);
9247 *sp++ = ins;
9248 ip += 2;
9249 break;
9250 case CEE_MONO_LDNATIVEOBJ:
9252 * Similar to LDOBJ, but instead load the unmanaged
9253 * representation of the vtype to the stack.
9255 CHECK_STACK (1);
9256 CHECK_OPSIZE (6);
9257 --sp;
9258 token = read32 (ip + 2);
9259 klass = mono_method_get_wrapper_data (method, token);
9260 g_assert (klass->valuetype);
9261 mono_class_init (klass);
9264 MonoInst *src, *dest, *temp;
9266 src = sp [0];
9267 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9268 temp->backend.is_pinvoke = 1;
9269 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9270 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9272 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9273 dest->type = STACK_VTYPE;
9274 dest->klass = klass;
9276 *sp ++ = dest;
9277 ip += 6;
9279 break;
9280 case CEE_MONO_RETOBJ: {
9282 * Same as RET, but return the native representation of a vtype
9283 * to the caller.
9285 g_assert (cfg->ret);
9286 g_assert (mono_method_signature (method)->pinvoke);
9287 CHECK_STACK (1);
9288 --sp;
9290 CHECK_OPSIZE (6);
9291 token = read32 (ip + 2);
9292 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9294 if (!cfg->vret_addr) {
9295 g_assert (cfg->ret_var_is_local);
9297 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9298 } else {
9299 EMIT_NEW_RETLOADA (cfg, ins);
9301 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9303 if (sp != stack_start)
9304 UNVERIFIED;
9306 MONO_INST_NEW (cfg, ins, OP_BR);
9307 ins->inst_target_bb = end_bblock;
9308 MONO_ADD_INS (bblock, ins);
9309 link_bblock (cfg, bblock, end_bblock);
9310 start_new_bblock = 1;
9311 ip += 6;
9312 break;
9314 case CEE_MONO_CISINST:
9315 case CEE_MONO_CCASTCLASS: {
9316 int token;
9317 CHECK_STACK (1);
9318 --sp;
9319 CHECK_OPSIZE (6);
9320 token = read32 (ip + 2);
9321 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9322 if (ip [1] == CEE_MONO_CISINST)
9323 ins = handle_cisinst (cfg, klass, sp [0]);
9324 else
9325 ins = handle_ccastclass (cfg, klass, sp [0]);
9326 bblock = cfg->cbb;
9327 *sp++ = ins;
9328 ip += 6;
9329 break;
9331 case CEE_MONO_SAVE_LMF:
9332 case CEE_MONO_RESTORE_LMF:
9333 #ifdef MONO_ARCH_HAVE_LMF_OPS
9334 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9335 MONO_ADD_INS (bblock, ins);
9336 cfg->need_lmf_area = TRUE;
9337 #endif
9338 ip += 2;
9339 break;
9340 case CEE_MONO_CLASSCONST:
9341 CHECK_STACK_OVF (1);
9342 CHECK_OPSIZE (6);
9343 token = read32 (ip + 2);
9344 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9345 *sp++ = ins;
9346 ip += 6;
9347 inline_costs += 10 * num_calls++;
9348 break;
9349 case CEE_MONO_NOT_TAKEN:
9350 bblock->out_of_line = TRUE;
9351 ip += 2;
9352 break;
9353 case CEE_MONO_TLS:
9354 CHECK_STACK_OVF (1);
9355 CHECK_OPSIZE (6);
9356 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9357 ins->dreg = alloc_preg (cfg);
9358 ins->inst_offset = (gint32)read32 (ip + 2);
9359 ins->type = STACK_PTR;
9360 MONO_ADD_INS (bblock, ins);
9361 *sp++ = ins;
9362 ip += 6;
9363 break;
9364 case CEE_MONO_DYN_CALL: {
9365 MonoCallInst *call;
9367 /* It would be easier to call a trampoline, but that would put an
9368 * extra frame on the stack, confusing exception handling. So
9369 * implement it inline using an opcode for now.
9372 if (!cfg->dyn_call_var) {
9373 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9374 /* prevent it from being register allocated */
9375 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9378 /* Has to use a call inst since it local regalloc expects it */
9379 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9380 ins = (MonoInst*)call;
9381 sp -= 2;
9382 ins->sreg1 = sp [0]->dreg;
9383 ins->sreg2 = sp [1]->dreg;
9384 MONO_ADD_INS (bblock, ins);
9386 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9387 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9388 #endif
9390 ip += 2;
9391 inline_costs += 10 * num_calls++;
9393 break;
9395 default:
9396 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9397 break;
9399 break;
9402 case CEE_PREFIX1: {
9403 CHECK_OPSIZE (2);
9404 switch (ip [1]) {
9405 case CEE_ARGLIST: {
9406 /* somewhat similar to LDTOKEN */
9407 MonoInst *addr, *vtvar;
9408 CHECK_STACK_OVF (1);
9409 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9411 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9412 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9414 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9415 ins->type = STACK_VTYPE;
9416 ins->klass = mono_defaults.argumenthandle_class;
9417 *sp++ = ins;
9418 ip += 2;
9419 break;
9421 case CEE_CEQ:
9422 case CEE_CGT:
9423 case CEE_CGT_UN:
9424 case CEE_CLT:
9425 case CEE_CLT_UN: {
9426 MonoInst *cmp;
9427 CHECK_STACK (2);
9429 * The following transforms:
9430 * CEE_CEQ into OP_CEQ
9431 * CEE_CGT into OP_CGT
9432 * CEE_CGT_UN into OP_CGT_UN
9433 * CEE_CLT into OP_CLT
9434 * CEE_CLT_UN into OP_CLT_UN
9436 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9438 MONO_INST_NEW (cfg, ins, cmp->opcode);
9439 sp -= 2;
9440 cmp->sreg1 = sp [0]->dreg;
9441 cmp->sreg2 = sp [1]->dreg;
9442 type_from_op (cmp, sp [0], sp [1]);
9443 CHECK_TYPE (cmp);
9444 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9445 cmp->opcode = OP_LCOMPARE;
9446 else if (sp [0]->type == STACK_R8)
9447 cmp->opcode = OP_FCOMPARE;
9448 else
9449 cmp->opcode = OP_ICOMPARE;
9450 MONO_ADD_INS (bblock, cmp);
9451 ins->type = STACK_I4;
9452 ins->dreg = alloc_dreg (cfg, ins->type);
9453 type_from_op (ins, sp [0], sp [1]);
9455 if (cmp->opcode == OP_FCOMPARE) {
9457 * The backends expect the fceq opcodes to do the
9458 * comparison too.
9460 cmp->opcode = OP_NOP;
9461 ins->sreg1 = cmp->sreg1;
9462 ins->sreg2 = cmp->sreg2;
9464 MONO_ADD_INS (bblock, ins);
9465 *sp++ = ins;
9466 ip += 2;
9467 break;
9469 case CEE_LDFTN: {
9470 MonoInst *argconst;
9471 MonoMethod *cil_method;
9472 gboolean needs_static_rgctx_invoke;
9474 CHECK_STACK_OVF (1);
9475 CHECK_OPSIZE (6);
9476 n = read32 (ip + 2);
9477 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9478 if (!cmethod)
9479 goto load_error;
9480 mono_class_init (cmethod->klass);
9482 mono_save_token_info (cfg, image, n, cmethod);
9484 if (cfg->generic_sharing_context)
9485 context_used = mono_method_check_context_used (cmethod);
9487 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9489 cil_method = cmethod;
9490 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9491 METHOD_ACCESS_FAILURE;
9493 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9494 if (check_linkdemand (cfg, method, cmethod))
9495 INLINE_FAILURE;
9496 CHECK_CFG_EXCEPTION;
9497 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9498 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9502 * Optimize the common case of ldftn+delegate creation
9504 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9505 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9506 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9507 MonoMethod *invoke;
9508 int invoke_context_used = 0;
9510 invoke = mono_get_delegate_invoke (ctor_method->klass);
9511 if (!invoke || !mono_method_signature (invoke))
9512 goto load_error;
9514 if (cfg->generic_sharing_context)
9515 invoke_context_used = mono_method_check_context_used (invoke);
9517 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9518 /* FIXME: SGEN support */
9519 if (invoke_context_used == 0) {
9520 MonoInst *target_ins;
9522 ip += 6;
9523 if (cfg->verbose_level > 3)
9524 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9525 target_ins = sp [-1];
9526 sp --;
9527 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9528 CHECK_CFG_EXCEPTION;
9529 ip += 5;
9530 sp ++;
9531 break;
9533 #endif
9537 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9538 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9539 *sp++ = ins;
9541 ip += 6;
9542 inline_costs += 10 * num_calls++;
9543 break;
9545 case CEE_LDVIRTFTN: {
9546 MonoInst *args [2];
9548 CHECK_STACK (1);
9549 CHECK_OPSIZE (6);
9550 n = read32 (ip + 2);
9551 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9552 if (!cmethod)
9553 goto load_error;
9554 mono_class_init (cmethod->klass);
9556 if (cfg->generic_sharing_context)
9557 context_used = mono_method_check_context_used (cmethod);
9559 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9560 if (check_linkdemand (cfg, method, cmethod))
9561 INLINE_FAILURE;
9562 CHECK_CFG_EXCEPTION;
9563 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9564 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9567 --sp;
9568 args [0] = *sp;
9570 args [1] = emit_get_rgctx_method (cfg, context_used,
9571 cmethod, MONO_RGCTX_INFO_METHOD);
9573 if (context_used)
9574 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9575 else
9576 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9578 ip += 6;
9579 inline_costs += 10 * num_calls++;
9580 break;
9582 case CEE_LDARG:
9583 CHECK_STACK_OVF (1);
9584 CHECK_OPSIZE (4);
9585 n = read16 (ip + 2);
9586 CHECK_ARG (n);
9587 EMIT_NEW_ARGLOAD (cfg, ins, n);
9588 *sp++ = ins;
9589 ip += 4;
9590 break;
9591 case CEE_LDARGA:
9592 CHECK_STACK_OVF (1);
9593 CHECK_OPSIZE (4);
9594 n = read16 (ip + 2);
9595 CHECK_ARG (n);
9596 NEW_ARGLOADA (cfg, ins, n);
9597 MONO_ADD_INS (cfg->cbb, ins);
9598 *sp++ = ins;
9599 ip += 4;
9600 break;
9601 case CEE_STARG:
9602 CHECK_STACK (1);
9603 --sp;
9604 CHECK_OPSIZE (4);
9605 n = read16 (ip + 2);
9606 CHECK_ARG (n);
9607 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9608 UNVERIFIED;
9609 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9610 ip += 4;
9611 break;
9612 case CEE_LDLOC:
9613 CHECK_STACK_OVF (1);
9614 CHECK_OPSIZE (4);
9615 n = read16 (ip + 2);
9616 CHECK_LOCAL (n);
9617 EMIT_NEW_LOCLOAD (cfg, ins, n);
9618 *sp++ = ins;
9619 ip += 4;
9620 break;
9621 case CEE_LDLOCA: {
9622 unsigned char *tmp_ip;
9623 CHECK_STACK_OVF (1);
9624 CHECK_OPSIZE (4);
9625 n = read16 (ip + 2);
9626 CHECK_LOCAL (n);
9628 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9629 ip = tmp_ip;
9630 inline_costs += 1;
9631 break;
9634 EMIT_NEW_LOCLOADA (cfg, ins, n);
9635 *sp++ = ins;
9636 ip += 4;
9637 break;
9639 case CEE_STLOC:
9640 CHECK_STACK (1);
9641 --sp;
9642 CHECK_OPSIZE (4);
9643 n = read16 (ip + 2);
9644 CHECK_LOCAL (n);
9645 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9646 UNVERIFIED;
9647 emit_stloc_ir (cfg, sp, header, n);
9648 ip += 4;
9649 inline_costs += 1;
9650 break;
9651 case CEE_LOCALLOC:
9652 CHECK_STACK (1);
9653 --sp;
9654 if (sp != stack_start)
9655 UNVERIFIED;
9656 if (cfg->method != method)
9658 * Inlining this into a loop in a parent could lead to
9659 * stack overflows which is different behavior than the
9660 * non-inlined case, thus disable inlining in this case.
9662 goto inline_failure;
9664 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9665 ins->dreg = alloc_preg (cfg);
9666 ins->sreg1 = sp [0]->dreg;
9667 ins->type = STACK_PTR;
9668 MONO_ADD_INS (cfg->cbb, ins);
9670 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9671 if (init_locals)
9672 ins->flags |= MONO_INST_INIT;
9674 *sp++ = ins;
9675 ip += 2;
9676 break;
9677 case CEE_ENDFILTER: {
9678 MonoExceptionClause *clause, *nearest;
9679 int cc, nearest_num;
9681 CHECK_STACK (1);
9682 --sp;
9683 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9684 UNVERIFIED;
9685 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9686 ins->sreg1 = (*sp)->dreg;
9687 MONO_ADD_INS (bblock, ins);
9688 start_new_bblock = 1;
9689 ip += 2;
9691 nearest = NULL;
9692 nearest_num = 0;
9693 for (cc = 0; cc < header->num_clauses; ++cc) {
9694 clause = &header->clauses [cc];
9695 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9696 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9697 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9698 nearest = clause;
9699 nearest_num = cc;
9702 g_assert (nearest);
9703 if ((ip - header->code) != nearest->handler_offset)
9704 UNVERIFIED;
9706 break;
9708 case CEE_UNALIGNED_:
9709 ins_flag |= MONO_INST_UNALIGNED;
9710 /* FIXME: record alignment? we can assume 1 for now */
9711 CHECK_OPSIZE (3);
9712 ip += 3;
9713 break;
9714 case CEE_VOLATILE_:
9715 ins_flag |= MONO_INST_VOLATILE;
9716 ip += 2;
9717 break;
9718 case CEE_TAIL_:
9719 ins_flag |= MONO_INST_TAILCALL;
9720 cfg->flags |= MONO_CFG_HAS_TAIL;
9721 /* Can't inline tail calls at this time */
9722 inline_costs += 100000;
9723 ip += 2;
9724 break;
9725 case CEE_INITOBJ:
9726 CHECK_STACK (1);
9727 --sp;
9728 CHECK_OPSIZE (6);
9729 token = read32 (ip + 2);
9730 klass = mini_get_class (method, token, generic_context);
9731 CHECK_TYPELOAD (klass);
9732 if (generic_class_is_reference_type (cfg, klass))
9733 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9734 else
9735 mini_emit_initobj (cfg, *sp, NULL, klass);
9736 ip += 6;
9737 inline_costs += 1;
9738 break;
9739 case CEE_CONSTRAINED_:
9740 CHECK_OPSIZE (6);
9741 token = read32 (ip + 2);
9742 if (method->wrapper_type != MONO_WRAPPER_NONE)
9743 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9744 else
9745 constrained_call = mono_class_get_full (image, token, generic_context);
9746 CHECK_TYPELOAD (constrained_call);
9747 ip += 6;
9748 break;
9749 case CEE_CPBLK:
9750 case CEE_INITBLK: {
9751 MonoInst *iargs [3];
9752 CHECK_STACK (3);
9753 sp -= 3;
9755 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9756 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9757 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9758 /* emit_memset only works when val == 0 */
9759 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9760 } else {
9761 iargs [0] = sp [0];
9762 iargs [1] = sp [1];
9763 iargs [2] = sp [2];
9764 if (ip [1] == CEE_CPBLK) {
9765 MonoMethod *memcpy_method = get_memcpy_method ();
9766 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9767 } else {
9768 MonoMethod *memset_method = get_memset_method ();
9769 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9772 ip += 2;
9773 inline_costs += 1;
9774 break;
9776 case CEE_NO_:
9777 CHECK_OPSIZE (3);
9778 if (ip [2] & 0x1)
9779 ins_flag |= MONO_INST_NOTYPECHECK;
9780 if (ip [2] & 0x2)
9781 ins_flag |= MONO_INST_NORANGECHECK;
9782 /* we ignore the no-nullcheck for now since we
9783 * really do it explicitly only when doing callvirt->call
9785 ip += 3;
9786 break;
9787 case CEE_RETHROW: {
9788 MonoInst *load;
9789 int handler_offset = -1;
9791 for (i = 0; i < header->num_clauses; ++i) {
9792 MonoExceptionClause *clause = &header->clauses [i];
9793 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9794 handler_offset = clause->handler_offset;
9795 break;
9799 bblock->flags |= BB_EXCEPTION_UNSAFE;
9801 g_assert (handler_offset != -1);
9803 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9804 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9805 ins->sreg1 = load->dreg;
9806 MONO_ADD_INS (bblock, ins);
9808 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9809 MONO_ADD_INS (bblock, ins);
9811 sp = stack_start;
9812 link_bblock (cfg, bblock, end_bblock);
9813 start_new_bblock = 1;
9814 ip += 2;
9815 break;
9817 case CEE_SIZEOF: {
9818 guint32 align;
9819 int ialign;
9821 CHECK_STACK_OVF (1);
9822 CHECK_OPSIZE (6);
9823 token = read32 (ip + 2);
9824 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9825 MonoType *type = mono_type_create_from_typespec (image, token);
9826 token = mono_type_size (type, &ialign);
9827 } else {
9828 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9829 CHECK_TYPELOAD (klass);
9830 mono_class_init (klass);
9831 token = mono_class_value_size (klass, &align);
9833 EMIT_NEW_ICONST (cfg, ins, token);
9834 *sp++= ins;
9835 ip += 6;
9836 break;
9838 case CEE_REFANYTYPE: {
9839 MonoInst *src_var, *src;
9841 CHECK_STACK (1);
9842 --sp;
9844 // FIXME:
9845 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9846 if (!src_var)
9847 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9848 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9849 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9850 *sp++ = ins;
9851 ip += 2;
9852 break;
9854 case CEE_READONLY_:
9855 readonly = TRUE;
9856 ip += 2;
9857 break;
9859 case CEE_UNUSED56:
9860 case CEE_UNUSED57:
9861 case CEE_UNUSED70:
9862 case CEE_UNUSED:
9863 case CEE_UNUSED99:
9864 UNVERIFIED;
9866 default:
9867 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9868 UNVERIFIED;
9870 break;
9872 case CEE_UNUSED58:
9873 case CEE_UNUSED1:
9874 UNVERIFIED;
9876 default:
9877 g_warning ("opcode 0x%02x not handled", *ip);
9878 UNVERIFIED;
9881 if (start_new_bblock != 1)
9882 UNVERIFIED;
9884 bblock->cil_length = ip - bblock->cil_code;
9885 bblock->next_bb = end_bblock;
9887 if (cfg->method == method && cfg->domainvar) {
9888 MonoInst *store;
9889 MonoInst *get_domain;
9891 cfg->cbb = init_localsbb;
9893 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9894 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9896 else {
9897 get_domain->dreg = alloc_preg (cfg);
9898 MONO_ADD_INS (cfg->cbb, get_domain);
9900 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9901 MONO_ADD_INS (cfg->cbb, store);
9904 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9905 if (cfg->compile_aot)
9906 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9907 mono_get_got_var (cfg);
9908 #endif
9910 if (cfg->method == method && cfg->got_var)
9911 mono_emit_load_got_addr (cfg);
9913 if (init_locals) {
9914 MonoInst *store;
9916 cfg->cbb = init_localsbb;
9917 cfg->ip = NULL;
9918 for (i = 0; i < header->num_locals; ++i) {
9919 MonoType *ptype = header->locals [i];
9920 int t = ptype->type;
9921 dreg = cfg->locals [i]->dreg;
9923 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9924 t = mono_class_enum_basetype (ptype->data.klass)->type;
9925 if (ptype->byref) {
9926 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9927 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9928 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9929 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9930 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9931 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9932 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9933 ins->type = STACK_R8;
9934 ins->inst_p0 = (void*)&r8_0;
9935 ins->dreg = alloc_dreg (cfg, STACK_R8);
9936 MONO_ADD_INS (init_localsbb, ins);
9937 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9938 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9939 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9940 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9941 } else {
9942 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9947 if (cfg->init_ref_vars && cfg->method == method) {
9948 /* Emit initialization for ref vars */
9949 // FIXME: Avoid duplication initialization for IL locals.
9950 for (i = 0; i < cfg->num_varinfo; ++i) {
9951 MonoInst *ins = cfg->varinfo [i];
9953 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9954 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9958 /* Add a sequence point for method entry/exit events */
9959 if (seq_points) {
9960 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9961 MONO_ADD_INS (init_localsbb, ins);
9962 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9963 MONO_ADD_INS (cfg->bb_exit, ins);
9966 cfg->ip = NULL;
9968 if (cfg->method == method) {
9969 MonoBasicBlock *bb;
9970 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9971 bb->region = mono_find_block_region (cfg, bb->real_offset);
9972 if (cfg->spvars)
9973 mono_create_spvar_for_region (cfg, bb->region);
9974 if (cfg->verbose_level > 2)
9975 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9979 g_slist_free (class_inits);
9980 dont_inline = g_list_remove (dont_inline, method);
9982 if (inline_costs < 0) {
9983 char *mname;
9985 /* Method is too large */
9986 mname = mono_method_full_name (method, TRUE);
9987 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9988 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9989 g_free (mname);
9990 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9991 mono_basic_block_free (original_bb);
9992 return -1;
9995 if ((cfg->verbose_level > 2) && (cfg->method == method))
9996 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9998 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9999 mono_basic_block_free (original_bb);
10000 return inline_costs;
10002 exception_exit:
10003 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10004 goto cleanup;
10006 inline_failure:
10007 goto cleanup;
10009 load_error:
10010 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10011 goto cleanup;
10013 unverified:
10014 set_exception_type_from_invalid_il (cfg, method, ip);
10015 goto cleanup;
10017 cleanup:
10018 g_slist_free (class_inits);
10019 mono_basic_block_free (original_bb);
10020 dont_inline = g_list_remove (dont_inline, method);
10021 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10022 return -1;
10025 static int
10026 store_membase_reg_to_store_membase_imm (int opcode)
10028 switch (opcode) {
10029 case OP_STORE_MEMBASE_REG:
10030 return OP_STORE_MEMBASE_IMM;
10031 case OP_STOREI1_MEMBASE_REG:
10032 return OP_STOREI1_MEMBASE_IMM;
10033 case OP_STOREI2_MEMBASE_REG:
10034 return OP_STOREI2_MEMBASE_IMM;
10035 case OP_STOREI4_MEMBASE_REG:
10036 return OP_STOREI4_MEMBASE_IMM;
10037 case OP_STOREI8_MEMBASE_REG:
10038 return OP_STOREI8_MEMBASE_IMM;
10039 default:
10040 g_assert_not_reached ();
10043 return -1;
10046 #endif /* DISABLE_JIT */
10049 mono_op_to_op_imm (int opcode)
10051 switch (opcode) {
10052 case OP_IADD:
10053 return OP_IADD_IMM;
10054 case OP_ISUB:
10055 return OP_ISUB_IMM;
10056 case OP_IDIV:
10057 return OP_IDIV_IMM;
10058 case OP_IDIV_UN:
10059 return OP_IDIV_UN_IMM;
10060 case OP_IREM:
10061 return OP_IREM_IMM;
10062 case OP_IREM_UN:
10063 return OP_IREM_UN_IMM;
10064 case OP_IMUL:
10065 return OP_IMUL_IMM;
10066 case OP_IAND:
10067 return OP_IAND_IMM;
10068 case OP_IOR:
10069 return OP_IOR_IMM;
10070 case OP_IXOR:
10071 return OP_IXOR_IMM;
10072 case OP_ISHL:
10073 return OP_ISHL_IMM;
10074 case OP_ISHR:
10075 return OP_ISHR_IMM;
10076 case OP_ISHR_UN:
10077 return OP_ISHR_UN_IMM;
10079 case OP_LADD:
10080 return OP_LADD_IMM;
10081 case OP_LSUB:
10082 return OP_LSUB_IMM;
10083 case OP_LAND:
10084 return OP_LAND_IMM;
10085 case OP_LOR:
10086 return OP_LOR_IMM;
10087 case OP_LXOR:
10088 return OP_LXOR_IMM;
10089 case OP_LSHL:
10090 return OP_LSHL_IMM;
10091 case OP_LSHR:
10092 return OP_LSHR_IMM;
10093 case OP_LSHR_UN:
10094 return OP_LSHR_UN_IMM;
10096 case OP_COMPARE:
10097 return OP_COMPARE_IMM;
10098 case OP_ICOMPARE:
10099 return OP_ICOMPARE_IMM;
10100 case OP_LCOMPARE:
10101 return OP_LCOMPARE_IMM;
10103 case OP_STORE_MEMBASE_REG:
10104 return OP_STORE_MEMBASE_IMM;
10105 case OP_STOREI1_MEMBASE_REG:
10106 return OP_STOREI1_MEMBASE_IMM;
10107 case OP_STOREI2_MEMBASE_REG:
10108 return OP_STOREI2_MEMBASE_IMM;
10109 case OP_STOREI4_MEMBASE_REG:
10110 return OP_STOREI4_MEMBASE_IMM;
10112 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10113 case OP_X86_PUSH:
10114 return OP_X86_PUSH_IMM;
10115 case OP_X86_COMPARE_MEMBASE_REG:
10116 return OP_X86_COMPARE_MEMBASE_IMM;
10117 #endif
10118 #if defined(TARGET_AMD64)
10119 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10120 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10121 #endif
10122 case OP_VOIDCALL_REG:
10123 return OP_VOIDCALL;
10124 case OP_CALL_REG:
10125 return OP_CALL;
10126 case OP_LCALL_REG:
10127 return OP_LCALL;
10128 case OP_FCALL_REG:
10129 return OP_FCALL;
10130 case OP_LOCALLOC:
10131 return OP_LOCALLOC_IMM;
10134 return -1;
10137 static int
10138 ldind_to_load_membase (int opcode)
10140 switch (opcode) {
10141 case CEE_LDIND_I1:
10142 return OP_LOADI1_MEMBASE;
10143 case CEE_LDIND_U1:
10144 return OP_LOADU1_MEMBASE;
10145 case CEE_LDIND_I2:
10146 return OP_LOADI2_MEMBASE;
10147 case CEE_LDIND_U2:
10148 return OP_LOADU2_MEMBASE;
10149 case CEE_LDIND_I4:
10150 return OP_LOADI4_MEMBASE;
10151 case CEE_LDIND_U4:
10152 return OP_LOADU4_MEMBASE;
10153 case CEE_LDIND_I:
10154 return OP_LOAD_MEMBASE;
10155 case CEE_LDIND_REF:
10156 return OP_LOAD_MEMBASE;
10157 case CEE_LDIND_I8:
10158 return OP_LOADI8_MEMBASE;
10159 case CEE_LDIND_R4:
10160 return OP_LOADR4_MEMBASE;
10161 case CEE_LDIND_R8:
10162 return OP_LOADR8_MEMBASE;
10163 default:
10164 g_assert_not_reached ();
10167 return -1;
10170 static int
10171 stind_to_store_membase (int opcode)
10173 switch (opcode) {
10174 case CEE_STIND_I1:
10175 return OP_STOREI1_MEMBASE_REG;
10176 case CEE_STIND_I2:
10177 return OP_STOREI2_MEMBASE_REG;
10178 case CEE_STIND_I4:
10179 return OP_STOREI4_MEMBASE_REG;
10180 case CEE_STIND_I:
10181 case CEE_STIND_REF:
10182 return OP_STORE_MEMBASE_REG;
10183 case CEE_STIND_I8:
10184 return OP_STOREI8_MEMBASE_REG;
10185 case CEE_STIND_R4:
10186 return OP_STORER4_MEMBASE_REG;
10187 case CEE_STIND_R8:
10188 return OP_STORER8_MEMBASE_REG;
10189 default:
10190 g_assert_not_reached ();
10193 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_..._MEM (absolute address) variant of a load-membase
 * opcode, or -1 if the current target has no such opcodes.
 */
// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
static inline int
mono_load_membase_to_load_mem (int opcode)
{
	int res = -1;

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		res = OP_LOAD_MEM;
		break;
	case OP_LOADU1_MEMBASE:
		res = OP_LOADU1_MEM;
		break;
	case OP_LOADU2_MEMBASE:
		res = OP_LOADU2_MEM;
		break;
	case OP_LOADI4_MEMBASE:
		res = OP_LOADI4_MEM;
		break;
	case OP_LOADU4_MEMBASE:
		res = OP_LOADU4_MEM;
		break;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		res = OP_LOADI8_MEM;
		break;
#endif
	default:
		break;
	}
#endif

	return res;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return the arch specific opcode which performs OPCODE and writes its
 * result directly to a memory location, so a following store done by
 * STORE_OPCODE can be folded away, or -1 if no such opcode exists.
 * Only implemented for x86/amd64.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
	/* Only pointer sized / 32 bit stores can be folded into the ALU op */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move whose result is immediately stored back becomes a no-op */
		return OP_NOP;
#endif

#if defined(TARGET_AMD64)
	/* Both 32 and 64 bit stores can be folded on amd64 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* A move whose result is immediately stored back becomes a no-op */
		return OP_NOP;
#endif

	return -1;
/*
 * op_to_op_store_membase:
 *
 *   Return the opcode which performs OPCODE with its result written directly
 * to memory, fusing it with a following store done by STORE_OPCODE, or -1 if
 * the combination cannot be fused.
 *
 * The original code let OP_ICEQ fall through into the OP_CNE case; that was
 * benign only because both guards tested the same store opcode, but any
 * divergence would have made an ICEQ emit a SETNE. Use explicit breaks,
 * matching the style of op_to_op_src1_membase ().
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		/* setcc only writes a single byte, so require a 1 byte store */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the opcode which computes OPCODE with its first source operand
 * read directly from memory, fusing a preceding load done by LOAD_OPCODE,
 * or -1 if the combination cannot be fused. Only implemented for x86/amd64.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer sized / 32 bit loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* Pushes are pointer sized, so require an 8 byte load */
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
#endif

	return -1;
/*
 * op_to_op_src2_membase:
 *
 *   Return the opcode which computes OPCODE with its second source operand
 * read directly from memory, fusing a preceding load done by LOAD_OPCODE,
 * or -1 if the combination cannot be fused. Only implemented for x86/amd64.
 *
 * The amd64 cases below were missing 'break' statements, so a 32 bit opcode
 * whose load width didn't match would fall through and could be fused into
 * one of the 64 bit _REG_MEMBASE opcodes (e.g. OP_IADD + OP_LOADI8_MEMBASE
 * reached the OP_LADD mapping). Add explicit breaks, matching
 * op_to_op_src1_membase (); unmatched width combinations now return -1,
 * which simply keeps the separate load instruction.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only pointer sized / 32 bit loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	default:
		break;
	}
#endif

#ifdef TARGET_AMD64
	/* 32 bit opcodes require a 4 byte load, 64 bit ones an 8 byte load */
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		break;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		break;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		break;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		break;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		break;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		break;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		break;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		break;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		break;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * variant is emulated on this target, so the reg-reg form has to be kept.
 */
static inline int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division is emulated on some targets */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
10490 #ifndef DISABLE_JIT
10493 * mono_handle_global_vregs:
10495 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10496 * for them.
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them. Afterwards, variables used in only one bblock are converted back
 * into local vregs, and the varinfo/vars tables are compacted.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/*
	 * vreg_to_bb [vreg] holds block_num + 1 of the single bblock which uses
	 * the vreg, or -1 once the vreg is seen in more than one bblock.
	 * NOTE(review): 'sizeof (gint32*) * cfg->next_vreg + 1' parses as
	 * ((sizeof (gint32*)) * cfg->next_vreg) + 1 and uses the pointer size;
	 * presumably 'sizeof (gint32) * (cfg->next_vreg + 1)' was intended.
	 * The current expression allocates at least as much, so it is benign,
	 * but worth confirming.
	 */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* Only lowered (non-CIL) opcodes are expected at this point */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Visit dreg, sreg1, sreg2 and sreg3 in turn */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hardware registers are handled by the regalloc */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						/* Create a variable of the matching type for the vreg */
						switch (regtype) {
						case 'i':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
				/*
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						/* NOTE(review): the second clause duplicates the first
						 * (SRC1/sreg1 twice); presumably SRC2/sreg2 was intended.
						 * This code is disabled, so it has no effect. */
						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;

					if (spilled)
						break;
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				/* Mark the variable dead; the vreg stays purely local */
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			break;

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
					var1->inst_c0 = pos;
#endif
			pos ++;

	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
/*
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 *lvregs;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */

	/* Map an ins spec character ('i'/'l'/'f'/'x') to the stack type used
	 * when allocating a new lvreg for it */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				/* R8 vars only get component vars under soft float */
				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Point the component vars (vreg + 1/+ 2) at the low and
				 * high words of the 64 bit stack slot */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			default:
				break;
#endif

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: lvregs are only valid within one bblock */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					/* NOTE(review): var->inst_left is vtaddr, so the two
					 * spellings below are equivalent */
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						/* Load the vtype address from its stack slot */
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					/* Compute the address as basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* Only lowered (non-CIL) opcodes are allowed at this point */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				/* Build a spec which reflects the swapped registers */
				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hreg: just rewrite the dreg */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Under soft float, R8 values live in an int reg pair */
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* Store the two 32 bit halves separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							/* Constant def: store the immediate directly */
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* Plain move: turn it into a store of the source */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							/* The ins is now a store, so swap dreg/sreg2 as the
							 * MONO_IS_STORE_MEMBASE case above does */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

				/* Record the first instruction defining this vreg */
				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* In a hreg: rewrite and record the use */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;
						continue;

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							/* The move becomes a load directly into its dreg */
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* Load the two 32 bit halves separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					/* Record the last instruction using this vreg */
					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
			mono_inst_set_src_registers (ins, sregs);

			/* Deferred from the DREG part: the dreg lvreg is only valid now */
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: a call clobbers the lvregs */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);

			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
11228 * FIXME:
11229 * - use 'iadd' instead of 'int_add'
11230 * - handling ovf opcodes: decompose in method_to_ir.
11231 * - unify iregs/fregs
11232 * -> partly done, the missing parts are:
11233 * - a more complete unification would involve unifying the hregs as well, so
11234 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11235 * would no longer map to the machine hregs, so the code generators would need to
11236 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11237 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11238 * fp/non-fp branches speeds it up by about 15%.
11239 * - use sext/zext opcodes instead of shifts
11240 * - add OP_ICALL
11241 * - get rid of TEMPLOADs if possible and use vregs instead
11242 * - clean up usage of OP_P/OP_ opcodes
11243 * - cleanup usage of DUMMY_USE
11244 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11245 * stack
11246 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11247 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11248 * - make sure handle_stack_args () is called before the branch is emitted
11249 * - when the new IR is done, get rid of all unused stuff
11250 * - COMPARE/BEQ as separate instructions or unify them ?
11251 * - keeping them separate allows specialized compare instructions like
11252 * compare_imm, compare_membase
11253 * - most back ends unify fp compare+branch, fp compare+ceq
11254 * - integrate mono_save_args into inline_method
11255  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11256 * - handle long shift opts on 32 bit platforms somehow: they require
11257 * 3 sregs (2 for arg1 and 1 for arg2)
11258 * - make byref a 'normal' type.
11259 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11260 * variable if needed.
11261 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11262 * like inline_method.
11263 * - remove inlining restrictions
11264 * - fix LNEG and enable cfold of INEG
11265 * - generalize x86 optimizations like ldelema as a peephole optimization
11266 * - add store_mem_imm for amd64
11267 * - optimize the loading of the interruption flag in the managed->native wrappers
11268 * - avoid special handling of OP_NOP in passes
11269 * - move code inserting instructions into one function/macro.
11270 * - try a coalescing phase after liveness analysis
11271 * - add float -> vreg conversion + local optimizations on !x86
11272 * - figure out how to handle decomposed branches during optimizations, ie.
11273 * compare+branch, op_jump_table+op_br etc.
11274 * - promote RuntimeXHandles to vregs
11275 * - vtype cleanups:
11276 * - add a NEW_VARLOADA_VREG macro
11277 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11278 * accessing vtype fields.
11279 * - get rid of I8CONST on 64 bit platforms
11280 * - dealing with the increase in code size due to branches created during opcode
11281 * decomposition:
11282 * - use extended basic blocks
11283 * - all parts of the JIT
11284 * - handle_global_vregs () && local regalloc
11285 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11286 * - sources of increase in code size:
11287 * - vtypes
11288 * - long compares
11289 * - isinst and castclass
11290 * - lvregs not allocated to global registers even if used multiple times
11291 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11292 * meaningful.
11293 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11294 * - add all micro optimizations from the old JIT
11295 * - put tree optimizations into the deadce pass
11296 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11297 * specific function.
11298 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11299 * fcompare + branchCC.
11300 * - create a helper function for allocating a stack slot, taking into account
11301 * MONO_CFG_HAS_SPILLUP.
11302 * - merge r68207.
11303 * - merge the ia64 switch changes.
11304 * - optimize mono_regstate2_alloc_int/float.
11305 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11306 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11307 * parts of the tree could be separated by other instructions, killing the tree
11308 * arguments, or stores killing loads etc. Also, should we fold loads into other
11309 * instructions if the result of the load is used multiple times ?
11310 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11311 * - LAST MERGE: 108395.
11312 * - when returning vtypes in registers, generate IR and append it to the end of the
11313 * last bb instead of doing it in the epilog.
11314 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11319 NOTES
11320 -----
11322 - When to decompose opcodes:
11323 - earlier: this makes some optimizations hard to implement, since the low level IR
11324     no longer contains the necessary information. But it is easier to do.
11325 - later: harder to implement, enables more optimizations.
11326 - Branches inside bblocks:
11327 - created when decomposing complex opcodes.
11328 - branches to another bblock: harmless, but not tracked by the branch
11329 optimizations, so need to branch to a label at the start of the bblock.
11330 - branches to inside the same bblock: very problematic, trips up the local
11331     reg allocator. Can be fixed by splitting the current bblock, but that is a
11332 complex operation, since some local vregs can become global vregs etc.
11333 - Local/global vregs:
11334 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11335 local register allocator.
11336 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11337 structure, created by mono_create_var (). Assigned to hregs or the stack by
11338 the global register allocator.
11339 - When to do optimizations like alu->alu_imm:
11340 - earlier -> saves work later on since the IR will be smaller/simpler
11341 - later -> can work on more instructions
11342 - Handling of valuetypes:
11343 - When a vtype is pushed on the stack, a new temporary is created, an
11344 instruction computing its address (LDADDR) is emitted and pushed on
11345 the stack. Need to optimize cases when the vtype is used immediately as in
11346 argument passing, stloc etc.
11347 - Instead of the to_end stuff in the old JIT, simply call the function handling
11348 the values on the stack before emitting the last instruction of the bb.
11351 #endif /* DISABLE_JIT */