/*
 * Source snapshot: mono.git / mono/mini/method-to-ir.c
 * 2010-02-22  Rodrigo Kumpera  <rkumpera@novell.com>
 * blob c74c2442738208d323d46f42ca84b17d7754ee4c
 */
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
55 #include "mini.h"
56 #include "trace.h"
58 #include "ir-emit.h"
60 #include "jit-icalls.h"
61 #include "jit.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
68 goto inline_failure;\
69 } while (0)
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
72 goto exception_exit;\
73 } while (0)
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
82 } while (0)
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
91 } while (0)
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
98 } \
99 } while (0)
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
125 #ifdef MINI_OP
126 #undef MINI_OP
127 #endif
128 #ifdef MINI_OP3
129 #undef MINI_OP3
130 #endif
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
133 #define NONE ' '
134 #define IREG 'i'
135 #define FREG 'f'
136 #define VREG 'v'
137 #define XREG 'x'
138 #if SIZEOF_REGISTER == 8
139 #define LREG IREG
140 #else
141 #define LREG 'l'
142 #endif
143 /* keep in sync with the enum in mini.h */
144 const char
145 ins_info[] = {
146 #include "mini-ops.h"
148 #undef MINI_OP
149 #undef MINI_OP3
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
160 #undef MINI_OP
161 #undef MINI_OP3
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
165 (vi)->reg = -1; \
166 (vi)->idx = (id); \
167 } while (0)
169 void
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
177 guint32
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
183 guint32
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
189 guint32
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
195 guint32
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
201 guint
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
204 if (type->byref)
205 return OP_MOVE;
207 handle_enum:
208 switch (type->type) {
209 case MONO_TYPE_I1:
210 case MONO_TYPE_U1:
211 case MONO_TYPE_BOOLEAN:
212 return OP_MOVE;
213 case MONO_TYPE_I2:
214 case MONO_TYPE_U2:
215 case MONO_TYPE_CHAR:
216 return OP_MOVE;
217 case MONO_TYPE_I4:
218 case MONO_TYPE_U4:
219 return OP_MOVE;
220 case MONO_TYPE_I:
221 case MONO_TYPE_U:
222 case MONO_TYPE_PTR:
223 case MONO_TYPE_FNPTR:
224 return OP_MOVE;
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
230 return OP_MOVE;
231 case MONO_TYPE_I8:
232 case MONO_TYPE_U8:
233 #if SIZEOF_REGISTER == 8
234 return OP_MOVE;
235 #else
236 return OP_LMOVE;
237 #endif
238 case MONO_TYPE_R4:
239 return OP_FMOVE;
240 case MONO_TYPE_R8:
241 return OP_FMOVE;
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
245 goto handle_enum;
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 return OP_XMOVE;
249 return OP_VMOVE;
250 case MONO_TYPE_TYPEDBYREF:
251 return OP_VMOVE;
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
254 goto handle_enum;
255 case MONO_TYPE_VAR:
256 case MONO_TYPE_MVAR:
257 g_assert (cfg->generic_sharing_context);
258 return OP_MOVE;
259 default:
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
262 return -1;
265 void
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
268 int i;
269 MonoInst *tree;
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
274 printf (", OUT: ");
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
277 printf (" ]\n");
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
284 * file.
286 #ifndef DISABLE_JIT
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if (!(tblock)) { \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
298 } while (0)
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
309 } while (0)
310 #endif
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 MonoInst *widen; \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
321 } while (0)
322 #else
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #endif
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
328 sp -= 2; \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
332 CHECK_TYPE (ins); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 } while (0)
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
342 sp--; \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
345 CHECK_TYPE (ins); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
349 } while (0)
351 #define ADD_BINCOND(next_block) do { \
352 MonoInst *cmp; \
353 sp -= 2; \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
358 CHECK_TYPE (cmp); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
368 } else { \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
380 } while (0)
382 /* *
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
389 static void
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
393 int i, found;
395 #if 0
396 if (from->cil_code) {
397 if (to->cil_code)
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
399 else
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 } else {
402 if (to->cil_code)
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
404 else
405 printf ("edge from entry to exit\n");
407 #endif
409 found = FALSE;
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
412 found = TRUE;
413 break;
416 if (!found) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
421 newa [i] = to;
422 from->out_count++;
423 from->out_bb = newa;
426 found = FALSE;
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
429 found = TRUE;
430 break;
433 if (!found) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
438 newa [i] = from;
439 to->in_count++;
440 to->in_bb = newa;
444 void
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
456 * Returns:
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
464 static int
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
470 int i;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 else
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
491 return -1;
494 static GList*
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
501 int i;
502 GList *res = NULL;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
510 g_assert (handler);
511 res = g_list_append (res, handler);
515 return res;
518 static void
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 MonoInst *var;
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
524 if (var)
525 return;
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
534 MonoInst *
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
540 static MonoInst*
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 MonoInst *var;
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
546 if (var)
547 return var;
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
555 return var;
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
562 void
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 MonoClass *klass;
567 inst->klass = klass = mono_class_from_mono_type (type);
568 if (type->byref) {
569 inst->type = STACK_MP;
570 return;
573 handle_enum:
574 switch (type->type) {
575 case MONO_TYPE_VOID:
576 inst->type = STACK_INV;
577 return;
578 case MONO_TYPE_I1:
579 case MONO_TYPE_U1:
580 case MONO_TYPE_BOOLEAN:
581 case MONO_TYPE_I2:
582 case MONO_TYPE_U2:
583 case MONO_TYPE_CHAR:
584 case MONO_TYPE_I4:
585 case MONO_TYPE_U4:
586 inst->type = STACK_I4;
587 return;
588 case MONO_TYPE_I:
589 case MONO_TYPE_U:
590 case MONO_TYPE_PTR:
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
593 return;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
600 return;
601 case MONO_TYPE_I8:
602 case MONO_TYPE_U8:
603 inst->type = STACK_I8;
604 return;
605 case MONO_TYPE_R4:
606 case MONO_TYPE_R8:
607 inst->type = STACK_R8;
608 return;
609 case MONO_TYPE_VALUETYPE:
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
612 goto handle_enum;
613 } else {
614 inst->klass = klass;
615 inst->type = STACK_VTYPE;
616 return;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
621 return;
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
624 goto handle_enum;
625 case MONO_TYPE_VAR :
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
629 * really a reference
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
633 return;
634 default:
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
640 * The following tables are used to quickly validate the IL code in type_from_op ().
642 static const char
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
654 static const char
655 neg_table [] = {
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
660 static const char
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
672 static const char
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
675 {0},
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
686 static const char
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
703 static const guint16
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
709 static const guint16
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
715 static const guint16
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
721 static const guint16
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
727 static const guint16
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
733 static const guint16
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
739 static const guint16
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
752 static void
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
756 /* binops */
757 case CEE_ADD:
758 case CEE_SUB:
759 case CEE_MUL:
760 case CEE_DIV:
761 case CEE_REM:
762 /* FIXME: check unverifiable args for STACK_MP */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
765 break;
766 case CEE_DIV_UN:
767 case CEE_REM_UN:
768 case CEE_AND:
769 case CEE_OR:
770 case CEE_XOR:
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
773 break;
774 case CEE_SHL:
775 case CEE_SHR:
776 case CEE_SHR_UN:
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
779 break;
780 case OP_COMPARE:
781 case OP_LCOMPARE:
782 case OP_ICOMPARE:
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
788 else
789 ins->opcode = OP_ICOMPARE;
790 break;
791 case OP_ICOMPARE_IMM:
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
795 break;
796 case CEE_BEQ:
797 case CEE_BGE:
798 case CEE_BGT:
799 case CEE_BLE:
800 case CEE_BLT:
801 case CEE_BNE_UN:
802 case CEE_BGE_UN:
803 case CEE_BGT_UN:
804 case CEE_BLE_UN:
805 case CEE_BLT_UN:
806 ins->opcode += beqops_op_map [src1->type];
807 break;
808 case OP_CEQ:
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
811 break;
812 case OP_CGT:
813 case OP_CGT_UN:
814 case OP_CLT:
815 case OP_CLT_UN:
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
818 break;
819 /* unops */
820 case CEE_NEG:
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
823 break;
824 case CEE_NOT:
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
827 else
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
830 break;
831 case CEE_CONV_I1:
832 case CEE_CONV_I2:
833 case CEE_CONV_I4:
834 case CEE_CONV_U4:
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
837 break;
838 case CEE_CONV_R_UN:
839 ins->type = STACK_R8;
840 switch (src1->type) {
841 case STACK_I4:
842 case STACK_PTR:
843 ins->opcode = OP_ICONV_TO_R_UN;
844 break;
845 case STACK_I8:
846 ins->opcode = OP_LCONV_TO_R_UN;
847 break;
849 break;
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
858 break;
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
863 break;
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
872 break;
873 case CEE_CONV_U:
874 ins->type = STACK_PTR;
875 switch (src1->type) {
876 case STACK_I4:
877 ins->opcode = OP_ICONV_TO_U;
878 break;
879 case STACK_PTR:
880 case STACK_MP:
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
883 #else
884 ins->opcode = OP_MOVE;
885 #endif
886 break;
887 case STACK_I8:
888 ins->opcode = OP_LCONV_TO_U;
889 break;
890 case STACK_R8:
891 ins->opcode = OP_FCONV_TO_U;
892 break;
894 break;
895 case CEE_CONV_I8:
896 case CEE_CONV_U8:
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
899 break;
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
904 break;
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
909 break;
910 case CEE_CONV_R4:
911 case CEE_CONV_R8:
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
914 break;
915 case OP_CKFINITE:
916 ins->type = STACK_R8;
917 break;
918 case CEE_CONV_U2:
919 case CEE_CONV_U1:
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
922 break;
923 case CEE_CONV_I:
924 case CEE_CONV_OVF_I:
925 case CEE_CONV_OVF_U:
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
928 break;
929 case CEE_ADD_OVF:
930 case CEE_ADD_OVF_UN:
931 case CEE_MUL_OVF:
932 case CEE_MUL_OVF_UN:
933 case CEE_SUB_OVF:
934 case CEE_SUB_OVF_UN:
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
939 break;
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
942 break;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
950 break;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
953 break;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
957 break;
958 default:
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
960 break;
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
967 static const char
968 ldind_type [] = {
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
972 #if 0
974 static const char
975 param_table [STACK_MAX] [STACK_MAX] = {
976 {0},
979 static int
980 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
981 int i;
983 if (sig->hasthis) {
984 switch (args->type) {
985 case STACK_I4:
986 case STACK_I8:
987 case STACK_R8:
988 case STACK_VTYPE:
989 case STACK_INV:
990 return 0;
992 args++;
994 for (i = 0; i < sig->param_count; ++i) {
995 switch (args [i].type) {
996 case STACK_INV:
997 return 0;
998 case STACK_MP:
999 if (!sig->params [i]->byref)
1000 return 0;
1001 continue;
1002 case STACK_OBJ:
1003 if (sig->params [i]->byref)
1004 return 0;
1005 switch (sig->params [i]->type) {
1006 case MONO_TYPE_CLASS:
1007 case MONO_TYPE_STRING:
1008 case MONO_TYPE_OBJECT:
1009 case MONO_TYPE_SZARRAY:
1010 case MONO_TYPE_ARRAY:
1011 break;
1012 default:
1013 return 0;
1015 continue;
1016 case STACK_R8:
1017 if (sig->params [i]->byref)
1018 return 0;
1019 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1020 return 0;
1021 continue;
1022 case STACK_PTR:
1023 case STACK_I4:
1024 case STACK_I8:
1025 case STACK_VTYPE:
1026 break;
1028 /*if (!param_table [args [i].type] [sig->params [i]->type])
1029 return 0;*/
1031 return 1;
1033 #endif
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
1040 inline static MonoInst *
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
1050 * compiling.
1052 MonoInst *
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1057 return NULL;
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
1062 #else
1063 return NULL;
1064 #endif
1067 static MonoInst *
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
1081 static MonoType*
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1088 case STACK_MP:
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1092 default:
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
1095 return NULL;
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
1101 t = mono_type_get_underlying_type (t);
1102 switch (t->type) {
1103 case MONO_TYPE_I1:
1104 case MONO_TYPE_U1:
1105 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_I2:
1107 case MONO_TYPE_U2:
1108 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_I4:
1110 case MONO_TYPE_U4:
1111 return STACK_I4;
1112 case MONO_TYPE_I:
1113 case MONO_TYPE_U:
1114 case MONO_TYPE_PTR:
1115 case MONO_TYPE_FNPTR:
1116 return STACK_PTR;
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1122 return STACK_OBJ;
1123 case MONO_TYPE_I8:
1124 case MONO_TYPE_U8:
1125 return STACK_I8;
1126 case MONO_TYPE_R4:
1127 case MONO_TYPE_R8:
1128 return STACK_R8;
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
1131 return STACK_VTYPE;
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1134 return STACK_VTYPE;
1135 else
1136 return STACK_OBJ;
1137 break;
1138 default:
1139 g_assert_not_reached ();
1142 return -1;
1145 static MonoClass*
1146 array_access_to_klass (int opcode)
1148 switch (opcode) {
1149 case CEE_LDELEM_U1:
1150 return mono_defaults.byte_class;
1151 case CEE_LDELEM_U2:
1152 return mono_defaults.uint16_class;
1153 case CEE_LDELEM_I:
1154 case CEE_STELEM_I:
1155 return mono_defaults.int_class;
1156 case CEE_LDELEM_I1:
1157 case CEE_STELEM_I1:
1158 return mono_defaults.sbyte_class;
1159 case CEE_LDELEM_I2:
1160 case CEE_STELEM_I2:
1161 return mono_defaults.int16_class;
1162 case CEE_LDELEM_I4:
1163 case CEE_STELEM_I4:
1164 return mono_defaults.int32_class;
1165 case CEE_LDELEM_U4:
1166 return mono_defaults.uint32_class;
1167 case CEE_LDELEM_I8:
1168 case CEE_STELEM_I8:
1169 return mono_defaults.int64_class;
1170 case CEE_LDELEM_R4:
1171 case CEE_STELEM_R4:
1172 return mono_defaults.single_class;
1173 case CEE_LDELEM_R8:
1174 case CEE_STELEM_R8:
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1179 default:
1180 g_assert_not_reached ();
1182 return NULL;
1186 * We try to share variables when possible
1188 static MonoInst *
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 MonoInst *res;
1192 int pos, vnum;
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
1201 case STACK_I4:
1202 case STACK_I8:
1203 case STACK_R8:
1204 case STACK_PTR:
1205 case STACK_MP:
1206 case STACK_OBJ:
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
1211 break;
1212 default:
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1215 return res;
1218 static void
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the evaluation
 * stack at basic block boundaries. What happens is that we save the values to
 * local variables and we reload them later when first entering the target
 * basic block (with the handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array
 * bb->out_stack or bb->in_stack, if the basic block is before or after the
 * joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First time we see this block exit: pick (or create) the set of
		 * variables used to carry the stack values. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				/* Reuse the in_stack already chosen by a successor. */
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate the chosen variables to all successors which don't have an
	 * in_stack yet; mismatching stack depths make the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store each stack value to its variable and replace the stack entry with
	 * the variable, so the caller's sp now refers to the temps. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
1363 static void
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1374 else {
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_load_intf_bit_reg_class:
 *
 *   Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Load the class' interface bitmap pointer. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* interface_id is unknown until AOT load time: compute the byte
		 * index (iid >> 3) and the bit mask (1 << (iid & 7)) at run time. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* interface_id is a JIT-time constant: test the bit directly.
		 * NOTE(review): this uses OP_SHR_IMM/OP_AND_IMM while the vtable
		 * variant below uses OP_ISHR_IMM/OP_IAND_IMM — confirm the mixed
		 * native/int32 opcode widths are intentional. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *
 *   Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Load the vtable's interface bitmap pointer. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* interface_id is unknown until AOT load time: compute the byte
		 * index (iid >> 3) and the bit mask (1 << (iid & 7)) at run time. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* interface_id is a JIT-time constant: test the bit directly. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
1446 * Emit code which checks whenever the interface id of @klass is smaller than
1447 * than the value given by max_iid_reg.
1449 static void
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1458 else
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1460 if (false_target)
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1462 else
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
1467 static void
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
1478 static void
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst check of the MonoClass* in KLASS_REG against KLASS (or,
 * when KLASS_INS is non-NULL, against the class computed at run time in
 * KLASS_INS).  Branches to TRUE_TARGET when the supertype matches, to
 * FALSE_TARGET when the supertype table is provably too shallow, and falls
 * through when the final comparison fails.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* The supertypes table may be shorter than klass->idepth; bail out
		 * to false_target when it is. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* Compare supertypes [klass->idepth - 1] against the expected class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/* Emit an isinst check of the class in klass_reg against a JIT-time KLASS;
 * convenience wrapper around mini_emit_isninst_cast_inst () with no
 * run-time class instruction. */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}
1520 static void
1521 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1523 int intf_reg = alloc_preg (cfg);
1525 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1526 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1528 if (true_target)
1529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1530 else
1531 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1535 * Variant of the above that takes a register to the class, not the vtable.
1537 static void
1538 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int intf_bit_reg = alloc_preg (cfg);
1542 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1543 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1545 if (true_target)
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1547 else
1548 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit code which throws an InvalidCastException unless the MonoClass* in
 * KLASS_REG equals KLASS (or the run-time class in KLASS_INST when that is
 * non-NULL).
 */
static inline void
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
{
	if (klass_inst) {
		/* Compare against a class computed at run time. */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		/* The class pointer is only known at AOT load time. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
1566 static inline void
1567 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1569 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1572 static inline void
1573 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1575 if (cfg->compile_aot) {
1576 int const_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1579 } else {
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1585 static void
1586 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * an InvalidCastException on failure.  For array classes the rank and the
 * element class are checked; otherwise the supertypes table is consulted.
 * OBJ_REG holds the object being cast (-1 skips the vector check);
 * OBJECT_IS_NULL is the branch target used by the element-class shortcut
 * checks.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* The ranks must match exactly. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Then compare the element classes. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* The supertypes table may be shorter than klass->idepth. */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		/* supertypes [klass->idepth - 1] must be klass itself. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
1641 static void
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1644 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1647 static void
1648 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1650 int val_reg;
1652 g_assert (val == 0);
1654 if (align == 0)
1655 align = 4;
1657 if ((size <= 4) && (size <= align)) {
1658 switch (size) {
1659 case 1:
1660 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1661 return;
1662 case 2:
1663 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1664 return;
1665 case 4:
1666 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1667 return;
1668 #if SIZEOF_REGISTER == 8
1669 case 8:
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1671 return;
1672 #endif
1676 val_reg = alloc_preg (cfg);
1678 if (SIZEOF_REGISTER == 8)
1679 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1680 else
1681 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1683 if (align < 4) {
1684 /* This could be optimized further if neccesary */
1685 while (size >= 1) {
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1687 offset += 1;
1688 size -= 1;
1690 return;
1693 #if !NO_UNALIGNED_ACCESS
1694 if (SIZEOF_REGISTER == 8) {
1695 if (offset % 8) {
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1697 offset += 4;
1698 size -= 4;
1700 while (size >= 8) {
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1702 offset += 8;
1703 size -= 8;
1706 #endif
1708 while (size >= 4) {
1709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1710 offset += 4;
1711 size -= 4;
1713 while (size >= 2) {
1714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1715 offset += 2;
1716 size -= 2;
1718 while (size >= 1) {
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1720 offset += 1;
1721 size -= 1;
1725 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit code which copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET.  ALIGN is the guaranteed alignment (0 means the default
 * of 4; presumably it holds for both source and destination — confirm with
 * callers); unaligned data is copied one byte at a time.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* NOTE(review): unlike mini_emit_memset, this does not realign an
		 * offset that is not a multiple of 8 before the 8-byte loop. */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* Copy the remainder with progressively narrower loads/stores. */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1789 #ifndef DISABLE_JIT
1791 static int
1792 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1794 if (type->byref)
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1797 handle_enum:
1798 type = mini_get_basic_type_from_generic (gsctx, type);
1799 switch (type->type) {
1800 case MONO_TYPE_VOID:
1801 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1802 case MONO_TYPE_I1:
1803 case MONO_TYPE_U1:
1804 case MONO_TYPE_BOOLEAN:
1805 case MONO_TYPE_I2:
1806 case MONO_TYPE_U2:
1807 case MONO_TYPE_CHAR:
1808 case MONO_TYPE_I4:
1809 case MONO_TYPE_U4:
1810 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1811 case MONO_TYPE_I:
1812 case MONO_TYPE_U:
1813 case MONO_TYPE_PTR:
1814 case MONO_TYPE_FNPTR:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1816 case MONO_TYPE_CLASS:
1817 case MONO_TYPE_STRING:
1818 case MONO_TYPE_OBJECT:
1819 case MONO_TYPE_SZARRAY:
1820 case MONO_TYPE_ARRAY:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1822 case MONO_TYPE_I8:
1823 case MONO_TYPE_U8:
1824 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1825 case MONO_TYPE_R4:
1826 case MONO_TYPE_R8:
1827 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1828 case MONO_TYPE_VALUETYPE:
1829 if (type->data.klass->enumtype) {
1830 type = mono_class_enum_basetype (type->data.klass);
1831 goto handle_enum;
1832 } else
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_TYPEDBYREF:
1835 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1836 case MONO_TYPE_GENERICINST:
1837 type = &type->data.generic_class->container_class->byval_arg;
1838 goto handle_enum;
1839 default:
1840 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1842 return -1;
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* Nothing can be stored into a void target. */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* Value types must match the target class exactly. */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
1961 * Prepare arguments for passing to a function call.
1962 * Return a non-zero value if the arguments can't be passed to the given
1963 * signature.
1964 * The type checks are not yet complete and some conversions may need
1965 * casts on 32 or 64 bit architectures.
1967 * FIXME: implement this using target_type_is_incompatible ()
1969 static int
1970 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1972 MonoType *simple_type;
1973 int i;
1975 if (sig->hasthis) {
1976 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1977 return 1;
1978 args++;
1980 for (i = 0; i < sig->param_count; ++i) {
1981 if (sig->params [i]->byref) {
1982 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1983 return 1;
1984 continue;
1986 simple_type = sig->params [i];
1987 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1988 handle_enum:
1989 switch (simple_type->type) {
1990 case MONO_TYPE_VOID:
1991 return 1;
1992 continue;
1993 case MONO_TYPE_I1:
1994 case MONO_TYPE_U1:
1995 case MONO_TYPE_BOOLEAN:
1996 case MONO_TYPE_I2:
1997 case MONO_TYPE_U2:
1998 case MONO_TYPE_CHAR:
1999 case MONO_TYPE_I4:
2000 case MONO_TYPE_U4:
2001 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2002 return 1;
2003 continue;
2004 case MONO_TYPE_I:
2005 case MONO_TYPE_U:
2006 case MONO_TYPE_PTR:
2007 case MONO_TYPE_FNPTR:
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2009 return 1;
2010 continue;
2011 case MONO_TYPE_CLASS:
2012 case MONO_TYPE_STRING:
2013 case MONO_TYPE_OBJECT:
2014 case MONO_TYPE_SZARRAY:
2015 case MONO_TYPE_ARRAY:
2016 if (args [i]->type != STACK_OBJ)
2017 return 1;
2018 continue;
2019 case MONO_TYPE_I8:
2020 case MONO_TYPE_U8:
2021 if (args [i]->type != STACK_I8)
2022 return 1;
2023 continue;
2024 case MONO_TYPE_R4:
2025 case MONO_TYPE_R8:
2026 if (args [i]->type != STACK_R8)
2027 return 1;
2028 continue;
2029 case MONO_TYPE_VALUETYPE:
2030 if (simple_type->data.klass->enumtype) {
2031 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2032 goto handle_enum;
2034 if (args [i]->type != STACK_VTYPE)
2035 return 1;
2036 continue;
2037 case MONO_TYPE_TYPEDBYREF:
2038 if (args [i]->type != STACK_VTYPE)
2039 return 1;
2040 continue;
2041 case MONO_TYPE_GENERICINST:
2042 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2043 goto handle_enum;
2045 default:
2046 g_error ("unknown type 0x%02x in check_call_signature",
2047 simple_type->type);
2050 return 0;
2053 static int
2054 callvirt_to_call (int opcode)
2056 switch (opcode) {
2057 case OP_CALLVIRT:
2058 return OP_CALL;
2059 case OP_VOIDCALLVIRT:
2060 return OP_VOIDCALL;
2061 case OP_FCALLVIRT:
2062 return OP_FCALL;
2063 case OP_VCALLVIRT:
2064 return OP_VCALL;
2065 case OP_LCALLVIRT:
2066 return OP_LCALL;
2067 default:
2068 g_assert_not_reached ();
2071 return -1;
2074 static int
2075 callvirt_to_call_membase (int opcode)
2077 switch (opcode) {
2078 case OP_CALLVIRT:
2079 return OP_CALL_MEMBASE;
2080 case OP_VOIDCALLVIRT:
2081 return OP_VOIDCALL_MEMBASE;
2082 case OP_FCALLVIRT:
2083 return OP_FCALL_MEMBASE;
2084 case OP_LCALLVIRT:
2085 return OP_LCALL_MEMBASE;
2086 case OP_VCALLVIRT:
2087 return OP_VCALL_MEMBASE;
2088 default:
2089 g_assert_not_reached ();
2092 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Arrange for the IMT/method-identifying argument of CALL to be passed:
 * either through the arch's dedicated IMT register, or via an arch-specific
 * hook when no such register exists.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		/* An explicit IMT value was supplied: just move it into place */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* AOT: the method constant must go through a patch */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		/* JIT: embed the MonoMethod pointer directly */
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* No reserved IMT register: let the backend decide how to pass it */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2121 static MonoJumpInfo *
2122 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2124 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2126 ji->ip.i = ip;
2127 ji->type = type;
2128 ji->data.target = target;
2130 return ji;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * set up the return-value plumbing (including valuetype returns), let the
 * backend lower the argument passing, and return the call instruction.
 * The caller is responsible for adding the call to a basic block.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			/* Tail calls with a vtype return reuse the caller's vret address */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Grow the outgoing-argument area to cover this call */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2230 inline static MonoInst*
2231 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2233 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2235 call->inst.sreg1 = addr->dreg;
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Emit an indirect call like mono_emit_calli (), additionally passing
 * RGCTX_ARG in the arch's reserved RGCTX register when it is non-NULL.
 */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	MonoCallInst *call;
	int rgctx_reg = -1;

	if (rgctx_arg) {
		/* Copy the rgctx value before the call sequence is emitted */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	if (rgctx_arg) {
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
	}
	return (MonoInst*)call;
#else
	/* Architectures without a reserved RGCTX register can't reach this path */
	g_assert_not_reached ();
	return NULL;
#endif
}
2266 static MonoInst*
2267 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 static MonoInst*
2269 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG and
 * arguments ARGS. THIS is non-NULL for virtual calls; IMT_ARG, when set,
 * carries the interface-method identifier for IMT dispatch.
 * Direct dispatch is used whenever the target can be determined statically
 * (non-virtual, final, or delegate Invoke).
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	/* MarshalByRef (or object) targets may need a remoting wrapper */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);

	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		/* Shared code: resolve the remoting-check wrapper through the rgctx */
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		/* True virtual dispatch: call through the vtable (or IMT for interfaces) */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			if (mono_use_imt) {
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				/* IMT slots live at negative offsets from the vtable start */
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* Non-IMT interface dispatch: load the interface slot table */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				/* Generic virtual method: pass the method identifier too */
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but additionally passes VTABLE_ARG
 * in the arch's reserved RGCTX register when it is non-NULL.
 */
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
		MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	int rgctx_reg = 0;
#endif
	MonoInst *ins;
	MonoCallInst *call;

	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Copy the vtable/rgctx value before the call sequence is emitted */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
#else
		NOT_IMPLEMENTED;
#endif
	}
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;
	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
#else
		NOT_IMPLEMENTED;
#endif
	}

	return ins;
}
2448 MonoInst*
2449 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2451 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2454 MonoInst*
2455 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2456 MonoInst **args)
2458 MonoCallInst *call;
2460 g_assert (sig);
2462 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2463 call->fptr = func;
2465 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2467 return (MonoInst*)call;
2470 MonoInst*
2471 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2473 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2475 g_assert (info);
2477 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
{
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	MonoInst *ins;

	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	/* Lazily created map used to recognize such fake addresses later */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* Mark the fptr as a patch so it is not treated as a real address */
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
	return ins;
}
2504 static MonoInst*
2505 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2508 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2509 int widen_op = -1;
2512 * Native code might return non register sized integers
2513 * without initializing the upper bits.
2515 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2516 case OP_LOADI1_MEMBASE:
2517 widen_op = OP_ICONV_TO_I1;
2518 break;
2519 case OP_LOADU1_MEMBASE:
2520 widen_op = OP_ICONV_TO_U1;
2521 break;
2522 case OP_LOADI2_MEMBASE:
2523 widen_op = OP_ICONV_TO_I2;
2524 break;
2525 case OP_LOADU2_MEMBASE:
2526 widen_op = OP_ICONV_TO_U2;
2527 break;
2528 default:
2529 break;
2532 if (widen_op != -1) {
2533 int dreg = alloc_preg (cfg);
2534 MonoInst *widen;
2536 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2537 widen->type = ins->type;
2538 ins = widen;
2543 return ins;
2546 static MonoMethod*
2547 get_memcpy_method (void)
2549 static MonoMethod *memcpy_method = NULL;
2550 if (!memcpy_method) {
2551 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2552 if (!memcpy_method)
2553 g_error ("Old corlib found. Install a new one");
2555 return memcpy_method;
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [3];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

#if HAVE_WRITE_BARRIERS
	/* if native is true there should be no references in the struct */
	if (klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);
			if (context_used) {
				/* Shared code: fetch the class through the rgctx */
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					mono_class_compute_gc_descriptor (klass);
				}
			}

			/* mono_value_copy () performs the copy with write barriers */
			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
			/* NOTE(review): no early return is visible here in this (garbled)
			   source view — verify against upstream that the memcpy below is
			   not emitted a second time for reference-containing structs. */
		}
	}
#endif

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
2623 static MonoMethod*
2624 get_memset_method (void)
2626 static MonoMethod *memset_method = NULL;
2627 if (!memset_method) {
2628 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2629 if (!memset_method)
2630 g_error ("Old corlib found. Install a new one");
2632 return memset_method;
2635 void
2636 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2638 MonoInst *iargs [3];
2639 int n;
2640 guint32 align;
2641 MonoMethod *memset_method;
2643 /* FIXME: Optimize this for the case when dest is an LDADDR */
2645 mono_class_init (klass);
2646 n = mono_class_value_size (klass, &align);
2648 if (n <= sizeof (gpointer) * 5) {
2649 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2651 else {
2652 memset_method = get_memset_method ();
2653 iargs [0] = dest;
2654 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2655 EMIT_NEW_ICONST (cfg, iargs [2], n);
2656 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2660 static MonoInst*
2661 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2663 MonoInst *this = NULL;
2665 g_assert (cfg->generic_sharing_context);
2667 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2668 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2669 !method->klass->valuetype)
2670 EMIT_NEW_ARGLOAD (cfg, this, 0);
2672 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2673 MonoInst *mrgctx_loc, *mrgctx_var;
2675 g_assert (!this);
2676 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2678 mrgctx_loc = mono_get_vtable_var (cfg);
2679 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2681 return mrgctx_var;
2682 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2683 MonoInst *vtable_loc, *vtable_var;
2685 g_assert (!this);
2687 vtable_loc = mono_get_vtable_var (cfg);
2688 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2690 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2691 MonoInst *mrgctx_var = vtable_var;
2692 int vtable_reg;
2694 vtable_reg = alloc_preg (cfg);
2695 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2696 vtable_var->type = STACK_PTR;
2699 return vtable_var;
2700 } else {
2701 MonoInst *ins;
2702 int vtable_reg, res_reg;
2704 vtable_reg = alloc_preg (cfg);
2705 res_reg = alloc_preg (cfg);
2706 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2707 return ins;
2711 static MonoJumpInfoRgctxEntry *
2712 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2714 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2715 res->method = method;
2716 res->in_mrgctx = in_mrgctx;
2717 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2718 res->data->type = patch_type;
2719 res->data->data.target = patch_data;
2720 res->info_type = info_type;
2722 return res;
/* Emit a call to the lazy-fetch trampoline which resolves ENTRY against RGCTX. */
static inline MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
2731 static MonoInst*
2732 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2733 MonoClass *klass, int rgctx_type)
2735 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2736 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2738 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, int rgctx_type)
{
	if (!context_used) {
		/* No generic sharing involved: the method is known statically */
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		default:
			g_assert_not_reached ();
		}
	} else {
		/* Shared code: fetch the info lazily through the rgctx */
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
	}
}
2772 static MonoInst*
2773 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2774 MonoClassField *field, int rgctx_type)
2776 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2777 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2779 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing the
 * class vtable in MONO_ARCH_VTABLE_REG.
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		/* Shared code: the vtable must be fetched through the rgctx */
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		if (!vtable)
			return; /* load error; presumably the caller inspects klass — TODO confirm */
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ's exact type
 * is ARRAY_CLASS, comparing either class or vtable pointers.
 * On return the caller must check @array_class for load errors
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: compare MonoClass pointers, not vtables */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* Shared code: fetch the expected vtable through the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
}
2865 static void
2866 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2868 if (mini_get_debug_options ()->better_cast_details) {
2869 int to_klass_reg = alloc_preg (cfg);
2870 int vtable_reg = alloc_preg (cfg);
2871 int klass_reg = alloc_preg (cfg);
2872 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2874 if (!tls_get) {
2875 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2876 exit (1);
2879 MONO_ADD_INS (cfg->cbb, tls_get);
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2881 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2884 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2889 static void
2890 reset_cast_details (MonoCompile *cfg)
2892 /* Reset the variables holding the cast details */
2893 if (mini_get_debug_options ()->better_cast_details) {
2894 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2896 MONO_ADD_INS (cfg->cbb, tls_get);
2897 /* It is enough to reset the from field */
2898 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared? We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

		rgctx = emit_get_rgctx (cfg, method, context_used);

		/* Indirect call through the rgctx-resolved address */
		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		return mono_emit_method_call (cfg, method, &val, NULL);
	}
}
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes the object in SP [0] to a value of class KLASS,
 * throwing InvalidCastException on type mismatch, and return an instruction
 * holding the address of the unboxed data (just past the object header).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: compare against the rgctx-resolved element class */
		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The unboxed data starts right after the MonoObject header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an object of class KLASS.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: pass the domain explicitly to mono_object_new */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			/* Class failed to load: report via the cfg exception fields */
			cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
			cfg->exception_ptr = klass;
			return NULL;
		}

#ifndef MONO_CROSS_COMPILE
		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif

		if (managed_alloc) {
			/* The GC provides a managed allocator method: call it directly */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* This allocator also wants the instance size in pointer words */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the class is only known at runtime through
 * DATA_INST (shared generic code), so allocation goes through
 * mono_object_new_specific.
 */
static MonoInst*
handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
						gboolean for_box)
{
	MonoInst *iargs [2];
	MonoMethod *managed_alloc = NULL;
	void *alloc_ftn;

	/*
	  FIXME: we cannot get managed_alloc here because we can't get
	  the class's vtable (because it's not a closed class)

	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
	*/

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		iargs [1] = data_inst;
		alloc_ftn = mono_object_new;
	} else {
		/* NOTE: managed_alloc is always NULL here (see FIXME above), so this
		   branch currently falls through to mono_object_new_specific */
		if (managed_alloc) {
			iargs [0] = data_inst;
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		iargs [0] = data_inst;
		alloc_ftn = mono_object_new_specific;
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
3064 * Returns NULL and set the cfg exception on error.
3066 static MonoInst*
3067 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3069 MonoInst *alloc, *ins;
3071 if (mono_class_is_nullable (klass)) {
3072 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3073 return mono_emit_method_call (cfg, method, &val, NULL);
3076 alloc = handle_alloc (cfg, klass, TRUE);
3077 if (!alloc)
3078 return NULL;
3080 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3082 return alloc;
3085 static MonoInst *
3086 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3088 MonoInst *alloc, *ins;
3090 if (mono_class_is_nullable (klass)) {
3091 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3092 /* FIXME: What if the class is shared? We might not
3093 have to get the method address from the RGCTX. */
3094 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3095 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3096 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3098 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3099 } else {
3100 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3102 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3104 return alloc;
// FIXME: This doesn't work yet (class libs tests fail?)
/*
 * True when the isinst/castclass check for KLASS cannot be expressed as a
 * simple inline class comparison and must go through the
 * mono_object_isinst/mono_object_castclass icalls: interfaces, arrays,
 * nullables, MarshalByRef classes, sealed classes, variant generics and
 * type variables. NOTE(review): the leading TRUE || currently forces every
 * class down the icall path until the FIXME above is resolved.
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit the expansion of the 'castclass' opcode: checks that SRC is an
 * instance of KLASS (null passes), throwing InvalidCastException otherwise,
 * and returns SRC unchanged. CONTEXT_USED is nonzero when KLASS is only
 * known through the runtime generic context.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [2];

		klass_inst = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);

	/* Null references always pass castclass */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* Record source type info so a failing cast can produce a detailed message */
	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		/* Sealed non-array class: an exact class comparison suffices */
		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit the expansion of the 'isinst' opcode: the result is SRC when it is
 * an instance of KLASS (or null), and a null reference otherwise.
 * CONTEXT_USED is nonzero when KLASS is only known through the runtime
 * generic context. Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			MonoInst *args [2];

			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	/* A null reference is "not an instance" but also doesn't fail: result stays src */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array case: check rank, then the element class */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);

			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				/* object[] also matches arrays of enums whose base is object's child */
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 *   Expansion of the internal CEE_MONO_CISINST opcode, used by the remoting
 * wrappers: unlike plain isinst, it must distinguish transparent proxies
 * whose exact type is not locally known.
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* Null is "not an instance" (result 1) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Not a proxy either: plain failure (result 1) */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* Proxy without custom type info: type cannot be determined (result 2) */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* For a proxy, test against the remote class it stands in for */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		/* Proxy with custom type info: failure means "undetermined" (result 2) */
		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Expansion of the internal CEE_MONO_CCASTCLASS opcode, the remoting-aware
 * variant of castclass: proxies of undetermined type yield 1 instead of
 * throwing.
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* Null passes the cast (result 0) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Throws InvalidCastException unless the object is a transparent proxy */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Proxy with custom type info: undetermined (result 1) */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* For a proxy, test against the remote class it stands in for */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Ordinary object: a failing cast throws from here */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR constructing a delegate of type KLASS over METHOD with the given
 * TARGET object, inlining the work of mono_delegate_ctor (). CONTEXT_USED is
 * nonzero when METHOD must be obtained through the runtime generic context.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			/* Lock protects method_code_hash creation and lookup/insert */
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		/* AOT code references the trampoline through a patch, resolved at load time */
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3556 static MonoInst*
3557 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3559 MonoJitICallInfo *info;
3561 /* Need to register the icall so it gets an icall wrapper */
3562 info = mono_get_array_new_va_icall (rank);
3564 cfg->flags |= MONO_CFG_HAS_VARARGS;
3566 /* mono_array_new_va () needs a vararg calling convention */
3567 cfg->disable_llvm = TRUE;
3569 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3570 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3573 static void
3574 mono_emit_load_got_addr (MonoCompile *cfg)
3576 MonoInst *getaddr, *dummy_use;
3578 if (!cfg->got_var || cfg->got_var_allocated)
3579 return;
3581 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3582 getaddr->dreg = cfg->got_var->dreg;
3584 /* Add it to the start of the first bblock */
3585 if (cfg->bb_entry->code) {
3586 getaddr->next = cfg->bb_entry->code;
3587 cfg->bb_entry->code = getaddr;
3589 else
3590 MONO_ADD_INS (cfg->bb_entry, getaddr);
3592 cfg->got_var_allocated = TRUE;
3595 * Add a dummy use to keep the got_var alive, since real uses might
3596 * only be generated by the back ends.
3597 * Add it to end_bblock, so the variable's lifetime covers the whole
3598 * method.
3599 * It would be better to make the usage of the got var explicit in all
3600 * cases when the backend needs it (i.e. calls, throw etc.), so this
3601 * wouldn't be needed.
3603 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3604 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3607 static int inline_limit;
3608 static gboolean inline_limit_inited;
3610 static gboolean
3611 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3613 MonoMethodHeaderSummary header;
3614 MonoVTable *vtable;
3615 #ifdef MONO_ARCH_SOFT_FLOAT
3616 MonoMethodSignature *sig = mono_method_signature (method);
3617 int i;
3618 #endif
3620 if (cfg->generic_sharing_context)
3621 return FALSE;
3623 if (cfg->inline_depth > 10)
3624 return FALSE;
3626 #ifdef MONO_ARCH_HAVE_LMF_OPS
3627 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3628 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3629 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3630 return TRUE;
3631 #endif
3634 if (!mono_method_get_header_summary (method, &header))
3635 return FALSE;
3637 /*runtime, icall and pinvoke are checked by summary call*/
3638 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3639 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3640 (method->klass->marshalbyref) ||
3641 header.has_clauses)
3642 return FALSE;
3644 /* also consider num_locals? */
3645 /* Do the size check early to avoid creating vtables */
3646 if (!inline_limit_inited) {
3647 if (getenv ("MONO_INLINELIMIT"))
3648 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3649 else
3650 inline_limit = INLINE_LENGTH_LIMIT;
3651 inline_limit_inited = TRUE;
3653 if (header.code_size >= inline_limit)
3654 return FALSE;
3657 * if we can initialize the class of the method right away, we do,
3658 * otherwise we don't allow inlining if the class needs initialization,
3659 * since it would mean inserting a call to mono_runtime_class_init()
3660 * inside the inlined code
3662 if (!(cfg->opt & MONO_OPT_SHARED)) {
3663 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3664 if (cfg->run_cctors && method->klass->has_cctor) {
3665 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3666 if (!method->klass->runtime_info)
3667 /* No vtable created yet */
3668 return FALSE;
3669 vtable = mono_class_vtable (cfg->domain, method->klass);
3670 if (!vtable)
3671 return FALSE;
3672 /* This makes so that inline cannot trigger */
3673 /* .cctors: too many apps depend on them */
3674 /* running with a specific order... */
3675 if (! vtable->initialized)
3676 return FALSE;
3677 mono_runtime_class_init (vtable);
3679 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3680 if (!method->klass->runtime_info)
3681 /* No vtable created yet */
3682 return FALSE;
3683 vtable = mono_class_vtable (cfg->domain, method->klass);
3684 if (!vtable)
3685 return FALSE;
3686 if (!vtable->initialized)
3687 return FALSE;
3689 } else {
3691 * If we're compiling for shared code
3692 * the cctor will need to be run at aot method load time, for example,
3693 * or at the end of the compilation of the inlining method.
3695 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3696 return FALSE;
3700 * CAS - do not inline methods with declarative security
3701 * Note: this has to be before any possible return TRUE;
3703 if (mono_method_has_declsec (method))
3704 return FALSE;
3706 #ifdef MONO_ARCH_SOFT_FLOAT
3707 /* FIXME: */
3708 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3709 return FALSE;
3710 for (i = 0; i < sig->param_count; ++i)
3711 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3712 return FALSE;
3713 #endif
3715 return TRUE;
3718 static gboolean
3719 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3721 if (vtable->initialized && !cfg->compile_aot)
3722 return FALSE;
3724 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3725 return FALSE;
3727 if (!mono_class_needs_cctor_run (vtable->klass, method))
3728 return FALSE;
3730 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3731 /* The initialization is already done before the method is called */
3732 return FALSE;
3734 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS, including the bounds check. On x86/amd64
 * a single LEA is used for power-of-two element sizes.
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* Narrow a 64-bit index to 32 bits on 32-bit targets */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* fast_log2 [size] is the scale factor for the LEA addressing mode */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	/* Generic path: vector + index * size */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV

/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element type KLASS, performing the lower
 * bound adjustment and the range check of both dimensions inline. Only
 * compiled when the target has a real OP_LMUL (see the #ifndef above).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* First dimension: realidx = index - lower_bound, must be < length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) further */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = vector + ((realidx1 * length2 + realidx2) * size) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}

#endif
3852 static MonoInst*
3853 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3855 int rank;
3856 MonoInst *addr;
3857 MonoMethod *addr_method;
3858 int element_size;
3860 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3862 if (rank == 1)
3863 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3865 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3866 /* emit_ldelema_2 depends on OP_LMUL */
3867 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3868 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3870 #endif
3872 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3873 addr_method = mono_marshal_get_array_address (rank, element_size);
3874 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3876 return addr;
/* Default MonoBreakPolicyFunc: honor every breakpoint instruction */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}
/* Current break policy callback; defaults to honoring every breakpoint */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;

/**
 * mono_set_break_policy:
 * policy_callback: the new callback function
 *
 * Allow embedders to decide whether to actually obey breakpoint instructions
 * (both break IL instructions and Debugger.Break () method calls), for example
 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * untrusted or semi-trusted code.
 *
 * @policy_callback will be called every time a break point instruction needs to
 * be inserted with the method argument being the method that calls Debugger.Break()
 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * if it wants the breakpoint to not be effective in the given method.
 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
void
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
{
	/* Passing NULL restores the default always-break policy */
	if (policy_callback)
		break_policy_func = policy_callback;
	else
		break_policy_func = always_insert_breakpoint;
}
3911 static gboolean
3912 should_insert_brekpoint (MonoMethod *method) {
3913 switch (break_policy_func (method)) {
3914 case MONO_BREAK_POLICY_ALWAYS:
3915 return TRUE;
3916 case MONO_BREAK_POLICY_NEVER:
3917 return FALSE;
3918 case MONO_BREAK_POLICY_ON_DBG:
3919 return mono_debug_using_mono_debugger ();
3920 default:
3921 g_warning ("Incorrect value returned from break policy callback");
3922 return FALSE;
/*
 * mini_emit_inst_for_method:
 *
 * Intrinsic expansion: if CMETHOD is one of the well-known methods handled
 * below (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 * Debugger, Environment, Math), emit specialised IR in place of the call and
 * return the instruction that holds the result. Unmatched methods fall
 * through to mono_arch_emit_inst_for_method () at the bottom; a NULL return
 * tells the caller to emit a normal call.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	/* Resolved lazily, once per runtime. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		/* String.get_Chars: inline bounds check + 16 bit load from the char data. */
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			/* Load vtable->type directly instead of calling the icall. */
			int dreg = alloc_preg (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
			/* Hash the object address; only valid with a non-moving GC. */
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor is empty: replace the call with a nop. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			/* The rank is stored in the vtable. */
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		/* Monitor.Enter/Exit go through per-arch trampolines that take the
		 * object in a fixed register instead of a normal calling convention. */
		if (strcmp (cmethod->name, "Enter") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if (strcmp (cmethod->name, "Enter") == 0 ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (mini_class_is_system_array (cmethod->klass) &&
			strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
		/* Copy the array element at index args[1] into the by-ref destination args[2]. */
		MonoInst *addr, *store, *load;
		MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);

		addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
		return store;
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		/* Increment/Decrement are expressed as atomic add of +1/-1. */
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				/* A reference was stored into the location: the GC needs a write barrier. */
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
			}
#endif
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			/* NOTE(review): the branch below can never be taken — MONO_TYPE_I4 is
			 * already handled by the first condition; it presumably should test
			 * MONO_TYPE_I8. Confirm against upstream before changing. */
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* Unsupported size: ins stays NULL and a normal call is emitted. */
				/* g_assert_not_reached (); */
			}
#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
			}
#endif
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* Debugger.Break (): honour the embedder's break policy. */
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
			/* The answer is a compile-time constant for a given target. */
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	/* Give the backend a chance to intrinsify the method. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
4327 * This entry point could be used later for arbitrary method
4328 * redirection.
4330 inline static MonoInst*
4331 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4332 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4334 if (method->klass == mono_defaults.string_class) {
4335 /* managed string allocation support */
4336 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4337 MonoInst *iargs [2];
4338 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4339 MonoMethod *managed_alloc = NULL;
4341 g_assert (vtable); /*Should not fail since it System.String*/
4342 #ifndef MONO_CROSS_COMPILE
4343 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4344 #endif
4345 if (!managed_alloc)
4346 return NULL;
4347 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4348 iargs [1] = args [0];
4349 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4352 return NULL;
/*
 * mono_save_args:
 *
 * Create a local variable for each argument of the method being inlined and
 * store the stack values SP into them, filling in cfg->args as it goes.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* The 'this' slot has no entry in the signature, so take its type from the stack value. */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for 
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
/* When non-zero, the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment
 * variables (checked by the helpers below) can restrict inlining to methods
 * whose full name matches a prefix — a debugging aid with no effect unless
 * the variables are set. */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 * Debug filter driven by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable: only methods whose full name starts with its value may be
 * inlined. Returns TRUE when CALLED_METHOD passes (or no limit is set).
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *called_method_name;
	int strncmp_result;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		/* cache the setting; an empty string means "no limit" */
		limit = (limit_string != NULL) ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	called_method_name = mono_method_full_name (called_method, TRUE);
	strncmp_result = strncmp (called_method_name, limit, strlen (limit));
	g_free (called_method_name);

	//return (strncmp_result <= 0);
	return (strncmp_result == 0);
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 * Debug filter driven by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment
 * variable: inlining is only performed inside callers whose full name starts
 * with its value. Returns TRUE when CALLER_METHOD passes (or no limit is set).
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *caller_method_name;
	int strncmp_result;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		/* cache the setting; an empty string means "no limit" */
		limit = (limit_string != NULL) ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	caller_method_name = mono_method_full_name (caller_method, TRUE);
	strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
	g_free (caller_method_name);

	//return (strncmp_result <= 0);
	return (strncmp_result == 0);
}
#endif
/*
 * inline_method:
 *
 * Try to inline CMETHOD at the current emission point, with the call
 * arguments in SP. Returns the inline cost + 1 (always positive) when the
 * method was inlined, or 0 when inlining was aborted or filtered out.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

	/* Optional environment-variable-driven debug filters. */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	}
	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);
	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save every cfg field the recursive mono_method_to_ir () call clobbers. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	ret_var_set = cfg->ret_var_set;

	/* Restore the saved state, in the same pairing as it was saved. */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* Negative costs mean mono_method_to_ir () aborted the inline. */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/* 
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not 
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			/* Push the inlined method's return value onto the evaluation stack. */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	return 0;
}
4624 * Some of these comments may well be out-of-date.
4625 * Design decisions: we do a single pass over the IL code (and we do bblock
4626 * splitting/merging in the few cases when it's required: a back jump to an IL
4627 * address that was not already seen as bblock starting point).
4628 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4629 * Complex operations are decomposed in simpler ones right away. We need to let the
4630 * arch-specific code peek and poke inside this process somehow (except when the
4631 * optimizations can take advantage of the full semantic info of coarse opcodes).
4632 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4633 * MonoInst->opcode initially is the IL opcode or some simplification of that
4634 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4635 * opcode with value bigger than OP_LAST.
4636 * At this point the IR can be handed over to an interpreter, a dumb code generator
4637 * or to the optimizing code generator that will translate it to SSA form.
4639 * Profiling directed optimizations.
4640 * We may compile by default with few or no optimizations and instrument the code
4641 * or the user may indicate what methods to optimize the most either in a config file
4642 * or through repeated runs where the compiler applies offline the optimizations to
4643 * each method and then decides if it was worth it.
/* IL verification helpers used by mono_method_to_ir (): each macro jumps to
 * the local 'unverified' label (via UNVERIFIED) or to 'load_error' when the
 * check fails, so they may only be used where those labels are in scope. */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4658 static gboolean
4659 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4661 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4663 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 * Scan the IL stream [START, END) once and create a basic block (via
 * GET_BBLOCK) at every branch target and at the instruction following each
 * branch; bblocks containing a throw are marked out-of-line. Returns 0 on
 * success; on invalid IL it returns 1 with *POS set to the offending ip
 * (the UNVERIFIED macro jumps to the 'unverified' label below).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip past the operand; only branch operands create bblocks. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* The fall-through target is past the whole jump table. */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
4759 static inline MonoMethod *
4760 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4762 MonoMethod *method;
4764 if (m->wrapper_type != MONO_WRAPPER_NONE)
4765 return mono_method_get_wrapper_data (m, token);
4767 method = mono_get_method_full (m->klass->image, token, klass, context);
4769 return method;
4772 static inline MonoMethod *
4773 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4775 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4777 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4778 return NULL;
4780 return method;
4783 static inline MonoClass*
4784 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4786 MonoClass *klass;
4788 if (method->wrapper_type != MONO_WRAPPER_NONE)
4789 klass = mono_method_get_wrapper_data (method, token);
4790 else
4791 klass = mono_class_get_full (method->klass->image, token, context);
4792 if (klass)
4793 mono_class_init (klass);
4794 return klass;
4798 * Returns TRUE if the JIT should abort inlining because "callee"
4799 * is influenced by security attributes.
4801 static
4802 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4804 guint32 result;
4806 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4807 return TRUE;
4810 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4811 if (result == MONO_JIT_SECURITY_OK)
4812 return FALSE;
4814 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4815 /* Generate code to throw a SecurityException before the actual call/link */
4816 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4817 MonoInst *args [2];
4819 NEW_ICONST (cfg, args [0], 4);
4820 NEW_METHODCONST (cfg, args [1], caller);
4821 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4822 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4823 /* don't hide previous results */
4824 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4825 cfg->exception_data = result;
4826 return TRUE;
4829 return FALSE;
4832 static MonoMethod*
4833 throw_exception (void)
4835 static MonoMethod *method = NULL;
4837 if (!method) {
4838 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4839 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4841 g_assert (method);
4842 return method;
4845 static void
4846 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4848 MonoMethod *thrower = throw_exception ();
4849 MonoInst *args [1];
4851 EMIT_NEW_PCONST (cfg, args [0], ex);
4852 mono_emit_method_call (cfg, thrower, args, NULL);
4856 * Return the original method is a wrapper is specified. We can only access
4857 * the custom attributes from the original method.
4859 static MonoMethod*
4860 get_original_method (MonoMethod *method)
4862 if (method->wrapper_type == MONO_WRAPPER_NONE)
4863 return method;
4865 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4866 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4867 return NULL;
4869 /* in other cases we need to find the original method */
4870 return mono_marshal_method_from_wrapper (method);
4873 static void
4874 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4875 MonoBasicBlock *bblock, unsigned char *ip)
4877 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4878 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4879 return;
4881 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4882 caller = get_original_method (caller);
4883 if (!caller)
4884 return;
4886 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4887 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4888 emit_throw_exception (cfg, mono_get_exception_field_access ());
4891 static void
4892 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4893 MonoBasicBlock *bblock, unsigned char *ip)
4895 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4896 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4897 return;
4899 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4900 caller = get_original_method (caller);
4901 if (!caller)
4902 return;
4904 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4905 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4906 emit_throw_exception (cfg, mono_get_exception_method_access ());
4910 * Check that the IL instructions at ip are the array initialization
4911 * sequence and return the pointer to the data and the size.
4913 static const char*
4914 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4917 * newarr[System.Int32]
4918 * dup
4919 * ldtoken field valuetype ...
4920 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4922 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4923 guint32 token = read32 (ip + 7);
4924 guint32 field_token = read32 (ip + 2);
4925 guint32 field_index = field_token & 0xffffff;
4926 guint32 rva;
4927 const char *data_ptr;
4928 int size = 0;
4929 MonoMethod *cmethod;
4930 MonoClass *dummy_class;
4931 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4932 int dummy_align;
4934 if (!field)
4935 return NULL;
4937 *out_field_token = field_token;
4939 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4940 if (!cmethod)
4941 return NULL;
4942 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4943 return NULL;
4944 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4945 case MONO_TYPE_BOOLEAN:
4946 case MONO_TYPE_I1:
4947 case MONO_TYPE_U1:
4948 size = 1; break;
4949 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4950 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4951 case MONO_TYPE_CHAR:
4952 case MONO_TYPE_I2:
4953 case MONO_TYPE_U2:
4954 size = 2; break;
4955 case MONO_TYPE_I4:
4956 case MONO_TYPE_U4:
4957 case MONO_TYPE_R4:
4958 size = 4; break;
4959 case MONO_TYPE_R8:
4960 #ifdef ARM_FPU_FPA
4961 return NULL; /* stupid ARM FP swapped format */
4962 #endif
4963 case MONO_TYPE_I8:
4964 case MONO_TYPE_U8:
4965 size = 8; break;
4966 #endif
4967 default:
4968 return NULL;
4970 size *= len;
4971 if (size > mono_type_size (field->type, &dummy_align))
4972 return NULL;
4973 *out_size = size;
4974 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4975 if (!method->klass->image->dynamic) {
4976 field_index = read32 (ip + 2) & 0xffffff;
4977 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4978 data_ptr = mono_image_rva_map (method->klass->image, rva);
4979 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4980 /* for aot code we do the lookup on load */
4981 if (aot && data_ptr)
4982 return GUINT_TO_POINTER (rva);
4983 } else {
4984 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4985 g_assert (!aot);
4986 data_ptr = mono_field_get_data (field);
4988 return data_ptr;
4990 return NULL;
4993 static void
4994 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4996 char *method_fname = mono_method_full_name (method, TRUE);
4997 char *method_code;
4999 if (mono_method_get_header (method)->code_size == 0)
5000 method_code = g_strdup ("method body is empty.");
5001 else
5002 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5003 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5004 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5005 g_free (method_fname);
5006 g_free (method_code);
5009 static void
5010 set_exception_object (MonoCompile *cfg, MonoException *exception)
5012 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5013 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5014 cfg->exception_ptr = exception;
5017 static gboolean
5018 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5020 MonoType *type;
5022 if (cfg->generic_sharing_context)
5023 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5024 else
5025 type = &klass->byval_arg;
5026 return MONO_TYPE_IS_REFERENCE (type);
5029 static void
5030 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5032 MonoInst *ins;
5033 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5034 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5035 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5036 /* Optimize reg-reg moves away */
5038 * Can't optimize other opcodes, since sp[0] might point to
5039 * the last ins of a decomposed opcode.
5041 sp [0]->dreg = (cfg)->locals [n]->dreg;
5042 } else {
5043 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5048 * ldloca inhibits many optimizations so try to get rid of it in common
5049 * cases.
5051 static inline unsigned char *
5052 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5054 int local, token;
5055 MonoClass *klass;
5057 if (size == 1) {
5058 local = ip [1];
5059 ip += 2;
5060 } else {
5061 local = read16 (ip + 2);
5062 ip += 4;
5065 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5066 gboolean skip = FALSE;
5068 /* From the INITOBJ case */
5069 token = read32 (ip + 2);
5070 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5071 CHECK_TYPELOAD (klass);
5072 if (generic_class_is_reference_type (cfg, klass)) {
5073 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5074 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5075 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5076 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5077 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5078 } else {
5079 skip = TRUE;
5082 if (!skip)
5083 return ip + 6;
5085 load_error:
5086 return NULL;
5089 static gboolean
5090 is_exception_class (MonoClass *class)
5092 while (class) {
5093 if (class == mono_defaults.exception_class)
5094 return TRUE;
5095 class = class->parent;
5097 return FALSE;
5101 * mono_method_to_ir:
5103 * Translate the .net IL into linear IR.
5106 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5107 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5108 guint inline_offset, gboolean is_virtual_call)
5110 MonoError error;
5111 MonoInst *ins, **sp, **stack_start;
5112 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5113 MonoSimpleBasicBlock *bb = NULL;
5114 MonoMethod *cmethod, *method_definition;
5115 MonoInst **arg_array;
5116 MonoMethodHeader *header;
5117 MonoImage *image;
5118 guint32 token, ins_flag;
5119 MonoClass *klass;
5120 MonoClass *constrained_call = NULL;
5121 unsigned char *ip, *end, *target, *err_pos;
5122 static double r8_0 = 0.0;
5123 MonoMethodSignature *sig;
5124 MonoGenericContext *generic_context = NULL;
5125 MonoGenericContainer *generic_container = NULL;
5126 MonoType **param_types;
5127 int i, n, start_new_bblock, dreg;
5128 int num_calls = 0, inline_costs = 0;
5129 int breakpoint_id = 0;
5130 guint num_args;
5131 MonoBoolean security, pinvoke;
5132 MonoSecurityManager* secman = NULL;
5133 MonoDeclSecurityActions actions;
5134 GSList *class_inits = NULL;
5135 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5136 int context_used;
5137 gboolean init_locals, seq_points, skip_dead_blocks;
5139 /* serialization and xdomain stuff may need access to private fields and methods */
5140 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5141 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5142 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5143 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5144 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5145 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5147 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5149 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5150 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5151 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5152 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5154 image = method->klass->image;
5155 header = mono_method_get_header (method);
5156 generic_container = mono_method_get_generic_container (method);
5157 sig = mono_method_signature (method);
5158 num_args = sig->hasthis + sig->param_count;
5159 ip = (unsigned char*)header->code;
5160 cfg->cil_start = ip;
5161 end = ip + header->code_size;
5162 mono_jit_stats.cil_code_size += header->code_size;
5163 init_locals = header->init_locals;
5165 seq_points = cfg->gen_seq_points && cfg->method == method;
5168 * Methods without init_locals set could cause asserts in various passes
5169 * (#497220).
5171 init_locals = TRUE;
5173 method_definition = method;
5174 while (method_definition->is_inflated) {
5175 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5176 method_definition = imethod->declaring;
5179 /* SkipVerification is not allowed if core-clr is enabled */
5180 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5181 dont_verify = TRUE;
5182 dont_verify_stloc = TRUE;
5185 if (!dont_verify && mini_method_verify (cfg, method_definition))
5186 goto exception_exit;
5188 if (mono_debug_using_mono_debugger ())
5189 cfg->keep_cil_nops = TRUE;
5191 if (sig->is_inflated)
5192 generic_context = mono_method_get_context (method);
5193 else if (generic_container)
5194 generic_context = &generic_container->context;
5195 cfg->generic_context = generic_context;
5197 if (!cfg->generic_sharing_context)
5198 g_assert (!sig->has_type_parameters);
5200 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5201 g_assert (method->is_inflated);
5202 g_assert (mono_method_get_context (method)->method_inst);
5204 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5205 g_assert (sig->generic_param_count);
5207 if (cfg->method == method) {
5208 cfg->real_offset = 0;
5209 } else {
5210 cfg->real_offset = inline_offset;
5213 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5214 cfg->cil_offset_to_bb_len = header->code_size;
5216 cfg->current_method = method;
5218 if (cfg->verbose_level > 2)
5219 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5221 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5222 if (sig->hasthis)
5223 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5224 for (n = 0; n < sig->param_count; ++n)
5225 param_types [n + sig->hasthis] = sig->params [n];
5226 cfg->arg_types = param_types;
5228 dont_inline = g_list_prepend (dont_inline, method);
5229 if (cfg->method == method) {
5231 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5232 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5234 /* ENTRY BLOCK */
5235 NEW_BBLOCK (cfg, start_bblock);
5236 cfg->bb_entry = start_bblock;
5237 start_bblock->cil_code = NULL;
5238 start_bblock->cil_length = 0;
5240 /* EXIT BLOCK */
5241 NEW_BBLOCK (cfg, end_bblock);
5242 cfg->bb_exit = end_bblock;
5243 end_bblock->cil_code = NULL;
5244 end_bblock->cil_length = 0;
5245 g_assert (cfg->num_bblocks == 2);
5247 arg_array = cfg->args;
5249 if (header->num_clauses) {
5250 cfg->spvars = g_hash_table_new (NULL, NULL);
5251 cfg->exvars = g_hash_table_new (NULL, NULL);
5253 /* handle exception clauses */
5254 for (i = 0; i < header->num_clauses; ++i) {
5255 MonoBasicBlock *try_bb;
5256 MonoExceptionClause *clause = &header->clauses [i];
5257 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5258 try_bb->real_offset = clause->try_offset;
5259 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5260 tblock->real_offset = clause->handler_offset;
5261 tblock->flags |= BB_EXCEPTION_HANDLER;
5263 link_bblock (cfg, try_bb, tblock);
5265 if (*(ip + clause->handler_offset) == CEE_POP)
5266 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5268 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5269 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5270 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5271 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5272 MONO_ADD_INS (tblock, ins);
5274 /* todo: is a fault block unsafe to optimize? */
5275 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5276 tblock->flags |= BB_EXCEPTION_UNSAFE;
5280 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5281 while (p < end) {
5282 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5284 /* catch and filter blocks get the exception object on the stack */
5285 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5286 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5287 MonoInst *dummy_use;
5289 /* mostly like handle_stack_args (), but just sets the input args */
5290 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5291 tblock->in_scount = 1;
5292 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5293 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5296 * Add a dummy use for the exvar so its liveness info will be
5297 * correct.
5299 cfg->cbb = tblock;
5300 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5302 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5303 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5304 tblock->flags |= BB_EXCEPTION_HANDLER;
5305 tblock->real_offset = clause->data.filter_offset;
5306 tblock->in_scount = 1;
5307 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5308 /* The filter block shares the exvar with the handler block */
5309 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5310 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5311 MONO_ADD_INS (tblock, ins);
5315 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5316 clause->data.catch_class &&
5317 cfg->generic_sharing_context &&
5318 mono_class_check_context_used (clause->data.catch_class)) {
5320 * In shared generic code with catch
5321 * clauses containing type variables
5322 * the exception handling code has to
5323 * be able to get to the rgctx.
5324 * Therefore we have to make sure that
5325 * the vtable/mrgctx argument (for
5326 * static or generic methods) or the
5327 * "this" argument (for non-static
5328 * methods) are live.
5330 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5331 mini_method_get_context (method)->method_inst ||
5332 method->klass->valuetype) {
5333 mono_get_vtable_var (cfg);
5334 } else {
5335 MonoInst *dummy_use;
5337 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5341 } else {
5342 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5343 cfg->cbb = start_bblock;
5344 cfg->args = arg_array;
5345 mono_save_args (cfg, sig, inline_args);
5348 /* FIRST CODE BLOCK */
5349 NEW_BBLOCK (cfg, bblock);
5350 bblock->cil_code = ip;
5351 cfg->cbb = bblock;
5352 cfg->ip = ip;
5354 ADD_BBLOCK (cfg, bblock);
5356 if (cfg->method == method) {
5357 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5358 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5359 MONO_INST_NEW (cfg, ins, OP_BREAK);
5360 MONO_ADD_INS (bblock, ins);
5364 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5365 secman = mono_security_manager_get_methods ();
5367 security = (secman && mono_method_has_declsec (method));
5368 /* at this point having security doesn't mean we have any code to generate */
5369 if (security && (cfg->method == method)) {
5370 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5371 * And we do not want to enter the next section (with allocation) if we
5372 * have nothing to generate */
5373 security = mono_declsec_get_demands (method, &actions);
5376 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5377 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5378 if (pinvoke) {
5379 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5380 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5381 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5383 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5384 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5385 pinvoke = FALSE;
5387 if (custom)
5388 mono_custom_attrs_free (custom);
5390 if (pinvoke) {
5391 custom = mono_custom_attrs_from_class (wrapped->klass);
5392 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5393 pinvoke = FALSE;
5395 if (custom)
5396 mono_custom_attrs_free (custom);
5398 } else {
5399 /* not a P/Invoke after all */
5400 pinvoke = FALSE;
5404 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5405 /* we use a separate basic block for the initialization code */
5406 NEW_BBLOCK (cfg, init_localsbb);
5407 cfg->bb_init = init_localsbb;
5408 init_localsbb->real_offset = cfg->real_offset;
5409 start_bblock->next_bb = init_localsbb;
5410 init_localsbb->next_bb = bblock;
5411 link_bblock (cfg, start_bblock, init_localsbb);
5412 link_bblock (cfg, init_localsbb, bblock);
5414 cfg->cbb = init_localsbb;
5415 } else {
5416 start_bblock->next_bb = bblock;
5417 link_bblock (cfg, start_bblock, bblock);
5420 /* at this point we know, if security is TRUE, that some code needs to be generated */
5421 if (security && (cfg->method == method)) {
5422 MonoInst *args [2];
5424 mono_jit_stats.cas_demand_generation++;
5426 if (actions.demand.blob) {
5427 /* Add code for SecurityAction.Demand */
5428 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5429 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5430 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5431 mono_emit_method_call (cfg, secman->demand, args, NULL);
5433 if (actions.noncasdemand.blob) {
5434 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5435 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5436 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5437 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5438 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5439 mono_emit_method_call (cfg, secman->demand, args, NULL);
5441 if (actions.demandchoice.blob) {
5442 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5443 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5444 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5445 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5446 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5450 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5451 if (pinvoke) {
5452 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5455 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5456 /* check if this is native code, e.g. an icall or a p/invoke */
5457 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5458 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5459 if (wrapped) {
5460 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5461 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5463 /* if this ia a native call then it can only be JITted from platform code */
5464 if ((icall || pinvk) && method->klass && method->klass->image) {
5465 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5466 MonoException *ex = icall ? mono_get_exception_security () :
5467 mono_get_exception_method_access ();
5468 emit_throw_exception (cfg, ex);
5475 if (header->code_size == 0)
5476 UNVERIFIED;
5478 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5479 ip = err_pos;
5480 UNVERIFIED;
5483 if (cfg->method == method)
5484 mono_debug_init_method (cfg, bblock, breakpoint_id);
5486 for (n = 0; n < header->num_locals; ++n) {
5487 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5488 UNVERIFIED;
5490 class_inits = NULL;
5492 /* We force the vtable variable here for all shared methods
5493 for the possibility that they might show up in a stack
5494 trace where their exact instantiation is needed. */
5495 if (cfg->generic_sharing_context && method == cfg->method) {
5496 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5497 mini_method_get_context (method)->method_inst ||
5498 method->klass->valuetype) {
5499 mono_get_vtable_var (cfg);
5500 } else {
5501 /* FIXME: Is there a better way to do this?
5502 We need the variable live for the duration
5503 of the whole method. */
5504 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5508 /* add a check for this != NULL to inlined methods */
5509 if (is_virtual_call) {
5510 MonoInst *arg_ins;
5512 NEW_ARGLOAD (cfg, arg_ins, 0);
5513 MONO_ADD_INS (cfg->cbb, arg_ins);
5514 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5517 skip_dead_blocks = !dont_verify;
5518 if (skip_dead_blocks) {
5519 bb = mono_basic_block_split (method, &error);
5520 if (!mono_error_ok (&error)) {
5521 mono_error_cleanup (&error);
5522 UNVERIFIED;
5524 g_assert (bb);
5527 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5528 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5530 ins_flag = 0;
5531 start_new_bblock = 0;
5532 cfg->cbb = bblock;
5533 while (ip < end) {
5534 if (cfg->method == method)
5535 cfg->real_offset = ip - header->code;
5536 else
5537 cfg->real_offset = inline_offset;
5538 cfg->ip = ip;
5540 context_used = 0;
5542 if (start_new_bblock) {
5543 bblock->cil_length = ip - bblock->cil_code;
5544 if (start_new_bblock == 2) {
5545 g_assert (ip == tblock->cil_code);
5546 } else {
5547 GET_BBLOCK (cfg, tblock, ip);
5549 bblock->next_bb = tblock;
5550 bblock = tblock;
5551 cfg->cbb = bblock;
5552 start_new_bblock = 0;
5553 for (i = 0; i < bblock->in_scount; ++i) {
5554 if (cfg->verbose_level > 3)
5555 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5556 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5557 *sp++ = ins;
5559 if (class_inits)
5560 g_slist_free (class_inits);
5561 class_inits = NULL;
5562 } else {
5563 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5564 link_bblock (cfg, bblock, tblock);
5565 if (sp != stack_start) {
5566 handle_stack_args (cfg, stack_start, sp - stack_start);
5567 sp = stack_start;
5568 CHECK_UNVERIFIABLE (cfg);
5570 bblock->next_bb = tblock;
5571 bblock = tblock;
5572 cfg->cbb = bblock;
5573 for (i = 0; i < bblock->in_scount; ++i) {
5574 if (cfg->verbose_level > 3)
5575 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5576 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5577 *sp++ = ins;
5579 g_slist_free (class_inits);
5580 class_inits = NULL;
5584 if (skip_dead_blocks) {
5585 int ip_offset = ip - header->code;
5587 if (ip_offset == bb->end)
5588 bb = bb->next;
5590 if (bb->dead) {
5591 int op_size = mono_opcode_size (ip, end);
5592 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5594 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5596 if (ip_offset + op_size == bb->end) {
5597 MONO_INST_NEW (cfg, ins, OP_NOP);
5598 MONO_ADD_INS (bblock, ins);
5599 start_new_bblock = 1;
5602 ip += op_size;
5603 continue;
5607 * Sequence points are points where the debugger can place a breakpoint.
5608 * Currently, we generate these automatically at points where the IL
5609 * stack is empty.
5611 if (seq_points && sp == stack_start) {
5612 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5613 MONO_ADD_INS (cfg->cbb, ins);
5616 bblock->real_offset = cfg->real_offset;
5618 if ((cfg->method == method) && cfg->coverage_info) {
5619 guint32 cil_offset = ip - header->code;
5620 cfg->coverage_info->data [cil_offset].cil_code = ip;
5622 /* TODO: Use an increment here */
5623 #if defined(TARGET_X86)
5624 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5625 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5626 ins->inst_imm = 1;
5627 MONO_ADD_INS (cfg->cbb, ins);
5628 #else
5629 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5630 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5631 #endif
5634 if (cfg->verbose_level > 3)
5635 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5637 switch (*ip) {
5638 case CEE_NOP:
5639 if (cfg->keep_cil_nops)
5640 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5641 else
5642 MONO_INST_NEW (cfg, ins, OP_NOP);
5643 ip++;
5644 MONO_ADD_INS (bblock, ins);
5645 break;
5646 case CEE_BREAK:
5647 if (should_insert_brekpoint (cfg->method))
5648 MONO_INST_NEW (cfg, ins, OP_BREAK);
5649 else
5650 MONO_INST_NEW (cfg, ins, OP_NOP);
5651 ip++;
5652 MONO_ADD_INS (bblock, ins);
5653 break;
5654 case CEE_LDARG_0:
5655 case CEE_LDARG_1:
5656 case CEE_LDARG_2:
5657 case CEE_LDARG_3:
5658 CHECK_STACK_OVF (1);
5659 n = (*ip)-CEE_LDARG_0;
5660 CHECK_ARG (n);
5661 EMIT_NEW_ARGLOAD (cfg, ins, n);
5662 ip++;
5663 *sp++ = ins;
5664 break;
5665 case CEE_LDLOC_0:
5666 case CEE_LDLOC_1:
5667 case CEE_LDLOC_2:
5668 case CEE_LDLOC_3:
5669 CHECK_STACK_OVF (1);
5670 n = (*ip)-CEE_LDLOC_0;
5671 CHECK_LOCAL (n);
5672 EMIT_NEW_LOCLOAD (cfg, ins, n);
5673 ip++;
5674 *sp++ = ins;
5675 break;
5676 case CEE_STLOC_0:
5677 case CEE_STLOC_1:
5678 case CEE_STLOC_2:
5679 case CEE_STLOC_3: {
5680 CHECK_STACK (1);
5681 n = (*ip)-CEE_STLOC_0;
5682 CHECK_LOCAL (n);
5683 --sp;
5684 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5685 UNVERIFIED;
5686 emit_stloc_ir (cfg, sp, header, n);
5687 ++ip;
5688 inline_costs += 1;
5689 break;
5691 case CEE_LDARG_S:
5692 CHECK_OPSIZE (2);
5693 CHECK_STACK_OVF (1);
5694 n = ip [1];
5695 CHECK_ARG (n);
5696 EMIT_NEW_ARGLOAD (cfg, ins, n);
5697 *sp++ = ins;
5698 ip += 2;
5699 break;
5700 case CEE_LDARGA_S:
5701 CHECK_OPSIZE (2);
5702 CHECK_STACK_OVF (1);
5703 n = ip [1];
5704 CHECK_ARG (n);
5705 NEW_ARGLOADA (cfg, ins, n);
5706 MONO_ADD_INS (cfg->cbb, ins);
5707 *sp++ = ins;
5708 ip += 2;
5709 break;
5710 case CEE_STARG_S:
5711 CHECK_OPSIZE (2);
5712 CHECK_STACK (1);
5713 --sp;
5714 n = ip [1];
5715 CHECK_ARG (n);
5716 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5717 UNVERIFIED;
5718 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5719 ip += 2;
5720 break;
5721 case CEE_LDLOC_S:
5722 CHECK_OPSIZE (2);
5723 CHECK_STACK_OVF (1);
5724 n = ip [1];
5725 CHECK_LOCAL (n);
5726 EMIT_NEW_LOCLOAD (cfg, ins, n);
5727 *sp++ = ins;
5728 ip += 2;
5729 break;
5730 case CEE_LDLOCA_S: {
5731 unsigned char *tmp_ip;
5732 CHECK_OPSIZE (2);
5733 CHECK_STACK_OVF (1);
5734 CHECK_LOCAL (ip [1]);
5736 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5737 ip = tmp_ip;
5738 inline_costs += 1;
5739 break;
5742 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5743 *sp++ = ins;
5744 ip += 2;
5745 break;
5747 case CEE_STLOC_S:
5748 CHECK_OPSIZE (2);
5749 CHECK_STACK (1);
5750 --sp;
5751 CHECK_LOCAL (ip [1]);
5752 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5753 UNVERIFIED;
5754 emit_stloc_ir (cfg, sp, header, ip [1]);
5755 ip += 2;
5756 inline_costs += 1;
5757 break;
5758 case CEE_LDNULL:
5759 CHECK_STACK_OVF (1);
5760 EMIT_NEW_PCONST (cfg, ins, NULL);
5761 ins->type = STACK_OBJ;
5762 ++ip;
5763 *sp++ = ins;
5764 break;
5765 case CEE_LDC_I4_M1:
5766 CHECK_STACK_OVF (1);
5767 EMIT_NEW_ICONST (cfg, ins, -1);
5768 ++ip;
5769 *sp++ = ins;
5770 break;
5771 case CEE_LDC_I4_0:
5772 case CEE_LDC_I4_1:
5773 case CEE_LDC_I4_2:
5774 case CEE_LDC_I4_3:
5775 case CEE_LDC_I4_4:
5776 case CEE_LDC_I4_5:
5777 case CEE_LDC_I4_6:
5778 case CEE_LDC_I4_7:
5779 case CEE_LDC_I4_8:
5780 CHECK_STACK_OVF (1);
5781 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5782 ++ip;
5783 *sp++ = ins;
5784 break;
5785 case CEE_LDC_I4_S:
5786 CHECK_OPSIZE (2);
5787 CHECK_STACK_OVF (1);
5788 ++ip;
5789 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5790 ++ip;
5791 *sp++ = ins;
5792 break;
5793 case CEE_LDC_I4:
5794 CHECK_OPSIZE (5);
5795 CHECK_STACK_OVF (1);
5796 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5797 ip += 5;
5798 *sp++ = ins;
5799 break;
5800 case CEE_LDC_I8:
5801 CHECK_OPSIZE (9);
5802 CHECK_STACK_OVF (1);
5803 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5804 ins->type = STACK_I8;
5805 ins->dreg = alloc_dreg (cfg, STACK_I8);
5806 ++ip;
5807 ins->inst_l = (gint64)read64 (ip);
5808 MONO_ADD_INS (bblock, ins);
5809 ip += 8;
5810 *sp++ = ins;
5811 break;
5812 case CEE_LDC_R4: {
5813 float *f;
5814 gboolean use_aotconst = FALSE;
5816 #ifdef TARGET_POWERPC
5817 /* FIXME: Clean this up */
5818 if (cfg->compile_aot)
5819 use_aotconst = TRUE;
5820 #endif
5822 /* FIXME: we should really allocate this only late in the compilation process */
5823 f = mono_domain_alloc (cfg->domain, sizeof (float));
5824 CHECK_OPSIZE (5);
5825 CHECK_STACK_OVF (1);
5827 if (use_aotconst) {
5828 MonoInst *cons;
5829 int dreg;
5831 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5833 dreg = alloc_freg (cfg);
5834 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5835 ins->type = STACK_R8;
5836 } else {
5837 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5838 ins->type = STACK_R8;
5839 ins->dreg = alloc_dreg (cfg, STACK_R8);
5840 ins->inst_p0 = f;
5841 MONO_ADD_INS (bblock, ins);
5843 ++ip;
5844 readr4 (ip, f);
5845 ip += 4;
5846 *sp++ = ins;
5847 break;
5849 case CEE_LDC_R8: {
5850 double *d;
5851 gboolean use_aotconst = FALSE;
5853 #ifdef TARGET_POWERPC
5854 /* FIXME: Clean this up */
5855 if (cfg->compile_aot)
5856 use_aotconst = TRUE;
5857 #endif
5859 /* FIXME: we should really allocate this only late in the compilation process */
5860 d = mono_domain_alloc (cfg->domain, sizeof (double));
5861 CHECK_OPSIZE (9);
5862 CHECK_STACK_OVF (1);
5864 if (use_aotconst) {
5865 MonoInst *cons;
5866 int dreg;
5868 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5870 dreg = alloc_freg (cfg);
5871 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5872 ins->type = STACK_R8;
5873 } else {
5874 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5875 ins->type = STACK_R8;
5876 ins->dreg = alloc_dreg (cfg, STACK_R8);
5877 ins->inst_p0 = d;
5878 MONO_ADD_INS (bblock, ins);
5880 ++ip;
5881 readr8 (ip, d);
5882 ip += 8;
5883 *sp++ = ins;
5884 break;
5886 case CEE_DUP: {
5887 MonoInst *temp, *store;
5888 CHECK_STACK (1);
5889 CHECK_STACK_OVF (1);
5890 sp--;
5891 ins = *sp;
5893 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5894 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5896 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5897 *sp++ = ins;
5899 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5900 *sp++ = ins;
5902 ++ip;
5903 inline_costs += 2;
5904 break;
5906 case CEE_POP:
5907 CHECK_STACK (1);
5908 ip++;
5909 --sp;
5911 #ifdef TARGET_X86
5912 if (sp [0]->type == STACK_R8)
5913 /* we need to pop the value from the x86 FP stack */
5914 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5915 #endif
5916 break;
5917 case CEE_JMP: {
5918 MonoCallInst *call;
5920 INLINE_FAILURE;
5922 CHECK_OPSIZE (5);
5923 if (stack_start != sp)
5924 UNVERIFIED;
5925 token = read32 (ip + 1);
5926 /* FIXME: check the signature matches */
5927 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5929 if (!cmethod)
5930 goto load_error;
5932 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5933 GENERIC_SHARING_FAILURE (CEE_JMP);
5935 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5936 CHECK_CFG_EXCEPTION;
5938 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5940 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5941 int i, n;
5943 /* Handle tail calls similarly to calls */
5944 n = fsig->param_count + fsig->hasthis;
5946 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5947 call->method = cmethod;
5948 call->tail_call = TRUE;
5949 call->signature = mono_method_signature (cmethod);
5950 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5951 call->inst.inst_p0 = cmethod;
5952 for (i = 0; i < n; ++i)
5953 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5955 mono_arch_emit_call (cfg, call);
5956 MONO_ADD_INS (bblock, (MonoInst*)call);
5958 #else
5959 for (i = 0; i < num_args; ++i)
5960 /* Prevent arguments from being optimized away */
5961 arg_array [i]->flags |= MONO_INST_VOLATILE;
5963 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5964 ins = (MonoInst*)call;
5965 ins->inst_p0 = cmethod;
5966 MONO_ADD_INS (bblock, ins);
5967 #endif
5969 ip += 5;
5970 start_new_bblock = 1;
5971 break;
5973 case CEE_CALLI:
5974 case CEE_CALL:
5975 case CEE_CALLVIRT: {
5976 MonoInst *addr = NULL;
5977 MonoMethodSignature *fsig = NULL;
5978 int array_rank = 0;
5979 int virtual = *ip == CEE_CALLVIRT;
5980 int calli = *ip == CEE_CALLI;
5981 gboolean pass_imt_from_rgctx = FALSE;
5982 MonoInst *imt_arg = NULL;
5983 gboolean pass_vtable = FALSE;
5984 gboolean pass_mrgctx = FALSE;
5985 MonoInst *vtable_arg = NULL;
5986 gboolean check_this = FALSE;
5987 gboolean supported_tail_call = FALSE;
5989 CHECK_OPSIZE (5);
5990 token = read32 (ip + 1);
5992 if (calli) {
5993 cmethod = NULL;
5994 CHECK_STACK (1);
5995 --sp;
5996 addr = *sp;
5997 if (method->wrapper_type != MONO_WRAPPER_NONE)
5998 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5999 else
6000 fsig = mono_metadata_parse_signature (image, token);
6002 n = fsig->param_count + fsig->hasthis;
6004 if (method->dynamic && fsig->pinvoke) {
6005 MonoInst *args [3];
6008 * This is a call through a function pointer using a pinvoke
6009 * signature. Have to create a wrapper and call that instead.
6010 * FIXME: This is very slow, need to create a wrapper at JIT time
6011 * instead based on the signature.
6013 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6014 EMIT_NEW_PCONST (cfg, args [1], fsig);
6015 args [2] = addr;
6016 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6018 } else {
6019 MonoMethod *cil_method;
6021 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6022 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6023 cil_method = cmethod;
6024 } else if (constrained_call) {
6025 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6027 * This is needed since get_method_constrained can't find
6028 * the method in klass representing a type var.
6029 * The type var is guaranteed to be a reference type in this
6030 * case.
6032 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6033 cil_method = cmethod;
6034 g_assert (!cmethod->klass->valuetype);
6035 } else {
6036 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6038 } else {
6039 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6040 cil_method = cmethod;
6043 if (!cmethod)
6044 goto load_error;
6045 if (!dont_verify && !cfg->skip_visibility) {
6046 MonoMethod *target_method = cil_method;
6047 if (method->is_inflated) {
6048 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6050 if (!mono_method_can_access_method (method_definition, target_method) &&
6051 !mono_method_can_access_method (method, cil_method))
6052 METHOD_ACCESS_FAILURE;
6055 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6056 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6058 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6059 /* MS.NET seems to silently convert this to a callvirt */
6060 virtual = 1;
6062 if (!cmethod->klass->inited)
6063 if (!mono_class_init (cmethod->klass))
6064 goto load_error;
6066 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6067 mini_class_is_system_array (cmethod->klass)) {
6068 array_rank = cmethod->klass->rank;
6069 fsig = mono_method_signature (cmethod);
6070 } else {
6071 if (mono_method_signature (cmethod)->pinvoke) {
6072 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6073 check_for_pending_exc, FALSE);
6074 fsig = mono_method_signature (wrapper);
6075 } else if (constrained_call) {
6076 fsig = mono_method_signature (cmethod);
6077 } else {
6078 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6082 mono_save_token_info (cfg, image, token, cil_method);
6084 n = fsig->param_count + fsig->hasthis;
6086 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6087 if (check_linkdemand (cfg, method, cmethod))
6088 INLINE_FAILURE;
6089 CHECK_CFG_EXCEPTION;
6092 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6093 g_assert_not_reached ();
6096 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6097 UNVERIFIED;
6099 if (!cfg->generic_sharing_context && cmethod)
6100 g_assert (!mono_method_check_context_used (cmethod));
6102 CHECK_STACK (n);
6104 //g_assert (!virtual || fsig->hasthis);
6106 sp -= n;
6108 if (constrained_call) {
6110 * We have the `constrained.' prefix opcode.
6112 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6114 * The type parameter is instantiated as a valuetype,
6115 * but that type doesn't override the method we're
6116 * calling, so we need to box `this'.
6118 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6119 ins->klass = constrained_call;
6120 sp [0] = handle_box (cfg, ins, constrained_call);
6121 CHECK_CFG_EXCEPTION;
6122 } else if (!constrained_call->valuetype) {
6123 int dreg = alloc_preg (cfg);
6126 * The type parameter is instantiated as a reference
6127 * type. We have a managed pointer on the stack, so
6128 * we need to dereference it here.
6130 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6131 ins->type = STACK_OBJ;
6132 sp [0] = ins;
6133 } else if (cmethod->klass->valuetype)
6134 virtual = 0;
6135 constrained_call = NULL;
6138 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6139 UNVERIFIED;
6142 * If the callee is a shared method, then its static cctor
6143 * might not get called after the call was patched.
6145 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6146 emit_generic_class_init (cfg, cmethod->klass);
6147 CHECK_TYPELOAD (cmethod->klass);
6150 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6151 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6152 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6153 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6154 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6157 * Pass vtable iff target method might
6158 * be shared, which means that sharing
6159 * is enabled for its class and its
6160 * context is sharable (and it's not a
6161 * generic method).
6163 if (sharing_enabled && context_sharable &&
6164 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6165 pass_vtable = TRUE;
6168 if (cmethod && mini_method_get_context (cmethod) &&
6169 mini_method_get_context (cmethod)->method_inst) {
6170 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6171 MonoGenericContext *context = mini_method_get_context (cmethod);
6172 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6174 g_assert (!pass_vtable);
6176 if (sharing_enabled && context_sharable)
6177 pass_mrgctx = TRUE;
6180 if (cfg->generic_sharing_context && cmethod) {
6181 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6183 context_used = mono_method_check_context_used (cmethod);
6185 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6186 /* Generic method interface
6187 calls are resolved via a
6188 helper function and don't
6189 need an imt. */
6190 if (!cmethod_context || !cmethod_context->method_inst)
6191 pass_imt_from_rgctx = TRUE;
6195 * If a shared method calls another
6196 * shared method then the caller must
6197 * have a generic sharing context
6198 * because the magic trampoline
6199 * requires it. FIXME: We shouldn't
6200 * have to force the vtable/mrgctx
6201 * variable here. Instead there
6202 * should be a flag in the cfg to
6203 * request a generic sharing context.
6205 if (context_used &&
6206 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6207 mono_get_vtable_var (cfg);
6210 if (pass_vtable) {
6211 if (context_used) {
6212 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6213 } else {
6214 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6216 CHECK_TYPELOAD (cmethod->klass);
6217 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6221 if (pass_mrgctx) {
6222 g_assert (!vtable_arg);
6224 if (!cfg->compile_aot) {
6226 * emit_get_rgctx_method () calls mono_class_vtable () so check
6227 * for type load errors before.
6229 mono_class_vtable (cfg->domain, cmethod->klass);
6230 CHECK_TYPELOAD (cmethod->klass);
6233 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6235 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6236 MONO_METHOD_IS_FINAL (cmethod)) {
6237 if (virtual)
6238 check_this = TRUE;
6239 virtual = 0;
6243 if (pass_imt_from_rgctx) {
6244 g_assert (!pass_vtable);
6245 g_assert (cmethod);
6247 imt_arg = emit_get_rgctx_method (cfg, context_used,
6248 cmethod, MONO_RGCTX_INFO_METHOD);
6251 if (check_this)
6252 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6254 /* Calling virtual generic methods */
6255 if (cmethod && virtual &&
6256 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6257 !(MONO_METHOD_IS_FINAL (cmethod) &&
6258 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6259 mono_method_signature (cmethod)->generic_param_count) {
6260 MonoInst *this_temp, *this_arg_temp, *store;
6261 MonoInst *iargs [4];
6263 g_assert (mono_method_signature (cmethod)->is_inflated);
6265 /* Prevent inlining of methods that contain indirect calls */
6266 INLINE_FAILURE;
6268 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6269 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6270 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6271 g_assert (!imt_arg);
6272 if (!context_used)
6273 g_assert (cmethod->is_inflated);
6274 imt_arg = emit_get_rgctx_method (cfg, context_used,
6275 cmethod, MONO_RGCTX_INFO_METHOD);
6276 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6277 } else
6278 #endif
6280 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6281 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6282 MONO_ADD_INS (bblock, store);
6284 /* FIXME: This should be a managed pointer */
6285 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6287 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6288 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6289 cmethod, MONO_RGCTX_INFO_METHOD);
6290 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6291 addr = mono_emit_jit_icall (cfg,
6292 mono_helper_compile_generic_method, iargs);
6294 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6296 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6299 if (!MONO_TYPE_IS_VOID (fsig->ret))
6300 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6302 ip += 5;
6303 ins_flag = 0;
6304 break;
6307 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6308 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6309 #else
6310 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6311 #endif
6313 /* Tail prefix */
6314 /* FIXME: runtime generic context pointer for jumps? */
6315 /* FIXME: handle this for generic sharing eventually */
6316 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6317 MonoCallInst *call;
6319 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6320 INLINE_FAILURE;
6322 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6323 /* Handle tail calls similarly to calls */
6324 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6325 #else
6326 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6327 call->tail_call = TRUE;
6328 call->method = cmethod;
6329 call->signature = mono_method_signature (cmethod);
6332 * We implement tail calls by storing the actual arguments into the
6333 * argument variables, then emitting a CEE_JMP.
6335 for (i = 0; i < n; ++i) {
6336 /* Prevent argument from being register allocated */
6337 arg_array [i]->flags |= MONO_INST_VOLATILE;
6338 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6340 #endif
6342 ins = (MonoInst*)call;
6343 ins->inst_p0 = cmethod;
6344 ins->inst_p1 = arg_array [0];
6345 MONO_ADD_INS (bblock, ins);
6346 link_bblock (cfg, bblock, end_bblock);
6347 start_new_bblock = 1;
6348 /* skip CEE_RET as well */
6349 ip += 6;
6350 ins_flag = 0;
6351 break;
6354 /* Conversion to a JIT intrinsic */
6355 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6356 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6357 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6358 *sp = ins;
6359 sp++;
6362 ip += 5;
6363 ins_flag = 0;
6364 break;
6367 /* Inlining */
6368 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6369 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6370 mono_method_check_inlining (cfg, cmethod) &&
6371 !g_list_find (dont_inline, cmethod)) {
6372 int costs;
6373 gboolean allways = FALSE;
6375 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6376 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6377 /* Prevent inlining of methods that call wrappers */
6378 INLINE_FAILURE;
6379 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6380 allways = TRUE;
6383 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6384 ip += 5;
6385 cfg->real_offset += 5;
6386 bblock = cfg->cbb;
6388 if (!MONO_TYPE_IS_VOID (fsig->ret))
6389 /* *sp is already set by inline_method */
6390 sp++;
6392 inline_costs += costs;
6393 ins_flag = 0;
6394 break;
6398 inline_costs += 10 * num_calls++;
6400 /* Tail recursion elimination */
6401 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6402 gboolean has_vtargs = FALSE;
6403 int i;
6405 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6406 INLINE_FAILURE;
6408 /* keep it simple */
6409 for (i = fsig->param_count - 1; i >= 0; i--) {
6410 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6411 has_vtargs = TRUE;
6414 if (!has_vtargs) {
6415 for (i = 0; i < n; ++i)
6416 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6417 MONO_INST_NEW (cfg, ins, OP_BR);
6418 MONO_ADD_INS (bblock, ins);
6419 tblock = start_bblock->out_bb [0];
6420 link_bblock (cfg, bblock, tblock);
6421 ins->inst_target_bb = tblock;
6422 start_new_bblock = 1;
6424 /* skip the CEE_RET, too */
6425 if (ip_in_bb (cfg, bblock, ip + 5))
6426 ip += 6;
6427 else
6428 ip += 5;
6430 ins_flag = 0;
6431 break;
6435 /* Generic sharing */
6436 /* FIXME: only do this for generic methods if
6437 they are not shared! */
6438 if (context_used && !imt_arg && !array_rank &&
6439 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6440 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6441 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6442 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6443 INLINE_FAILURE;
6445 g_assert (cfg->generic_sharing_context && cmethod);
6446 g_assert (!addr);
6449 * We are compiling a call to a
6450 * generic method from shared code,
6451 * which means that we have to look up
6452 * the method in the rgctx and do an
6453 * indirect call.
6455 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6458 /* Indirect calls */
6459 if (addr) {
6460 g_assert (!imt_arg);
6462 if (*ip == CEE_CALL)
6463 g_assert (context_used);
6464 else if (*ip == CEE_CALLI)
6465 g_assert (!vtable_arg);
6466 else
6467 /* FIXME: what the hell is this??? */
6468 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6469 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6471 /* Prevent inlining of methods with indirect calls */
6472 INLINE_FAILURE;
6474 if (vtable_arg) {
6475 #ifdef MONO_ARCH_RGCTX_REG
6476 MonoCallInst *call;
6477 int rgctx_reg = mono_alloc_preg (cfg);
6479 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6480 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6481 call = (MonoCallInst*)ins;
6482 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6483 cfg->uses_rgctx_reg = TRUE;
6484 call->rgctx_reg = TRUE;
6485 #else
6486 NOT_IMPLEMENTED;
6487 #endif
6488 } else {
6489 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6491 * Instead of emitting an indirect call, emit a direct call
6492 * with the contents of the aotconst as the patch info.
6494 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6495 NULLIFY_INS (addr);
6496 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6497 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6498 NULLIFY_INS (addr);
6499 } else {
6500 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6503 if (!MONO_TYPE_IS_VOID (fsig->ret))
6504 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6506 ip += 5;
6507 ins_flag = 0;
6508 break;
6511 /* Array methods */
6512 if (array_rank) {
6513 MonoInst *addr;
6515 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6516 if (sp [fsig->param_count]->type == STACK_OBJ) {
6517 MonoInst *iargs [2];
6519 iargs [0] = sp [0];
6520 iargs [1] = sp [fsig->param_count];
6522 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6525 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6526 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6527 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6528 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6530 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6532 *sp++ = ins;
6533 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6534 if (!cmethod->klass->element_class->valuetype && !readonly)
6535 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6536 CHECK_TYPELOAD (cmethod->klass);
6538 readonly = FALSE;
6539 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6540 *sp++ = addr;
6541 } else {
6542 g_assert_not_reached ();
6545 ip += 5;
6546 ins_flag = 0;
6547 break;
6550 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6551 if (ins) {
6552 if (!MONO_TYPE_IS_VOID (fsig->ret))
6553 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6555 ip += 5;
6556 ins_flag = 0;
6557 break;
6560 /* Common call */
6561 INLINE_FAILURE;
6562 if (vtable_arg) {
6563 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6564 NULL, vtable_arg);
6565 } else if (imt_arg) {
6566 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6567 } else {
6568 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6571 if (!MONO_TYPE_IS_VOID (fsig->ret))
6572 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6574 ip += 5;
6575 ins_flag = 0;
6576 break;
6578 case CEE_RET:
6579 if (cfg->method != method) {
6580 /* return from inlined method */
6582 * If in_count == 0, that means the ret is unreachable due to
6583 * being preceeded by a throw. In that case, inline_method () will
6584 * handle setting the return value
6585 * (test case: test_0_inline_throw ()).
6587 if (return_var && cfg->cbb->in_count) {
6588 MonoInst *store;
6589 CHECK_STACK (1);
6590 --sp;
6591 //g_assert (returnvar != -1);
6592 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6593 cfg->ret_var_set = TRUE;
6595 } else {
6596 if (cfg->ret) {
6597 MonoType *ret_type = mono_method_signature (method)->ret;
6599 if (seq_points) {
6601 * Place a seq point here too even through the IL stack is not
6602 * empty, so a step over on
6603 * call <FOO>
6604 * ret
6605 * will work correctly.
6607 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6608 MONO_ADD_INS (cfg->cbb, ins);
6611 g_assert (!return_var);
6612 CHECK_STACK (1);
6613 --sp;
6614 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6615 MonoInst *ret_addr;
6617 if (!cfg->vret_addr) {
6618 MonoInst *ins;
6620 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6621 } else {
6622 EMIT_NEW_RETLOADA (cfg, ret_addr);
6624 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6625 ins->klass = mono_class_from_mono_type (ret_type);
6627 } else {
6628 #ifdef MONO_ARCH_SOFT_FLOAT
6629 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6630 MonoInst *iargs [1];
6631 MonoInst *conv;
6633 iargs [0] = *sp;
6634 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6635 mono_arch_emit_setret (cfg, method, conv);
6636 } else {
6637 mono_arch_emit_setret (cfg, method, *sp);
6639 #else
6640 mono_arch_emit_setret (cfg, method, *sp);
6641 #endif
6645 if (sp != stack_start)
6646 UNVERIFIED;
6647 MONO_INST_NEW (cfg, ins, OP_BR);
6648 ip++;
6649 ins->inst_target_bb = end_bblock;
6650 MONO_ADD_INS (bblock, ins);
6651 link_bblock (cfg, bblock, end_bblock);
6652 start_new_bblock = 1;
6653 break;
6654 case CEE_BR_S:
6655 CHECK_OPSIZE (2);
6656 MONO_INST_NEW (cfg, ins, OP_BR);
6657 ip++;
6658 target = ip + 1 + (signed char)(*ip);
6659 ++ip;
6660 GET_BBLOCK (cfg, tblock, target);
6661 link_bblock (cfg, bblock, tblock);
6662 ins->inst_target_bb = tblock;
6663 if (sp != stack_start) {
6664 handle_stack_args (cfg, stack_start, sp - stack_start);
6665 sp = stack_start;
6666 CHECK_UNVERIFIABLE (cfg);
6668 MONO_ADD_INS (bblock, ins);
6669 start_new_bblock = 1;
6670 inline_costs += BRANCH_COST;
6671 break;
6672 case CEE_BEQ_S:
6673 case CEE_BGE_S:
6674 case CEE_BGT_S:
6675 case CEE_BLE_S:
6676 case CEE_BLT_S:
6677 case CEE_BNE_UN_S:
6678 case CEE_BGE_UN_S:
6679 case CEE_BGT_UN_S:
6680 case CEE_BLE_UN_S:
6681 case CEE_BLT_UN_S:
6682 CHECK_OPSIZE (2);
6683 CHECK_STACK (2);
6684 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6685 ip++;
6686 target = ip + 1 + *(signed char*)ip;
6687 ip++;
6689 ADD_BINCOND (NULL);
6691 sp = stack_start;
6692 inline_costs += BRANCH_COST;
6693 break;
6694 case CEE_BR:
6695 CHECK_OPSIZE (5);
6696 MONO_INST_NEW (cfg, ins, OP_BR);
6697 ip++;
6699 target = ip + 4 + (gint32)read32(ip);
6700 ip += 4;
6701 GET_BBLOCK (cfg, tblock, target);
6702 link_bblock (cfg, bblock, tblock);
6703 ins->inst_target_bb = tblock;
6704 if (sp != stack_start) {
6705 handle_stack_args (cfg, stack_start, sp - stack_start);
6706 sp = stack_start;
6707 CHECK_UNVERIFIABLE (cfg);
6710 MONO_ADD_INS (bblock, ins);
6712 start_new_bblock = 1;
6713 inline_costs += BRANCH_COST;
6714 break;
6715 case CEE_BRFALSE_S:
6716 case CEE_BRTRUE_S:
6717 case CEE_BRFALSE:
6718 case CEE_BRTRUE: {
6719 MonoInst *cmp;
6720 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6721 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6722 guint32 opsize = is_short ? 1 : 4;
6724 CHECK_OPSIZE (opsize);
6725 CHECK_STACK (1);
6726 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6727 UNVERIFIED;
6728 ip ++;
6729 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6730 ip += opsize;
6732 sp--;
6734 GET_BBLOCK (cfg, tblock, target);
6735 link_bblock (cfg, bblock, tblock);
6736 GET_BBLOCK (cfg, tblock, ip);
6737 link_bblock (cfg, bblock, tblock);
6739 if (sp != stack_start) {
6740 handle_stack_args (cfg, stack_start, sp - stack_start);
6741 CHECK_UNVERIFIABLE (cfg);
6744 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6745 cmp->sreg1 = sp [0]->dreg;
6746 type_from_op (cmp, sp [0], NULL);
6747 CHECK_TYPE (cmp);
6749 #if SIZEOF_REGISTER == 4
6750 if (cmp->opcode == OP_LCOMPARE_IMM) {
6751 /* Convert it to OP_LCOMPARE */
6752 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6753 ins->type = STACK_I8;
6754 ins->dreg = alloc_dreg (cfg, STACK_I8);
6755 ins->inst_l = 0;
6756 MONO_ADD_INS (bblock, ins);
6757 cmp->opcode = OP_LCOMPARE;
6758 cmp->sreg2 = ins->dreg;
6760 #endif
6761 MONO_ADD_INS (bblock, cmp);
6763 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6764 type_from_op (ins, sp [0], NULL);
6765 MONO_ADD_INS (bblock, ins);
6766 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6767 GET_BBLOCK (cfg, tblock, target);
6768 ins->inst_true_bb = tblock;
6769 GET_BBLOCK (cfg, tblock, ip);
6770 ins->inst_false_bb = tblock;
6771 start_new_bblock = 2;
6773 sp = stack_start;
6774 inline_costs += BRANCH_COST;
6775 break;
6777 case CEE_BEQ:
6778 case CEE_BGE:
6779 case CEE_BGT:
6780 case CEE_BLE:
6781 case CEE_BLT:
6782 case CEE_BNE_UN:
6783 case CEE_BGE_UN:
6784 case CEE_BGT_UN:
6785 case CEE_BLE_UN:
6786 case CEE_BLT_UN:
6787 CHECK_OPSIZE (5);
6788 CHECK_STACK (2);
6789 MONO_INST_NEW (cfg, ins, *ip);
6790 ip++;
6791 target = ip + 4 + (gint32)read32(ip);
6792 ip += 4;
6794 ADD_BINCOND (NULL);
6796 sp = stack_start;
6797 inline_costs += BRANCH_COST;
6798 break;
6799 case CEE_SWITCH: {
6800 MonoInst *src1;
6801 MonoBasicBlock **targets;
6802 MonoBasicBlock *default_bblock;
6803 MonoJumpInfoBBTable *table;
6804 int offset_reg = alloc_preg (cfg);
6805 int target_reg = alloc_preg (cfg);
6806 int table_reg = alloc_preg (cfg);
6807 int sum_reg = alloc_preg (cfg);
6808 gboolean use_op_switch;
6810 CHECK_OPSIZE (5);
6811 CHECK_STACK (1);
6812 n = read32 (ip + 1);
6813 --sp;
6814 src1 = sp [0];
6815 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6816 UNVERIFIED;
6818 ip += 5;
6819 CHECK_OPSIZE (n * sizeof (guint32));
6820 target = ip + n * sizeof (guint32);
6822 GET_BBLOCK (cfg, default_bblock, target);
6824 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6825 for (i = 0; i < n; ++i) {
6826 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6827 targets [i] = tblock;
6828 ip += 4;
6831 if (sp != stack_start) {
6833 * Link the current bb with the targets as well, so handle_stack_args
6834 * will set their in_stack correctly.
6836 link_bblock (cfg, bblock, default_bblock);
6837 for (i = 0; i < n; ++i)
6838 link_bblock (cfg, bblock, targets [i]);
6840 handle_stack_args (cfg, stack_start, sp - stack_start);
6841 sp = stack_start;
6842 CHECK_UNVERIFIABLE (cfg);
6845 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6846 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6847 bblock = cfg->cbb;
6849 for (i = 0; i < n; ++i)
6850 link_bblock (cfg, bblock, targets [i]);
6852 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6853 table->table = targets;
6854 table->table_size = n;
6856 use_op_switch = FALSE;
6857 #ifdef TARGET_ARM
6858 /* ARM implements SWITCH statements differently */
6859 /* FIXME: Make it use the generic implementation */
6860 if (!cfg->compile_aot)
6861 use_op_switch = TRUE;
6862 #endif
6864 if (COMPILE_LLVM (cfg))
6865 use_op_switch = TRUE;
6867 cfg->cbb->has_jump_table = 1;
6869 if (use_op_switch) {
6870 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6871 ins->sreg1 = src1->dreg;
6872 ins->inst_p0 = table;
6873 ins->inst_many_bb = targets;
6874 ins->klass = GUINT_TO_POINTER (n);
6875 MONO_ADD_INS (cfg->cbb, ins);
6876 } else {
6877 if (sizeof (gpointer) == 8)
6878 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6879 else
6880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6882 #if SIZEOF_REGISTER == 8
6883 /* The upper word might not be zero, and we add it to a 64 bit address later */
6884 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6885 #endif
6887 if (cfg->compile_aot) {
6888 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6889 } else {
6890 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6891 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6892 ins->inst_p0 = table;
6893 ins->dreg = table_reg;
6894 MONO_ADD_INS (cfg->cbb, ins);
6897 /* FIXME: Use load_memindex */
6898 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6899 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6900 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6902 start_new_bblock = 1;
6903 inline_costs += (BRANCH_COST * 2);
6904 break;
6906 case CEE_LDIND_I1:
6907 case CEE_LDIND_U1:
6908 case CEE_LDIND_I2:
6909 case CEE_LDIND_U2:
6910 case CEE_LDIND_I4:
6911 case CEE_LDIND_U4:
6912 case CEE_LDIND_I8:
6913 case CEE_LDIND_I:
6914 case CEE_LDIND_R4:
6915 case CEE_LDIND_R8:
6916 case CEE_LDIND_REF:
6917 CHECK_STACK (1);
6918 --sp;
6920 switch (*ip) {
6921 case CEE_LDIND_R4:
6922 case CEE_LDIND_R8:
6923 dreg = alloc_freg (cfg);
6924 break;
6925 case CEE_LDIND_I8:
6926 dreg = alloc_lreg (cfg);
6927 break;
6928 default:
6929 dreg = alloc_preg (cfg);
6932 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6933 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6934 ins->flags |= ins_flag;
6935 ins_flag = 0;
6936 MONO_ADD_INS (bblock, ins);
6937 *sp++ = ins;
6938 ++ip;
6939 break;
6940 case CEE_STIND_REF:
6941 case CEE_STIND_I1:
6942 case CEE_STIND_I2:
6943 case CEE_STIND_I4:
6944 case CEE_STIND_I8:
6945 case CEE_STIND_R4:
6946 case CEE_STIND_R8:
6947 case CEE_STIND_I:
6948 CHECK_STACK (2);
6949 sp -= 2;
6951 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6952 ins->flags |= ins_flag;
6953 ins_flag = 0;
6954 MONO_ADD_INS (bblock, ins);
6956 #if HAVE_WRITE_BARRIERS
6957 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6958 /* insert call to write barrier */
6959 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6960 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6962 #endif
6964 inline_costs += 1;
6965 ++ip;
6966 break;
6968 case CEE_MUL:
6969 CHECK_STACK (2);
6971 MONO_INST_NEW (cfg, ins, (*ip));
6972 sp -= 2;
6973 ins->sreg1 = sp [0]->dreg;
6974 ins->sreg2 = sp [1]->dreg;
6975 type_from_op (ins, sp [0], sp [1]);
6976 CHECK_TYPE (ins);
6977 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6979 /* Use the immediate opcodes if possible */
6980 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6981 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6982 if (imm_opcode != -1) {
6983 ins->opcode = imm_opcode;
6984 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6985 ins->sreg2 = -1;
6987 sp [1]->opcode = OP_NOP;
6991 MONO_ADD_INS ((cfg)->cbb, (ins));
6993 *sp++ = mono_decompose_opcode (cfg, ins);
6994 ip++;
6995 break;
6996 case CEE_ADD:
6997 case CEE_SUB:
6998 case CEE_DIV:
6999 case CEE_DIV_UN:
7000 case CEE_REM:
7001 case CEE_REM_UN:
7002 case CEE_AND:
7003 case CEE_OR:
7004 case CEE_XOR:
7005 case CEE_SHL:
7006 case CEE_SHR:
7007 case CEE_SHR_UN:
7008 CHECK_STACK (2);
7010 MONO_INST_NEW (cfg, ins, (*ip));
7011 sp -= 2;
7012 ins->sreg1 = sp [0]->dreg;
7013 ins->sreg2 = sp [1]->dreg;
7014 type_from_op (ins, sp [0], sp [1]);
7015 CHECK_TYPE (ins);
7016 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7017 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7019 /* FIXME: Pass opcode to is_inst_imm */
7021 /* Use the immediate opcodes if possible */
7022 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7023 int imm_opcode;
7025 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7026 if (imm_opcode != -1) {
7027 ins->opcode = imm_opcode;
7028 if (sp [1]->opcode == OP_I8CONST) {
7029 #if SIZEOF_REGISTER == 8
7030 ins->inst_imm = sp [1]->inst_l;
7031 #else
7032 ins->inst_ls_word = sp [1]->inst_ls_word;
7033 ins->inst_ms_word = sp [1]->inst_ms_word;
7034 #endif
7036 else
7037 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7038 ins->sreg2 = -1;
7040 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7041 if (sp [1]->next == NULL)
7042 sp [1]->opcode = OP_NOP;
7045 MONO_ADD_INS ((cfg)->cbb, (ins));
7047 *sp++ = mono_decompose_opcode (cfg, ins);
7048 ip++;
7049 break;
7050 case CEE_NEG:
7051 case CEE_NOT:
7052 case CEE_CONV_I1:
7053 case CEE_CONV_I2:
7054 case CEE_CONV_I4:
7055 case CEE_CONV_R4:
7056 case CEE_CONV_R8:
7057 case CEE_CONV_U4:
7058 case CEE_CONV_I8:
7059 case CEE_CONV_U8:
7060 case CEE_CONV_OVF_I8:
7061 case CEE_CONV_OVF_U8:
7062 case CEE_CONV_R_UN:
7063 CHECK_STACK (1);
7065 /* Special case this earlier so we have long constants in the IR */
7066 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7067 int data = sp [-1]->inst_c0;
7068 sp [-1]->opcode = OP_I8CONST;
7069 sp [-1]->type = STACK_I8;
7070 #if SIZEOF_REGISTER == 8
7071 if ((*ip) == CEE_CONV_U8)
7072 sp [-1]->inst_c0 = (guint32)data;
7073 else
7074 sp [-1]->inst_c0 = data;
7075 #else
7076 sp [-1]->inst_ls_word = data;
7077 if ((*ip) == CEE_CONV_U8)
7078 sp [-1]->inst_ms_word = 0;
7079 else
7080 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7081 #endif
7082 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7084 else {
7085 ADD_UNOP (*ip);
7087 ip++;
7088 break;
7089 case CEE_CONV_OVF_I4:
7090 case CEE_CONV_OVF_I1:
7091 case CEE_CONV_OVF_I2:
7092 case CEE_CONV_OVF_I:
7093 case CEE_CONV_OVF_U:
7094 CHECK_STACK (1);
7096 if (sp [-1]->type == STACK_R8) {
7097 ADD_UNOP (CEE_CONV_OVF_I8);
7098 ADD_UNOP (*ip);
7099 } else {
7100 ADD_UNOP (*ip);
7102 ip++;
7103 break;
7104 case CEE_CONV_OVF_U1:
7105 case CEE_CONV_OVF_U2:
7106 case CEE_CONV_OVF_U4:
7107 CHECK_STACK (1);
7109 if (sp [-1]->type == STACK_R8) {
7110 ADD_UNOP (CEE_CONV_OVF_U8);
7111 ADD_UNOP (*ip);
7112 } else {
7113 ADD_UNOP (*ip);
7115 ip++;
7116 break;
7117 case CEE_CONV_OVF_I1_UN:
7118 case CEE_CONV_OVF_I2_UN:
7119 case CEE_CONV_OVF_I4_UN:
7120 case CEE_CONV_OVF_I8_UN:
7121 case CEE_CONV_OVF_U1_UN:
7122 case CEE_CONV_OVF_U2_UN:
7123 case CEE_CONV_OVF_U4_UN:
7124 case CEE_CONV_OVF_U8_UN:
7125 case CEE_CONV_OVF_I_UN:
7126 case CEE_CONV_OVF_U_UN:
7127 case CEE_CONV_U2:
7128 case CEE_CONV_U1:
7129 case CEE_CONV_I:
7130 case CEE_CONV_U:
7131 CHECK_STACK (1);
7132 ADD_UNOP (*ip);
7133 ip++;
7134 break;
7135 case CEE_ADD_OVF:
7136 case CEE_ADD_OVF_UN:
7137 case CEE_MUL_OVF:
7138 case CEE_MUL_OVF_UN:
7139 case CEE_SUB_OVF:
7140 case CEE_SUB_OVF_UN:
7141 CHECK_STACK (2);
7142 ADD_BINOP (*ip);
7143 ip++;
7144 break;
7145 case CEE_CPOBJ:
7146 CHECK_OPSIZE (5);
7147 CHECK_STACK (2);
7148 token = read32 (ip + 1);
7149 klass = mini_get_class (method, token, generic_context);
7150 CHECK_TYPELOAD (klass);
7151 sp -= 2;
7152 if (generic_class_is_reference_type (cfg, klass)) {
7153 MonoInst *store, *load;
7154 int dreg = alloc_preg (cfg);
7156 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7157 load->flags |= ins_flag;
7158 MONO_ADD_INS (cfg->cbb, load);
7160 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7161 store->flags |= ins_flag;
7162 MONO_ADD_INS (cfg->cbb, store);
7163 } else {
7164 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7166 ins_flag = 0;
7167 ip += 5;
7168 break;
7169 case CEE_LDOBJ: {
7170 int loc_index = -1;
7171 int stloc_len = 0;
7173 CHECK_OPSIZE (5);
7174 CHECK_STACK (1);
7175 --sp;
7176 token = read32 (ip + 1);
7177 klass = mini_get_class (method, token, generic_context);
7178 CHECK_TYPELOAD (klass);
7180 /* Optimize the common ldobj+stloc combination */
7181 switch (ip [5]) {
7182 case CEE_STLOC_S:
7183 loc_index = ip [6];
7184 stloc_len = 2;
7185 break;
7186 case CEE_STLOC_0:
7187 case CEE_STLOC_1:
7188 case CEE_STLOC_2:
7189 case CEE_STLOC_3:
7190 loc_index = ip [5] - CEE_STLOC_0;
7191 stloc_len = 1;
7192 break;
7193 default:
7194 break;
7197 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7198 CHECK_LOCAL (loc_index);
7200 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7201 ins->dreg = cfg->locals [loc_index]->dreg;
7202 ip += 5;
7203 ip += stloc_len;
7204 break;
7207 /* Optimize the ldobj+stobj combination */
7208 /* The reference case ends up being a load+store anyway */
7209 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7210 CHECK_STACK (1);
7212 sp --;
7214 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7216 ip += 5 + 5;
7217 ins_flag = 0;
7218 break;
7221 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7222 *sp++ = ins;
7224 ip += 5;
7225 ins_flag = 0;
7226 inline_costs += 1;
7227 break;
7229 case CEE_LDSTR:
7230 CHECK_STACK_OVF (1);
7231 CHECK_OPSIZE (5);
7232 n = read32 (ip + 1);
7234 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7235 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7236 ins->type = STACK_OBJ;
7237 *sp = ins;
7239 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7240 MonoInst *iargs [1];
7242 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7243 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7244 } else {
7245 if (cfg->opt & MONO_OPT_SHARED) {
7246 MonoInst *iargs [3];
7248 if (cfg->compile_aot) {
7249 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7251 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7252 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7253 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7254 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7255 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7256 } else {
7257 if (bblock->out_of_line) {
7258 MonoInst *iargs [2];
7260 if (image == mono_defaults.corlib) {
7262 * Avoid relocations in AOT and save some space by using a
7263 * version of helper_ldstr specialized to mscorlib.
7265 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7266 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7267 } else {
7268 /* Avoid creating the string object */
7269 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7270 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7271 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7274 else
7275 if (cfg->compile_aot) {
7276 NEW_LDSTRCONST (cfg, ins, image, n);
7277 *sp = ins;
7278 MONO_ADD_INS (bblock, ins);
7280 else {
7281 NEW_PCONST (cfg, ins, NULL);
7282 ins->type = STACK_OBJ;
7283 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7284 *sp = ins;
7285 MONO_ADD_INS (bblock, ins);
7290 sp++;
7291 ip += 5;
7292 break;
7293 case CEE_NEWOBJ: {
7294 MonoInst *iargs [2];
7295 MonoMethodSignature *fsig;
7296 MonoInst this_ins;
7297 MonoInst *alloc;
7298 MonoInst *vtable_arg = NULL;
7300 CHECK_OPSIZE (5);
7301 token = read32 (ip + 1);
7302 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7303 if (!cmethod)
7304 goto load_error;
7305 fsig = mono_method_get_signature (cmethod, image, token);
7306 if (!fsig)
7307 goto load_error;
7309 mono_save_token_info (cfg, image, token, cmethod);
7311 if (!mono_class_init (cmethod->klass))
7312 goto load_error;
7314 if (cfg->generic_sharing_context)
7315 context_used = mono_method_check_context_used (cmethod);
7317 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7318 if (check_linkdemand (cfg, method, cmethod))
7319 INLINE_FAILURE;
7320 CHECK_CFG_EXCEPTION;
7321 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7322 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7325 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7326 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7327 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7328 mono_class_vtable (cfg->domain, cmethod->klass);
7329 CHECK_TYPELOAD (cmethod->klass);
7331 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7332 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7333 } else {
7334 if (context_used) {
7335 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7336 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7337 } else {
7338 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7340 CHECK_TYPELOAD (cmethod->klass);
7341 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7346 n = fsig->param_count;
7347 CHECK_STACK (n);
7350 * Generate smaller code for the common newobj <exception> instruction in
7351 * argument checking code.
7353 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7354 is_exception_class (cmethod->klass) && n <= 2 &&
7355 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7356 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7357 MonoInst *iargs [3];
7359 g_assert (!vtable_arg);
7361 sp -= n;
7363 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7364 switch (n) {
7365 case 0:
7366 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7367 break;
7368 case 1:
7369 iargs [1] = sp [0];
7370 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7371 break;
7372 case 2:
7373 iargs [1] = sp [0];
7374 iargs [2] = sp [1];
7375 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7376 break;
7377 default:
7378 g_assert_not_reached ();
7381 ip += 5;
7382 inline_costs += 5;
7383 break;
7386 /* move the args to allow room for 'this' in the first position */
7387 while (n--) {
7388 --sp;
7389 sp [1] = sp [0];
7392 /* check_call_signature () requires sp[0] to be set */
7393 this_ins.type = STACK_OBJ;
7394 sp [0] = &this_ins;
7395 if (check_call_signature (cfg, fsig, sp))
7396 UNVERIFIED;
7398 iargs [0] = NULL;
7400 if (mini_class_is_system_array (cmethod->klass)) {
7401 g_assert (!vtable_arg);
7403 *sp = emit_get_rgctx_method (cfg, context_used,
7404 cmethod, MONO_RGCTX_INFO_METHOD);
7406 /* Avoid varargs in the common case */
7407 if (fsig->param_count == 1)
7408 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7409 else if (fsig->param_count == 2)
7410 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7411 else if (fsig->param_count == 3)
7412 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7413 else
7414 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7415 } else if (cmethod->string_ctor) {
7416 g_assert (!context_used);
7417 g_assert (!vtable_arg);
7418 /* we simply pass a null pointer */
7419 EMIT_NEW_PCONST (cfg, *sp, NULL);
7420 /* now call the string ctor */
7421 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7422 } else {
7423 MonoInst* callvirt_this_arg = NULL;
7425 if (cmethod->klass->valuetype) {
7426 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7427 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7428 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7430 alloc = NULL;
7433 * The code generated by mini_emit_virtual_call () expects
7434 * iargs [0] to be a boxed instance, but luckily the vcall
7435 * will be transformed into a normal call there.
7437 } else if (context_used) {
7438 MonoInst *data;
7439 int rgctx_info;
7441 if (cfg->opt & MONO_OPT_SHARED)
7442 rgctx_info = MONO_RGCTX_INFO_KLASS;
7443 else
7444 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7445 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7447 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7448 *sp = alloc;
7449 } else {
7450 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7452 CHECK_TYPELOAD (cmethod->klass);
7455 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7456 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7457 * As a workaround, we call class cctors before allocating objects.
7459 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7460 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7461 if (cfg->verbose_level > 2)
7462 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7463 class_inits = g_slist_prepend (class_inits, vtable);
7466 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7467 *sp = alloc;
7469 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7471 if (alloc)
7472 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7474 /* Now call the actual ctor */
7475 /* Avoid virtual calls to ctors if possible */
7476 if (cmethod->klass->marshalbyref)
7477 callvirt_this_arg = sp [0];
7479 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7480 mono_method_check_inlining (cfg, cmethod) &&
7481 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7482 !g_list_find (dont_inline, cmethod)) {
7483 int costs;
7485 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7486 cfg->real_offset += 5;
7487 bblock = cfg->cbb;
7489 inline_costs += costs - 5;
7490 } else {
7491 INLINE_FAILURE;
7492 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7494 } else if (context_used &&
7495 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7496 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7497 MonoInst *cmethod_addr;
7499 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7500 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7502 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7503 } else {
7504 INLINE_FAILURE;
7505 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7506 callvirt_this_arg, NULL, vtable_arg);
7510 if (alloc == NULL) {
7511 /* Valuetype */
7512 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7513 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7514 *sp++= ins;
7516 else
7517 *sp++ = alloc;
7519 ip += 5;
7520 inline_costs += 5;
7521 break;
7523 case CEE_CASTCLASS:
7524 CHECK_STACK (1);
7525 --sp;
7526 CHECK_OPSIZE (5);
7527 token = read32 (ip + 1);
7528 klass = mini_get_class (method, token, generic_context);
7529 CHECK_TYPELOAD (klass);
7530 if (sp [0]->type != STACK_OBJ)
7531 UNVERIFIED;
7533 if (cfg->generic_sharing_context)
7534 context_used = mono_class_check_context_used (klass);
7536 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7537 MonoInst *args [2];
7539 /* obj */
7540 args [0] = *sp;
7542 /* klass */
7543 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7545 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7546 *sp ++ = ins;
7547 ip += 5;
7548 inline_costs += 2;
7549 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7550 MonoMethod *mono_castclass;
7551 MonoInst *iargs [1];
7552 int costs;
7554 mono_castclass = mono_marshal_get_castclass (klass);
7555 iargs [0] = sp [0];
7557 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7558 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7559 g_assert (costs > 0);
7561 ip += 5;
7562 cfg->real_offset += 5;
7563 bblock = cfg->cbb;
7565 *sp++ = iargs [0];
7567 inline_costs += costs;
7569 else {
7570 ins = handle_castclass (cfg, klass, *sp, context_used);
7571 CHECK_CFG_EXCEPTION;
7572 bblock = cfg->cbb;
7573 *sp ++ = ins;
7574 ip += 5;
7576 break;
7577 case CEE_ISINST: {
7578 CHECK_STACK (1);
7579 --sp;
7580 CHECK_OPSIZE (5);
7581 token = read32 (ip + 1);
7582 klass = mini_get_class (method, token, generic_context);
7583 CHECK_TYPELOAD (klass);
7584 if (sp [0]->type != STACK_OBJ)
7585 UNVERIFIED;
7587 if (cfg->generic_sharing_context)
7588 context_used = mono_class_check_context_used (klass);
7590 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7591 MonoInst *args [2];
7593 /* obj */
7594 args [0] = *sp;
7596 /* klass */
7597 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7599 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7600 sp++;
7601 ip += 5;
7602 inline_costs += 2;
7603 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7604 MonoMethod *mono_isinst;
7605 MonoInst *iargs [1];
7606 int costs;
7608 mono_isinst = mono_marshal_get_isinst (klass);
7609 iargs [0] = sp [0];
7611 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7612 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7613 g_assert (costs > 0);
7615 ip += 5;
7616 cfg->real_offset += 5;
7617 bblock = cfg->cbb;
7619 *sp++= iargs [0];
7621 inline_costs += costs;
7623 else {
7624 ins = handle_isinst (cfg, klass, *sp, context_used);
7625 CHECK_CFG_EXCEPTION;
7626 bblock = cfg->cbb;
7627 *sp ++ = ins;
7628 ip += 5;
7630 break;
7632 case CEE_UNBOX_ANY: {
7633 CHECK_STACK (1);
7634 --sp;
7635 CHECK_OPSIZE (5);
7636 token = read32 (ip + 1);
7637 klass = mini_get_class (method, token, generic_context);
7638 CHECK_TYPELOAD (klass);
7640 mono_save_token_info (cfg, image, token, klass);
7642 if (cfg->generic_sharing_context)
7643 context_used = mono_class_check_context_used (klass);
7645 if (generic_class_is_reference_type (cfg, klass)) {
7646 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7647 if (context_used) {
7648 MonoInst *iargs [2];
7650 /* obj */
7651 iargs [0] = *sp;
7652 /* klass */
7653 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7654 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7655 *sp ++ = ins;
7656 ip += 5;
7657 inline_costs += 2;
7658 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7659 MonoMethod *mono_castclass;
7660 MonoInst *iargs [1];
7661 int costs;
7663 mono_castclass = mono_marshal_get_castclass (klass);
7664 iargs [0] = sp [0];
7666 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7667 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7669 g_assert (costs > 0);
7671 ip += 5;
7672 cfg->real_offset += 5;
7673 bblock = cfg->cbb;
7675 *sp++ = iargs [0];
7676 inline_costs += costs;
7677 } else {
7678 ins = handle_castclass (cfg, klass, *sp, 0);
7679 CHECK_CFG_EXCEPTION;
7680 bblock = cfg->cbb;
7681 *sp ++ = ins;
7682 ip += 5;
7684 break;
7687 if (mono_class_is_nullable (klass)) {
7688 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7689 *sp++= ins;
7690 ip += 5;
7691 break;
7694 /* UNBOX */
7695 ins = handle_unbox (cfg, klass, sp, context_used);
7696 *sp = ins;
7698 ip += 5;
7700 /* LDOBJ */
7701 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7702 *sp++ = ins;
7704 inline_costs += 2;
7705 break;
7707 case CEE_BOX: {
7708 MonoInst *val;
7710 CHECK_STACK (1);
7711 --sp;
7712 val = *sp;
7713 CHECK_OPSIZE (5);
7714 token = read32 (ip + 1);
7715 klass = mini_get_class (method, token, generic_context);
7716 CHECK_TYPELOAD (klass);
7718 mono_save_token_info (cfg, image, token, klass);
7720 if (cfg->generic_sharing_context)
7721 context_used = mono_class_check_context_used (klass);
7723 if (generic_class_is_reference_type (cfg, klass)) {
7724 *sp++ = val;
7725 ip += 5;
7726 break;
7729 if (klass == mono_defaults.void_class)
7730 UNVERIFIED;
7731 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7732 UNVERIFIED;
7733 /* frequent check in generic code: box (struct), brtrue */
7734 if (!mono_class_is_nullable (klass) &&
7735 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7736 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7737 ip += 5;
7738 MONO_INST_NEW (cfg, ins, OP_BR);
7739 if (*ip == CEE_BRTRUE_S) {
7740 CHECK_OPSIZE (2);
7741 ip++;
7742 target = ip + 1 + (signed char)(*ip);
7743 ip++;
7744 } else {
7745 CHECK_OPSIZE (5);
7746 ip++;
7747 target = ip + 4 + (gint)(read32 (ip));
7748 ip += 4;
7750 GET_BBLOCK (cfg, tblock, target);
7751 link_bblock (cfg, bblock, tblock);
7752 ins->inst_target_bb = tblock;
7753 GET_BBLOCK (cfg, tblock, ip);
7755 * This leads to some inconsistency, since the two bblocks are
7756 * not really connected, but it is needed for handling stack
7757 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7758 * FIXME: This should only be needed if sp != stack_start, but that
7759 * doesn't work for some reason (test failure in mcs/tests on x86).
7761 link_bblock (cfg, bblock, tblock);
7762 if (sp != stack_start) {
7763 handle_stack_args (cfg, stack_start, sp - stack_start);
7764 sp = stack_start;
7765 CHECK_UNVERIFIABLE (cfg);
7767 MONO_ADD_INS (bblock, ins);
7768 start_new_bblock = 1;
7769 break;
7772 if (context_used) {
7773 MonoInst *data;
7774 int rgctx_info;
7776 if (cfg->opt & MONO_OPT_SHARED)
7777 rgctx_info = MONO_RGCTX_INFO_KLASS;
7778 else
7779 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7780 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7781 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7782 } else {
7783 *sp++ = handle_box (cfg, val, klass);
7786 CHECK_CFG_EXCEPTION;
7787 ip += 5;
7788 inline_costs += 1;
7789 break;
7791 case CEE_UNBOX: {
7792 CHECK_STACK (1);
7793 --sp;
7794 CHECK_OPSIZE (5);
7795 token = read32 (ip + 1);
7796 klass = mini_get_class (method, token, generic_context);
7797 CHECK_TYPELOAD (klass);
7799 mono_save_token_info (cfg, image, token, klass);
7801 if (cfg->generic_sharing_context)
7802 context_used = mono_class_check_context_used (klass);
7804 if (mono_class_is_nullable (klass)) {
7805 MonoInst *val;
7807 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7808 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7810 *sp++= ins;
7811 } else {
7812 ins = handle_unbox (cfg, klass, sp, context_used);
7813 *sp++ = ins;
7815 ip += 5;
7816 inline_costs += 2;
7817 break;
7819 case CEE_LDFLD:
7820 case CEE_LDFLDA:
7821 case CEE_STFLD: {
7822 MonoClassField *field;
7823 int costs;
7824 guint foffset;
7826 if (*ip == CEE_STFLD) {
7827 CHECK_STACK (2);
7828 sp -= 2;
7829 } else {
7830 CHECK_STACK (1);
7831 --sp;
7833 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7834 UNVERIFIED;
7835 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7836 UNVERIFIED;
7837 CHECK_OPSIZE (5);
7838 token = read32 (ip + 1);
7839 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7840 field = mono_method_get_wrapper_data (method, token);
7841 klass = field->parent;
7843 else {
7844 field = mono_field_from_token (image, token, &klass, generic_context);
7846 if (!field)
7847 goto load_error;
7848 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7849 FIELD_ACCESS_FAILURE;
7850 mono_class_init (klass);
7852 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7853 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7854 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7855 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7858 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7859 if (*ip == CEE_STFLD) {
7860 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7861 UNVERIFIED;
7862 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7863 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7864 MonoInst *iargs [5];
7866 iargs [0] = sp [0];
7867 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7868 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7869 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7870 field->offset);
7871 iargs [4] = sp [1];
7873 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7874 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7875 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7876 g_assert (costs > 0);
7878 cfg->real_offset += 5;
7879 bblock = cfg->cbb;
7881 inline_costs += costs;
7882 } else {
7883 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7885 } else {
7886 MonoInst *store;
7888 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7890 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7892 #if HAVE_WRITE_BARRIERS
7893 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7894 /* insert call to write barrier */
7895 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7896 MonoInst *iargs [2];
7897 int dreg;
7899 dreg = alloc_preg (cfg);
7900 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7901 iargs [1] = sp [1];
7902 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7904 #endif
7906 store->flags |= ins_flag;
7908 ins_flag = 0;
7909 ip += 5;
7910 break;
7913 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7914 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7915 MonoInst *iargs [4];
7917 iargs [0] = sp [0];
7918 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7919 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7920 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7921 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7922 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7923 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7924 bblock = cfg->cbb;
7925 g_assert (costs > 0);
7927 cfg->real_offset += 5;
7929 *sp++ = iargs [0];
7931 inline_costs += costs;
7932 } else {
7933 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7934 *sp++ = ins;
7936 } else {
7937 if (sp [0]->type == STACK_VTYPE) {
7938 MonoInst *var;
7940 /* Have to compute the address of the variable */
7942 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7943 if (!var)
7944 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7945 else
7946 g_assert (var->klass == klass);
7948 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7949 sp [0] = ins;
7952 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7954 if (*ip == CEE_LDFLDA) {
7955 dreg = alloc_preg (cfg);
7957 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7958 ins->klass = mono_class_from_mono_type (field->type);
7959 ins->type = STACK_MP;
7960 *sp++ = ins;
7961 } else {
7962 MonoInst *load;
7964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7965 load->flags |= ins_flag;
7966 load->flags |= MONO_INST_FAULT;
7967 *sp++ = load;
7970 ins_flag = 0;
7971 ip += 5;
7972 break;
7974 case CEE_LDSFLD:
7975 case CEE_LDSFLDA:
7976 case CEE_STSFLD: {
7977 MonoClassField *field;
7978 gpointer addr = NULL;
7979 gboolean is_special_static;
7981 CHECK_OPSIZE (5);
7982 token = read32 (ip + 1);
7984 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7985 field = mono_method_get_wrapper_data (method, token);
7986 klass = field->parent;
7988 else
7989 field = mono_field_from_token (image, token, &klass, generic_context);
7990 if (!field)
7991 goto load_error;
7992 mono_class_init (klass);
7993 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7994 FIELD_ACCESS_FAILURE;
7996 /* if the class is Critical then transparent code cannot access it's fields */
7997 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7998 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8001 * We can only support shared generic static
8002 * field access on architectures where the
8003 * trampoline code has been extended to handle
8004 * the generic class init.
8006 #ifndef MONO_ARCH_VTABLE_REG
8007 GENERIC_SHARING_FAILURE (*ip);
8008 #endif
8010 if (cfg->generic_sharing_context)
8011 context_used = mono_class_check_context_used (klass);
8013 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8015 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8016 * to be called here.
8018 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8019 mono_class_vtable (cfg->domain, klass);
8020 CHECK_TYPELOAD (klass);
8022 mono_domain_lock (cfg->domain);
8023 if (cfg->domain->special_static_fields)
8024 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8025 mono_domain_unlock (cfg->domain);
8027 is_special_static = mono_class_field_is_special_static (field);
8029 /* Generate IR to compute the field address */
8031 if ((cfg->opt & MONO_OPT_SHARED) ||
8032 (cfg->compile_aot && is_special_static) ||
8033 (context_used && is_special_static)) {
8034 MonoInst *iargs [2];
8036 g_assert (field->parent);
8037 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8038 if (context_used) {
8039 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8040 field, MONO_RGCTX_INFO_CLASS_FIELD);
8041 } else {
8042 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8044 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8045 } else if (context_used) {
8046 MonoInst *static_data;
8049 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8050 method->klass->name_space, method->klass->name, method->name,
8051 depth, field->offset);
8054 if (mono_class_needs_cctor_run (klass, method)) {
8055 MonoCallInst *call;
8056 MonoInst *vtable;
8058 vtable = emit_get_rgctx_klass (cfg, context_used,
8059 klass, MONO_RGCTX_INFO_VTABLE);
8061 // FIXME: This doesn't work since it tries to pass the argument
8062 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8064 * The vtable pointer is always passed in a register regardless of
8065 * the calling convention, so assign it manually, and make a call
8066 * using a signature without parameters.
8068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8069 #ifdef MONO_ARCH_VTABLE_REG
8070 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8071 cfg->uses_vtable_reg = TRUE;
8072 #else
8073 NOT_IMPLEMENTED;
8074 #endif
8078 * The pointer we're computing here is
8080 * super_info.static_data + field->offset
8082 static_data = emit_get_rgctx_klass (cfg, context_used,
8083 klass, MONO_RGCTX_INFO_STATIC_DATA);
8085 if (field->offset == 0) {
8086 ins = static_data;
8087 } else {
8088 int addr_reg = mono_alloc_preg (cfg);
8089 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8091 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8092 MonoInst *iargs [2];
8094 g_assert (field->parent);
8095 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8096 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8097 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8098 } else {
8099 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8101 CHECK_TYPELOAD (klass);
8102 if (!addr) {
8103 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8104 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8105 if (cfg->verbose_level > 2)
8106 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8107 class_inits = g_slist_prepend (class_inits, vtable);
8108 } else {
8109 if (cfg->run_cctors) {
8110 MonoException *ex;
8111 /* This makes so that inline cannot trigger */
8112 /* .cctors: too many apps depend on them */
8113 /* running with a specific order... */
8114 if (! vtable->initialized)
8115 INLINE_FAILURE;
8116 ex = mono_runtime_class_init_full (vtable, FALSE);
8117 if (ex) {
8118 set_exception_object (cfg, ex);
8119 goto exception_exit;
8123 addr = (char*)vtable->data + field->offset;
8125 if (cfg->compile_aot)
8126 EMIT_NEW_SFLDACONST (cfg, ins, field);
8127 else
8128 EMIT_NEW_PCONST (cfg, ins, addr);
8129 } else {
8131 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8132 * This could be later optimized to do just a couple of
8133 * memory dereferences with constant offsets.
8135 MonoInst *iargs [1];
8136 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8137 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8141 /* Generate IR to do the actual load/store operation */
8143 if (*ip == CEE_LDSFLDA) {
8144 ins->klass = mono_class_from_mono_type (field->type);
8145 ins->type = STACK_PTR;
8146 *sp++ = ins;
8147 } else if (*ip == CEE_STSFLD) {
8148 MonoInst *store;
8149 CHECK_STACK (1);
8150 sp--;
8152 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8153 store->flags |= ins_flag;
8154 } else {
8155 gboolean is_const = FALSE;
8156 MonoVTable *vtable = NULL;
8158 if (!context_used) {
8159 vtable = mono_class_vtable (cfg->domain, klass);
8160 CHECK_TYPELOAD (klass);
8162 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8163 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8164 gpointer addr = (char*)vtable->data + field->offset;
8165 int ro_type = field->type->type;
8166 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8167 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8169 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8170 is_const = TRUE;
8171 switch (ro_type) {
8172 case MONO_TYPE_BOOLEAN:
8173 case MONO_TYPE_U1:
8174 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8175 sp++;
8176 break;
8177 case MONO_TYPE_I1:
8178 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8179 sp++;
8180 break;
8181 case MONO_TYPE_CHAR:
8182 case MONO_TYPE_U2:
8183 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8184 sp++;
8185 break;
8186 case MONO_TYPE_I2:
8187 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8188 sp++;
8189 break;
8190 break;
8191 case MONO_TYPE_I4:
8192 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8193 sp++;
8194 break;
8195 case MONO_TYPE_U4:
8196 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8197 sp++;
8198 break;
8199 #ifndef HAVE_MOVING_COLLECTOR
8200 case MONO_TYPE_I:
8201 case MONO_TYPE_U:
8202 case MONO_TYPE_STRING:
8203 case MONO_TYPE_OBJECT:
8204 case MONO_TYPE_CLASS:
8205 case MONO_TYPE_SZARRAY:
8206 case MONO_TYPE_PTR:
8207 case MONO_TYPE_FNPTR:
8208 case MONO_TYPE_ARRAY:
8209 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8210 type_to_eval_stack_type ((cfg), field->type, *sp);
8211 sp++;
8212 break;
8213 #endif
8214 case MONO_TYPE_I8:
8215 case MONO_TYPE_U8:
8216 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8217 sp++;
8218 break;
8219 case MONO_TYPE_R4:
8220 case MONO_TYPE_R8:
8221 case MONO_TYPE_VALUETYPE:
8222 default:
8223 is_const = FALSE;
8224 break;
8228 if (!is_const) {
8229 MonoInst *load;
8231 CHECK_STACK_OVF (1);
8233 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8234 load->flags |= ins_flag;
8235 ins_flag = 0;
8236 *sp++ = load;
8239 ins_flag = 0;
8240 ip += 5;
8241 break;
8243 case CEE_STOBJ:
8244 CHECK_STACK (2);
8245 sp -= 2;
8246 CHECK_OPSIZE (5);
8247 token = read32 (ip + 1);
8248 klass = mini_get_class (method, token, generic_context);
8249 CHECK_TYPELOAD (klass);
8250 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8251 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8252 ins_flag = 0;
8253 ip += 5;
8254 inline_costs += 1;
8255 break;
8258 * Array opcodes
8260 case CEE_NEWARR: {
8261 MonoInst *len_ins;
8262 const char *data_ptr;
8263 int data_size = 0;
8264 guint32 field_token;
8266 CHECK_STACK (1);
8267 --sp;
8269 CHECK_OPSIZE (5);
8270 token = read32 (ip + 1);
8272 klass = mini_get_class (method, token, generic_context);
8273 CHECK_TYPELOAD (klass);
8275 if (cfg->generic_sharing_context)
8276 context_used = mono_class_check_context_used (klass);
8278 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8279 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8280 ins->sreg1 = sp [0]->dreg;
8281 ins->type = STACK_I4;
8282 ins->dreg = alloc_ireg (cfg);
8283 MONO_ADD_INS (cfg->cbb, ins);
8284 *sp = mono_decompose_opcode (cfg, ins);
8287 if (context_used) {
8288 MonoInst *args [3];
8289 MonoClass *array_class = mono_array_class_get (klass, 1);
8290 /* FIXME: we cannot get a managed
8291 allocator because we can't get the
8292 open generic class's vtable. We
8293 have the same problem in
8294 handle_alloc_from_inst(). This
8295 needs to be solved so that we can
8296 have managed allocs of shared
8297 generic classes. */
8299 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8300 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8302 MonoMethod *managed_alloc = NULL;
8304 /* FIXME: Decompose later to help abcrem */
8306 /* vtable */
8307 args [0] = emit_get_rgctx_klass (cfg, context_used,
8308 array_class, MONO_RGCTX_INFO_VTABLE);
8309 /* array len */
8310 args [1] = sp [0];
8312 if (managed_alloc)
8313 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8314 else
8315 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8316 } else {
8317 if (cfg->opt & MONO_OPT_SHARED) {
8318 /* Decompose now to avoid problems with references to the domainvar */
8319 MonoInst *iargs [3];
8321 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8322 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8323 iargs [2] = sp [0];
8325 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8326 } else {
8327 /* Decompose later since it is needed by abcrem */
8328 MonoClass *array_type = mono_array_class_get (klass, 1);
8329 mono_class_vtable (cfg->domain, array_type);
8330 CHECK_TYPELOAD (array_type);
8332 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8333 ins->dreg = alloc_preg (cfg);
8334 ins->sreg1 = sp [0]->dreg;
8335 ins->inst_newa_class = klass;
8336 ins->type = STACK_OBJ;
8337 ins->klass = klass;
8338 MONO_ADD_INS (cfg->cbb, ins);
8339 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8340 cfg->cbb->has_array_access = TRUE;
8342 /* Needed so mono_emit_load_get_addr () gets called */
8343 mono_get_got_var (cfg);
8347 len_ins = sp [0];
8348 ip += 5;
8349 *sp++ = ins;
8350 inline_costs += 1;
8353 * we inline/optimize the initialization sequence if possible.
8354 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8355 * for small sizes open code the memcpy
8356 * ensure the rva field is big enough
8358 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8359 MonoMethod *memcpy_method = get_memcpy_method ();
8360 MonoInst *iargs [3];
8361 int add_reg = alloc_preg (cfg);
8363 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8364 if (cfg->compile_aot) {
8365 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8366 } else {
8367 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8369 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8370 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8371 ip += 11;
8374 break;
8376 case CEE_LDLEN:
8377 CHECK_STACK (1);
8378 --sp;
8379 if (sp [0]->type != STACK_OBJ)
8380 UNVERIFIED;
8382 dreg = alloc_preg (cfg);
8383 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8384 ins->dreg = alloc_preg (cfg);
8385 ins->sreg1 = sp [0]->dreg;
8386 ins->type = STACK_I4;
8387 MONO_ADD_INS (cfg->cbb, ins);
8388 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8389 cfg->cbb->has_array_access = TRUE;
8390 ip ++;
8391 *sp++ = ins;
8392 break;
8393 case CEE_LDELEMA:
8394 CHECK_STACK (2);
8395 sp -= 2;
8396 CHECK_OPSIZE (5);
8397 if (sp [0]->type != STACK_OBJ)
8398 UNVERIFIED;
8400 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8402 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8403 CHECK_TYPELOAD (klass);
8404 /* we need to make sure that this array is exactly the type it needs
8405 * to be for correctness. the wrappers are lax with their usage
8406 * so we need to ignore them here
8408 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8409 MonoClass *array_class = mono_array_class_get (klass, 1);
8410 mini_emit_check_array_type (cfg, sp [0], array_class);
8411 CHECK_TYPELOAD (array_class);
8414 readonly = FALSE;
8415 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8416 *sp++ = ins;
8417 ip += 5;
8418 break;
8419 case CEE_LDELEM:
8420 case CEE_LDELEM_I1:
8421 case CEE_LDELEM_U1:
8422 case CEE_LDELEM_I2:
8423 case CEE_LDELEM_U2:
8424 case CEE_LDELEM_I4:
8425 case CEE_LDELEM_U4:
8426 case CEE_LDELEM_I8:
8427 case CEE_LDELEM_I:
8428 case CEE_LDELEM_R4:
8429 case CEE_LDELEM_R8:
8430 case CEE_LDELEM_REF: {
8431 MonoInst *addr;
8433 CHECK_STACK (2);
8434 sp -= 2;
8436 if (*ip == CEE_LDELEM) {
8437 CHECK_OPSIZE (5);
8438 token = read32 (ip + 1);
8439 klass = mini_get_class (method, token, generic_context);
8440 CHECK_TYPELOAD (klass);
8441 mono_class_init (klass);
8443 else
8444 klass = array_access_to_klass (*ip);
8446 if (sp [0]->type != STACK_OBJ)
8447 UNVERIFIED;
8449 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8451 if (sp [1]->opcode == OP_ICONST) {
8452 int array_reg = sp [0]->dreg;
8453 int index_reg = sp [1]->dreg;
8454 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8456 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8457 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8458 } else {
8459 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8460 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8462 *sp++ = ins;
8463 if (*ip == CEE_LDELEM)
8464 ip += 5;
8465 else
8466 ++ip;
8467 break;
8469 case CEE_STELEM_I:
8470 case CEE_STELEM_I1:
8471 case CEE_STELEM_I2:
8472 case CEE_STELEM_I4:
8473 case CEE_STELEM_I8:
8474 case CEE_STELEM_R4:
8475 case CEE_STELEM_R8:
8476 case CEE_STELEM_REF:
8477 case CEE_STELEM: {
8478 MonoInst *addr;
8480 CHECK_STACK (3);
8481 sp -= 3;
8483 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8485 if (*ip == CEE_STELEM) {
8486 CHECK_OPSIZE (5);
8487 token = read32 (ip + 1);
8488 klass = mini_get_class (method, token, generic_context);
8489 CHECK_TYPELOAD (klass);
8490 mono_class_init (klass);
8492 else
8493 klass = array_access_to_klass (*ip);
8495 if (sp [0]->type != STACK_OBJ)
8496 UNVERIFIED;
8498 /* storing a NULL doesn't need any of the complex checks in stelemref */
8499 if (generic_class_is_reference_type (cfg, klass) &&
8500 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8501 MonoMethod* helper = mono_marshal_get_stelemref ();
8502 MonoInst *iargs [3];
8504 if (sp [0]->type != STACK_OBJ)
8505 UNVERIFIED;
8506 if (sp [2]->type != STACK_OBJ)
8507 UNVERIFIED;
8509 iargs [2] = sp [2];
8510 iargs [1] = sp [1];
8511 iargs [0] = sp [0];
8513 mono_emit_method_call (cfg, helper, iargs, NULL);
8514 } else {
8515 if (sp [1]->opcode == OP_ICONST) {
8516 int array_reg = sp [0]->dreg;
8517 int index_reg = sp [1]->dreg;
8518 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8520 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8521 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8522 } else {
8523 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8524 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8528 if (*ip == CEE_STELEM)
8529 ip += 5;
8530 else
8531 ++ip;
8532 inline_costs += 1;
8533 break;
8535 case CEE_CKFINITE: {
8536 CHECK_STACK (1);
8537 --sp;
8539 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8540 ins->sreg1 = sp [0]->dreg;
8541 ins->dreg = alloc_freg (cfg);
8542 ins->type = STACK_R8;
8543 MONO_ADD_INS (bblock, ins);
8545 *sp++ = mono_decompose_opcode (cfg, ins);
8547 ++ip;
8548 break;
8550 case CEE_REFANYVAL: {
8551 MonoInst *src_var, *src;
8553 int klass_reg = alloc_preg (cfg);
8554 int dreg = alloc_preg (cfg);
8556 CHECK_STACK (1);
8557 MONO_INST_NEW (cfg, ins, *ip);
8558 --sp;
8559 CHECK_OPSIZE (5);
8560 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8561 CHECK_TYPELOAD (klass);
8562 mono_class_init (klass);
8564 if (cfg->generic_sharing_context)
8565 context_used = mono_class_check_context_used (klass);
8567 // FIXME:
8568 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8569 if (!src_var)
8570 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8571 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8574 if (context_used) {
8575 MonoInst *klass_ins;
8577 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8578 klass, MONO_RGCTX_INFO_KLASS);
8580 // FIXME:
8581 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8582 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8583 } else {
8584 mini_emit_class_check (cfg, klass_reg, klass);
8586 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8587 ins->type = STACK_MP;
8588 *sp++ = ins;
8589 ip += 5;
8590 break;
8592 case CEE_MKREFANY: {
8593 MonoInst *loc, *addr;
8595 CHECK_STACK (1);
8596 MONO_INST_NEW (cfg, ins, *ip);
8597 --sp;
8598 CHECK_OPSIZE (5);
8599 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8600 CHECK_TYPELOAD (klass);
8601 mono_class_init (klass);
8603 if (cfg->generic_sharing_context)
8604 context_used = mono_class_check_context_used (klass);
8606 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8607 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8609 if (context_used) {
8610 MonoInst *const_ins;
8611 int type_reg = alloc_preg (cfg);
8613 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8614 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8616 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8617 } else if (cfg->compile_aot) {
8618 int const_reg = alloc_preg (cfg);
8619 int type_reg = alloc_preg (cfg);
8621 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8622 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8624 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8625 } else {
8626 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8627 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8631 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8632 ins->type = STACK_VTYPE;
8633 ins->klass = mono_defaults.typed_reference_class;
8634 *sp++ = ins;
8635 ip += 5;
8636 break;
8638 case CEE_LDTOKEN: {
8639 gpointer handle;
8640 MonoClass *handle_class;
8642 CHECK_STACK_OVF (1);
8644 CHECK_OPSIZE (5);
8645 n = read32 (ip + 1);
8647 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8648 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8649 handle = mono_method_get_wrapper_data (method, n);
8650 handle_class = mono_method_get_wrapper_data (method, n + 1);
8651 if (handle_class == mono_defaults.typehandle_class)
8652 handle = &((MonoClass*)handle)->byval_arg;
8654 else {
8655 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8657 if (!handle)
8658 goto load_error;
8659 mono_class_init (handle_class);
8660 if (cfg->generic_sharing_context) {
8661 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8662 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8663 /* This case handles ldtoken
8664 of an open type, like for
8665 typeof(Gen<>). */
8666 context_used = 0;
8667 } else if (handle_class == mono_defaults.typehandle_class) {
8668 /* If we get a MONO_TYPE_CLASS
8669 then we need to provide the
8670 open type, not an
8671 instantiation of it. */
8672 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8673 context_used = 0;
8674 else
8675 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8676 } else if (handle_class == mono_defaults.fieldhandle_class)
8677 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8678 else if (handle_class == mono_defaults.methodhandle_class)
8679 context_used = mono_method_check_context_used (handle);
8680 else
8681 g_assert_not_reached ();
8684 if ((cfg->opt & MONO_OPT_SHARED) &&
8685 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8686 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8687 MonoInst *addr, *vtvar, *iargs [3];
8688 int method_context_used;
8690 if (cfg->generic_sharing_context)
8691 method_context_used = mono_method_check_context_used (method);
8692 else
8693 method_context_used = 0;
8695 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8697 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8698 EMIT_NEW_ICONST (cfg, iargs [1], n);
8699 if (method_context_used) {
8700 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8701 method, MONO_RGCTX_INFO_METHOD);
8702 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8703 } else {
8704 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8705 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8707 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8711 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8712 } else {
8713 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8714 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8715 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8716 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8717 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8718 MonoClass *tclass = mono_class_from_mono_type (handle);
8720 mono_class_init (tclass);
8721 if (context_used) {
8722 ins = emit_get_rgctx_klass (cfg, context_used,
8723 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8724 } else if (cfg->compile_aot) {
8725 if (method->wrapper_type) {
8726 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8727 /* Special case for static synchronized wrappers */
8728 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8729 } else {
8730 /* FIXME: n is not a normal token */
8731 cfg->disable_aot = TRUE;
8732 EMIT_NEW_PCONST (cfg, ins, NULL);
8734 } else {
8735 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8737 } else {
8738 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8740 ins->type = STACK_OBJ;
8741 ins->klass = cmethod->klass;
8742 ip += 5;
8743 } else {
8744 MonoInst *addr, *vtvar;
8746 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8748 if (context_used) {
8749 if (handle_class == mono_defaults.typehandle_class) {
8750 ins = emit_get_rgctx_klass (cfg, context_used,
8751 mono_class_from_mono_type (handle),
8752 MONO_RGCTX_INFO_TYPE);
8753 } else if (handle_class == mono_defaults.methodhandle_class) {
8754 ins = emit_get_rgctx_method (cfg, context_used,
8755 handle, MONO_RGCTX_INFO_METHOD);
8756 } else if (handle_class == mono_defaults.fieldhandle_class) {
8757 ins = emit_get_rgctx_field (cfg, context_used,
8758 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8759 } else {
8760 g_assert_not_reached ();
8762 } else if (cfg->compile_aot) {
8763 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8764 } else {
8765 EMIT_NEW_PCONST (cfg, ins, handle);
8767 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8769 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8773 *sp++ = ins;
8774 ip += 5;
8775 break;
8777 case CEE_THROW:
8778 CHECK_STACK (1);
8779 MONO_INST_NEW (cfg, ins, OP_THROW);
8780 --sp;
8781 ins->sreg1 = sp [0]->dreg;
8782 ip++;
8783 bblock->out_of_line = TRUE;
8784 MONO_ADD_INS (bblock, ins);
8785 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8786 MONO_ADD_INS (bblock, ins);
8787 sp = stack_start;
8789 link_bblock (cfg, bblock, end_bblock);
8790 start_new_bblock = 1;
8791 break;
8792 case CEE_ENDFINALLY:
8793 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8794 MONO_ADD_INS (bblock, ins);
8795 ip++;
8796 start_new_bblock = 1;
8799 * Control will leave the method so empty the stack, otherwise
8800 * the next basic block will start with a nonempty stack.
8802 while (sp != stack_start) {
8803 sp--;
8805 break;
8806 case CEE_LEAVE:
8807 case CEE_LEAVE_S: {
8808 GList *handlers;
8810 if (*ip == CEE_LEAVE) {
8811 CHECK_OPSIZE (5);
8812 target = ip + 5 + (gint32)read32(ip + 1);
8813 } else {
8814 CHECK_OPSIZE (2);
8815 target = ip + 2 + (signed char)(ip [1]);
8818 /* empty the stack */
8819 while (sp != stack_start) {
8820 sp--;
8824 * If this leave statement is in a catch block, check for a
8825 * pending exception, and rethrow it if necessary.
8826 * We avoid doing this in runtime invoke wrappers, since those are called
8827 * by native code which excepts the wrapper to catch all exceptions.
8829 for (i = 0; i < header->num_clauses; ++i) {
8830 MonoExceptionClause *clause = &header->clauses [i];
8833 * Use <= in the final comparison to handle clauses with multiple
8834 * leave statements, like in bug #78024.
8835 * The ordering of the exception clauses guarantees that we find the
8836 * innermost clause.
8838 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8839 MonoInst *exc_ins;
8840 MonoBasicBlock *dont_throw;
8843 MonoInst *load;
8845 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8848 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8850 NEW_BBLOCK (cfg, dont_throw);
8853 * Currently, we allways rethrow the abort exception, despite the
8854 * fact that this is not correct. See thread6.cs for an example.
8855 * But propagating the abort exception is more important than
8856 * getting the sematics right.
8858 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8859 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8860 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8862 MONO_START_BB (cfg, dont_throw);
8863 bblock = cfg->cbb;
8867 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8868 GList *tmp;
8869 for (tmp = handlers; tmp; tmp = tmp->next) {
8870 tblock = tmp->data;
8871 link_bblock (cfg, bblock, tblock);
8872 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8873 ins->inst_target_bb = tblock;
8874 MONO_ADD_INS (bblock, ins);
8875 bblock->has_call_handler = 1;
8876 if (COMPILE_LLVM (cfg)) {
8877 MonoBasicBlock *target_bb;
8880 * Link the finally bblock with the target, since it will
8881 * conceptually branch there.
8882 * FIXME: Have to link the bblock containing the endfinally.
8884 GET_BBLOCK (cfg, target_bb, target);
8885 link_bblock (cfg, tblock, target_bb);
8888 g_list_free (handlers);
8891 MONO_INST_NEW (cfg, ins, OP_BR);
8892 MONO_ADD_INS (bblock, ins);
8893 GET_BBLOCK (cfg, tblock, target);
8894 link_bblock (cfg, bblock, tblock);
8895 ins->inst_target_bb = tblock;
8896 start_new_bblock = 1;
8898 if (*ip == CEE_LEAVE)
8899 ip += 5;
8900 else
8901 ip += 2;
8903 break;
8907 * Mono specific opcodes
8909 case MONO_CUSTOM_PREFIX: {
8911 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8913 CHECK_OPSIZE (2);
8914 switch (ip [1]) {
8915 case CEE_MONO_ICALL: {
8916 gpointer func;
8917 MonoJitICallInfo *info;
8919 token = read32 (ip + 2);
8920 func = mono_method_get_wrapper_data (method, token);
8921 info = mono_find_jit_icall_by_addr (func);
8922 g_assert (info);
8924 CHECK_STACK (info->sig->param_count);
8925 sp -= info->sig->param_count;
8927 ins = mono_emit_jit_icall (cfg, info->func, sp);
8928 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8929 *sp++ = ins;
8931 ip += 6;
8932 inline_costs += 10 * num_calls++;
8934 break;
8936 case CEE_MONO_LDPTR: {
8937 gpointer ptr;
8939 CHECK_STACK_OVF (1);
8940 CHECK_OPSIZE (6);
8941 token = read32 (ip + 2);
8943 ptr = mono_method_get_wrapper_data (method, token);
8944 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8945 MonoJitICallInfo *callinfo;
8946 const char *icall_name;
8948 icall_name = method->name + strlen ("__icall_wrapper_");
8949 g_assert (icall_name);
8950 callinfo = mono_find_jit_icall_by_name (icall_name);
8951 g_assert (callinfo);
8953 if (ptr == callinfo->func) {
8954 /* Will be transformed into an AOTCONST later */
8955 EMIT_NEW_PCONST (cfg, ins, ptr);
8956 *sp++ = ins;
8957 ip += 6;
8958 break;
8961 /* FIXME: Generalize this */
8962 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8963 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8964 *sp++ = ins;
8965 ip += 6;
8966 break;
8968 EMIT_NEW_PCONST (cfg, ins, ptr);
8969 *sp++ = ins;
8970 ip += 6;
8971 inline_costs += 10 * num_calls++;
8972 /* Can't embed random pointers into AOT code */
8973 cfg->disable_aot = 1;
8974 break;
8976 case CEE_MONO_ICALL_ADDR: {
8977 MonoMethod *cmethod;
8978 gpointer ptr;
8980 CHECK_STACK_OVF (1);
8981 CHECK_OPSIZE (6);
8982 token = read32 (ip + 2);
8984 cmethod = mono_method_get_wrapper_data (method, token);
8986 if (cfg->compile_aot) {
8987 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8988 } else {
8989 ptr = mono_lookup_internal_call (cmethod);
8990 g_assert (ptr);
8991 EMIT_NEW_PCONST (cfg, ins, ptr);
8993 *sp++ = ins;
8994 ip += 6;
8995 break;
8997 case CEE_MONO_VTADDR: {
8998 MonoInst *src_var, *src;
9000 CHECK_STACK (1);
9001 --sp;
9003 // FIXME:
9004 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9005 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9006 *sp++ = src;
9007 ip += 2;
9008 break;
9010 case CEE_MONO_NEWOBJ: {
9011 MonoInst *iargs [2];
9013 CHECK_STACK_OVF (1);
9014 CHECK_OPSIZE (6);
9015 token = read32 (ip + 2);
9016 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9017 mono_class_init (klass);
9018 NEW_DOMAINCONST (cfg, iargs [0]);
9019 MONO_ADD_INS (cfg->cbb, iargs [0]);
9020 NEW_CLASSCONST (cfg, iargs [1], klass);
9021 MONO_ADD_INS (cfg->cbb, iargs [1]);
9022 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9023 ip += 6;
9024 inline_costs += 10 * num_calls++;
9025 break;
9027 case CEE_MONO_OBJADDR:
9028 CHECK_STACK (1);
9029 --sp;
9030 MONO_INST_NEW (cfg, ins, OP_MOVE);
9031 ins->dreg = alloc_preg (cfg);
9032 ins->sreg1 = sp [0]->dreg;
9033 ins->type = STACK_MP;
9034 MONO_ADD_INS (cfg->cbb, ins);
9035 *sp++ = ins;
9036 ip += 2;
9037 break;
9038 case CEE_MONO_LDNATIVEOBJ:
9040 * Similar to LDOBJ, but instead load the unmanaged
9041 * representation of the vtype to the stack.
9043 CHECK_STACK (1);
9044 CHECK_OPSIZE (6);
9045 --sp;
9046 token = read32 (ip + 2);
9047 klass = mono_method_get_wrapper_data (method, token);
9048 g_assert (klass->valuetype);
9049 mono_class_init (klass);
9052 MonoInst *src, *dest, *temp;
9054 src = sp [0];
9055 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9056 temp->backend.is_pinvoke = 1;
9057 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9058 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9060 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9061 dest->type = STACK_VTYPE;
9062 dest->klass = klass;
9064 *sp ++ = dest;
9065 ip += 6;
9067 break;
9068 case CEE_MONO_RETOBJ: {
9070 * Same as RET, but return the native representation of a vtype
9071 * to the caller.
9073 g_assert (cfg->ret);
9074 g_assert (mono_method_signature (method)->pinvoke);
9075 CHECK_STACK (1);
9076 --sp;
9078 CHECK_OPSIZE (6);
9079 token = read32 (ip + 2);
9080 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9082 if (!cfg->vret_addr) {
9083 g_assert (cfg->ret_var_is_local);
9085 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9086 } else {
9087 EMIT_NEW_RETLOADA (cfg, ins);
9089 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9091 if (sp != stack_start)
9092 UNVERIFIED;
9094 MONO_INST_NEW (cfg, ins, OP_BR);
9095 ins->inst_target_bb = end_bblock;
9096 MONO_ADD_INS (bblock, ins);
9097 link_bblock (cfg, bblock, end_bblock);
9098 start_new_bblock = 1;
9099 ip += 6;
9100 break;
9102 case CEE_MONO_CISINST:
9103 case CEE_MONO_CCASTCLASS: {
9104 int token;
9105 CHECK_STACK (1);
9106 --sp;
9107 CHECK_OPSIZE (6);
9108 token = read32 (ip + 2);
9109 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9110 if (ip [1] == CEE_MONO_CISINST)
9111 ins = handle_cisinst (cfg, klass, sp [0]);
9112 else
9113 ins = handle_ccastclass (cfg, klass, sp [0]);
9114 bblock = cfg->cbb;
9115 *sp++ = ins;
9116 ip += 6;
9117 break;
9119 case CEE_MONO_SAVE_LMF:
9120 case CEE_MONO_RESTORE_LMF:
9121 #ifdef MONO_ARCH_HAVE_LMF_OPS
9122 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9123 MONO_ADD_INS (bblock, ins);
9124 cfg->need_lmf_area = TRUE;
9125 #endif
9126 ip += 2;
9127 break;
9128 case CEE_MONO_CLASSCONST:
9129 CHECK_STACK_OVF (1);
9130 CHECK_OPSIZE (6);
9131 token = read32 (ip + 2);
9132 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9133 *sp++ = ins;
9134 ip += 6;
9135 inline_costs += 10 * num_calls++;
9136 break;
9137 case CEE_MONO_NOT_TAKEN:
9138 bblock->out_of_line = TRUE;
9139 ip += 2;
9140 break;
9141 case CEE_MONO_TLS:
9142 CHECK_STACK_OVF (1);
9143 CHECK_OPSIZE (6);
9144 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9145 ins->dreg = alloc_preg (cfg);
9146 ins->inst_offset = (gint32)read32 (ip + 2);
9147 ins->type = STACK_PTR;
9148 MONO_ADD_INS (bblock, ins);
9149 *sp++ = ins;
9150 ip += 6;
9151 break;
9152 case CEE_MONO_DYN_CALL: {
9153 MonoCallInst *call;
9155 /* It would be easier to call a trampoline, but that would put an
9156 * extra frame on the stack, confusing exception handling. So
9157 * implement it inline using an opcode for now.
9160 if (!cfg->dyn_call_var) {
9161 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9162 /* prevent it from being register allocated */
9163 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9166 /* Has to use a call inst since it local regalloc expects it */
9167 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9168 ins = (MonoInst*)call;
9169 sp -= 2;
9170 ins->sreg1 = sp [0]->dreg;
9171 ins->sreg2 = sp [1]->dreg;
9172 MONO_ADD_INS (bblock, ins);
9174 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9175 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9176 #endif
9178 ip += 2;
9179 inline_costs += 10 * num_calls++;
9181 break;
9183 default:
9184 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9185 break;
9187 break;
9190 case CEE_PREFIX1: {
9191 CHECK_OPSIZE (2);
9192 switch (ip [1]) {
9193 case CEE_ARGLIST: {
9194 /* somewhat similar to LDTOKEN */
9195 MonoInst *addr, *vtvar;
9196 CHECK_STACK_OVF (1);
9197 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9199 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9200 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9202 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9203 ins->type = STACK_VTYPE;
9204 ins->klass = mono_defaults.argumenthandle_class;
9205 *sp++ = ins;
9206 ip += 2;
9207 break;
9209 case CEE_CEQ:
9210 case CEE_CGT:
9211 case CEE_CGT_UN:
9212 case CEE_CLT:
9213 case CEE_CLT_UN: {
9214 MonoInst *cmp;
9215 CHECK_STACK (2);
9217 * The following transforms:
9218 * CEE_CEQ into OP_CEQ
9219 * CEE_CGT into OP_CGT
9220 * CEE_CGT_UN into OP_CGT_UN
9221 * CEE_CLT into OP_CLT
9222 * CEE_CLT_UN into OP_CLT_UN
9224 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9226 MONO_INST_NEW (cfg, ins, cmp->opcode);
9227 sp -= 2;
9228 cmp->sreg1 = sp [0]->dreg;
9229 cmp->sreg2 = sp [1]->dreg;
9230 type_from_op (cmp, sp [0], sp [1]);
9231 CHECK_TYPE (cmp);
9232 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9233 cmp->opcode = OP_LCOMPARE;
9234 else if (sp [0]->type == STACK_R8)
9235 cmp->opcode = OP_FCOMPARE;
9236 else
9237 cmp->opcode = OP_ICOMPARE;
9238 MONO_ADD_INS (bblock, cmp);
9239 ins->type = STACK_I4;
9240 ins->dreg = alloc_dreg (cfg, ins->type);
9241 type_from_op (ins, sp [0], sp [1]);
9243 if (cmp->opcode == OP_FCOMPARE) {
9245 * The backends expect the fceq opcodes to do the
9246 * comparison too.
9248 cmp->opcode = OP_NOP;
9249 ins->sreg1 = cmp->sreg1;
9250 ins->sreg2 = cmp->sreg2;
9252 MONO_ADD_INS (bblock, ins);
9253 *sp++ = ins;
9254 ip += 2;
9255 break;
9257 case CEE_LDFTN: {
9258 MonoInst *argconst;
9259 MonoMethod *cil_method;
9260 gboolean needs_static_rgctx_invoke;
9262 CHECK_STACK_OVF (1);
9263 CHECK_OPSIZE (6);
9264 n = read32 (ip + 2);
9265 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9266 if (!cmethod)
9267 goto load_error;
9268 mono_class_init (cmethod->klass);
9270 mono_save_token_info (cfg, image, n, cmethod);
9272 if (cfg->generic_sharing_context)
9273 context_used = mono_method_check_context_used (cmethod);
9275 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9277 cil_method = cmethod;
9278 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9279 METHOD_ACCESS_FAILURE;
9281 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9282 if (check_linkdemand (cfg, method, cmethod))
9283 INLINE_FAILURE;
9284 CHECK_CFG_EXCEPTION;
9285 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9286 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9290 * Optimize the common case of ldftn+delegate creation
9292 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9293 /* FIXME: SGEN support */
9294 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9295 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9296 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9297 MonoInst *target_ins;
9298 MonoMethod *invoke;
9299 int invoke_context_used = 0;
9301 invoke = mono_get_delegate_invoke (ctor_method->klass);
9302 if (!invoke || !mono_method_signature (invoke))
9303 goto load_error;
9305 if (cfg->generic_sharing_context)
9306 invoke_context_used = mono_method_check_context_used (invoke);
9308 if (invoke_context_used == 0) {
9309 ip += 6;
9310 if (cfg->verbose_level > 3)
9311 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9312 target_ins = sp [-1];
9313 sp --;
9314 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9315 CHECK_CFG_EXCEPTION;
9316 ip += 5;
9317 sp ++;
9318 break;
9322 #endif
9324 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9325 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9326 *sp++ = ins;
9328 ip += 6;
9329 inline_costs += 10 * num_calls++;
9330 break;
9332 case CEE_LDVIRTFTN: {
9333 MonoInst *args [2];
9335 CHECK_STACK (1);
9336 CHECK_OPSIZE (6);
9337 n = read32 (ip + 2);
9338 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9339 if (!cmethod)
9340 goto load_error;
9341 mono_class_init (cmethod->klass);
9343 if (cfg->generic_sharing_context)
9344 context_used = mono_method_check_context_used (cmethod);
9346 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9347 if (check_linkdemand (cfg, method, cmethod))
9348 INLINE_FAILURE;
9349 CHECK_CFG_EXCEPTION;
9350 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9351 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9354 --sp;
9355 args [0] = *sp;
9357 args [1] = emit_get_rgctx_method (cfg, context_used,
9358 cmethod, MONO_RGCTX_INFO_METHOD);
9360 if (context_used)
9361 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9362 else
9363 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9365 ip += 6;
9366 inline_costs += 10 * num_calls++;
9367 break;
9369 case CEE_LDARG:
9370 CHECK_STACK_OVF (1);
9371 CHECK_OPSIZE (4);
9372 n = read16 (ip + 2);
9373 CHECK_ARG (n);
9374 EMIT_NEW_ARGLOAD (cfg, ins, n);
9375 *sp++ = ins;
9376 ip += 4;
9377 break;
9378 case CEE_LDARGA:
9379 CHECK_STACK_OVF (1);
9380 CHECK_OPSIZE (4);
9381 n = read16 (ip + 2);
9382 CHECK_ARG (n);
9383 NEW_ARGLOADA (cfg, ins, n);
9384 MONO_ADD_INS (cfg->cbb, ins);
9385 *sp++ = ins;
9386 ip += 4;
9387 break;
9388 case CEE_STARG:
9389 CHECK_STACK (1);
9390 --sp;
9391 CHECK_OPSIZE (4);
9392 n = read16 (ip + 2);
9393 CHECK_ARG (n);
9394 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9395 UNVERIFIED;
9396 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9397 ip += 4;
9398 break;
9399 case CEE_LDLOC:
9400 CHECK_STACK_OVF (1);
9401 CHECK_OPSIZE (4);
9402 n = read16 (ip + 2);
9403 CHECK_LOCAL (n);
9404 EMIT_NEW_LOCLOAD (cfg, ins, n);
9405 *sp++ = ins;
9406 ip += 4;
9407 break;
9408 case CEE_LDLOCA: {
9409 unsigned char *tmp_ip;
9410 CHECK_STACK_OVF (1);
9411 CHECK_OPSIZE (4);
9412 n = read16 (ip + 2);
9413 CHECK_LOCAL (n);
9415 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9416 ip = tmp_ip;
9417 inline_costs += 1;
9418 break;
9421 EMIT_NEW_LOCLOADA (cfg, ins, n);
9422 *sp++ = ins;
9423 ip += 4;
9424 break;
9426 case CEE_STLOC:
9427 CHECK_STACK (1);
9428 --sp;
9429 CHECK_OPSIZE (4);
9430 n = read16 (ip + 2);
9431 CHECK_LOCAL (n);
9432 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9433 UNVERIFIED;
9434 emit_stloc_ir (cfg, sp, header, n);
9435 ip += 4;
9436 inline_costs += 1;
9437 break;
9438 case CEE_LOCALLOC:
9439 CHECK_STACK (1);
9440 --sp;
9441 if (sp != stack_start)
9442 UNVERIFIED;
9443 if (cfg->method != method)
9445 * Inlining this into a loop in a parent could lead to
9446 * stack overflows which is different behavior than the
9447 * non-inlined case, thus disable inlining in this case.
9449 goto inline_failure;
9451 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9452 ins->dreg = alloc_preg (cfg);
9453 ins->sreg1 = sp [0]->dreg;
9454 ins->type = STACK_PTR;
9455 MONO_ADD_INS (cfg->cbb, ins);
9457 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9458 if (init_locals)
9459 ins->flags |= MONO_INST_INIT;
9461 *sp++ = ins;
9462 ip += 2;
9463 break;
9464 case CEE_ENDFILTER: {
9465 MonoExceptionClause *clause, *nearest;
9466 int cc, nearest_num;
9468 CHECK_STACK (1);
9469 --sp;
9470 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9471 UNVERIFIED;
9472 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9473 ins->sreg1 = (*sp)->dreg;
9474 MONO_ADD_INS (bblock, ins);
9475 start_new_bblock = 1;
9476 ip += 2;
9478 nearest = NULL;
9479 nearest_num = 0;
9480 for (cc = 0; cc < header->num_clauses; ++cc) {
9481 clause = &header->clauses [cc];
9482 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9483 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9484 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9485 nearest = clause;
9486 nearest_num = cc;
9489 g_assert (nearest);
9490 if ((ip - header->code) != nearest->handler_offset)
9491 UNVERIFIED;
9493 break;
9495 case CEE_UNALIGNED_:
9496 ins_flag |= MONO_INST_UNALIGNED;
9497 /* FIXME: record alignment? we can assume 1 for now */
9498 CHECK_OPSIZE (3);
9499 ip += 3;
9500 break;
9501 case CEE_VOLATILE_:
9502 ins_flag |= MONO_INST_VOLATILE;
9503 ip += 2;
9504 break;
9505 case CEE_TAIL_:
9506 ins_flag |= MONO_INST_TAILCALL;
9507 cfg->flags |= MONO_CFG_HAS_TAIL;
9508 /* Can't inline tail calls at this time */
9509 inline_costs += 100000;
9510 ip += 2;
9511 break;
9512 case CEE_INITOBJ:
9513 CHECK_STACK (1);
9514 --sp;
9515 CHECK_OPSIZE (6);
9516 token = read32 (ip + 2);
9517 klass = mini_get_class (method, token, generic_context);
9518 CHECK_TYPELOAD (klass);
9519 if (generic_class_is_reference_type (cfg, klass))
9520 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9521 else
9522 mini_emit_initobj (cfg, *sp, NULL, klass);
9523 ip += 6;
9524 inline_costs += 1;
9525 break;
9526 case CEE_CONSTRAINED_:
9527 CHECK_OPSIZE (6);
9528 token = read32 (ip + 2);
9529 if (method->wrapper_type != MONO_WRAPPER_NONE)
9530 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9531 else
9532 constrained_call = mono_class_get_full (image, token, generic_context);
9533 CHECK_TYPELOAD (constrained_call);
9534 ip += 6;
9535 break;
9536 case CEE_CPBLK:
9537 case CEE_INITBLK: {
9538 MonoInst *iargs [3];
9539 CHECK_STACK (3);
9540 sp -= 3;
9542 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9543 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9544 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9545 /* emit_memset only works when val == 0 */
9546 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9547 } else {
9548 iargs [0] = sp [0];
9549 iargs [1] = sp [1];
9550 iargs [2] = sp [2];
9551 if (ip [1] == CEE_CPBLK) {
9552 MonoMethod *memcpy_method = get_memcpy_method ();
9553 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9554 } else {
9555 MonoMethod *memset_method = get_memset_method ();
9556 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9559 ip += 2;
9560 inline_costs += 1;
9561 break;
9563 case CEE_NO_:
9564 CHECK_OPSIZE (3);
9565 if (ip [2] & 0x1)
9566 ins_flag |= MONO_INST_NOTYPECHECK;
9567 if (ip [2] & 0x2)
9568 ins_flag |= MONO_INST_NORANGECHECK;
9569 /* we ignore the no-nullcheck for now since we
9570 * really do it explicitly only when doing callvirt->call
9572 ip += 3;
9573 break;
9574 case CEE_RETHROW: {
9575 MonoInst *load;
9576 int handler_offset = -1;
9578 for (i = 0; i < header->num_clauses; ++i) {
9579 MonoExceptionClause *clause = &header->clauses [i];
9580 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9581 handler_offset = clause->handler_offset;
9582 break;
9586 bblock->flags |= BB_EXCEPTION_UNSAFE;
9588 g_assert (handler_offset != -1);
9590 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9591 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9592 ins->sreg1 = load->dreg;
9593 MONO_ADD_INS (bblock, ins);
9594 sp = stack_start;
9595 link_bblock (cfg, bblock, end_bblock);
9596 start_new_bblock = 1;
9597 ip += 2;
9598 break;
9600 case CEE_SIZEOF: {
9601 guint32 align;
9602 int ialign;
9604 CHECK_STACK_OVF (1);
9605 CHECK_OPSIZE (6);
9606 token = read32 (ip + 2);
9607 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9608 MonoType *type = mono_type_create_from_typespec (image, token);
9609 token = mono_type_size (type, &ialign);
9610 } else {
9611 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9612 CHECK_TYPELOAD (klass);
9613 mono_class_init (klass);
9614 token = mono_class_value_size (klass, &align);
9616 EMIT_NEW_ICONST (cfg, ins, token);
9617 *sp++= ins;
9618 ip += 6;
9619 break;
9621 case CEE_REFANYTYPE: {
9622 MonoInst *src_var, *src;
9624 CHECK_STACK (1);
9625 --sp;
9627 // FIXME:
9628 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9629 if (!src_var)
9630 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9631 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9632 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9633 *sp++ = ins;
9634 ip += 2;
9635 break;
9637 case CEE_READONLY_:
9638 readonly = TRUE;
9639 ip += 2;
9640 break;
9642 case CEE_UNUSED56:
9643 case CEE_UNUSED57:
9644 case CEE_UNUSED70:
9645 case CEE_UNUSED:
9646 case CEE_UNUSED99:
9647 UNVERIFIED;
9649 default:
9650 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9651 UNVERIFIED;
9653 break;
9655 case CEE_UNUSED58:
9656 case CEE_UNUSED1:
9657 UNVERIFIED;
9659 default:
9660 g_warning ("opcode 0x%02x not handled", *ip);
9661 UNVERIFIED;
9664 if (start_new_bblock != 1)
9665 UNVERIFIED;
9667 bblock->cil_length = ip - bblock->cil_code;
9668 bblock->next_bb = end_bblock;
9670 if (cfg->method == method && cfg->domainvar) {
9671 MonoInst *store;
9672 MonoInst *get_domain;
9674 cfg->cbb = init_localsbb;
9676 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9677 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9679 else {
9680 get_domain->dreg = alloc_preg (cfg);
9681 MONO_ADD_INS (cfg->cbb, get_domain);
9683 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9684 MONO_ADD_INS (cfg->cbb, store);
9687 #ifdef TARGET_POWERPC
9688 if (cfg->compile_aot)
9689 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9690 mono_get_got_var (cfg);
9691 #endif
9693 if (cfg->method == method && cfg->got_var)
9694 mono_emit_load_got_addr (cfg);
9696 if (init_locals) {
9697 MonoInst *store;
9699 cfg->cbb = init_localsbb;
9700 cfg->ip = NULL;
9701 for (i = 0; i < header->num_locals; ++i) {
9702 MonoType *ptype = header->locals [i];
9703 int t = ptype->type;
9704 dreg = cfg->locals [i]->dreg;
9706 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9707 t = mono_class_enum_basetype (ptype->data.klass)->type;
9708 if (ptype->byref) {
9709 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9710 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9711 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9712 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9713 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9714 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9715 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9716 ins->type = STACK_R8;
9717 ins->inst_p0 = (void*)&r8_0;
9718 ins->dreg = alloc_dreg (cfg, STACK_R8);
9719 MONO_ADD_INS (init_localsbb, ins);
9720 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9721 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9722 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9723 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9724 } else {
9725 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9730 if (cfg->init_ref_vars && cfg->method == method) {
9731 /* Emit initialization for ref vars */
9732 // FIXME: Avoid duplication initialization for IL locals.
9733 for (i = 0; i < cfg->num_varinfo; ++i) {
9734 MonoInst *ins = cfg->varinfo [i];
9736 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9737 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9741 /* Add a sequence point for method entry/exit events */
9742 if (seq_points) {
9743 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9744 MONO_ADD_INS (init_localsbb, ins);
9745 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9746 MONO_ADD_INS (cfg->bb_exit, ins);
9749 cfg->ip = NULL;
9751 if (cfg->method == method) {
9752 MonoBasicBlock *bb;
9753 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9754 bb->region = mono_find_block_region (cfg, bb->real_offset);
9755 if (cfg->spvars)
9756 mono_create_spvar_for_region (cfg, bb->region);
9757 if (cfg->verbose_level > 2)
9758 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9762 g_slist_free (class_inits);
9763 dont_inline = g_list_remove (dont_inline, method);
9765 if (inline_costs < 0) {
9766 char *mname;
9768 /* Method is too large */
9769 mname = mono_method_full_name (method, TRUE);
9770 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9771 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9772 g_free (mname);
9773 return -1;
9776 if ((cfg->verbose_level > 2) && (cfg->method == method))
9777 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9779 return inline_costs;
9781 exception_exit:
9782 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9783 g_slist_free (class_inits);
9784 mono_basic_block_free (bb);
9785 dont_inline = g_list_remove (dont_inline, method);
9786 return -1;
9788 inline_failure:
9789 g_slist_free (class_inits);
9790 mono_basic_block_free (bb);
9791 dont_inline = g_list_remove (dont_inline, method);
9792 return -1;
9794 load_error:
9795 g_slist_free (class_inits);
9796 mono_basic_block_free (bb);
9797 dont_inline = g_list_remove (dont_inline, method);
9798 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9799 return -1;
9801 unverified:
9802 g_slist_free (class_inits);
9803 mono_basic_block_free (bb);
9804 dont_inline = g_list_remove (dont_inline, method);
9805 set_exception_type_from_invalid_il (cfg, method, ip);
9806 return -1;
9809 static int
9810 store_membase_reg_to_store_membase_imm (int opcode)
9812 switch (opcode) {
9813 case OP_STORE_MEMBASE_REG:
9814 return OP_STORE_MEMBASE_IMM;
9815 case OP_STOREI1_MEMBASE_REG:
9816 return OP_STOREI1_MEMBASE_IMM;
9817 case OP_STOREI2_MEMBASE_REG:
9818 return OP_STOREI2_MEMBASE_IMM;
9819 case OP_STOREI4_MEMBASE_REG:
9820 return OP_STOREI4_MEMBASE_IMM;
9821 case OP_STOREI8_MEMBASE_REG:
9822 return OP_STOREI8_MEMBASE_IMM;
9823 default:
9824 g_assert_not_reached ();
9827 return -1;
9830 #endif /* DISABLE_JIT */
9833 mono_op_to_op_imm (int opcode)
9835 switch (opcode) {
9836 case OP_IADD:
9837 return OP_IADD_IMM;
9838 case OP_ISUB:
9839 return OP_ISUB_IMM;
9840 case OP_IDIV:
9841 return OP_IDIV_IMM;
9842 case OP_IDIV_UN:
9843 return OP_IDIV_UN_IMM;
9844 case OP_IREM:
9845 return OP_IREM_IMM;
9846 case OP_IREM_UN:
9847 return OP_IREM_UN_IMM;
9848 case OP_IMUL:
9849 return OP_IMUL_IMM;
9850 case OP_IAND:
9851 return OP_IAND_IMM;
9852 case OP_IOR:
9853 return OP_IOR_IMM;
9854 case OP_IXOR:
9855 return OP_IXOR_IMM;
9856 case OP_ISHL:
9857 return OP_ISHL_IMM;
9858 case OP_ISHR:
9859 return OP_ISHR_IMM;
9860 case OP_ISHR_UN:
9861 return OP_ISHR_UN_IMM;
9863 case OP_LADD:
9864 return OP_LADD_IMM;
9865 case OP_LSUB:
9866 return OP_LSUB_IMM;
9867 case OP_LAND:
9868 return OP_LAND_IMM;
9869 case OP_LOR:
9870 return OP_LOR_IMM;
9871 case OP_LXOR:
9872 return OP_LXOR_IMM;
9873 case OP_LSHL:
9874 return OP_LSHL_IMM;
9875 case OP_LSHR:
9876 return OP_LSHR_IMM;
9877 case OP_LSHR_UN:
9878 return OP_LSHR_UN_IMM;
9880 case OP_COMPARE:
9881 return OP_COMPARE_IMM;
9882 case OP_ICOMPARE:
9883 return OP_ICOMPARE_IMM;
9884 case OP_LCOMPARE:
9885 return OP_LCOMPARE_IMM;
9887 case OP_STORE_MEMBASE_REG:
9888 return OP_STORE_MEMBASE_IMM;
9889 case OP_STOREI1_MEMBASE_REG:
9890 return OP_STOREI1_MEMBASE_IMM;
9891 case OP_STOREI2_MEMBASE_REG:
9892 return OP_STOREI2_MEMBASE_IMM;
9893 case OP_STOREI4_MEMBASE_REG:
9894 return OP_STOREI4_MEMBASE_IMM;
9896 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9897 case OP_X86_PUSH:
9898 return OP_X86_PUSH_IMM;
9899 case OP_X86_COMPARE_MEMBASE_REG:
9900 return OP_X86_COMPARE_MEMBASE_IMM;
9901 #endif
9902 #if defined(TARGET_AMD64)
9903 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9904 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9905 #endif
9906 case OP_VOIDCALL_REG:
9907 return OP_VOIDCALL;
9908 case OP_CALL_REG:
9909 return OP_CALL;
9910 case OP_LCALL_REG:
9911 return OP_LCALL;
9912 case OP_FCALL_REG:
9913 return OP_FCALL;
9914 case OP_LOCALLOC:
9915 return OP_LOCALLOC_IMM;
9918 return -1;
9921 static int
9922 ldind_to_load_membase (int opcode)
9924 switch (opcode) {
9925 case CEE_LDIND_I1:
9926 return OP_LOADI1_MEMBASE;
9927 case CEE_LDIND_U1:
9928 return OP_LOADU1_MEMBASE;
9929 case CEE_LDIND_I2:
9930 return OP_LOADI2_MEMBASE;
9931 case CEE_LDIND_U2:
9932 return OP_LOADU2_MEMBASE;
9933 case CEE_LDIND_I4:
9934 return OP_LOADI4_MEMBASE;
9935 case CEE_LDIND_U4:
9936 return OP_LOADU4_MEMBASE;
9937 case CEE_LDIND_I:
9938 return OP_LOAD_MEMBASE;
9939 case CEE_LDIND_REF:
9940 return OP_LOAD_MEMBASE;
9941 case CEE_LDIND_I8:
9942 return OP_LOADI8_MEMBASE;
9943 case CEE_LDIND_R4:
9944 return OP_LOADR4_MEMBASE;
9945 case CEE_LDIND_R8:
9946 return OP_LOADR8_MEMBASE;
9947 default:
9948 g_assert_not_reached ();
9951 return -1;
9954 static int
9955 stind_to_store_membase (int opcode)
9957 switch (opcode) {
9958 case CEE_STIND_I1:
9959 return OP_STOREI1_MEMBASE_REG;
9960 case CEE_STIND_I2:
9961 return OP_STOREI2_MEMBASE_REG;
9962 case CEE_STIND_I4:
9963 return OP_STOREI4_MEMBASE_REG;
9964 case CEE_STIND_I:
9965 case CEE_STIND_REF:
9966 return OP_STORE_MEMBASE_REG;
9967 case CEE_STIND_I8:
9968 return OP_STOREI8_MEMBASE_REG;
9969 case CEE_STIND_R4:
9970 return OP_STORER4_MEMBASE_REG;
9971 case CEE_STIND_R8:
9972 return OP_STORER8_MEMBASE_REG;
9973 default:
9974 g_assert_not_reached ();
9977 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_LOADx_MEM (absolute address) variant of an
 * OP_LOADx_MEMBASE opcode, or -1 if the current architecture has none.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	/* FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 64 bit loads only exist on 64 bit targets */
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an arch specific opcode which applies OPCODE directly to a
 * memory destination, given that the result is stored with STORE_OPCODE.
 * Returns -1 if no such opcode exists on the current architecture.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* only pointer sized (32 bit) stores can be folded on x86 */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	/* a move into memory is just the store itself */
	case OP_MOVE: return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* 32 bit and 64 bit stores can both be folded on amd64 */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG && store_opcode != OP_STOREI8_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR: return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	/* untyped imm ops are treated as pointer sized, i.e. 64 bit */
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	/* a move into memory is just the store itself */
	case OP_MOVE: return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an arch specific opcode which computes OPCODE and stores the
 * result directly to memory, given the store is done with STORE_OPCODE.
 * Returns -1 if no such opcode exists.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		/* setcc only writes a single byte, so only i1 stores qualify */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* explicit break: the original fell through into OP_CNE (harmless
		 * only because both cases test the same condition) */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an arch specific opcode equivalent to OPCODE whose first source
 * operand comes directly from memory, given the value was loaded with
 * LOAD_OPCODE. Returns -1 if no such opcode exists.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* only pointer sized loads can be folded on x86 */
	if (load_opcode != OP_LOAD_MEMBASE && load_opcode != OP_LOADI4_MEMBASE && load_opcode != OP_LOADU4_MEMBASE)
		return -1;

	switch (opcode) {
	case OP_X86_PUSH: return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM: return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE: return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if (load_opcode == OP_LOAD_MEMBASE || load_opcode == OP_LOADI8_MEMBASE)
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if (load_opcode == OP_LOADI4_MEMBASE || load_opcode == OP_LOADU4_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if (load_opcode == OP_LOAD_MEMBASE || load_opcode == OP_LOADI8_MEMBASE)
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if (load_opcode == OP_LOADI4_MEMBASE || load_opcode == OP_LOADU4_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an arch specific opcode equivalent to OPCODE whose second source
 * operand comes directly from memory, given the value was loaded with
 * LOAD_OPCODE. Returns -1 if no such opcode exists.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* only pointer sized loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/*
	 * Each case below ends with an explicit break. The original code fell
	 * through from the 32 bit ALU cases into the 64 bit ones, so e.g.
	 * OP_IADD with an 8 byte load could be folded into the 64 bit
	 * OP_AMD64_ADD_REG_MEMBASE; now such mismatched widths return -1
	 * (no folding) instead.
	 */
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		break;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		break;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		break;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		break;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		break;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		break;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		break;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		break;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		break;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * variant is emulated by a runtime call on this architecture, so the caller
 * keeps the reg-reg form instead.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit platforms */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated as well */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
10273 #ifndef DISABLE_JIT
10276 * mono_handle_global_vregs:
10278 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10279 * for them.
10281 void
10282 mono_handle_global_vregs (MonoCompile *cfg)
10284 gint32 *vreg_to_bb;
10285 MonoBasicBlock *bb;
10286 int i, pos;
10288 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10290 #ifdef MONO_ARCH_SIMD_INTRINSICS
10291 if (cfg->uses_simd_intrinsics)
10292 mono_simd_simplify_indirection (cfg);
10293 #endif
10295 /* Find local vregs used in more than one bb */
10296 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10297 MonoInst *ins = bb->code;
10298 int block_num = bb->block_num;
10300 if (cfg->verbose_level > 2)
10301 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10303 cfg->cbb = bb;
10304 for (; ins; ins = ins->next) {
10305 const char *spec = INS_INFO (ins->opcode);
10306 int regtype = 0, regindex;
10307 gint32 prev_bb;
10309 if (G_UNLIKELY (cfg->verbose_level > 2))
10310 mono_print_ins (ins);
10312 g_assert (ins->opcode >= MONO_CEE_LAST);
10314 for (regindex = 0; regindex < 4; regindex ++) {
10315 int vreg = 0;
10317 if (regindex == 0) {
10318 regtype = spec [MONO_INST_DEST];
10319 if (regtype == ' ')
10320 continue;
10321 vreg = ins->dreg;
10322 } else if (regindex == 1) {
10323 regtype = spec [MONO_INST_SRC1];
10324 if (regtype == ' ')
10325 continue;
10326 vreg = ins->sreg1;
10327 } else if (regindex == 2) {
10328 regtype = spec [MONO_INST_SRC2];
10329 if (regtype == ' ')
10330 continue;
10331 vreg = ins->sreg2;
10332 } else if (regindex == 3) {
10333 regtype = spec [MONO_INST_SRC3];
10334 if (regtype == ' ')
10335 continue;
10336 vreg = ins->sreg3;
10339 #if SIZEOF_REGISTER == 4
10340 /* In the LLVM case, the long opcodes are not decomposed */
10341 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10343 * Since some instructions reference the original long vreg,
10344 * and some reference the two component vregs, it is quite hard
10345 * to determine when it needs to be global. So be conservative.
10347 if (!get_vreg_to_inst (cfg, vreg)) {
10348 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10350 if (cfg->verbose_level > 2)
10351 printf ("LONG VREG R%d made global.\n", vreg);
10355 * Make the component vregs volatile since the optimizations can
10356 * get confused otherwise.
10358 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10359 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10361 #endif
10363 g_assert (vreg != -1);
10365 prev_bb = vreg_to_bb [vreg];
10366 if (prev_bb == 0) {
10367 /* 0 is a valid block num */
10368 vreg_to_bb [vreg] = block_num + 1;
10369 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10370 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10371 continue;
10373 if (!get_vreg_to_inst (cfg, vreg)) {
10374 if (G_UNLIKELY (cfg->verbose_level > 2))
10375 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10377 switch (regtype) {
10378 case 'i':
10379 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10380 break;
10381 case 'l':
10382 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10383 break;
10384 case 'f':
10385 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10386 break;
10387 case 'v':
10388 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10389 break;
10390 default:
10391 g_assert_not_reached ();
10395 /* Flag as having been used in more than one bb */
10396 vreg_to_bb [vreg] = -1;
10402 /* If a variable is used in only one bblock, convert it into a local vreg */
10403 for (i = 0; i < cfg->num_varinfo; i++) {
10404 MonoInst *var = cfg->varinfo [i];
10405 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10407 switch (var->type) {
10408 case STACK_I4:
10409 case STACK_OBJ:
10410 case STACK_PTR:
10411 case STACK_MP:
10412 case STACK_VTYPE:
10413 #if SIZEOF_REGISTER == 8
10414 case STACK_I8:
10415 #endif
10416 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10417 /* Enabling this screws up the fp stack on x86 */
10418 case STACK_R8:
10419 #endif
10420 /* Arguments are implicitly global */
10421 /* Putting R4 vars into registers doesn't work currently */
10422 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10424 * Make that the variable's liveness interval doesn't contain a call, since
10425 * that would cause the lvreg to be spilled, making the whole optimization
10426 * useless.
10428 /* This is too slow for JIT compilation */
10429 #if 0
10430 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10431 MonoInst *ins;
10432 int def_index, call_index, ins_index;
10433 gboolean spilled = FALSE;
10435 def_index = -1;
10436 call_index = -1;
10437 ins_index = 0;
10438 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10439 const char *spec = INS_INFO (ins->opcode);
10441 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10442 def_index = ins_index;
10444 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10445 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10446 if (call_index > def_index) {
10447 spilled = TRUE;
10448 break;
10452 if (MONO_IS_CALL (ins))
10453 call_index = ins_index;
10455 ins_index ++;
10458 if (spilled)
10459 break;
10461 #endif
10463 if (G_UNLIKELY (cfg->verbose_level > 2))
10464 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10465 var->flags |= MONO_INST_IS_DEAD;
10466 cfg->vreg_to_inst [var->dreg] = NULL;
10468 break;
10473 * Compress the varinfo and vars tables so the liveness computation is faster and
10474 * takes up less space.
10476 pos = 0;
10477 for (i = 0; i < cfg->num_varinfo; ++i) {
10478 MonoInst *var = cfg->varinfo [i];
10479 if (pos < i && cfg->locals_start == i)
10480 cfg->locals_start = pos;
10481 if (!(var->flags & MONO_INST_IS_DEAD)) {
10482 if (pos < i) {
10483 cfg->varinfo [pos] = cfg->varinfo [i];
10484 cfg->varinfo [pos]->inst_c0 = pos;
10485 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10486 cfg->vars [pos].idx = pos;
10487 #if SIZEOF_REGISTER == 4
10488 if (cfg->varinfo [pos]->type == STACK_I8) {
10489 /* Modify the two component vars too */
10490 MonoInst *var1;
10492 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10493 var1->inst_c0 = pos;
10494 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10495 var1->inst_c0 = pos;
10497 #endif
10499 pos ++;
10502 cfg->num_varinfo = pos;
10503 if (cfg->locals_start > cfg->num_varinfo)
10504 cfg->locals_start = cfg->num_varinfo;
/**
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	/* Per-vreg cache of the lvreg the variable was last loaded into (0 = none) */
	guint32 *vreg_to_lvreg;
	/* List of vregs with an active entry in vreg_to_lvreg, so it can be cleared cheaply */
	guint32 *lvregs;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	/* Maps a spec character ('i'/'l'/'f'/'x') to the stack type used by alloc_dreg () */
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Give the two 32 bit component vregs (dreg+1/dreg+2) stack slots
				 * inside the 64 bit variable's slot */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			}
			default:
				break;
			}
		}
	}
#endif

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	/* NOTE(review): hard 1024-entry limit, guarded by g_asserts below */
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: lvregs are only valid within one bblock */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					/* Compute the address as basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			/* By this point only low level IR opcodes should remain */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				/* Swap dreg/sreg2 so the base reg is processed as a source below;
				 * swapped back at the end of the loop body (see 'if (store)') */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				/* Build a matching spec string with DEST moved to SRC2 */
				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hardware register, just rename */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft float keeps R8 values in integer registers */
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* 64 bit value on a 32 bit platform: store the two halves */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* Turn the move into a direct store of its source */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							/* The instruction became a store: apply the same
							 * dreg/sreg2 swap + spec rewrite as at the top */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}
			}

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hardware register, just rename */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;
						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							/* The move becomes a nop; load directly into its dreg */
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* 64 bit value on a 32 bit platform: load the two halves */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				/* Deferred from the DREG section: the store's lvreg can now be cached */
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: calls clobber the lvregs */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
11011 * FIXME:
11012 * - use 'iadd' instead of 'int_add'
11013 * - handling ovf opcodes: decompose in method_to_ir.
11014 * - unify iregs/fregs
11015 * -> partly done, the missing parts are:
11016 * - a more complete unification would involve unifying the hregs as well, so
11017 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11018 * would no longer map to the machine hregs, so the code generators would need to
11019 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11020 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11021 * fp/non-fp branches speeds it up by about 15%.
11022 * - use sext/zext opcodes instead of shifts
11023 * - add OP_ICALL
11024 * - get rid of TEMPLOADs if possible and use vregs instead
11025 * - clean up usage of OP_P/OP_ opcodes
11026 * - cleanup usage of DUMMY_USE
11027 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11028 * stack
11029 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11030 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11031 * - make sure handle_stack_args () is called before the branch is emitted
11032 * - when the new IR is done, get rid of all unused stuff
11033 * - COMPARE/BEQ as separate instructions or unify them ?
11034 * - keeping them separate allows specialized compare instructions like
11035 * compare_imm, compare_membase
11036 * - most back ends unify fp compare+branch, fp compare+ceq
11037 * - integrate mono_save_args into inline_method
11038 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11039 * - handle long shift opts on 32 bit platforms somehow: they require
11040 * 3 sregs (2 for arg1 and 1 for arg2)
11041 * - make byref a 'normal' type.
11042 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11043 * variable if needed.
11044 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11045 * like inline_method.
11046 * - remove inlining restrictions
11047 * - fix LNEG and enable cfold of INEG
11048 * - generalize x86 optimizations like ldelema as a peephole optimization
11049 * - add store_mem_imm for amd64
11050 * - optimize the loading of the interruption flag in the managed->native wrappers
11051 * - avoid special handling of OP_NOP in passes
11052 * - move code inserting instructions into one function/macro.
11053 * - try a coalescing phase after liveness analysis
11054 * - add float -> vreg conversion + local optimizations on !x86
11055 * - figure out how to handle decomposed branches during optimizations, ie.
11056 * compare+branch, op_jump_table+op_br etc.
11057 * - promote RuntimeXHandles to vregs
11058 * - vtype cleanups:
11059 * - add a NEW_VARLOADA_VREG macro
11060 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11061 * accessing vtype fields.
11062 * - get rid of I8CONST on 64 bit platforms
11063 * - dealing with the increase in code size due to branches created during opcode
11064 * decomposition:
11065 * - use extended basic blocks
11066 * - all parts of the JIT
11067 * - handle_global_vregs () && local regalloc
11068 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11069 * - sources of increase in code size:
11070 * - vtypes
11071 * - long compares
11072 * - isinst and castclass
11073 * - lvregs not allocated to global registers even if used multiple times
11074 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11075 * meaningful.
11076 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11077 * - add all micro optimizations from the old JIT
11078 * - put tree optimizations into the deadce pass
11079 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11080 * specific function.
11081 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11082 * fcompare + branchCC.
11083 * - create a helper function for allocating a stack slot, taking into account
11084 * MONO_CFG_HAS_SPILLUP.
11085 * - merge r68207.
11086 * - merge the ia64 switch changes.
11087 * - optimize mono_regstate2_alloc_int/float.
11088 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11089 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11090 * parts of the tree could be separated by other instructions, killing the tree
11091 * arguments, or stores killing loads etc. Also, should we fold loads into other
11092 * instructions if the result of the load is used multiple times ?
11093 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11094 * - LAST MERGE: 108395.
11095 * - when returning vtypes in registers, generate IR and append it to the end of the
11096 * last bb instead of doing it in the epilog.
11097 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11102 NOTES
11103 -----
11105 - When to decompose opcodes:
11106 - earlier: this makes some optimizations hard to implement, since the low level IR
11107      no longer contains the neccessary information. But it is easier to do.
11108 - later: harder to implement, enables more optimizations.
11109 - Branches inside bblocks:
11110 - created when decomposing complex opcodes.
11111 - branches to another bblock: harmless, but not tracked by the branch
11112 optimizations, so need to branch to a label at the start of the bblock.
11113 - branches to inside the same bblock: very problematic, trips up the local
11114      reg allocator. Can be fixed by splitting the current bblock, but that is a
11115 complex operation, since some local vregs can become global vregs etc.
11116 - Local/global vregs:
11117 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11118 local register allocator.
11119 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11120 structure, created by mono_create_var (). Assigned to hregs or the stack by
11121 the global register allocator.
11122 - When to do optimizations like alu->alu_imm:
11123 - earlier -> saves work later on since the IR will be smaller/simpler
11124 - later -> can work on more instructions
11125 - Handling of valuetypes:
11126 - When a vtype is pushed on the stack, a new temporary is created, an
11127 instruction computing its address (LDADDR) is emitted and pushed on
11128 the stack. Need to optimize cases when the vtype is used immediately as in
11129 argument passing, stloc etc.
11130 - Instead of the to_end stuff in the old JIT, simply call the function handling
11131 the values on the stack before emitting the last instruction of the bb.
11134 #endif /* DISABLE_JIT */