2010-04-07 Rodrigo Kumpera <rkumpera@novell.com>
[mono.git] / mono / mini / method-to-ir.c
blobaebed70ac477982b1093636008e309275271ccdf
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
55 #include "mini.h"
56 #include "trace.h"
58 #include "ir-emit.h"
60 #include "jit-icalls.h"
61 #include "jit.h"
62 #include "debugger-agent.h"
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of the current callee (only when we are actually inlining). */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of method-to-ir if a compile-time exception has been recorded on cfg. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException on cfg and abort compilation. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException on cfg and abort compilation. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/* Abort generic sharing for opcodes the shared-code path cannot handle. */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;	\
			goto exception_exit;	\
		}			\
	} while (0)

/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
125 #ifdef MINI_OP
126 #undef MINI_OP
127 #endif
128 #ifdef MINI_OP3
129 #undef MINI_OP3
130 #endif
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
133 #define NONE ' '
134 #define IREG 'i'
135 #define FREG 'f'
136 #define VREG 'v'
137 #define XREG 'x'
138 #if SIZEOF_REGISTER == 8
139 #define LREG IREG
140 #else
141 #define LREG 'l'
142 #endif
143 /* keep in sync with the enum in mini.h */
144 const char
145 ins_info[] = {
146 #include "mini-ops.h"
148 #undef MINI_OP
149 #undef MINI_OP3
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
160 #undef MINI_OP
161 #undef MINI_OP3
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
165 (vi)->reg = -1; \
166 (vi)->idx = (id); \
167 } while (0)
169 void
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
177 guint32
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
183 guint32
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
189 guint32
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
195 guint32
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
201 guint
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
204 if (type->byref)
205 return OP_MOVE;
207 handle_enum:
208 switch (type->type) {
209 case MONO_TYPE_I1:
210 case MONO_TYPE_U1:
211 case MONO_TYPE_BOOLEAN:
212 return OP_MOVE;
213 case MONO_TYPE_I2:
214 case MONO_TYPE_U2:
215 case MONO_TYPE_CHAR:
216 return OP_MOVE;
217 case MONO_TYPE_I4:
218 case MONO_TYPE_U4:
219 return OP_MOVE;
220 case MONO_TYPE_I:
221 case MONO_TYPE_U:
222 case MONO_TYPE_PTR:
223 case MONO_TYPE_FNPTR:
224 return OP_MOVE;
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
230 return OP_MOVE;
231 case MONO_TYPE_I8:
232 case MONO_TYPE_U8:
233 #if SIZEOF_REGISTER == 8
234 return OP_MOVE;
235 #else
236 return OP_LMOVE;
237 #endif
238 case MONO_TYPE_R4:
239 return OP_FMOVE;
240 case MONO_TYPE_R8:
241 return OP_FMOVE;
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
245 goto handle_enum;
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 return OP_XMOVE;
249 return OP_VMOVE;
250 case MONO_TYPE_TYPEDBYREF:
251 return OP_VMOVE;
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
254 goto handle_enum;
255 case MONO_TYPE_VAR:
256 case MONO_TYPE_MVAR:
257 g_assert (cfg->generic_sharing_context);
258 return OP_MOVE;
259 default:
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
262 return -1;
265 void
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
268 int i;
269 MonoInst *tree;
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
274 printf (", OUT: ");
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
277 printf (" ]\n");
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
284 * file.
286 #ifndef DISABLE_JIT
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if (!(tblock)) { \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
298 } while (0)
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
309 } while (0)
310 #endif
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 MonoInst *widen; \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
321 } while (0)
322 #else
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #endif
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
328 sp -= 2; \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
332 CHECK_TYPE (ins); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 } while (0)
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
342 sp--; \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
345 CHECK_TYPE (ins); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
349 } while (0)
351 #define ADD_BINCOND(next_block) do { \
352 MonoInst *cmp; \
353 sp -= 2; \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
358 CHECK_TYPE (cmp); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
368 } else { \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
380 } while (0)
382 /* *
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
389 static void
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
393 int i, found;
395 #if 0
396 if (from->cil_code) {
397 if (to->cil_code)
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
399 else
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 } else {
402 if (to->cil_code)
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
404 else
405 printf ("edge from entry to exit\n");
407 #endif
409 found = FALSE;
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
412 found = TRUE;
413 break;
416 if (!found) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
421 newa [i] = to;
422 from->out_count++;
423 from->out_bb = newa;
426 found = FALSE;
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
429 found = TRUE;
430 break;
433 if (!found) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
438 newa [i] = from;
439 to->in_count++;
440 to->in_bb = newa;
444 void
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
456 * Returns:
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
464 static int
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
469 int i;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 else
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
490 return -1;
493 static GList*
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
498 int i;
499 GList *res = NULL;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
509 return res;
512 static void
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
515 MonoInst *var;
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
518 if (var)
519 return;
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
528 MonoInst *
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
534 static MonoInst*
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
537 MonoInst *var;
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
540 if (var)
541 return var;
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
549 return var;
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
556 void
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
559 MonoClass *klass;
561 inst->klass = klass = mono_class_from_mono_type (type);
562 if (type->byref) {
563 inst->type = STACK_MP;
564 return;
567 handle_enum:
568 switch (type->type) {
569 case MONO_TYPE_VOID:
570 inst->type = STACK_INV;
571 return;
572 case MONO_TYPE_I1:
573 case MONO_TYPE_U1:
574 case MONO_TYPE_BOOLEAN:
575 case MONO_TYPE_I2:
576 case MONO_TYPE_U2:
577 case MONO_TYPE_CHAR:
578 case MONO_TYPE_I4:
579 case MONO_TYPE_U4:
580 inst->type = STACK_I4;
581 return;
582 case MONO_TYPE_I:
583 case MONO_TYPE_U:
584 case MONO_TYPE_PTR:
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
587 return;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
594 return;
595 case MONO_TYPE_I8:
596 case MONO_TYPE_U8:
597 inst->type = STACK_I8;
598 return;
599 case MONO_TYPE_R4:
600 case MONO_TYPE_R8:
601 inst->type = STACK_R8;
602 return;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
606 goto handle_enum;
607 } else {
608 inst->klass = klass;
609 inst->type = STACK_VTYPE;
610 return;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
615 return;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
618 goto handle_enum;
619 case MONO_TYPE_VAR :
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
623 * really a reference
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
627 return;
628 default:
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
636 static const char
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
648 static const char
649 neg_table [] = {
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
654 static const char
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
666 static const char
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
669 {0},
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
680 static const char
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
697 static const guint16
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
703 static const guint16
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
709 static const guint16
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
715 static const guint16
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
721 static const guint16
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
727 static const guint16
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
733 static const guint16
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
746 static void
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
750 /* binops */
751 case CEE_ADD:
752 case CEE_SUB:
753 case CEE_MUL:
754 case CEE_DIV:
755 case CEE_REM:
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
759 break;
760 case CEE_DIV_UN:
761 case CEE_REM_UN:
762 case CEE_AND:
763 case CEE_OR:
764 case CEE_XOR:
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
767 break;
768 case CEE_SHL:
769 case CEE_SHR:
770 case CEE_SHR_UN:
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
773 break;
774 case OP_COMPARE:
775 case OP_LCOMPARE:
776 case OP_ICOMPARE:
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
782 else
783 ins->opcode = OP_ICOMPARE;
784 break;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
789 break;
790 case CEE_BEQ:
791 case CEE_BGE:
792 case CEE_BGT:
793 case CEE_BLE:
794 case CEE_BLT:
795 case CEE_BNE_UN:
796 case CEE_BGE_UN:
797 case CEE_BGT_UN:
798 case CEE_BLE_UN:
799 case CEE_BLT_UN:
800 ins->opcode += beqops_op_map [src1->type];
801 break;
802 case OP_CEQ:
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
805 break;
806 case OP_CGT:
807 case OP_CGT_UN:
808 case OP_CLT:
809 case OP_CLT_UN:
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
812 break;
813 /* unops */
814 case CEE_NEG:
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
817 break;
818 case CEE_NOT:
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
821 else
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
824 break;
825 case CEE_CONV_I1:
826 case CEE_CONV_I2:
827 case CEE_CONV_I4:
828 case CEE_CONV_U4:
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
831 break;
832 case CEE_CONV_R_UN:
833 ins->type = STACK_R8;
834 switch (src1->type) {
835 case STACK_I4:
836 case STACK_PTR:
837 ins->opcode = OP_ICONV_TO_R_UN;
838 break;
839 case STACK_I8:
840 ins->opcode = OP_LCONV_TO_R_UN;
841 break;
843 break;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
852 break;
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
857 break;
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
866 break;
867 case CEE_CONV_U:
868 ins->type = STACK_PTR;
869 switch (src1->type) {
870 case STACK_I4:
871 ins->opcode = OP_ICONV_TO_U;
872 break;
873 case STACK_PTR:
874 case STACK_MP:
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
877 #else
878 ins->opcode = OP_MOVE;
879 #endif
880 break;
881 case STACK_I8:
882 ins->opcode = OP_LCONV_TO_U;
883 break;
884 case STACK_R8:
885 ins->opcode = OP_FCONV_TO_U;
886 break;
888 break;
889 case CEE_CONV_I8:
890 case CEE_CONV_U8:
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
893 break;
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
898 break;
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
903 break;
904 case CEE_CONV_R4:
905 case CEE_CONV_R8:
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
908 break;
909 case OP_CKFINITE:
910 ins->type = STACK_R8;
911 break;
912 case CEE_CONV_U2:
913 case CEE_CONV_U1:
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
916 break;
917 case CEE_CONV_I:
918 case CEE_CONV_OVF_I:
919 case CEE_CONV_OVF_U:
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
922 break;
923 case CEE_ADD_OVF:
924 case CEE_ADD_OVF_UN:
925 case CEE_MUL_OVF:
926 case CEE_MUL_OVF_UN:
927 case CEE_SUB_OVF:
928 case CEE_SUB_OVF_UN:
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
933 break;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
936 break;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
944 break;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
947 break;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
951 break;
952 default:
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
954 break;
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
961 static const char
962 ldind_type [] = {
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/* Disabled: rough verifier that the values on the stack match SIG. */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
1044 * compiling.
1046 MonoInst *
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1051 return NULL;
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
1056 #else
1057 return NULL;
1058 #endif
1061 static MonoInst *
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
1075 static MonoType*
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1082 case STACK_MP:
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1086 default:
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
1089 return NULL;
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 t = mono_type_get_underlying_type (t);
1096 switch (t->type) {
1097 case MONO_TYPE_I1:
1098 case MONO_TYPE_U1:
1099 case MONO_TYPE_BOOLEAN:
1100 case MONO_TYPE_I2:
1101 case MONO_TYPE_U2:
1102 case MONO_TYPE_CHAR:
1103 case MONO_TYPE_I4:
1104 case MONO_TYPE_U4:
1105 return STACK_I4;
1106 case MONO_TYPE_I:
1107 case MONO_TYPE_U:
1108 case MONO_TYPE_PTR:
1109 case MONO_TYPE_FNPTR:
1110 return STACK_PTR;
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1116 return STACK_OBJ;
1117 case MONO_TYPE_I8:
1118 case MONO_TYPE_U8:
1119 return STACK_I8;
1120 case MONO_TYPE_R4:
1121 case MONO_TYPE_R8:
1122 return STACK_R8;
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
1125 return STACK_VTYPE;
1126 case MONO_TYPE_GENERICINST:
1127 if (mono_type_generic_inst_is_valuetype (t))
1128 return STACK_VTYPE;
1129 else
1130 return STACK_OBJ;
1131 break;
1132 default:
1133 g_assert_not_reached ();
1136 return -1;
1139 static MonoClass*
1140 array_access_to_klass (int opcode)
1142 switch (opcode) {
1143 case CEE_LDELEM_U1:
1144 return mono_defaults.byte_class;
1145 case CEE_LDELEM_U2:
1146 return mono_defaults.uint16_class;
1147 case CEE_LDELEM_I:
1148 case CEE_STELEM_I:
1149 return mono_defaults.int_class;
1150 case CEE_LDELEM_I1:
1151 case CEE_STELEM_I1:
1152 return mono_defaults.sbyte_class;
1153 case CEE_LDELEM_I2:
1154 case CEE_STELEM_I2:
1155 return mono_defaults.int16_class;
1156 case CEE_LDELEM_I4:
1157 case CEE_STELEM_I4:
1158 return mono_defaults.int32_class;
1159 case CEE_LDELEM_U4:
1160 return mono_defaults.uint32_class;
1161 case CEE_LDELEM_I8:
1162 case CEE_STELEM_I8:
1163 return mono_defaults.int64_class;
1164 case CEE_LDELEM_R4:
1165 case CEE_STELEM_R4:
1166 return mono_defaults.single_class;
1167 case CEE_LDELEM_R8:
1168 case CEE_STELEM_R8:
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1173 default:
1174 g_assert_not_reached ();
1176 return NULL;
1180 * We try to share variables when possible
1182 static MonoInst *
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1185 MonoInst *res;
1186 int pos, vnum;
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1195 case STACK_I4:
1196 case STACK_I8:
1197 case STACK_R8:
1198 case STACK_PTR:
1199 case STACK_MP:
1200 case STACK_OBJ:
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
1205 break;
1206 default:
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1209 return res;
1212 static void
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	/* Phase 1: pick (or create) the set of spill variables for this bb's exit stack. */
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		/* Prefer reusing the in_stack of a successor so join points agree. */
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Phase 2: propagate the chosen variables to successors' in_stack;
	 * mismatched stack depths at a join point make the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Phase 3: emit the stores spilling the current stack into the variables. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1354 /* Emit code which loads interface_offsets [klass->interface_id]
1355 * The array is stored in memory before vtable.
1357 static void
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1360 if (cfg->compile_aot) {
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1368 else {
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which loads into INTF_BIT_REG a nonzero value iff the bit for
 * klass->interface_id is set in the interface bitmap found at OFFSET bytes
 * from BASE_REG (a MonoClass* or MonoVTable*).
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmaps are decoded by a JIT icall instead of inline code. */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* interface_id is only known at AOT load time: compute
		 * bitmap [iid >> 3] & (1 << (iid & 7)) entirely in registers. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* JIT: byte index and bit mask are compile-time constants */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
1420 static void
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
1430 static void
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1437 * Emit code which checks whenever the interface id of @klass is smaller than
1438 * than the value given by max_iid_reg.
1440 static void
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
1444 if (cfg->compile_aot) {
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1449 else
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1451 if (false_target)
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1453 else
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1457 /* Same as above, but obtains max_iid from a vtable */
1458 static void
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1468 /* Same as above, but obtains max_iid from a klass */
1469 static void
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style check: if the MonoClass in KLASS_REG derives from
 * KLASS (via the supertypes table), branch to TRUE_TARGET; if it provably
 * cannot (inheritance depth too small), branch to FALSE_TARGET; otherwise
 * fall through. KLASS_INS, when non-NULL, holds the runtime MonoClass* to
 * compare against (generic sharing case).
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	/* only check idepth when it is not covered by the fixed-size supertable */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* load supertypes [klass->idepth - 1] and compare with the target class */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
1505 static void
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1511 static void
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1519 if (true_target)
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1521 else
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
1528 static void
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1536 if (true_target)
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1538 else
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class-equality check: compare the MonoClass in KLASS_REG
 * against KLASS (or against the runtime class in KLASS_INST when non-NULL)
 * and throw InvalidCastException if they differ.
 */
static inline void
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
{
	if (klass_inst) {
		/* generic sharing: the class to compare with is in a register */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		/* AOT: the MonoClass* pointer is a patchable constant */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
1557 static inline void
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1563 static inline void
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1570 } else {
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1576 static void
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check of the MonoClass in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. For array classes, rank and
 * element class are checked; OBJ_REG (when not -1) additionally allows the
 * SZARRAY "is a vector" check on the object itself. OBJECT_IS_NULL is the
 * block to branch to when a sub-check can succeed trivially. KLASS_INST,
 * when non-NULL, holds the runtime MonoClass* (generic sharing).
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* ranks must match exactly */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* then check the element (cast) class */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		/* non-array case: walk the supertypes table */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
1632 static void
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline code which stores VAL (currently only 0 is supported) into
 * SIZE bytes at DESTREG+OFFSET, using the widest stores ALIGN permits.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	/* only zeroing is supported by the callers of this helper */
	g_assert (val == 0);

	if (align == 0)
		align = 4;

	/* small, aligned sizes: a single immediate store suffices */
	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	/* materialize the value once and reuse it for all stores */
	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* align to 8 bytes with one 4-byte store, then use 8-byte stores */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* remaining tail: progressively narrower stores */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
1716 #endif /* DISABLE_JIT */
/*
 * Emit inline code which copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET, using the widest loads/stores ALIGN permits.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 64-bit targets: copy 8 bytes at a time first */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* remaining tail: progressively narrower copies */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1780 #ifndef DISABLE_JIT
/*
 * Map the return TYPE of a call to the corresponding call opcode family,
 * selected by CALLI (indirect call) and VIRT (virtual call). Enums and
 * generic instances are unwrapped and retried via handle_enum.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		/* 64-bit integer result */
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		/* floating point result */
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* unwrap the enum to its underlying type and retry */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* reduce to the generic container's type and retry */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	/* unwrap enums so the underlying stack type is compared */
	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* value types must match the exact class */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
1954 * signature.
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1960 static int
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
1964 int i;
1966 if (sig->hasthis) {
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1968 return 1;
1969 args++;
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1974 return 1;
1975 continue;
1977 simple_type = sig->params [i];
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1979 handle_enum:
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1982 return 1;
1983 continue;
1984 case MONO_TYPE_I1:
1985 case MONO_TYPE_U1:
1986 case MONO_TYPE_BOOLEAN:
1987 case MONO_TYPE_I2:
1988 case MONO_TYPE_U2:
1989 case MONO_TYPE_CHAR:
1990 case MONO_TYPE_I4:
1991 case MONO_TYPE_U4:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1993 return 1;
1994 continue;
1995 case MONO_TYPE_I:
1996 case MONO_TYPE_U:
1997 case MONO_TYPE_PTR:
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2000 return 1;
2001 continue;
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2008 return 1;
2009 continue;
2010 case MONO_TYPE_I8:
2011 case MONO_TYPE_U8:
2012 if (args [i]->type != STACK_I8)
2013 return 1;
2014 continue;
2015 case MONO_TYPE_R4:
2016 case MONO_TYPE_R8:
2017 if (args [i]->type != STACK_R8)
2018 return 1;
2019 continue;
2020 case MONO_TYPE_VALUETYPE:
2021 if (simple_type->data.klass->enumtype) {
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2023 goto handle_enum;
2025 if (args [i]->type != STACK_VTYPE)
2026 return 1;
2027 continue;
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2030 return 1;
2031 continue;
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2034 goto handle_enum;
2036 default:
2037 g_error ("unknown type 0x%02x in check_call_signature",
2038 simple_type->type);
2041 return 0;
2044 static int
2045 callvirt_to_call (int opcode)
2047 switch (opcode) {
2048 case OP_CALLVIRT:
2049 return OP_CALL;
2050 case OP_VOIDCALLVIRT:
2051 return OP_VOIDCALL;
2052 case OP_FCALLVIRT:
2053 return OP_FCALL;
2054 case OP_VCALLVIRT:
2055 return OP_VCALL;
2056 case OP_LCALLVIRT:
2057 return OP_LCALL;
2058 default:
2059 g_assert_not_reached ();
2062 return -1;
2065 static int
2066 callvirt_to_call_membase (int opcode)
2068 switch (opcode) {
2069 case OP_CALLVIRT:
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2073 case OP_FCALLVIRT:
2074 return OP_FCALL_MEMBASE;
2075 case OP_LCALLVIRT:
2076 return OP_LCALL_MEMBASE;
2077 case OP_VCALLVIRT:
2078 return OP_VCALL_MEMBASE;
2079 default:
2080 g_assert_not_reached ();
2083 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit IR to pass the IMT identifier (CALL->method, or IMT_ARG when the
 * caller already has it in a vreg) to the callee. On architectures with a
 * dedicated IMT register the value is moved/loaded into a fresh vreg and
 * registered as an out-argument for that register; otherwise the backend
 * hook mono_arch_emit_imt_argument () is responsible for the hand-off.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		/* The caller already computed the identifier: just copy it */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* AOT: the method address must go through a patchable constant */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		/* JIT: embed the MonoMethod* directly as a pointer constant */
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2112 static MonoJumpInfo *
2113 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2115 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2117 ji->ip.i = ip;
2118 ji->type = type;
2119 ji->data.target = target;
2121 return ji;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * set up its return-value handling (including the vret variable for struct
 * returns), and let the backend lower the argument passing. CALLI/VIRTUAL/TAIL
 * select the flavor of call opcode. The instruction is NOT added to a bblock;
 * the caller does that.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig, 
					 MonoInst **args, int calli, int virtual, int tail)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		/* Tail calls returning a struct reuse the caller's vret address */
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/* 
		 * If the call has a float argument, we would need to do an r8->r4 conversion using 
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-argument area needed by any call */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2221 inline static MonoInst*
2222 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2224 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2226 call->inst.sreg1 = addr->dreg;
2228 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2230 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but additionally passes RGCTX_ARG (the runtime
 * generic context) in the architecture's dedicated RGCTX register. Only
 * supported on architectures which define MONO_ARCH_RGCTX_REG.
 */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	MonoCallInst *call;
	int rgctx_reg = -1;

	if (rgctx_arg) {
		/* Copy into a fresh vreg before the call sequence is emitted */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	if (rgctx_arg) {
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
	}
	return (MonoInst*)call;
#else
	g_assert_not_reached ();
	return NULL;
#endif
}
2257 static MonoInst*
2258 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2259 static MonoInst*
2260 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG and arguments
 * ARGS. THIS is the receiver (NULL for static/non-virtual calls); IMT_ARG, if
 * set, is the interface method identifier for IMT dispatch. Handles remoting
 * wrappers, delegate Invoke fast paths, devirtualization of final/non-virtual
 * methods, and interface/vtable slot dispatch. Returns the call instruction.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	/* Non-virtual instance calls on MarshalByRef/object might hit a proxy */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);

	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		/* Shared code: fetch the remoting wrapper's address from the rgctx */
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		/* Fast path for delegate Invoke: jump through delegate->invoke_impl */
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) && 
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/* 
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/* 
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		/* True virtual dispatch: load the slot from the vtable */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			if (mono_use_imt) {
				/* IMT slots live at negative offsets from the vtable */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* Non-IMT interface dispatch through the interface offsets table */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but additionally passes VTABLE_ARG
 * in the architecture's RGCTX register when it is non-NULL. Only supported
 * on architectures which define MONO_ARCH_RGCTX_REG.
 */
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
		MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	int rgctx_reg = 0;
#endif
	MonoInst *ins;
	MonoCallInst *call;

	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Copy into a fresh vreg before emitting the call sequence */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
#else
		NOT_IMPLEMENTED;
#endif
	}
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;
	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
#else
		NOT_IMPLEMENTED;
#endif
	}

	return ins;
}
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full () which uses the
 * method's own signature and no IMT argument.
 */
MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
{
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
}
2445 MonoInst*
2446 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2447 MonoInst **args)
2449 MonoCallInst *call;
2451 g_assert (sig);
2453 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2454 call->fptr = func;
2456 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2458 return (MonoInst*)call;
2461 MonoInst*
2462 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2464 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2466 g_assert (info);
2468 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2472 * mono_emit_abs_call:
2474 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2476 inline static MonoInst*
2477 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2478 MonoMethodSignature *sig, MonoInst **args)
2480 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2481 MonoInst *ins;
2484 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2485 * handle it.
2487 if (cfg->abs_patches == NULL)
2488 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2489 g_hash_table_insert (cfg->abs_patches, ji, ji);
2490 ins = mono_emit_native_call (cfg, ji, sig, args);
2491 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2492 return ins;
2495 static MonoInst*
2496 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2498 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2499 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2500 int widen_op = -1;
2503 * Native code might return non register sized integers
2504 * without initializing the upper bits.
2506 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2507 case OP_LOADI1_MEMBASE:
2508 widen_op = OP_ICONV_TO_I1;
2509 break;
2510 case OP_LOADU1_MEMBASE:
2511 widen_op = OP_ICONV_TO_U1;
2512 break;
2513 case OP_LOADI2_MEMBASE:
2514 widen_op = OP_ICONV_TO_I2;
2515 break;
2516 case OP_LOADU2_MEMBASE:
2517 widen_op = OP_ICONV_TO_U2;
2518 break;
2519 default:
2520 break;
2523 if (widen_op != -1) {
2524 int dreg = alloc_preg (cfg);
2525 MonoInst *widen;
2527 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2528 widen->type = ins->type;
2529 ins = widen;
2534 return ins;
2537 static MonoMethod*
2538 get_memcpy_method (void)
2540 static MonoMethod *memcpy_method = NULL;
2541 if (!memcpy_method) {
2542 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2543 if (!memcpy_method)
2544 g_error ("Old corlib found. Install a new one");
2546 return memcpy_method;
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg. If @native is
 * TRUE, the native (marshalled) size and layout is used. Emits GC write
 * barriers (via mono_value_copy) when the type contains references.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [3];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

#if HAVE_WRITE_BARRIERS
	/* if native is true there should be no references in the struct */
	if (klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);
			if (context_used) {
				/* Shared code: load the klass from the rgctx */
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					mono_class_compute_gc_descriptor (klass);
				}
			}

			/* FIXME: this does the memcpy as well (or
			   should), so we don't need the memcpy
			   afterwards */
			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
		}
	}
#endif

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* Small copy: emit inline loads/stores */
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		/* Large copy: call the managed memcpy helper */
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
2617 static MonoMethod*
2618 get_memset_method (void)
2620 static MonoMethod *memset_method = NULL;
2621 if (!memset_method) {
2622 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2623 if (!memset_method)
2624 g_error ("Old corlib found. Install a new one");
2626 return memset_method;
2629 void
2630 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2632 MonoInst *iargs [3];
2633 int n;
2634 guint32 align;
2635 MonoMethod *memset_method;
2637 /* FIXME: Optimize this for the case when dest is an LDADDR */
2639 mono_class_init (klass);
2640 n = mono_class_value_size (klass, &align);
2642 if (n <= sizeof (gpointer) * 5) {
2643 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2645 else {
2646 memset_method = get_memset_method ();
2647 iargs [0] = dest;
2648 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2649 EMIT_NEW_ICONST (cfg, iargs [2], n);
2650 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2654 static MonoInst*
2655 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2657 MonoInst *this = NULL;
2659 g_assert (cfg->generic_sharing_context);
2661 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2662 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2663 !method->klass->valuetype)
2664 EMIT_NEW_ARGLOAD (cfg, this, 0);
2666 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2667 MonoInst *mrgctx_loc, *mrgctx_var;
2669 g_assert (!this);
2670 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2672 mrgctx_loc = mono_get_vtable_var (cfg);
2673 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2675 return mrgctx_var;
2676 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2677 MonoInst *vtable_loc, *vtable_var;
2679 g_assert (!this);
2681 vtable_loc = mono_get_vtable_var (cfg);
2682 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2684 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2685 MonoInst *mrgctx_var = vtable_var;
2686 int vtable_reg;
2688 vtable_reg = alloc_preg (cfg);
2689 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2690 vtable_var->type = STACK_PTR;
2693 return vtable_var;
2694 } else {
2695 MonoInst *ins;
2696 int vtable_reg, res_reg;
2698 vtable_reg = alloc_preg (cfg);
2699 res_reg = alloc_preg (cfg);
2700 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2701 return ins;
2705 static MonoJumpInfoRgctxEntry *
2706 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2708 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2709 res->method = method;
2710 res->in_mrgctx = in_mrgctx;
2711 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2712 res->data->type = patch_type;
2713 res->data->data.target = patch_data;
2714 res->info_type = info_type;
2716 return res;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline to look up ENTRY in the
 * runtime generic context RGCTX.
 */
static inline MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the property RGCTX_TYPE of KLASS from the runtime
 * generic context of the current method.
 */
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
					  MonoClass *klass, int rgctx_type)
{
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, int rgctx_type)
{
	if (!context_used) {
		MonoInst *ins;

		/* No sharing: the value is a compile-time constant */
		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		default:
			g_assert_not_reached ();
		}
	} else {
		/* Shared code: look the value up lazily through the rgctx */
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
	}
}
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the property RGCTX_TYPE of FIELD from the runtime
 * generic context of the current method.
 */
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
					  MonoClassField *field, int rgctx_type)
{
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable (a constant, or fetched from the rgctx in shared code) in the
 * architecture's vtable register.
 *
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		/* Shared code: the vtable is only known at runtime */
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* Load error: return silently; the caller checks klass */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * mini_emit_check_array_type:
 *
 *   Emit code which throws ArrayTypeMismatchException unless OBJ is an
 * instance of exactly ARRAY_CLASS, by comparing its vtable (or klass under
 * MONO_OPT_SHARED) against the expected value.
 *
 * On return the caller must check @array_class for load errors.
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared-domain code compares the klass, not the vtable */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* Shared generic code: fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
}
2859 static void
2860 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2862 if (mini_get_debug_options ()->better_cast_details) {
2863 int to_klass_reg = alloc_preg (cfg);
2864 int vtable_reg = alloc_preg (cfg);
2865 int klass_reg = alloc_preg (cfg);
2866 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2868 if (!tls_get) {
2869 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2870 exit (1);
2873 MONO_ADD_INS (cfg->cbb, tls_get);
2874 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2878 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2883 static void
2884 reset_cast_details (MonoCompile *cfg)
2886 /* Reset the variables holding the cast details */
2887 if (mini_get_debug_options ()->better_cast_details) {
2888 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2890 MONO_ADD_INS (cfg->cbb, tls_get);
2891 /* It is enough to reset the from field */
2892 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	/* Unboxing is delegated to the managed Nullable<T>.Unbox method */
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared? We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

		rgctx = emit_get_rgctx (cfg, method, context_used);

		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		return mono_emit_method_call (cfg, method, &val, NULL);
	}
}
/*
 * handle_unbox:
 *
 *   Emit code to unbox the object on top of the stack (SP [0]) to valuetype
 * KLASS: check that it is a boxed instance of the right element class
 * (throwing InvalidCastException otherwise) and return an instruction
 * computing the address of the unboxed data (object + header size).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Loading the vtable also null-checks the object */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		/* Shared code: the expected element class comes from the rgctx */
		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The unboxed data lives right after the object header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_alloc:
 *
 *   Emit code to allocate an object of class KLASS. FOR_BOX is passed on to
 * the GC so a boxing-specialized allocator can be used. Picks between the
 * shared-domain allocator, an mscorlib-specialized AOT helper, a GC managed
 * allocator, and the generic allocation function.
 *
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared-domain code passes the domain explicitly */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			/* Class failed to load: record the error on the cfg */
			cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
			cfg->exception_ptr = klass;
			return NULL;
		}

#ifndef MONO_CROSS_COMPILE
		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif

		if (managed_alloc) {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* The allocator wants the instance size in pointer-sized words */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only available at runtime in DATA_INST, so allocation goes through
 * mono_object_new_specific (or mono_object_new in shared-domain mode).
 */
static MonoInst*
handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
						gboolean for_box)
{
	MonoInst *iargs [2];
	MonoMethod *managed_alloc = NULL;
	void *alloc_ftn;

	/*
	  FIXME: we cannot get managed_alloc here because we can't get
	  the class's vtable (because it's not a closed class)

	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
	*/

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		iargs [1] = data_inst;
		alloc_ftn = mono_object_new;
	} else {
		/* NOTE: managed_alloc is always NULL here until the FIXME above is resolved */
		if (managed_alloc) {
			iargs [0] = data_inst;
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		iargs [0] = data_inst;
		alloc_ftn = mono_object_new_specific;
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
3058 * Returns NULL and set the cfg exception on error.
3060 static MonoInst*
3061 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3063 MonoInst *alloc, *ins;
3065 if (mono_class_is_nullable (klass)) {
3066 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3067 return mono_emit_method_call (cfg, method, &val, NULL);
3070 alloc = handle_alloc (cfg, klass, TRUE);
3071 if (!alloc)
3072 return NULL;
3074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3076 return alloc;
/*
 * handle_box_from_inst:
 *
 *   Like handle_box (), but for generic sharing: KLASS may be open, so the
 * runtime vtable is supplied through DATA_INST and method addresses are
 * fetched from the RGCTX described by CONTEXT_USED.
 */
static MonoInst *
handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
{
	MonoInst *alloc, *ins;

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
		/* FIXME: What if the class is shared? We might not
		   have to get the method address from the RGCTX. */
		/* The Box () method's code address comes from the runtime generic context */
		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
			MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		/* handle_alloc_from_inst () never returns NULL, so no error check here */
		alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);

		/* Copy the value-type payload just past the MonoObject header */
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);

		return alloc;
	}
}
/*
 * is_complex_isinst:
 *   TRUE when the isinst/castclass against KLASS cannot use the inline fast
 * path and must go through an icall (interfaces, arrays, Nullable,
 * MarshalByRef, sealed classes, variant generics, type variables).
 * NOTE: the leading TRUE currently forces EVERY class onto the icall path.
 * FIXME: This doesn't work yet (class libs tests fail?)
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL 'castclass' opcode: checks that SRC is an
 * instance of KLASS, throwing InvalidCastException otherwise. NULL references
 * pass the check. CONTEXT_USED is nonzero under generic sharing, in which
 * case the class is loaded from the RGCTX.
 * Returns SRC (the object is unchanged), or NULL with the cfg exception set
 * on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [2];

		klass_inst = emit_get_rgctx_klass (cfg, context_used,
			klass, MONO_RGCTX_INFO_KLASS);

		/* NOTE: is_complex_isinst () currently always evaluates TRUE, so the
		 * shared case always takes the icall path below. */
		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);

	/* NULL references always pass castclass */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	/* Record klass/obj so a failing cast can produce a detailed message */
	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		/* Sealed non-array class: a single klass equality check suffices */
		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			/* General case: walk the class hierarchy */
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' opcode: produces SRC if it is an
 * instance of KLASS (or NULL reference), and a NULL constant otherwise.
 * CONTEXT_USED is nonzero under generic sharing.
 * Returns the result instruction, or NULL with the cfg exception set on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		/* NOTE: is_complex_isinst () currently always evaluates TRUE, so the
		 * shared case always takes the icall path below. */
		if (is_complex_isinst (klass)) {
			MonoInst *args [2];

			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	/* NULL input -> result is the (NULL) input itself */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array case: first match the rank, then the element class */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			/* Sealed class: a single klass equality check suffices */
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CEE_MONO_CISINST opcode, an isinst variant that
 * is remoting-proxy aware. See the result encoding in the comment below.
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* NULL is not an instance -> result 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface check failed: not a proxy -> result 1 */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* Proxy without custom type info -> type undeterminable, result 2 */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Non-proxy objects take the ordinary isinst path below */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* For proxies, test against the remote class's proxy_class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CEE_MONO_CCASTCLASS opcode, a castclass variant
 * that is remoting-proxy aware. See the result encoding in the comment below.
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* NULL references always pass the cast -> result 0 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	/* Record klass/obj so a failing cast can produce a detailed message */
	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Non-proxy failing the interface check -> InvalidCastException */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		/* Proxy without custom type info -> InvalidCastException; else result 1 */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Non-proxy objects take the ordinary castclass path below */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* For proxies, test against the remote class's proxy_class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Throws InvalidCastException on mismatch */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of class KLASS and initializes it to
 * call METHOD on TARGET, inlining the work of mono_delegate_ctor ().
 * CONTEXT_USED is nonzero under generic sharing.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			/* Under generic sharing the slot comes from the RGCTX */
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			/* method_code_hash maps MonoMethod* -> per-domain code slot */
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3552 static MonoInst*
3553 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3555 MonoJitICallInfo *info;
3557 /* Need to register the icall so it gets an icall wrapper */
3558 info = mono_get_array_new_va_icall (rank);
3560 cfg->flags |= MONO_CFG_HAS_VARARGS;
3562 /* mono_array_new_va () needs a vararg calling convention */
3563 cfg->disable_llvm = TRUE;
3565 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3566 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so the
 * GOT variable is initialized before any use, and add a dummy use in the
 * exit block to keep it alive. No-op if there is no got_var or it was
 * already allocated.
 */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
3603 static int inline_limit;
3604 static gboolean inline_limit_inited;
3606 static gboolean
3607 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3609 MonoMethodHeaderSummary header;
3610 MonoVTable *vtable;
3611 #ifdef MONO_ARCH_SOFT_FLOAT
3612 MonoMethodSignature *sig = mono_method_signature (method);
3613 int i;
3614 #endif
3616 if (cfg->generic_sharing_context)
3617 return FALSE;
3619 if (cfg->inline_depth > 10)
3620 return FALSE;
3622 #ifdef MONO_ARCH_HAVE_LMF_OPS
3623 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3624 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3625 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3626 return TRUE;
3627 #endif
3630 if (!mono_method_get_header_summary (method, &header))
3631 return FALSE;
3633 /*runtime, icall and pinvoke are checked by summary call*/
3634 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3635 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3636 (method->klass->marshalbyref) ||
3637 header.has_clauses)
3638 return FALSE;
3640 /* also consider num_locals? */
3641 /* Do the size check early to avoid creating vtables */
3642 if (!inline_limit_inited) {
3643 if (getenv ("MONO_INLINELIMIT"))
3644 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3645 else
3646 inline_limit = INLINE_LENGTH_LIMIT;
3647 inline_limit_inited = TRUE;
3649 if (header.code_size >= inline_limit)
3650 return FALSE;
3653 * if we can initialize the class of the method right away, we do,
3654 * otherwise we don't allow inlining if the class needs initialization,
3655 * since it would mean inserting a call to mono_runtime_class_init()
3656 * inside the inlined code
3658 if (!(cfg->opt & MONO_OPT_SHARED)) {
3659 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3660 if (cfg->run_cctors && method->klass->has_cctor) {
3661 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3662 if (!method->klass->runtime_info)
3663 /* No vtable created yet */
3664 return FALSE;
3665 vtable = mono_class_vtable (cfg->domain, method->klass);
3666 if (!vtable)
3667 return FALSE;
3668 /* This makes so that inline cannot trigger */
3669 /* .cctors: too many apps depend on them */
3670 /* running with a specific order... */
3671 if (! vtable->initialized)
3672 return FALSE;
3673 mono_runtime_class_init (vtable);
3675 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3676 if (!method->klass->runtime_info)
3677 /* No vtable created yet */
3678 return FALSE;
3679 vtable = mono_class_vtable (cfg->domain, method->klass);
3680 if (!vtable)
3681 return FALSE;
3682 if (!vtable->initialized)
3683 return FALSE;
3685 } else {
3687 * If we're compiling for shared code
3688 * the cctor will need to be run at aot method load time, for example,
3689 * or at the end of the compilation of the inlining method.
3691 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3692 return FALSE;
3696 * CAS - do not inline methods with declarative security
3697 * Note: this has to be before any possible return TRUE;
3699 if (mono_method_has_declsec (method))
3700 return FALSE;
3702 #ifdef MONO_ARCH_SOFT_FLOAT
3703 /* FIXME: */
3704 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3705 return FALSE;
3706 for (i = 0; i < sig->param_count; ++i)
3707 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3708 return FALSE;
3709 #endif
3711 return TRUE;
3714 static gboolean
3715 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3717 if (vtable->initialized && !cfg->compile_aot)
3718 return FALSE;
3720 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3721 return FALSE;
3723 if (!mono_class_needs_cctor_run (vtable->klass, method))
3724 return FALSE;
3726 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3727 /* The initialization is already done before the method is called */
3728 return FALSE;
3730 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS. A bounds check is emitted when BCHECK
 * is TRUE. Returns the instruction producing the element address.
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	/* 32-bit: narrow an I8 index down to I4 */
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes: use a single LEA with a scaled index */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * lower-bound adjustment and range checks. Only compiled on architectures
 * with native multiply support (depends on OP_LMUL/OP_PMUL).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
		arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* First dimension: realidx1 = index1 - lower_bound; check against length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
		bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
		bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) further on */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
		bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
		bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
3849 static MonoInst*
3850 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3852 int rank;
3853 MonoInst *addr;
3854 MonoMethod *addr_method;
3855 int element_size;
3857 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3859 if (rank == 1)
3860 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3862 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3863 /* emit_ldelema_2 depends on OP_LMUL */
3864 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3865 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3867 #endif
3869 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3870 addr_method = mono_marshal_get_array_address (rank, element_size);
3871 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3873 return addr;
/* Default break policy: honor every break instruction / Debugger.Break () call. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}

/* Current policy callback; replaced via mono_set_break_policy () */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3885 * mono_set_break_policy:
3886 * policy_callback: the new callback function
3888 * Allow embedders to decide wherther to actually obey breakpoint instructions
3889 * (both break IL instructions and Debugger.Break () method calls), for example
3890 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3891 * untrusted or semi-trusted code.
3893 * @policy_callback will be called every time a break point instruction needs to
3894 * be inserted with the method argument being the method that calls Debugger.Break()
3895 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3896 * if it wants the breakpoint to not be effective in the given method.
3897 * #MONO_BREAK_POLICY_ALWAYS is the default.
3899 void
3900 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3902 if (policy_callback)
3903 break_policy_func = policy_callback;
3904 else
3905 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint for METHOD should actually be emitted.
 * NOTE: the name is misspelled ("brekpoint") but kept, since callers
 * elsewhere in the file use it.
 */
static gboolean
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
		return TRUE;
	case MONO_BREAK_POLICY_NEVER:
		return FALSE;
	case MONO_BREAK_POLICY_ON_DBG:
		/* Only break when running under the Mono debugger */
		return mono_debug_using_mono_debugger ();
	default:
		g_warning ("Incorrect value returned from break policy callback");
		return FALSE;
	}
}
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit inline IR for Array.Get/SetGenericValueImpl: compute the element
 * address from args [0] (array) and args [1] (index), then copy the value
 * between the element and the by-ref location in args [2]. IS_SET selects
 * the copy direction. Returns the store instruction.
 */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		/* *element = *args [2] */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	} else {
		/* *args [2] = *element */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with a short inline IR sequence (an
 * intrinsic).  Returns the MonoInst holding the call's result, or NULL when
 * no intrinsic applies and a regular call must be emitted.  Handles selected
 * methods of String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger, Environment and Math.
 */
3942 static MonoInst*
3943 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3945 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3947 static MonoClass *runtime_helpers_class = NULL;
3948 if (! runtime_helpers_class)
3949 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3950 "System.Runtime.CompilerServices", "RuntimeHelpers");
3952 if (cmethod->klass == mono_defaults.string_class) {
3953 if (strcmp (cmethod->name, "get_Chars") == 0) {
3954 int dreg = alloc_ireg (cfg);
3955 int index_reg = alloc_preg (cfg);
3956 int mult_reg = alloc_preg (cfg);
3957 int add_reg = alloc_preg (cfg);
3959 #if SIZEOF_REGISTER == 8
3960 /* The array reg is 64 bits but the index reg is only 32 */
3961 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3962 #else
3963 index_reg = args [1]->dreg;
3964 #endif
3965 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3967 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3968 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3969 add_reg = ins->dreg;
3970 /* Avoid a warning */
3971 mult_reg = 0;
3972 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3973 add_reg, 0);
3974 #else
/* Generic path: address = string + index * 2 + offsetof(chars). */
3975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3976 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3977 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3978 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3979 #endif
3980 type_from_op (ins, NULL, NULL);
3981 return ins;
3982 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3983 int dreg = alloc_ireg (cfg);
3984 /* Decompose later to allow more optimizations */
3985 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3986 ins->type = STACK_I4;
3987 cfg->cbb->has_array_access = TRUE;
3988 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3990 return ins;
3991 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3992 int mult_reg = alloc_preg (cfg);
3993 int add_reg = alloc_preg (cfg);
3995 /* The corlib functions check for oob already. */
3996 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3997 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3998 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3999 return cfg->cbb->last_ins;
4000 } else
4001 return NULL;
4002 } else if (cmethod->klass == mono_defaults.object_class) {
4004 if (strcmp (cmethod->name, "GetType") == 0) {
/* Load obj->vtable->type; the FAULT variant emits a null check on obj. */
4005 int dreg = alloc_preg (cfg);
4006 int vt_reg = alloc_preg (cfg);
4007 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4009 type_from_op (ins, NULL, NULL);
4011 return ins;
4012 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address; only valid when objects never move (non-moving GC). */
4013 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
4014 int dreg = alloc_ireg (cfg);
4015 int t1 = alloc_ireg (cfg);
4017 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4018 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4019 ins->type = STACK_I4;
4021 return ins;
4022 #endif
4023 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor () is empty; emit a NOP instead of a call. */
4024 MONO_INST_NEW (cfg, ins, OP_NOP);
4025 MONO_ADD_INS (cfg->cbb, ins);
4026 return ins;
4027 } else
4028 return NULL;
4029 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by skipping the first letter. */
4030 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4031 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4032 if (cmethod->name [0] != 'g')
4033 return NULL;
4035 if (strcmp (cmethod->name, "get_Rank") == 0) {
4036 int dreg = alloc_ireg (cfg);
4037 int vtable_reg = alloc_preg (cfg);
4038 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4039 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4040 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4041 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4042 type_from_op (ins, NULL, NULL);
4044 return ins;
4045 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4046 int dreg = alloc_ireg (cfg);
4048 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4049 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4050 type_from_op (ins, NULL, NULL);
4052 return ins;
4053 } else
4054 return NULL;
4055 } else if (cmethod->klass == runtime_helpers_class) {
4057 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4058 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4059 return ins;
4060 } else
4061 return NULL;
4062 } else if (cmethod->klass == mono_defaults.thread_class) {
4063 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4064 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4065 MONO_ADD_INS (cfg->cbb, ins);
4066 return ins;
4067 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4068 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4069 MONO_ADD_INS (cfg->cbb, ins);
4070 return ins;
4072 } else if (cmethod->klass == mono_defaults.monitor_class) {
4073 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4074 if (strcmp (cmethod->name, "Enter") == 0) {
4075 MonoCallInst *call;
4077 if (COMPILE_LLVM (cfg)) {
4079 * Pass the argument normally, the LLVM backend will handle the
4080 * calling convention problems.
4082 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4083 } else {
/* Native path: the object goes in a fixed register the trampoline expects. */
4084 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4085 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4086 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4087 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4090 return (MonoInst*)call;
4091 } else if (strcmp (cmethod->name, "Exit") == 0) {
4092 MonoCallInst *call;
4094 if (COMPILE_LLVM (cfg)) {
4095 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4096 } else {
4097 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4098 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4099 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4100 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4103 return (MonoInst*)call;
4105 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4106 MonoMethod *fast_method = NULL;
4108 /* Avoid infinite recursion */
4109 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4110 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4111 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4112 return NULL;
4114 if (strcmp (cmethod->name, "Enter") == 0 ||
4115 strcmp (cmethod->name, "Exit") == 0)
4116 fast_method = mono_monitor_get_fast_path (cmethod);
4117 if (!fast_method)
4118 return NULL;
4120 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4121 #endif
4122 } else if (cmethod->klass->image == mono_defaults.corlib &&
4123 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4124 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4125 ins = NULL;
4127 #if SIZEOF_REGISTER == 8
4128 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4129 /* 64 bit reads are already atomic */
4130 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4131 ins->dreg = mono_alloc_preg (cfg);
4132 ins->inst_basereg = args [0]->dreg;
4133 ins->inst_offset = 0;
4134 MONO_ADD_INS (cfg->cbb, ins);
4136 #endif
4138 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add are all lowered to OP_ATOMIC_ADD_NEW_* with the right addend. */
4139 if (strcmp (cmethod->name, "Increment") == 0) {
4140 MonoInst *ins_iconst;
4141 guint32 opcode = 0;
4143 if (fsig->params [0]->type == MONO_TYPE_I4)
4144 opcode = OP_ATOMIC_ADD_NEW_I4;
4145 #if SIZEOF_REGISTER == 8
4146 else if (fsig->params [0]->type == MONO_TYPE_I8)
4147 opcode = OP_ATOMIC_ADD_NEW_I8;
4148 #endif
4149 if (opcode) {
4150 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4151 ins_iconst->inst_c0 = 1;
4152 ins_iconst->dreg = mono_alloc_ireg (cfg);
4153 MONO_ADD_INS (cfg->cbb, ins_iconst);
4155 MONO_INST_NEW (cfg, ins, opcode);
4156 ins->dreg = mono_alloc_ireg (cfg);
4157 ins->inst_basereg = args [0]->dreg;
4158 ins->inst_offset = 0;
4159 ins->sreg2 = ins_iconst->dreg;
4160 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4161 MONO_ADD_INS (cfg->cbb, ins);
4163 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4164 MonoInst *ins_iconst;
4165 guint32 opcode = 0;
4167 if (fsig->params [0]->type == MONO_TYPE_I4)
4168 opcode = OP_ATOMIC_ADD_NEW_I4;
4169 #if SIZEOF_REGISTER == 8
4170 else if (fsig->params [0]->type == MONO_TYPE_I8)
4171 opcode = OP_ATOMIC_ADD_NEW_I8;
4172 #endif
4173 if (opcode) {
4174 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4175 ins_iconst->inst_c0 = -1;
4176 ins_iconst->dreg = mono_alloc_ireg (cfg);
4177 MONO_ADD_INS (cfg->cbb, ins_iconst);
4179 MONO_INST_NEW (cfg, ins, opcode);
4180 ins->dreg = mono_alloc_ireg (cfg);
4181 ins->inst_basereg = args [0]->dreg;
4182 ins->inst_offset = 0;
4183 ins->sreg2 = ins_iconst->dreg;
4184 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4185 MONO_ADD_INS (cfg->cbb, ins);
4187 } else if (strcmp (cmethod->name, "Add") == 0) {
4188 guint32 opcode = 0;
4190 if (fsig->params [0]->type == MONO_TYPE_I4)
4191 opcode = OP_ATOMIC_ADD_NEW_I4;
4192 #if SIZEOF_REGISTER == 8
4193 else if (fsig->params [0]->type == MONO_TYPE_I8)
4194 opcode = OP_ATOMIC_ADD_NEW_I8;
4195 #endif
4197 if (opcode) {
4198 MONO_INST_NEW (cfg, ins, opcode);
4199 ins->dreg = mono_alloc_ireg (cfg);
4200 ins->inst_basereg = args [0]->dreg;
4201 ins->inst_offset = 0;
4202 ins->sreg2 = args [1]->dreg;
4203 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4204 MONO_ADD_INS (cfg->cbb, ins);
4207 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4209 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4210 if (strcmp (cmethod->name, "Exchange") == 0) {
4211 guint32 opcode;
4212 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4214 if (fsig->params [0]->type == MONO_TYPE_I4)
4215 opcode = OP_ATOMIC_EXCHANGE_I4;
4216 #if SIZEOF_REGISTER == 8
4217 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4218 (fsig->params [0]->type == MONO_TYPE_I))
4219 opcode = OP_ATOMIC_EXCHANGE_I8;
4220 #else
4221 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4222 opcode = OP_ATOMIC_EXCHANGE_I4;
4223 #endif
4224 else
4225 return NULL;
4227 MONO_INST_NEW (cfg, ins, opcode);
4228 ins->dreg = mono_alloc_ireg (cfg);
4229 ins->inst_basereg = args [0]->dreg;
4230 ins->inst_offset = 0;
4231 ins->sreg2 = args [1]->dreg;
4232 MONO_ADD_INS (cfg->cbb, ins);
4234 switch (fsig->params [0]->type) {
4235 case MONO_TYPE_I4:
4236 ins->type = STACK_I4;
4237 break;
4238 case MONO_TYPE_I8:
4239 case MONO_TYPE_I:
4240 ins->type = STACK_I8;
4241 break;
4242 case MONO_TYPE_OBJECT:
4243 ins->type = STACK_OBJ;
4244 break;
4245 default:
4246 g_assert_not_reached ();
4249 #if HAVE_WRITE_BARRIERS
4250 if (is_ref) {
/* A reference was stored into the location; notify the GC via the write barrier. */
4251 MonoInst *dummy_use;
4252 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4253 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4254 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4256 #endif
4258 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4260 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4261 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4262 int size = 0;
4263 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4264 if (fsig->params [1]->type == MONO_TYPE_I4)
4265 size = 4;
4266 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4267 size = sizeof (gpointer);
/*
 * NOTE(review): the condition below re-tests MONO_TYPE_I4, which the first
 * branch already consumed, so this arm is unreachable and 64-bit
 * CompareExchange never takes the intrinsic path here — presumably
 * MONO_TYPE_I8 was intended; confirm before changing.
 */
4268 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4269 size = 8;
4270 if (size == 4) {
4271 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4272 ins->dreg = alloc_ireg (cfg);
4273 ins->sreg1 = args [0]->dreg;
4274 ins->sreg2 = args [1]->dreg;
4275 ins->sreg3 = args [2]->dreg;
4276 ins->type = STACK_I4;
4277 MONO_ADD_INS (cfg->cbb, ins);
4278 } else if (size == 8) {
4279 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4280 ins->dreg = alloc_ireg (cfg);
4281 ins->sreg1 = args [0]->dreg;
4282 ins->sreg2 = args [1]->dreg;
4283 ins->sreg3 = args [2]->dreg;
4284 ins->type = STACK_I8;
4285 MONO_ADD_INS (cfg->cbb, ins);
4286 } else {
4287 /* g_assert_not_reached (); */
4289 #if HAVE_WRITE_BARRIERS
4290 if (is_ref) {
4291 MonoInst *dummy_use;
4292 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4293 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4294 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4296 #endif
4298 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4300 if (ins)
4301 return ins;
4302 } else if (cmethod->klass->image == mono_defaults.corlib) {
/* Debugger.Break (): obey the embedder's break policy ('brekpoint' is the
 * helper's actual, misspelled, name defined earlier in this file). */
4303 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4304 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4305 if (should_insert_brekpoint (cfg->method))
4306 MONO_INST_NEW (cfg, ins, OP_BREAK);
4307 else
4308 MONO_INST_NEW (cfg, ins, OP_NOP);
4309 MONO_ADD_INS (cfg->cbb, ins);
4310 return ins;
4312 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4313 && strcmp (cmethod->klass->name, "Environment") == 0) {
4314 #ifdef TARGET_WIN32
4315 EMIT_NEW_ICONST (cfg, ins, 1);
4316 #else
4317 EMIT_NEW_ICONST (cfg, ins, 0);
4318 #endif
4319 return ins;
4321 } else if (cmethod->klass == mono_defaults.math_class) {
4323 * There is general branches code for Min/Max, but it does not work for
4324 * all inputs:
4325 * http://everything2.com/?node_id=1051618
4329 #ifdef MONO_ARCH_SIMD_INTRINSICS
4330 if (cfg->opt & MONO_OPT_SIMD) {
4331 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4332 if (ins)
4333 return ins;
4335 #endif
/* Give the architecture backend a final chance to supply an intrinsic. */
4337 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4341 * This entry point could be used later for arbitrary method
4342 * redirection.
4344 inline static MonoInst*
4345 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4346 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4348 if (method->klass == mono_defaults.string_class) {
4349 /* managed string allocation support */
4350 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4351 MonoInst *iargs [2];
4352 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4353 MonoMethod *managed_alloc = NULL;
4355 g_assert (vtable); /*Should not fail since it System.String*/
4356 #ifndef MONO_CROSS_COMPILE
4357 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4358 #endif
4359 if (!managed_alloc)
4360 return NULL;
4361 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4362 iargs [1] = args [0];
4363 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4366 return NULL;
4369 static void
4370 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4372 MonoInst *store, *temp;
4373 int i;
4375 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4376 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4379 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4380 * would be different than the MonoInst's used to represent arguments, and
4381 * the ldelema implementation can't deal with that.
4382 * Solution: When ldelema is used on an inline argument, create a var for
4383 * it, emit ldelema on that var, and emit the saving code below in
4384 * inline_method () if needed.
4386 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4387 cfg->args [i] = temp;
4388 /* This uses cfg->args [i] which is set by the preceeding line */
4389 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4390 store->cil_code = sp [0]->cil_code;
4391 sp++;
4395 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4396 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4398 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4399 static gboolean
4400 check_inline_called_method_name_limit (MonoMethod *called_method)
4402 int strncmp_result;
4403 static char *limit = NULL;
4405 if (limit == NULL) {
4406 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4408 if (limit_string != NULL)
4409 limit = limit_string;
4410 else
4411 limit = (char *) "";
4414 if (limit [0] != '\0') {
4415 char *called_method_name = mono_method_full_name (called_method, TRUE);
4417 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4418 g_free (called_method_name);
4420 //return (strncmp_result <= 0);
4421 return (strncmp_result == 0);
4422 } else {
4423 return TRUE;
4426 #endif
4428 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4429 static gboolean
4430 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4432 int strncmp_result;
4433 static char *limit = NULL;
4435 if (limit == NULL) {
4436 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4437 if (limit_string != NULL) {
4438 limit = limit_string;
4439 } else {
4440 limit = (char *) "";
4444 if (limit [0] != '\0') {
4445 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4447 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4448 g_free (caller_method_name);
4450 //return (strncmp_result <= 0);
4451 return (strncmp_result == 0);
4452 } else {
4453 return TRUE;
4456 #endif
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compilation: save the parts of CFG that
 * mono_method_to_ir () overwrites, translate the callee's IL between freshly
 * created start/end bblocks, then either merge those bblocks into the
 * caller's graph (success) or discard them (failure).  On success the return
 * value, if any, is pushed on *sp.  Returns costs + 1 on success, 0 on
 * failure.
 */
4458 static int
4459 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4460 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4462 MonoInst *ins, *rvar = NULL;
4463 MonoMethodHeader *cheader;
4464 MonoBasicBlock *ebblock, *sbblock;
4465 int i, costs;
4466 MonoMethod *prev_inlined_method;
4467 MonoInst **prev_locals, **prev_args;
4468 MonoType **prev_arg_types;
4469 guint prev_real_offset;
4470 GHashTable *prev_cbb_hash;
4471 MonoBasicBlock **prev_cil_offset_to_bb;
4472 MonoBasicBlock *prev_cbb;
4473 unsigned char* prev_cil_start;
4474 guint32 prev_cil_offset_to_bb_len;
4475 MonoMethod *prev_current_method;
4476 MonoGenericContext *prev_generic_context;
4477 gboolean ret_var_set, prev_ret_var_set;
4479 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4481 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4482 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4483 return 0;
4484 #endif
4485 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4486 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4487 return 0;
4488 #endif
4490 if (cfg->verbose_level > 2)
4491 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4493 if (!cmethod->inline_info) {
4494 mono_jit_stats.inlineable_methods++;
4495 cmethod->inline_info = 1;
4498 /* allocate local variables */
4499 cheader = mono_method_get_header (cmethod);
4501 if (cheader == NULL || mono_loader_get_last_error ()) {
4502 if (cheader)
4503 mono_metadata_free_mh (cheader);
4504 mono_loader_clear_error ();
4505 return 0;
4508 /* allocate space to store the return value */
4509 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4510 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4514 prev_locals = cfg->locals;
4515 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4516 for (i = 0; i < cheader->num_locals; ++i)
4517 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4519 /* allocate start and end blocks */
4520 /* This is needed so if the inline is aborted, we can clean up */
4521 NEW_BBLOCK (cfg, sbblock);
4522 sbblock->real_offset = real_offset;
4524 NEW_BBLOCK (cfg, ebblock);
4525 ebblock->block_num = cfg->num_bblocks++;
4526 ebblock->real_offset = real_offset;
/* Save every piece of per-method state that the recursive
 * mono_method_to_ir () call below will overwrite. */
4528 prev_args = cfg->args;
4529 prev_arg_types = cfg->arg_types;
4530 prev_inlined_method = cfg->inlined_method;
4531 cfg->inlined_method = cmethod;
4532 cfg->ret_var_set = FALSE;
4533 cfg->inline_depth ++;
4534 prev_real_offset = cfg->real_offset;
4535 prev_cbb_hash = cfg->cbb_hash;
4536 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4537 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4538 prev_cil_start = cfg->cil_start;
4539 prev_cbb = cfg->cbb;
4540 prev_current_method = cfg->current_method;
4541 prev_generic_context = cfg->generic_context;
4542 prev_ret_var_set = cfg->ret_var_set;
4544 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4546 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state saved above. */
4548 cfg->inlined_method = prev_inlined_method;
4549 cfg->real_offset = prev_real_offset;
4550 cfg->cbb_hash = prev_cbb_hash;
4551 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4552 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4553 cfg->cil_start = prev_cil_start;
4554 cfg->locals = prev_locals;
4555 cfg->args = prev_args;
4556 cfg->arg_types = prev_arg_types;
4557 cfg->current_method = prev_current_method;
4558 cfg->generic_context = prev_generic_context;
4559 cfg->ret_var_set = prev_ret_var_set;
4560 cfg->inline_depth --;
/* costs < 60: empirical acceptance threshold for the computed inline cost;
 * a negative cost means translation failed. */
4562 if ((costs >= 0 && costs < 60) || inline_allways) {
4563 if (cfg->verbose_level > 2)
4564 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4566 mono_jit_stats.inlined_methods++;
4568 /* always add some code to avoid block split failures */
4569 MONO_INST_NEW (cfg, ins, OP_NOP);
4570 MONO_ADD_INS (prev_cbb, ins);
4572 prev_cbb->next_bb = sbblock;
4573 link_bblock (cfg, prev_cbb, sbblock);
4576 * Get rid of the begin and end bblocks if possible to aid local
4577 * optimizations.
4579 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4581 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4582 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4584 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4585 MonoBasicBlock *prev = ebblock->in_bb [0];
4586 mono_merge_basic_blocks (cfg, prev, ebblock);
4587 cfg->cbb = prev;
4588 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4589 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4590 cfg->cbb = prev_cbb;
4592 } else {
4593 cfg->cbb = ebblock;
4596 if (rvar) {
4598 * If the inlined method contains only a throw, then the ret var is not
4599 * set, so set it to a dummy value.
4601 if (!ret_var_set) {
4602 static double r8_0 = 0.0;
4604 switch (rvar->type) {
4605 case STACK_I4:
4606 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4607 break;
4608 case STACK_I8:
4609 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4610 break;
4611 case STACK_PTR:
4612 case STACK_MP:
4613 case STACK_OBJ:
4614 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4615 break;
4616 case STACK_R8:
4617 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4618 ins->type = STACK_R8;
4619 ins->inst_p0 = (void*)&r8_0;
4620 ins->dreg = rvar->dreg;
4621 MONO_ADD_INS (cfg->cbb, ins);
4622 break;
4623 case STACK_VTYPE:
4624 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4625 break;
4626 default:
4627 g_assert_not_reached ();
/* Push the (possibly dummy) return value on the caller's stack. */
4631 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4632 *sp++ = ins;
4634 mono_metadata_free_mh (cheader);
4635 return costs + 1;
4636 } else {
4637 if (cfg->verbose_level > 2)
4638 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4639 cfg->exception_type = MONO_EXCEPTION_NONE;
4640 mono_loader_clear_error ();
4642 /* This gets rid of the newly added bblocks */
4643 cfg->cbb = prev_cbb;
4645 mono_metadata_free_mh (cheader);
4646 return 0;
4650 * Some of these comments may well be out-of-date.
4651 * Design decisions: we do a single pass over the IL code (and we do bblock
4652 * splitting/merging in the few cases when it's required: a back jump to an IL
4653 * address that was not already seen as bblock starting point).
4654 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4655 * Complex operations are decomposed in simpler ones right away. We need to let the
4656 * arch-specific code peek and poke inside this process somehow (except when the
4657 * optimizations can take advantage of the full semantic info of coarse opcodes).
4658 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4659 * MonoInst->opcode initially is the IL opcode or some simplification of that
4660 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4661 * opcode with value bigger than OP_LAST.
4662 * At this point the IR can be handed over to an interpreter, a dumb code generator
4663 * or to the optimizing code generator that will translate it to SSA form.
4665 * Profiling directed optimizations.
4666 * We may compile by default with few or no optimizations and instrument the code
4667 * or the user may indicate what methods to optimize the most either in a config file
4668 * or through repeated runs where the compiler applies offline the optimizations to
4669 * each method and then decides if it was worth it.
4672 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4673 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4674 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4675 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4676 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4677 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4678 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4679 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4681 /* offset from br.s -> br like opcodes */
4682 #define BIG_BRANCH_OFFSET 13
4684 static gboolean
4685 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4687 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4689 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   One linear pass over the IL in [start, end): create a basic block
 * (via GET_BBLOCK) at every branch target and at every fall-through point
 * after a branch, and mark blocks containing CEE_THROW as out-of-line.
 * Returns 0 on success; on malformed IL, returns 1 with *pos set to the
 * offending address (via the UNVERIFIED macro's goto).
 */
4692 static int
4693 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4695 unsigned char *ip = start;
4696 unsigned char *target;
4697 int i;
4698 guint cli_addr;
4699 MonoBasicBlock *bblock;
4700 const MonoOpcode *opcode;
4702 while (ip < end) {
4703 cli_addr = ip - start;
4704 i = mono_opcode_value ((const guint8 **)&ip, end);
4705 if (i < 0)
4706 UNVERIFIED;
4707 opcode = &mono_opcodes [i];
/* Advance IP past the operand according to the opcode's argument kind. */
4708 switch (opcode->argument) {
4709 case MonoInlineNone:
4710 ip++;
4711 break;
4712 case MonoInlineString:
4713 case MonoInlineType:
4714 case MonoInlineField:
4715 case MonoInlineMethod:
4716 case MonoInlineTok:
4717 case MonoInlineSig:
4718 case MonoShortInlineR:
4719 case MonoInlineI:
4720 ip += 5;
4721 break;
4722 case MonoInlineVar:
4723 ip += 3;
4724 break;
4725 case MonoShortInlineVar:
4726 case MonoShortInlineI:
4727 ip += 2;
4728 break;
4729 case MonoShortInlineBrTarget:
4730 target = start + cli_addr + 2 + (signed char)ip [1];
4731 GET_BBLOCK (cfg, bblock, target);
4732 ip += 2;
4733 if (ip < end)
4734 GET_BBLOCK (cfg, bblock, ip);
4735 break;
4736 case MonoInlineBrTarget:
4737 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4738 GET_BBLOCK (cfg, bblock, target);
4739 ip += 5;
4740 if (ip < end)
4741 GET_BBLOCK (cfg, bblock, ip);
4742 break;
4743 case MonoInlineSwitch: {
4744 guint32 n = read32 (ip + 1);
4745 guint32 j;
4746 ip += 5;
/* Switch targets are relative to the first instruction after the whole table. */
4747 cli_addr += 5 + 4 * n;
4748 target = start + cli_addr;
4749 GET_BBLOCK (cfg, bblock, target);
4751 for (j = 0; j < n; ++j) {
4752 target = start + cli_addr + (gint32)read32 (ip);
4753 GET_BBLOCK (cfg, bblock, target);
4754 ip += 4;
4756 break;
4758 case MonoInlineR:
4759 case MonoInlineI8:
4760 ip += 9;
4761 break;
4762 default:
4763 g_assert_not_reached ();
4766 if (i == CEE_THROW) {
4767 unsigned char *bb_start = ip - 1;
4769 /* Find the start of the bblock containing the throw */
4770 bblock = NULL;
4771 while ((bb_start >= start) && !bblock) {
4772 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4773 bb_start --;
4775 if (bblock)
4776 bblock->out_of_line = 1;
4779 return 0;
4780 unverified:
4781 *pos = ip;
4782 return 1;
4785 static inline MonoMethod *
4786 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4788 MonoMethod *method;
4790 if (m->wrapper_type != MONO_WRAPPER_NONE)
4791 return mono_method_get_wrapper_data (m, token);
4793 method = mono_get_method_full (m->klass->image, token, klass, context);
4795 return method;
4798 static inline MonoMethod *
4799 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4801 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4803 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4804 return NULL;
4806 return method;
4809 static inline MonoClass*
4810 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4812 MonoClass *klass;
4814 if (method->wrapper_type != MONO_WRAPPER_NONE)
4815 klass = mono_method_get_wrapper_data (method, token);
4816 else
4817 klass = mono_class_get_full (method->klass->image, token, context);
4818 if (klass)
4819 mono_class_init (klass);
4820 return klass;
4824 * Returns TRUE if the JIT should abort inlining because "callee"
4825 * is influenced by security attributes.
4827 static
4828 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4830 guint32 result;
4832 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4833 return TRUE;
4836 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4837 if (result == MONO_JIT_SECURITY_OK)
4838 return FALSE;
4840 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4841 /* Generate code to throw a SecurityException before the actual call/link */
4842 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4843 MonoInst *args [2];
4845 NEW_ICONST (cfg, args [0], 4);
4846 NEW_METHODCONST (cfg, args [1], caller);
4847 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4848 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4849 /* don't hide previous results */
4850 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4851 cfg->exception_data = result;
4852 return TRUE;
4855 return FALSE;
4858 static MonoMethod*
4859 throw_exception (void)
4861 static MonoMethod *method = NULL;
4863 if (!method) {
4864 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4865 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4867 g_assert (method);
4868 return method;
4871 static void
4872 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4874 MonoMethod *thrower = throw_exception ();
4875 MonoInst *args [1];
4877 EMIT_NEW_PCONST (cfg, args [0], ex);
4878 mono_emit_method_call (cfg, thrower, args, NULL);
4882 * Return the original method if a wrapper is specified. We can only access
4883 * the custom attributes from the original method.
4885 static MonoMethod*
4886 get_original_method (MonoMethod *method)
4888 if (method->wrapper_type == MONO_WRAPPER_NONE)
4889 return method;
4891 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4892 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4893 return NULL;
4895 /* in other cases we need to find the original method */
4896 return mono_marshal_method_from_wrapper (method);
4899 static void
4900 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4901 MonoBasicBlock *bblock, unsigned char *ip)
4903 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4904 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4905 if (ex)
4906 emit_throw_exception (cfg, ex);
4909 static void
4910 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4911 MonoBasicBlock *bblock, unsigned char *ip)
4913 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4914 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4915 if (ex)
4916 emit_throw_exception (cfg, ex);
4920 * Check that the IL instructions at ip are the array initialization
4921 * sequence and return the pointer to the data and the size.
4923 static const char*
4924 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4927 * newarr[System.Int32]
4928 * dup
4929 * ldtoken field valuetype ...
4930 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4932 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4933 guint32 token = read32 (ip + 7);
4934 guint32 field_token = read32 (ip + 2);
4935 guint32 field_index = field_token & 0xffffff;
4936 guint32 rva;
4937 const char *data_ptr;
4938 int size = 0;
4939 MonoMethod *cmethod;
4940 MonoClass *dummy_class;
4941 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4942 int dummy_align;
4944 if (!field)
4945 return NULL;
4947 *out_field_token = field_token;
4949 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4950 if (!cmethod)
4951 return NULL;
4952 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4953 return NULL;
4954 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4955 case MONO_TYPE_BOOLEAN:
4956 case MONO_TYPE_I1:
4957 case MONO_TYPE_U1:
4958 size = 1; break;
4959 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4960 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4961 case MONO_TYPE_CHAR:
4962 case MONO_TYPE_I2:
4963 case MONO_TYPE_U2:
4964 size = 2; break;
4965 case MONO_TYPE_I4:
4966 case MONO_TYPE_U4:
4967 case MONO_TYPE_R4:
4968 size = 4; break;
4969 case MONO_TYPE_R8:
4970 #ifdef ARM_FPU_FPA
4971 return NULL; /* stupid ARM FP swapped format */
4972 #endif
4973 case MONO_TYPE_I8:
4974 case MONO_TYPE_U8:
4975 size = 8; break;
4976 #endif
4977 default:
4978 return NULL;
4980 size *= len;
4981 if (size > mono_type_size (field->type, &dummy_align))
4982 return NULL;
4983 *out_size = size;
4984 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4985 if (!method->klass->image->dynamic) {
4986 field_index = read32 (ip + 2) & 0xffffff;
4987 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4988 data_ptr = mono_image_rva_map (method->klass->image, rva);
4989 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4990 /* for aot code we do the lookup on load */
4991 if (aot && data_ptr)
4992 return GUINT_TO_POINTER (rva);
4993 } else {
4994 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4995 g_assert (!aot);
4996 data_ptr = mono_field_get_data (field);
4998 return data_ptr;
5000 return NULL;
5003 static void
5004 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5006 char *method_fname = mono_method_full_name (method, TRUE);
5007 char *method_code;
5008 MonoMethodHeader *header = mono_method_get_header (method);
5010 if (header->code_size == 0)
5011 method_code = g_strdup ("method body is empty.");
5012 else
5013 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5014 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5015 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5016 g_free (method_fname);
5017 g_free (method_code);
5018 mono_metadata_free_mh (header);
5021 static void
5022 set_exception_object (MonoCompile *cfg, MonoException *exception)
5024 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5025 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5026 cfg->exception_ptr = exception;
5029 static gboolean
5030 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5032 MonoType *type;
5034 if (cfg->generic_sharing_context)
5035 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5036 else
5037 type = &klass->byval_arg;
5038 return MONO_TYPE_IS_REFERENCE (type);
5041 static void
5042 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5044 MonoInst *ins;
5045 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5046 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5047 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5048 /* Optimize reg-reg moves away */
5050 * Can't optimize other opcodes, since sp[0] might point to
5051 * the last ins of a decomposed opcode.
5053 sp [0]->dreg = (cfg)->locals [n]->dreg;
5054 } else {
5055 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5060 * ldloca inhibits many optimizations so try to get rid of it in common
5061 * cases.
5063 static inline unsigned char *
5064 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5066 int local, token;
5067 MonoClass *klass;
5069 if (size == 1) {
5070 local = ip [1];
5071 ip += 2;
5072 } else {
5073 local = read16 (ip + 2);
5074 ip += 4;
5077 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5078 gboolean skip = FALSE;
5080 /* From the INITOBJ case */
5081 token = read32 (ip + 2);
5082 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5083 CHECK_TYPELOAD (klass);
5084 if (generic_class_is_reference_type (cfg, klass)) {
5085 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5086 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5087 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5088 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5089 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5090 } else {
5091 skip = TRUE;
5094 if (!skip)
5095 return ip + 6;
5097 load_error:
5098 return NULL;
5101 static gboolean
5102 is_exception_class (MonoClass *class)
5104 while (class) {
5105 if (class == mono_defaults.exception_class)
5106 return TRUE;
5107 class = class->parent;
5109 return FALSE;
5113 * mono_method_to_ir:
5115 * Translate the .net IL into linear IR.
5118 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5119 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5120 guint inline_offset, gboolean is_virtual_call)
5122 MonoError error;
5123 MonoInst *ins, **sp, **stack_start;
5124 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5125 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5126 MonoMethod *cmethod, *method_definition;
5127 MonoInst **arg_array;
5128 MonoMethodHeader *header;
5129 MonoImage *image;
5130 guint32 token, ins_flag;
5131 MonoClass *klass;
5132 MonoClass *constrained_call = NULL;
5133 unsigned char *ip, *end, *target, *err_pos;
5134 static double r8_0 = 0.0;
5135 MonoMethodSignature *sig;
5136 MonoGenericContext *generic_context = NULL;
5137 MonoGenericContainer *generic_container = NULL;
5138 MonoType **param_types;
5139 int i, n, start_new_bblock, dreg;
5140 int num_calls = 0, inline_costs = 0;
5141 int breakpoint_id = 0;
5142 guint num_args;
5143 MonoBoolean security, pinvoke;
5144 MonoSecurityManager* secman = NULL;
5145 MonoDeclSecurityActions actions;
5146 GSList *class_inits = NULL;
5147 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5148 int context_used;
5149 gboolean init_locals, seq_points, skip_dead_blocks;
5151 /* serialization and xdomain stuff may need access to private fields and methods */
5152 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5153 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5154 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5155 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5156 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5157 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5159 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5161 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5162 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5163 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5164 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5166 image = method->klass->image;
5167 header = mono_method_get_header (method);
5168 generic_container = mono_method_get_generic_container (method);
5169 sig = mono_method_signature (method);
5170 num_args = sig->hasthis + sig->param_count;
5171 ip = (unsigned char*)header->code;
5172 cfg->cil_start = ip;
5173 end = ip + header->code_size;
5174 mono_jit_stats.cil_code_size += header->code_size;
5175 init_locals = header->init_locals;
5177 seq_points = cfg->gen_seq_points && cfg->method == method;
5180 * Methods without init_locals set could cause asserts in various passes
5181 * (#497220).
5183 init_locals = TRUE;
5185 method_definition = method;
5186 while (method_definition->is_inflated) {
5187 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5188 method_definition = imethod->declaring;
5191 /* SkipVerification is not allowed if core-clr is enabled */
5192 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5193 dont_verify = TRUE;
5194 dont_verify_stloc = TRUE;
5197 if (!dont_verify && mini_method_verify (cfg, method_definition))
5198 goto exception_exit;
5200 if (mono_debug_using_mono_debugger ())
5201 cfg->keep_cil_nops = TRUE;
5203 if (sig->is_inflated)
5204 generic_context = mono_method_get_context (method);
5205 else if (generic_container)
5206 generic_context = &generic_container->context;
5207 cfg->generic_context = generic_context;
5209 if (!cfg->generic_sharing_context)
5210 g_assert (!sig->has_type_parameters);
5212 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5213 g_assert (method->is_inflated);
5214 g_assert (mono_method_get_context (method)->method_inst);
5216 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5217 g_assert (sig->generic_param_count);
5219 if (cfg->method == method) {
5220 cfg->real_offset = 0;
5221 } else {
5222 cfg->real_offset = inline_offset;
5225 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5226 cfg->cil_offset_to_bb_len = header->code_size;
5228 cfg->current_method = method;
5230 if (cfg->verbose_level > 2)
5231 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5233 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5234 if (sig->hasthis)
5235 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5236 for (n = 0; n < sig->param_count; ++n)
5237 param_types [n + sig->hasthis] = sig->params [n];
5238 cfg->arg_types = param_types;
5240 dont_inline = g_list_prepend (dont_inline, method);
5241 if (cfg->method == method) {
5243 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5244 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5246 /* ENTRY BLOCK */
5247 NEW_BBLOCK (cfg, start_bblock);
5248 cfg->bb_entry = start_bblock;
5249 start_bblock->cil_code = NULL;
5250 start_bblock->cil_length = 0;
5252 /* EXIT BLOCK */
5253 NEW_BBLOCK (cfg, end_bblock);
5254 cfg->bb_exit = end_bblock;
5255 end_bblock->cil_code = NULL;
5256 end_bblock->cil_length = 0;
5257 g_assert (cfg->num_bblocks == 2);
5259 arg_array = cfg->args;
5261 if (header->num_clauses) {
5262 cfg->spvars = g_hash_table_new (NULL, NULL);
5263 cfg->exvars = g_hash_table_new (NULL, NULL);
5265 /* handle exception clauses */
5266 for (i = 0; i < header->num_clauses; ++i) {
5267 MonoBasicBlock *try_bb;
5268 MonoExceptionClause *clause = &header->clauses [i];
5269 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5270 try_bb->real_offset = clause->try_offset;
5271 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5272 tblock->real_offset = clause->handler_offset;
5273 tblock->flags |= BB_EXCEPTION_HANDLER;
5275 link_bblock (cfg, try_bb, tblock);
5277 if (*(ip + clause->handler_offset) == CEE_POP)
5278 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5280 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5281 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5282 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5283 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5284 MONO_ADD_INS (tblock, ins);
5286 /* todo: is a fault block unsafe to optimize? */
5287 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5288 tblock->flags |= BB_EXCEPTION_UNSAFE;
5292 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5293 while (p < end) {
5294 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5296 /* catch and filter blocks get the exception object on the stack */
5297 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5298 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5299 MonoInst *dummy_use;
5301 /* mostly like handle_stack_args (), but just sets the input args */
5302 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5303 tblock->in_scount = 1;
5304 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5305 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5308 * Add a dummy use for the exvar so its liveness info will be
5309 * correct.
5311 cfg->cbb = tblock;
5312 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5314 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5315 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5316 tblock->flags |= BB_EXCEPTION_HANDLER;
5317 tblock->real_offset = clause->data.filter_offset;
5318 tblock->in_scount = 1;
5319 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5320 /* The filter block shares the exvar with the handler block */
5321 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5322 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5323 MONO_ADD_INS (tblock, ins);
5327 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5328 clause->data.catch_class &&
5329 cfg->generic_sharing_context &&
5330 mono_class_check_context_used (clause->data.catch_class)) {
5332 * In shared generic code with catch
5333 * clauses containing type variables
5334 * the exception handling code has to
5335 * be able to get to the rgctx.
5336 * Therefore we have to make sure that
5337 * the vtable/mrgctx argument (for
5338 * static or generic methods) or the
5339 * "this" argument (for non-static
5340 * methods) are live.
5342 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5343 mini_method_get_context (method)->method_inst ||
5344 method->klass->valuetype) {
5345 mono_get_vtable_var (cfg);
5346 } else {
5347 MonoInst *dummy_use;
5349 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5353 } else {
5354 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5355 cfg->cbb = start_bblock;
5356 cfg->args = arg_array;
5357 mono_save_args (cfg, sig, inline_args);
5360 /* FIRST CODE BLOCK */
5361 NEW_BBLOCK (cfg, bblock);
5362 bblock->cil_code = ip;
5363 cfg->cbb = bblock;
5364 cfg->ip = ip;
5366 ADD_BBLOCK (cfg, bblock);
5368 if (cfg->method == method) {
5369 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5370 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5371 MONO_INST_NEW (cfg, ins, OP_BREAK);
5372 MONO_ADD_INS (bblock, ins);
5376 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5377 secman = mono_security_manager_get_methods ();
5379 security = (secman && mono_method_has_declsec (method));
5380 /* at this point having security doesn't mean we have any code to generate */
5381 if (security && (cfg->method == method)) {
5382 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5383 * And we do not want to enter the next section (with allocation) if we
5384 * have nothing to generate */
5385 security = mono_declsec_get_demands (method, &actions);
5388 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5389 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5390 if (pinvoke) {
5391 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5392 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5393 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5395 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5396 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5397 pinvoke = FALSE;
5399 if (custom)
5400 mono_custom_attrs_free (custom);
5402 if (pinvoke) {
5403 custom = mono_custom_attrs_from_class (wrapped->klass);
5404 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5405 pinvoke = FALSE;
5407 if (custom)
5408 mono_custom_attrs_free (custom);
5410 } else {
5411 /* not a P/Invoke after all */
5412 pinvoke = FALSE;
5416 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5417 /* we use a separate basic block for the initialization code */
5418 NEW_BBLOCK (cfg, init_localsbb);
5419 cfg->bb_init = init_localsbb;
5420 init_localsbb->real_offset = cfg->real_offset;
5421 start_bblock->next_bb = init_localsbb;
5422 init_localsbb->next_bb = bblock;
5423 link_bblock (cfg, start_bblock, init_localsbb);
5424 link_bblock (cfg, init_localsbb, bblock);
5426 cfg->cbb = init_localsbb;
5427 } else {
5428 start_bblock->next_bb = bblock;
5429 link_bblock (cfg, start_bblock, bblock);
5432 /* at this point we know, if security is TRUE, that some code needs to be generated */
5433 if (security && (cfg->method == method)) {
5434 MonoInst *args [2];
5436 mono_jit_stats.cas_demand_generation++;
5438 if (actions.demand.blob) {
5439 /* Add code for SecurityAction.Demand */
5440 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5441 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5442 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5443 mono_emit_method_call (cfg, secman->demand, args, NULL);
5445 if (actions.noncasdemand.blob) {
5446 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5447 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5448 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5449 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5450 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5451 mono_emit_method_call (cfg, secman->demand, args, NULL);
5453 if (actions.demandchoice.blob) {
5454 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5455 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5456 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5457 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5458 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5462 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5463 if (pinvoke) {
5464 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5467 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5468 /* check if this is native code, e.g. an icall or a p/invoke */
5469 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5470 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5471 if (wrapped) {
5472 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5473 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5475 /* if this ia a native call then it can only be JITted from platform code */
5476 if ((icall || pinvk) && method->klass && method->klass->image) {
5477 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5478 MonoException *ex = icall ? mono_get_exception_security () :
5479 mono_get_exception_method_access ();
5480 emit_throw_exception (cfg, ex);
5487 if (header->code_size == 0)
5488 UNVERIFIED;
5490 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5491 ip = err_pos;
5492 UNVERIFIED;
5495 if (cfg->method == method)
5496 mono_debug_init_method (cfg, bblock, breakpoint_id);
5498 for (n = 0; n < header->num_locals; ++n) {
5499 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5500 UNVERIFIED;
5502 class_inits = NULL;
5504 /* We force the vtable variable here for all shared methods
5505 for the possibility that they might show up in a stack
5506 trace where their exact instantiation is needed. */
5507 if (cfg->generic_sharing_context && method == cfg->method) {
5508 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5509 mini_method_get_context (method)->method_inst ||
5510 method->klass->valuetype) {
5511 mono_get_vtable_var (cfg);
5512 } else {
5513 /* FIXME: Is there a better way to do this?
5514 We need the variable live for the duration
5515 of the whole method. */
5516 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5520 /* add a check for this != NULL to inlined methods */
5521 if (is_virtual_call) {
5522 MonoInst *arg_ins;
5524 NEW_ARGLOAD (cfg, arg_ins, 0);
5525 MONO_ADD_INS (cfg->cbb, arg_ins);
5526 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5529 skip_dead_blocks = !dont_verify;
5530 if (skip_dead_blocks) {
5531 original_bb = bb = mono_basic_block_split (method, &error);
5532 if (!mono_error_ok (&error)) {
5533 mono_error_cleanup (&error);
5534 UNVERIFIED;
5536 g_assert (bb);
5539 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5540 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5542 ins_flag = 0;
5543 start_new_bblock = 0;
5544 cfg->cbb = bblock;
5545 while (ip < end) {
5546 if (cfg->method == method)
5547 cfg->real_offset = ip - header->code;
5548 else
5549 cfg->real_offset = inline_offset;
5550 cfg->ip = ip;
5552 context_used = 0;
5554 if (start_new_bblock) {
5555 bblock->cil_length = ip - bblock->cil_code;
5556 if (start_new_bblock == 2) {
5557 g_assert (ip == tblock->cil_code);
5558 } else {
5559 GET_BBLOCK (cfg, tblock, ip);
5561 bblock->next_bb = tblock;
5562 bblock = tblock;
5563 cfg->cbb = bblock;
5564 start_new_bblock = 0;
5565 for (i = 0; i < bblock->in_scount; ++i) {
5566 if (cfg->verbose_level > 3)
5567 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5568 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5569 *sp++ = ins;
5571 if (class_inits)
5572 g_slist_free (class_inits);
5573 class_inits = NULL;
5574 } else {
5575 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5576 link_bblock (cfg, bblock, tblock);
5577 if (sp != stack_start) {
5578 handle_stack_args (cfg, stack_start, sp - stack_start);
5579 sp = stack_start;
5580 CHECK_UNVERIFIABLE (cfg);
5582 bblock->next_bb = tblock;
5583 bblock = tblock;
5584 cfg->cbb = bblock;
5585 for (i = 0; i < bblock->in_scount; ++i) {
5586 if (cfg->verbose_level > 3)
5587 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5588 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5589 *sp++ = ins;
5591 g_slist_free (class_inits);
5592 class_inits = NULL;
5596 if (skip_dead_blocks) {
5597 int ip_offset = ip - header->code;
5599 if (ip_offset == bb->end)
5600 bb = bb->next;
5602 if (bb->dead) {
5603 int op_size = mono_opcode_size (ip, end);
5604 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5606 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5608 if (ip_offset + op_size == bb->end) {
5609 MONO_INST_NEW (cfg, ins, OP_NOP);
5610 MONO_ADD_INS (bblock, ins);
5611 start_new_bblock = 1;
5614 ip += op_size;
5615 continue;
5619 * Sequence points are points where the debugger can place a breakpoint.
5620 * Currently, we generate these automatically at points where the IL
5621 * stack is empty.
5623 if (seq_points && sp == stack_start) {
5624 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5625 MONO_ADD_INS (cfg->cbb, ins);
5628 bblock->real_offset = cfg->real_offset;
5630 if ((cfg->method == method) && cfg->coverage_info) {
5631 guint32 cil_offset = ip - header->code;
5632 cfg->coverage_info->data [cil_offset].cil_code = ip;
5634 /* TODO: Use an increment here */
5635 #if defined(TARGET_X86)
5636 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5637 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5638 ins->inst_imm = 1;
5639 MONO_ADD_INS (cfg->cbb, ins);
5640 #else
5641 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5643 #endif
5646 if (cfg->verbose_level > 3)
5647 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5649 switch (*ip) {
5650 case CEE_NOP:
5651 if (cfg->keep_cil_nops)
5652 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5653 else
5654 MONO_INST_NEW (cfg, ins, OP_NOP);
5655 ip++;
5656 MONO_ADD_INS (bblock, ins);
5657 break;
5658 case CEE_BREAK:
5659 if (should_insert_brekpoint (cfg->method))
5660 MONO_INST_NEW (cfg, ins, OP_BREAK);
5661 else
5662 MONO_INST_NEW (cfg, ins, OP_NOP);
5663 ip++;
5664 MONO_ADD_INS (bblock, ins);
5665 break;
5666 case CEE_LDARG_0:
5667 case CEE_LDARG_1:
5668 case CEE_LDARG_2:
5669 case CEE_LDARG_3:
5670 CHECK_STACK_OVF (1);
5671 n = (*ip)-CEE_LDARG_0;
5672 CHECK_ARG (n);
5673 EMIT_NEW_ARGLOAD (cfg, ins, n);
5674 ip++;
5675 *sp++ = ins;
5676 break;
5677 case CEE_LDLOC_0:
5678 case CEE_LDLOC_1:
5679 case CEE_LDLOC_2:
5680 case CEE_LDLOC_3:
5681 CHECK_STACK_OVF (1);
5682 n = (*ip)-CEE_LDLOC_0;
5683 CHECK_LOCAL (n);
5684 EMIT_NEW_LOCLOAD (cfg, ins, n);
5685 ip++;
5686 *sp++ = ins;
5687 break;
5688 case CEE_STLOC_0:
5689 case CEE_STLOC_1:
5690 case CEE_STLOC_2:
5691 case CEE_STLOC_3: {
5692 CHECK_STACK (1);
5693 n = (*ip)-CEE_STLOC_0;
5694 CHECK_LOCAL (n);
5695 --sp;
5696 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5697 UNVERIFIED;
5698 emit_stloc_ir (cfg, sp, header, n);
5699 ++ip;
5700 inline_costs += 1;
5701 break;
5703 case CEE_LDARG_S:
5704 CHECK_OPSIZE (2);
5705 CHECK_STACK_OVF (1);
5706 n = ip [1];
5707 CHECK_ARG (n);
5708 EMIT_NEW_ARGLOAD (cfg, ins, n);
5709 *sp++ = ins;
5710 ip += 2;
5711 break;
5712 case CEE_LDARGA_S:
5713 CHECK_OPSIZE (2);
5714 CHECK_STACK_OVF (1);
5715 n = ip [1];
5716 CHECK_ARG (n);
5717 NEW_ARGLOADA (cfg, ins, n);
5718 MONO_ADD_INS (cfg->cbb, ins);
5719 *sp++ = ins;
5720 ip += 2;
5721 break;
5722 case CEE_STARG_S:
5723 CHECK_OPSIZE (2);
5724 CHECK_STACK (1);
5725 --sp;
5726 n = ip [1];
5727 CHECK_ARG (n);
5728 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5729 UNVERIFIED;
5730 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5731 ip += 2;
5732 break;
5733 case CEE_LDLOC_S:
5734 CHECK_OPSIZE (2);
5735 CHECK_STACK_OVF (1);
5736 n = ip [1];
5737 CHECK_LOCAL (n);
5738 EMIT_NEW_LOCLOAD (cfg, ins, n);
5739 *sp++ = ins;
5740 ip += 2;
5741 break;
5742 case CEE_LDLOCA_S: {
5743 unsigned char *tmp_ip;
5744 CHECK_OPSIZE (2);
5745 CHECK_STACK_OVF (1);
5746 CHECK_LOCAL (ip [1]);
5748 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5749 ip = tmp_ip;
5750 inline_costs += 1;
5751 break;
5754 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5755 *sp++ = ins;
5756 ip += 2;
5757 break;
5759 case CEE_STLOC_S:
5760 CHECK_OPSIZE (2);
5761 CHECK_STACK (1);
5762 --sp;
5763 CHECK_LOCAL (ip [1]);
5764 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5765 UNVERIFIED;
5766 emit_stloc_ir (cfg, sp, header, ip [1]);
5767 ip += 2;
5768 inline_costs += 1;
5769 break;
5770 case CEE_LDNULL:
5771 CHECK_STACK_OVF (1);
5772 EMIT_NEW_PCONST (cfg, ins, NULL);
5773 ins->type = STACK_OBJ;
5774 ++ip;
5775 *sp++ = ins;
5776 break;
5777 case CEE_LDC_I4_M1:
5778 CHECK_STACK_OVF (1);
5779 EMIT_NEW_ICONST (cfg, ins, -1);
5780 ++ip;
5781 *sp++ = ins;
5782 break;
5783 case CEE_LDC_I4_0:
5784 case CEE_LDC_I4_1:
5785 case CEE_LDC_I4_2:
5786 case CEE_LDC_I4_3:
5787 case CEE_LDC_I4_4:
5788 case CEE_LDC_I4_5:
5789 case CEE_LDC_I4_6:
5790 case CEE_LDC_I4_7:
5791 case CEE_LDC_I4_8:
5792 CHECK_STACK_OVF (1);
5793 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5794 ++ip;
5795 *sp++ = ins;
5796 break;
5797 case CEE_LDC_I4_S:
5798 CHECK_OPSIZE (2);
5799 CHECK_STACK_OVF (1);
5800 ++ip;
5801 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5802 ++ip;
5803 *sp++ = ins;
5804 break;
5805 case CEE_LDC_I4:
5806 CHECK_OPSIZE (5);
5807 CHECK_STACK_OVF (1);
5808 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5809 ip += 5;
5810 *sp++ = ins;
5811 break;
5812 case CEE_LDC_I8:
5813 CHECK_OPSIZE (9);
5814 CHECK_STACK_OVF (1);
5815 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5816 ins->type = STACK_I8;
5817 ins->dreg = alloc_dreg (cfg, STACK_I8);
5818 ++ip;
5819 ins->inst_l = (gint64)read64 (ip);
5820 MONO_ADD_INS (bblock, ins);
5821 ip += 8;
5822 *sp++ = ins;
5823 break;
5824 case CEE_LDC_R4: {
5825 float *f;
5826 gboolean use_aotconst = FALSE;
5828 #ifdef TARGET_POWERPC
5829 /* FIXME: Clean this up */
5830 if (cfg->compile_aot)
5831 use_aotconst = TRUE;
5832 #endif
5834 /* FIXME: we should really allocate this only late in the compilation process */
5835 f = mono_domain_alloc (cfg->domain, sizeof (float));
5836 CHECK_OPSIZE (5);
5837 CHECK_STACK_OVF (1);
5839 if (use_aotconst) {
5840 MonoInst *cons;
5841 int dreg;
5843 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5845 dreg = alloc_freg (cfg);
5846 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5847 ins->type = STACK_R8;
5848 } else {
5849 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5850 ins->type = STACK_R8;
5851 ins->dreg = alloc_dreg (cfg, STACK_R8);
5852 ins->inst_p0 = f;
5853 MONO_ADD_INS (bblock, ins);
5855 ++ip;
5856 readr4 (ip, f);
5857 ip += 4;
5858 *sp++ = ins;
5859 break;
5861 case CEE_LDC_R8: {
5862 double *d;
5863 gboolean use_aotconst = FALSE;
5865 #ifdef TARGET_POWERPC
5866 /* FIXME: Clean this up */
5867 if (cfg->compile_aot)
5868 use_aotconst = TRUE;
5869 #endif
5871 /* FIXME: we should really allocate this only late in the compilation process */
5872 d = mono_domain_alloc (cfg->domain, sizeof (double));
5873 CHECK_OPSIZE (9);
5874 CHECK_STACK_OVF (1);
5876 if (use_aotconst) {
5877 MonoInst *cons;
5878 int dreg;
5880 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5882 dreg = alloc_freg (cfg);
5883 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5884 ins->type = STACK_R8;
5885 } else {
5886 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5887 ins->type = STACK_R8;
5888 ins->dreg = alloc_dreg (cfg, STACK_R8);
5889 ins->inst_p0 = d;
5890 MONO_ADD_INS (bblock, ins);
5892 ++ip;
5893 readr8 (ip, d);
5894 ip += 8;
5895 *sp++ = ins;
5896 break;
5898 case CEE_DUP: {
5899 MonoInst *temp, *store;
5900 CHECK_STACK (1);
5901 CHECK_STACK_OVF (1);
5902 sp--;
5903 ins = *sp;
5905 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5906 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5908 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5909 *sp++ = ins;
5911 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5912 *sp++ = ins;
5914 ++ip;
5915 inline_costs += 2;
5916 break;
5918 case CEE_POP:
5919 CHECK_STACK (1);
5920 ip++;
5921 --sp;
5923 #ifdef TARGET_X86
5924 if (sp [0]->type == STACK_R8)
5925 /* we need to pop the value from the x86 FP stack */
5926 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5927 #endif
5928 break;
5929 case CEE_JMP: {
5930 MonoCallInst *call;
5932 INLINE_FAILURE;
5934 CHECK_OPSIZE (5);
5935 if (stack_start != sp)
5936 UNVERIFIED;
5937 token = read32 (ip + 1);
5938 /* FIXME: check the signature matches */
5939 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5941 if (!cmethod)
5942 goto load_error;
5944 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5945 GENERIC_SHARING_FAILURE (CEE_JMP);
5947 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5948 CHECK_CFG_EXCEPTION;
5950 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5952 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5953 int i, n;
5955 /* Handle tail calls similarly to calls */
5956 n = fsig->param_count + fsig->hasthis;
5958 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5959 call->method = cmethod;
5960 call->tail_call = TRUE;
5961 call->signature = mono_method_signature (cmethod);
5962 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5963 call->inst.inst_p0 = cmethod;
5964 for (i = 0; i < n; ++i)
5965 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5967 mono_arch_emit_call (cfg, call);
5968 MONO_ADD_INS (bblock, (MonoInst*)call);
5970 #else
5971 for (i = 0; i < num_args; ++i)
5972 /* Prevent arguments from being optimized away */
5973 arg_array [i]->flags |= MONO_INST_VOLATILE;
5975 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5976 ins = (MonoInst*)call;
5977 ins->inst_p0 = cmethod;
5978 MONO_ADD_INS (bblock, ins);
5979 #endif
5981 ip += 5;
5982 start_new_bblock = 1;
5983 break;
5985 case CEE_CALLI:
5986 case CEE_CALL:
5987 case CEE_CALLVIRT: {
5988 MonoInst *addr = NULL;
5989 MonoMethodSignature *fsig = NULL;
5990 int array_rank = 0;
5991 int virtual = *ip == CEE_CALLVIRT;
5992 int calli = *ip == CEE_CALLI;
5993 gboolean pass_imt_from_rgctx = FALSE;
5994 MonoInst *imt_arg = NULL;
5995 gboolean pass_vtable = FALSE;
5996 gboolean pass_mrgctx = FALSE;
5997 MonoInst *vtable_arg = NULL;
5998 gboolean check_this = FALSE;
5999 gboolean supported_tail_call = FALSE;
6001 CHECK_OPSIZE (5);
6002 token = read32 (ip + 1);
6004 if (calli) {
6005 cmethod = NULL;
6006 CHECK_STACK (1);
6007 --sp;
6008 addr = *sp;
6009 if (method->wrapper_type != MONO_WRAPPER_NONE)
6010 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6011 else
6012 fsig = mono_metadata_parse_signature (image, token);
6014 n = fsig->param_count + fsig->hasthis;
6016 if (method->dynamic && fsig->pinvoke) {
6017 MonoInst *args [3];
6020 * This is a call through a function pointer using a pinvoke
6021 * signature. Have to create a wrapper and call that instead.
6022 * FIXME: This is very slow, need to create a wrapper at JIT time
6023 * instead based on the signature.
6025 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6026 EMIT_NEW_PCONST (cfg, args [1], fsig);
6027 args [2] = addr;
6028 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6030 } else {
6031 MonoMethod *cil_method;
6033 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6034 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6035 cil_method = cmethod;
6036 } else if (constrained_call) {
6037 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6039 * This is needed since get_method_constrained can't find
6040 * the method in klass representing a type var.
6041 * The type var is guaranteed to be a reference type in this
6042 * case.
6044 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6045 cil_method = cmethod;
6046 g_assert (!cmethod->klass->valuetype);
6047 } else {
6048 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6050 } else {
6051 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6052 cil_method = cmethod;
6055 if (!cmethod)
6056 goto load_error;
6057 if (!dont_verify && !cfg->skip_visibility) {
6058 MonoMethod *target_method = cil_method;
6059 if (method->is_inflated) {
6060 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6062 if (!mono_method_can_access_method (method_definition, target_method) &&
6063 !mono_method_can_access_method (method, cil_method))
6064 METHOD_ACCESS_FAILURE;
6067 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6068 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6070 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6071 /* MS.NET seems to silently convert this to a callvirt */
6072 virtual = 1;
6076 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6077 * converts to a callvirt.
6079 * tests/bug-515884.il is an example of this behavior
6081 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6082 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6083 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6084 virtual = 1;
6087 if (!cmethod->klass->inited)
6088 if (!mono_class_init (cmethod->klass))
6089 goto load_error;
6091 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6092 mini_class_is_system_array (cmethod->klass)) {
6093 array_rank = cmethod->klass->rank;
6094 fsig = mono_method_signature (cmethod);
6095 } else {
6096 fsig = mono_method_signature (cmethod);
6098 if (!fsig)
6099 goto load_error;
6101 if (fsig->pinvoke) {
6102 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6103 check_for_pending_exc, FALSE);
6104 fsig = mono_method_signature (wrapper);
6105 } else if (constrained_call) {
6106 fsig = mono_method_signature (cmethod);
6107 } else {
6108 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6112 mono_save_token_info (cfg, image, token, cil_method);
6114 n = fsig->param_count + fsig->hasthis;
6116 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6117 if (check_linkdemand (cfg, method, cmethod))
6118 INLINE_FAILURE;
6119 CHECK_CFG_EXCEPTION;
6122 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6123 g_assert_not_reached ();
6126 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6127 UNVERIFIED;
6129 if (!cfg->generic_sharing_context && cmethod)
6130 g_assert (!mono_method_check_context_used (cmethod));
6132 CHECK_STACK (n);
6134 //g_assert (!virtual || fsig->hasthis);
6136 sp -= n;
6138 if (constrained_call) {
6140 * We have the `constrained.' prefix opcode.
6142 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6144 * The type parameter is instantiated as a valuetype,
6145 * but that type doesn't override the method we're
6146 * calling, so we need to box `this'.
6148 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6149 ins->klass = constrained_call;
6150 sp [0] = handle_box (cfg, ins, constrained_call);
6151 CHECK_CFG_EXCEPTION;
6152 } else if (!constrained_call->valuetype) {
6153 int dreg = alloc_preg (cfg);
6156 * The type parameter is instantiated as a reference
6157 * type. We have a managed pointer on the stack, so
6158 * we need to dereference it here.
6160 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6161 ins->type = STACK_OBJ;
6162 sp [0] = ins;
6163 } else if (cmethod->klass->valuetype)
6164 virtual = 0;
6165 constrained_call = NULL;
6168 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6169 UNVERIFIED;
6172 * If the callee is a shared method, then its static cctor
6173 * might not get called after the call was patched.
6175 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6176 emit_generic_class_init (cfg, cmethod->klass);
6177 CHECK_TYPELOAD (cmethod->klass);
6180 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6181 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6182 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6183 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6184 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6187 * Pass vtable iff target method might
6188 * be shared, which means that sharing
6189 * is enabled for its class and its
6190 * context is sharable (and it's not a
6191 * generic method).
6193 if (sharing_enabled && context_sharable &&
6194 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6195 pass_vtable = TRUE;
6198 if (cmethod && mini_method_get_context (cmethod) &&
6199 mini_method_get_context (cmethod)->method_inst) {
6200 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6201 MonoGenericContext *context = mini_method_get_context (cmethod);
6202 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6204 g_assert (!pass_vtable);
6206 if (sharing_enabled && context_sharable)
6207 pass_mrgctx = TRUE;
6210 if (cfg->generic_sharing_context && cmethod) {
6211 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6213 context_used = mono_method_check_context_used (cmethod);
6215 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6216 /* Generic method interface
6217 calls are resolved via a
6218 helper function and don't
6219 need an imt. */
6220 if (!cmethod_context || !cmethod_context->method_inst)
6221 pass_imt_from_rgctx = TRUE;
6225 * If a shared method calls another
6226 * shared method then the caller must
6227 * have a generic sharing context
6228 * because the magic trampoline
6229 * requires it. FIXME: We shouldn't
6230 * have to force the vtable/mrgctx
6231 * variable here. Instead there
6232 * should be a flag in the cfg to
6233 * request a generic sharing context.
6235 if (context_used &&
6236 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6237 mono_get_vtable_var (cfg);
6240 if (pass_vtable) {
6241 if (context_used) {
6242 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6243 } else {
6244 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6246 CHECK_TYPELOAD (cmethod->klass);
6247 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6251 if (pass_mrgctx) {
6252 g_assert (!vtable_arg);
6254 if (!cfg->compile_aot) {
6256 * emit_get_rgctx_method () calls mono_class_vtable () so check
6257 * for type load errors before.
6259 mono_class_setup_vtable (cmethod->klass);
6260 CHECK_TYPELOAD (cmethod->klass);
6263 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6265 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6266 MONO_METHOD_IS_FINAL (cmethod)) {
6267 if (virtual)
6268 check_this = TRUE;
6269 virtual = 0;
6273 if (pass_imt_from_rgctx) {
6274 g_assert (!pass_vtable);
6275 g_assert (cmethod);
6277 imt_arg = emit_get_rgctx_method (cfg, context_used,
6278 cmethod, MONO_RGCTX_INFO_METHOD);
6281 if (check_this)
6282 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6284 /* Calling virtual generic methods */
6285 if (cmethod && virtual &&
6286 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6287 !(MONO_METHOD_IS_FINAL (cmethod) &&
6288 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6289 mono_method_signature (cmethod)->generic_param_count) {
6290 MonoInst *this_temp, *this_arg_temp, *store;
6291 MonoInst *iargs [4];
6293 g_assert (mono_method_signature (cmethod)->is_inflated);
6295 /* Prevent inlining of methods that contain indirect calls */
6296 INLINE_FAILURE;
6298 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6299 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6300 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6301 g_assert (!imt_arg);
6302 if (!context_used)
6303 g_assert (cmethod->is_inflated);
6304 imt_arg = emit_get_rgctx_method (cfg, context_used,
6305 cmethod, MONO_RGCTX_INFO_METHOD);
6306 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6307 } else
6308 #endif
6310 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6311 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6312 MONO_ADD_INS (bblock, store);
6314 /* FIXME: This should be a managed pointer */
6315 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6317 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6318 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6319 cmethod, MONO_RGCTX_INFO_METHOD);
6320 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6321 addr = mono_emit_jit_icall (cfg,
6322 mono_helper_compile_generic_method, iargs);
6324 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6326 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6329 if (!MONO_TYPE_IS_VOID (fsig->ret))
6330 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6332 CHECK_CFG_EXCEPTION;
6334 ip += 5;
6335 ins_flag = 0;
6336 break;
6339 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6340 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6341 #else
6342 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6343 #endif
6345 /* Tail prefix */
6346 /* FIXME: runtime generic context pointer for jumps? */
6347 /* FIXME: handle this for generic sharing eventually */
6348 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6349 MonoCallInst *call;
6351 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6352 INLINE_FAILURE;
6354 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6355 /* Handle tail calls similarly to calls */
6356 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6357 #else
6358 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6359 call->tail_call = TRUE;
6360 call->method = cmethod;
6361 call->signature = mono_method_signature (cmethod);
6364 * We implement tail calls by storing the actual arguments into the
6365 * argument variables, then emitting a CEE_JMP.
6367 for (i = 0; i < n; ++i) {
6368 /* Prevent argument from being register allocated */
6369 arg_array [i]->flags |= MONO_INST_VOLATILE;
6370 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6372 #endif
6374 ins = (MonoInst*)call;
6375 ins->inst_p0 = cmethod;
6376 ins->inst_p1 = arg_array [0];
6377 MONO_ADD_INS (bblock, ins);
6378 link_bblock (cfg, bblock, end_bblock);
6379 start_new_bblock = 1;
6381 CHECK_CFG_EXCEPTION;
6383 /* skip CEE_RET as well */
6384 ip += 6;
6385 ins_flag = 0;
6386 break;
6389 /* Conversion to a JIT intrinsic */
6390 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6391 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6392 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6393 *sp = ins;
6394 sp++;
6397 CHECK_CFG_EXCEPTION;
6399 ip += 5;
6400 ins_flag = 0;
6401 break;
6404 /* Inlining */
6405 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6406 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6407 mono_method_check_inlining (cfg, cmethod) &&
6408 !g_list_find (dont_inline, cmethod)) {
6409 int costs;
6410 gboolean allways = FALSE;
6412 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6413 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6414 /* Prevent inlining of methods that call wrappers */
6415 INLINE_FAILURE;
6416 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6417 allways = TRUE;
6420 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6421 ip += 5;
6422 cfg->real_offset += 5;
6423 bblock = cfg->cbb;
6425 if (!MONO_TYPE_IS_VOID (fsig->ret))
6426 /* *sp is already set by inline_method */
6427 sp++;
6429 inline_costs += costs;
6430 ins_flag = 0;
6431 break;
6435 inline_costs += 10 * num_calls++;
6437 /* Tail recursion elimination */
6438 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6439 gboolean has_vtargs = FALSE;
6440 int i;
6442 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6443 INLINE_FAILURE;
6445 /* keep it simple */
6446 for (i = fsig->param_count - 1; i >= 0; i--) {
6447 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6448 has_vtargs = TRUE;
6451 if (!has_vtargs) {
6452 for (i = 0; i < n; ++i)
6453 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6454 MONO_INST_NEW (cfg, ins, OP_BR);
6455 MONO_ADD_INS (bblock, ins);
6456 tblock = start_bblock->out_bb [0];
6457 link_bblock (cfg, bblock, tblock);
6458 ins->inst_target_bb = tblock;
6459 start_new_bblock = 1;
6461 /* skip the CEE_RET, too */
6462 if (ip_in_bb (cfg, bblock, ip + 5))
6463 ip += 6;
6464 else
6465 ip += 5;
6467 ins_flag = 0;
6468 break;
6472 /* Generic sharing */
6473 /* FIXME: only do this for generic methods if
6474 they are not shared! */
6475 if (context_used && !imt_arg && !array_rank &&
6476 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6477 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6478 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6479 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6480 INLINE_FAILURE;
6482 g_assert (cfg->generic_sharing_context && cmethod);
6483 g_assert (!addr);
6486 * We are compiling a call to a
6487 * generic method from shared code,
6488 * which means that we have to look up
6489 * the method in the rgctx and do an
6490 * indirect call.
6492 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6495 /* Indirect calls */
6496 if (addr) {
6497 g_assert (!imt_arg);
6499 if (*ip == CEE_CALL)
6500 g_assert (context_used);
6501 else if (*ip == CEE_CALLI)
6502 g_assert (!vtable_arg);
6503 else
6504 /* FIXME: what the hell is this??? */
6505 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6506 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6508 /* Prevent inlining of methods with indirect calls */
6509 INLINE_FAILURE;
6511 if (vtable_arg) {
6512 #ifdef MONO_ARCH_RGCTX_REG
6513 MonoCallInst *call;
6514 int rgctx_reg = mono_alloc_preg (cfg);
6516 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6517 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6518 call = (MonoCallInst*)ins;
6519 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6520 cfg->uses_rgctx_reg = TRUE;
6521 call->rgctx_reg = TRUE;
6522 #else
6523 NOT_IMPLEMENTED;
6524 #endif
6525 } else {
6526 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6528 * Instead of emitting an indirect call, emit a direct call
6529 * with the contents of the aotconst as the patch info.
6531 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6532 NULLIFY_INS (addr);
6533 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6534 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6535 NULLIFY_INS (addr);
6536 } else {
6537 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6540 if (!MONO_TYPE_IS_VOID (fsig->ret))
6541 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6543 CHECK_CFG_EXCEPTION;
6545 ip += 5;
6546 ins_flag = 0;
6547 break;
6550 /* Array methods */
6551 if (array_rank) {
6552 MonoInst *addr;
6554 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6555 if (sp [fsig->param_count]->type == STACK_OBJ) {
6556 MonoInst *iargs [2];
6558 iargs [0] = sp [0];
6559 iargs [1] = sp [fsig->param_count];
6561 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6564 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6565 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6566 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6567 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6571 *sp++ = ins;
6572 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6573 if (!cmethod->klass->element_class->valuetype && !readonly)
6574 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6575 CHECK_TYPELOAD (cmethod->klass);
6577 readonly = FALSE;
6578 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6579 *sp++ = addr;
6580 } else {
6581 g_assert_not_reached ();
6584 CHECK_CFG_EXCEPTION;
6586 ip += 5;
6587 ins_flag = 0;
6588 break;
6591 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6592 if (ins) {
6593 if (!MONO_TYPE_IS_VOID (fsig->ret))
6594 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6596 CHECK_CFG_EXCEPTION;
6598 ip += 5;
6599 ins_flag = 0;
6600 break;
6603 /* Common call */
6604 INLINE_FAILURE;
6605 if (vtable_arg) {
6606 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6607 NULL, vtable_arg);
6608 } else if (imt_arg) {
6609 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6610 } else {
6611 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6614 if (!MONO_TYPE_IS_VOID (fsig->ret))
6615 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6617 CHECK_CFG_EXCEPTION;
6619 ip += 5;
6620 ins_flag = 0;
6621 break;
6623 case CEE_RET:
6624 if (cfg->method != method) {
6625 /* return from inlined method */
6627 * If in_count == 0, that means the ret is unreachable due to
6628 * being preceeded by a throw. In that case, inline_method () will
6629 * handle setting the return value
6630 * (test case: test_0_inline_throw ()).
6632 if (return_var && cfg->cbb->in_count) {
6633 MonoInst *store;
6634 CHECK_STACK (1);
6635 --sp;
6636 //g_assert (returnvar != -1);
6637 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6638 cfg->ret_var_set = TRUE;
6640 } else {
6641 if (cfg->ret) {
6642 MonoType *ret_type = mono_method_signature (method)->ret;
6644 if (seq_points) {
6646 * Place a seq point here too even through the IL stack is not
6647 * empty, so a step over on
6648 * call <FOO>
6649 * ret
6650 * will work correctly.
6652 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6653 MONO_ADD_INS (cfg->cbb, ins);
6656 g_assert (!return_var);
6657 CHECK_STACK (1);
6658 --sp;
6659 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6660 MonoInst *ret_addr;
6662 if (!cfg->vret_addr) {
6663 MonoInst *ins;
6665 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6666 } else {
6667 EMIT_NEW_RETLOADA (cfg, ret_addr);
6669 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6670 ins->klass = mono_class_from_mono_type (ret_type);
6672 } else {
6673 #ifdef MONO_ARCH_SOFT_FLOAT
6674 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6675 MonoInst *iargs [1];
6676 MonoInst *conv;
6678 iargs [0] = *sp;
6679 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6680 mono_arch_emit_setret (cfg, method, conv);
6681 } else {
6682 mono_arch_emit_setret (cfg, method, *sp);
6684 #else
6685 mono_arch_emit_setret (cfg, method, *sp);
6686 #endif
6690 if (sp != stack_start)
6691 UNVERIFIED;
6692 MONO_INST_NEW (cfg, ins, OP_BR);
6693 ip++;
6694 ins->inst_target_bb = end_bblock;
6695 MONO_ADD_INS (bblock, ins);
6696 link_bblock (cfg, bblock, end_bblock);
6697 start_new_bblock = 1;
6698 break;
6699 case CEE_BR_S:
6700 CHECK_OPSIZE (2);
6701 MONO_INST_NEW (cfg, ins, OP_BR);
6702 ip++;
6703 target = ip + 1 + (signed char)(*ip);
6704 ++ip;
6705 GET_BBLOCK (cfg, tblock, target);
6706 link_bblock (cfg, bblock, tblock);
6707 ins->inst_target_bb = tblock;
6708 if (sp != stack_start) {
6709 handle_stack_args (cfg, stack_start, sp - stack_start);
6710 sp = stack_start;
6711 CHECK_UNVERIFIABLE (cfg);
6713 MONO_ADD_INS (bblock, ins);
6714 start_new_bblock = 1;
6715 inline_costs += BRANCH_COST;
6716 break;
6717 case CEE_BEQ_S:
6718 case CEE_BGE_S:
6719 case CEE_BGT_S:
6720 case CEE_BLE_S:
6721 case CEE_BLT_S:
6722 case CEE_BNE_UN_S:
6723 case CEE_BGE_UN_S:
6724 case CEE_BGT_UN_S:
6725 case CEE_BLE_UN_S:
6726 case CEE_BLT_UN_S:
6727 CHECK_OPSIZE (2);
6728 CHECK_STACK (2);
6729 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6730 ip++;
6731 target = ip + 1 + *(signed char*)ip;
6732 ip++;
6734 ADD_BINCOND (NULL);
6736 sp = stack_start;
6737 inline_costs += BRANCH_COST;
6738 break;
6739 case CEE_BR:
6740 CHECK_OPSIZE (5);
6741 MONO_INST_NEW (cfg, ins, OP_BR);
6742 ip++;
6744 target = ip + 4 + (gint32)read32(ip);
6745 ip += 4;
6746 GET_BBLOCK (cfg, tblock, target);
6747 link_bblock (cfg, bblock, tblock);
6748 ins->inst_target_bb = tblock;
6749 if (sp != stack_start) {
6750 handle_stack_args (cfg, stack_start, sp - stack_start);
6751 sp = stack_start;
6752 CHECK_UNVERIFIABLE (cfg);
6755 MONO_ADD_INS (bblock, ins);
6757 start_new_bblock = 1;
6758 inline_costs += BRANCH_COST;
6759 break;
6760 case CEE_BRFALSE_S:
6761 case CEE_BRTRUE_S:
6762 case CEE_BRFALSE:
6763 case CEE_BRTRUE: {
6764 MonoInst *cmp;
6765 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6766 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6767 guint32 opsize = is_short ? 1 : 4;
6769 CHECK_OPSIZE (opsize);
6770 CHECK_STACK (1);
6771 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6772 UNVERIFIED;
6773 ip ++;
6774 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6775 ip += opsize;
6777 sp--;
6779 GET_BBLOCK (cfg, tblock, target);
6780 link_bblock (cfg, bblock, tblock);
6781 GET_BBLOCK (cfg, tblock, ip);
6782 link_bblock (cfg, bblock, tblock);
6784 if (sp != stack_start) {
6785 handle_stack_args (cfg, stack_start, sp - stack_start);
6786 CHECK_UNVERIFIABLE (cfg);
6789 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6790 cmp->sreg1 = sp [0]->dreg;
6791 type_from_op (cmp, sp [0], NULL);
6792 CHECK_TYPE (cmp);
6794 #if SIZEOF_REGISTER == 4
6795 if (cmp->opcode == OP_LCOMPARE_IMM) {
6796 /* Convert it to OP_LCOMPARE */
6797 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6798 ins->type = STACK_I8;
6799 ins->dreg = alloc_dreg (cfg, STACK_I8);
6800 ins->inst_l = 0;
6801 MONO_ADD_INS (bblock, ins);
6802 cmp->opcode = OP_LCOMPARE;
6803 cmp->sreg2 = ins->dreg;
6805 #endif
6806 MONO_ADD_INS (bblock, cmp);
6808 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6809 type_from_op (ins, sp [0], NULL);
6810 MONO_ADD_INS (bblock, ins);
6811 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6812 GET_BBLOCK (cfg, tblock, target);
6813 ins->inst_true_bb = tblock;
6814 GET_BBLOCK (cfg, tblock, ip);
6815 ins->inst_false_bb = tblock;
6816 start_new_bblock = 2;
6818 sp = stack_start;
6819 inline_costs += BRANCH_COST;
6820 break;
6822 case CEE_BEQ:
6823 case CEE_BGE:
6824 case CEE_BGT:
6825 case CEE_BLE:
6826 case CEE_BLT:
6827 case CEE_BNE_UN:
6828 case CEE_BGE_UN:
6829 case CEE_BGT_UN:
6830 case CEE_BLE_UN:
6831 case CEE_BLT_UN:
6832 CHECK_OPSIZE (5);
6833 CHECK_STACK (2);
6834 MONO_INST_NEW (cfg, ins, *ip);
6835 ip++;
6836 target = ip + 4 + (gint32)read32(ip);
6837 ip += 4;
6839 ADD_BINCOND (NULL);
6841 sp = stack_start;
6842 inline_costs += BRANCH_COST;
6843 break;
6844 case CEE_SWITCH: {
6845 MonoInst *src1;
6846 MonoBasicBlock **targets;
6847 MonoBasicBlock *default_bblock;
6848 MonoJumpInfoBBTable *table;
6849 int offset_reg = alloc_preg (cfg);
6850 int target_reg = alloc_preg (cfg);
6851 int table_reg = alloc_preg (cfg);
6852 int sum_reg = alloc_preg (cfg);
6853 gboolean use_op_switch;
6855 CHECK_OPSIZE (5);
6856 CHECK_STACK (1);
6857 n = read32 (ip + 1);
6858 --sp;
6859 src1 = sp [0];
6860 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6861 UNVERIFIED;
6863 ip += 5;
6864 CHECK_OPSIZE (n * sizeof (guint32));
6865 target = ip + n * sizeof (guint32);
6867 GET_BBLOCK (cfg, default_bblock, target);
6869 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6870 for (i = 0; i < n; ++i) {
6871 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6872 targets [i] = tblock;
6873 ip += 4;
6876 if (sp != stack_start) {
6878 * Link the current bb with the targets as well, so handle_stack_args
6879 * will set their in_stack correctly.
6881 link_bblock (cfg, bblock, default_bblock);
6882 for (i = 0; i < n; ++i)
6883 link_bblock (cfg, bblock, targets [i]);
6885 handle_stack_args (cfg, stack_start, sp - stack_start);
6886 sp = stack_start;
6887 CHECK_UNVERIFIABLE (cfg);
6890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6891 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6892 bblock = cfg->cbb;
6894 for (i = 0; i < n; ++i)
6895 link_bblock (cfg, bblock, targets [i]);
6897 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6898 table->table = targets;
6899 table->table_size = n;
6901 use_op_switch = FALSE;
6902 #ifdef TARGET_ARM
6903 /* ARM implements SWITCH statements differently */
6904 /* FIXME: Make it use the generic implementation */
6905 if (!cfg->compile_aot)
6906 use_op_switch = TRUE;
6907 #endif
6909 if (COMPILE_LLVM (cfg))
6910 use_op_switch = TRUE;
6912 cfg->cbb->has_jump_table = 1;
6914 if (use_op_switch) {
6915 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6916 ins->sreg1 = src1->dreg;
6917 ins->inst_p0 = table;
6918 ins->inst_many_bb = targets;
6919 ins->klass = GUINT_TO_POINTER (n);
6920 MONO_ADD_INS (cfg->cbb, ins);
6921 } else {
6922 if (sizeof (gpointer) == 8)
6923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6924 else
6925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6927 #if SIZEOF_REGISTER == 8
6928 /* The upper word might not be zero, and we add it to a 64 bit address later */
6929 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6930 #endif
6932 if (cfg->compile_aot) {
6933 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6934 } else {
6935 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6936 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6937 ins->inst_p0 = table;
6938 ins->dreg = table_reg;
6939 MONO_ADD_INS (cfg->cbb, ins);
6942 /* FIXME: Use load_memindex */
6943 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6945 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6947 start_new_bblock = 1;
6948 inline_costs += (BRANCH_COST * 2);
6949 break;
6951 case CEE_LDIND_I1:
6952 case CEE_LDIND_U1:
6953 case CEE_LDIND_I2:
6954 case CEE_LDIND_U2:
6955 case CEE_LDIND_I4:
6956 case CEE_LDIND_U4:
6957 case CEE_LDIND_I8:
6958 case CEE_LDIND_I:
6959 case CEE_LDIND_R4:
6960 case CEE_LDIND_R8:
6961 case CEE_LDIND_REF:
6962 CHECK_STACK (1);
6963 --sp;
6965 switch (*ip) {
6966 case CEE_LDIND_R4:
6967 case CEE_LDIND_R8:
6968 dreg = alloc_freg (cfg);
6969 break;
6970 case CEE_LDIND_I8:
6971 dreg = alloc_lreg (cfg);
6972 break;
6973 default:
6974 dreg = alloc_preg (cfg);
6977 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6978 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6979 ins->flags |= ins_flag;
6980 ins_flag = 0;
6981 MONO_ADD_INS (bblock, ins);
6982 *sp++ = ins;
6983 ++ip;
6984 break;
6985 case CEE_STIND_REF:
6986 case CEE_STIND_I1:
6987 case CEE_STIND_I2:
6988 case CEE_STIND_I4:
6989 case CEE_STIND_I8:
6990 case CEE_STIND_R4:
6991 case CEE_STIND_R8:
6992 case CEE_STIND_I:
6993 CHECK_STACK (2);
6994 sp -= 2;
6996 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6997 ins->flags |= ins_flag;
6998 ins_flag = 0;
6999 MONO_ADD_INS (bblock, ins);
7001 #if HAVE_WRITE_BARRIERS
7002 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7003 MonoInst *dummy_use;
7004 /* insert call to write barrier */
7005 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7006 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7007 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7009 #endif
7011 inline_costs += 1;
7012 ++ip;
7013 break;
7015 case CEE_MUL:
7016 CHECK_STACK (2);
7018 MONO_INST_NEW (cfg, ins, (*ip));
7019 sp -= 2;
7020 ins->sreg1 = sp [0]->dreg;
7021 ins->sreg2 = sp [1]->dreg;
7022 type_from_op (ins, sp [0], sp [1]);
7023 CHECK_TYPE (ins);
7024 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7026 /* Use the immediate opcodes if possible */
7027 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7028 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7029 if (imm_opcode != -1) {
7030 ins->opcode = imm_opcode;
7031 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7032 ins->sreg2 = -1;
7034 sp [1]->opcode = OP_NOP;
7038 MONO_ADD_INS ((cfg)->cbb, (ins));
7040 *sp++ = mono_decompose_opcode (cfg, ins);
7041 ip++;
7042 break;
7043 case CEE_ADD:
7044 case CEE_SUB:
7045 case CEE_DIV:
7046 case CEE_DIV_UN:
7047 case CEE_REM:
7048 case CEE_REM_UN:
7049 case CEE_AND:
7050 case CEE_OR:
7051 case CEE_XOR:
7052 case CEE_SHL:
7053 case CEE_SHR:
7054 case CEE_SHR_UN:
7055 CHECK_STACK (2);
7057 MONO_INST_NEW (cfg, ins, (*ip));
7058 sp -= 2;
7059 ins->sreg1 = sp [0]->dreg;
7060 ins->sreg2 = sp [1]->dreg;
7061 type_from_op (ins, sp [0], sp [1]);
7062 CHECK_TYPE (ins);
7063 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7064 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7066 /* FIXME: Pass opcode to is_inst_imm */
7068 /* Use the immediate opcodes if possible */
7069 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7070 int imm_opcode;
7072 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7073 if (imm_opcode != -1) {
7074 ins->opcode = imm_opcode;
7075 if (sp [1]->opcode == OP_I8CONST) {
7076 #if SIZEOF_REGISTER == 8
7077 ins->inst_imm = sp [1]->inst_l;
7078 #else
7079 ins->inst_ls_word = sp [1]->inst_ls_word;
7080 ins->inst_ms_word = sp [1]->inst_ms_word;
7081 #endif
7083 else
7084 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7085 ins->sreg2 = -1;
7087 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7088 if (sp [1]->next == NULL)
7089 sp [1]->opcode = OP_NOP;
7092 MONO_ADD_INS ((cfg)->cbb, (ins));
7094 *sp++ = mono_decompose_opcode (cfg, ins);
7095 ip++;
7096 break;
7097 case CEE_NEG:
7098 case CEE_NOT:
7099 case CEE_CONV_I1:
7100 case CEE_CONV_I2:
7101 case CEE_CONV_I4:
7102 case CEE_CONV_R4:
7103 case CEE_CONV_R8:
7104 case CEE_CONV_U4:
7105 case CEE_CONV_I8:
7106 case CEE_CONV_U8:
7107 case CEE_CONV_OVF_I8:
7108 case CEE_CONV_OVF_U8:
7109 case CEE_CONV_R_UN:
7110 CHECK_STACK (1);
7112 /* Special case this earlier so we have long constants in the IR */
7113 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7114 int data = sp [-1]->inst_c0;
7115 sp [-1]->opcode = OP_I8CONST;
7116 sp [-1]->type = STACK_I8;
7117 #if SIZEOF_REGISTER == 8
7118 if ((*ip) == CEE_CONV_U8)
7119 sp [-1]->inst_c0 = (guint32)data;
7120 else
7121 sp [-1]->inst_c0 = data;
7122 #else
7123 sp [-1]->inst_ls_word = data;
7124 if ((*ip) == CEE_CONV_U8)
7125 sp [-1]->inst_ms_word = 0;
7126 else
7127 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7128 #endif
7129 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7131 else {
7132 ADD_UNOP (*ip);
7134 ip++;
7135 break;
7136 case CEE_CONV_OVF_I4:
7137 case CEE_CONV_OVF_I1:
7138 case CEE_CONV_OVF_I2:
7139 case CEE_CONV_OVF_I:
7140 case CEE_CONV_OVF_U:
7141 CHECK_STACK (1);
7143 if (sp [-1]->type == STACK_R8) {
7144 ADD_UNOP (CEE_CONV_OVF_I8);
7145 ADD_UNOP (*ip);
7146 } else {
7147 ADD_UNOP (*ip);
7149 ip++;
7150 break;
7151 case CEE_CONV_OVF_U1:
7152 case CEE_CONV_OVF_U2:
7153 case CEE_CONV_OVF_U4:
7154 CHECK_STACK (1);
7156 if (sp [-1]->type == STACK_R8) {
7157 ADD_UNOP (CEE_CONV_OVF_U8);
7158 ADD_UNOP (*ip);
7159 } else {
7160 ADD_UNOP (*ip);
7162 ip++;
7163 break;
7164 case CEE_CONV_OVF_I1_UN:
7165 case CEE_CONV_OVF_I2_UN:
7166 case CEE_CONV_OVF_I4_UN:
7167 case CEE_CONV_OVF_I8_UN:
7168 case CEE_CONV_OVF_U1_UN:
7169 case CEE_CONV_OVF_U2_UN:
7170 case CEE_CONV_OVF_U4_UN:
7171 case CEE_CONV_OVF_U8_UN:
7172 case CEE_CONV_OVF_I_UN:
7173 case CEE_CONV_OVF_U_UN:
7174 case CEE_CONV_U2:
7175 case CEE_CONV_U1:
7176 case CEE_CONV_I:
7177 case CEE_CONV_U:
7178 CHECK_STACK (1);
7179 ADD_UNOP (*ip);
7180 CHECK_CFG_EXCEPTION;
7181 ip++;
7182 break;
7183 case CEE_ADD_OVF:
7184 case CEE_ADD_OVF_UN:
7185 case CEE_MUL_OVF:
7186 case CEE_MUL_OVF_UN:
7187 case CEE_SUB_OVF:
7188 case CEE_SUB_OVF_UN:
7189 CHECK_STACK (2);
7190 ADD_BINOP (*ip);
7191 ip++;
7192 break;
7193 case CEE_CPOBJ:
7194 CHECK_OPSIZE (5);
7195 CHECK_STACK (2);
7196 token = read32 (ip + 1);
7197 klass = mini_get_class (method, token, generic_context);
7198 CHECK_TYPELOAD (klass);
7199 sp -= 2;
7200 if (generic_class_is_reference_type (cfg, klass)) {
7201 MonoInst *store, *load;
7202 int dreg = alloc_preg (cfg);
7204 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7205 load->flags |= ins_flag;
7206 MONO_ADD_INS (cfg->cbb, load);
7208 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7209 store->flags |= ins_flag;
7210 MONO_ADD_INS (cfg->cbb, store);
7211 } else {
7212 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7214 ins_flag = 0;
7215 ip += 5;
7216 break;
7217 case CEE_LDOBJ: {
7218 int loc_index = -1;
7219 int stloc_len = 0;
7221 CHECK_OPSIZE (5);
7222 CHECK_STACK (1);
7223 --sp;
7224 token = read32 (ip + 1);
7225 klass = mini_get_class (method, token, generic_context);
7226 CHECK_TYPELOAD (klass);
7228 /* Optimize the common ldobj+stloc combination */
7229 switch (ip [5]) {
7230 case CEE_STLOC_S:
7231 loc_index = ip [6];
7232 stloc_len = 2;
7233 break;
7234 case CEE_STLOC_0:
7235 case CEE_STLOC_1:
7236 case CEE_STLOC_2:
7237 case CEE_STLOC_3:
7238 loc_index = ip [5] - CEE_STLOC_0;
7239 stloc_len = 1;
7240 break;
7241 default:
7242 break;
7245 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7246 CHECK_LOCAL (loc_index);
7248 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7249 ins->dreg = cfg->locals [loc_index]->dreg;
7250 ip += 5;
7251 ip += stloc_len;
7252 break;
7255 /* Optimize the ldobj+stobj combination */
7256 /* The reference case ends up being a load+store anyway */
7257 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7258 CHECK_STACK (1);
7260 sp --;
7262 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7264 ip += 5 + 5;
7265 ins_flag = 0;
7266 break;
7269 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7270 *sp++ = ins;
7272 ip += 5;
7273 ins_flag = 0;
7274 inline_costs += 1;
7275 break;
7277 case CEE_LDSTR:
7278 CHECK_STACK_OVF (1);
7279 CHECK_OPSIZE (5);
7280 n = read32 (ip + 1);
7282 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7283 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7284 ins->type = STACK_OBJ;
7285 *sp = ins;
7287 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7288 MonoInst *iargs [1];
7290 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7291 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7292 } else {
7293 if (cfg->opt & MONO_OPT_SHARED) {
7294 MonoInst *iargs [3];
7296 if (cfg->compile_aot) {
7297 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7299 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7300 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7301 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7302 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7303 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7304 } else {
7305 if (bblock->out_of_line) {
7306 MonoInst *iargs [2];
7308 if (image == mono_defaults.corlib) {
7310 * Avoid relocations in AOT and save some space by using a
7311 * version of helper_ldstr specialized to mscorlib.
7313 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7314 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7315 } else {
7316 /* Avoid creating the string object */
7317 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7318 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7319 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7322 else
7323 if (cfg->compile_aot) {
7324 NEW_LDSTRCONST (cfg, ins, image, n);
7325 *sp = ins;
7326 MONO_ADD_INS (bblock, ins);
7328 else {
7329 NEW_PCONST (cfg, ins, NULL);
7330 ins->type = STACK_OBJ;
7331 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7332 *sp = ins;
7333 MONO_ADD_INS (bblock, ins);
7338 sp++;
7339 ip += 5;
7340 break;
7341 case CEE_NEWOBJ: {
7342 MonoInst *iargs [2];
7343 MonoMethodSignature *fsig;
7344 MonoInst this_ins;
7345 MonoInst *alloc;
7346 MonoInst *vtable_arg = NULL;
7348 CHECK_OPSIZE (5);
7349 token = read32 (ip + 1);
7350 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7351 if (!cmethod)
7352 goto load_error;
7353 fsig = mono_method_get_signature (cmethod, image, token);
7354 if (!fsig)
7355 goto load_error;
7357 mono_save_token_info (cfg, image, token, cmethod);
7359 if (!mono_class_init (cmethod->klass))
7360 goto load_error;
7362 if (cfg->generic_sharing_context)
7363 context_used = mono_method_check_context_used (cmethod);
7365 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7366 if (check_linkdemand (cfg, method, cmethod))
7367 INLINE_FAILURE;
7368 CHECK_CFG_EXCEPTION;
7369 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7370 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7373 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7374 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7375 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7376 mono_class_vtable (cfg->domain, cmethod->klass);
7377 CHECK_TYPELOAD (cmethod->klass);
7379 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7380 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7381 } else {
7382 if (context_used) {
7383 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7384 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7385 } else {
7386 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7388 CHECK_TYPELOAD (cmethod->klass);
7389 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7394 n = fsig->param_count;
7395 CHECK_STACK (n);
7398 * Generate smaller code for the common newobj <exception> instruction in
7399 * argument checking code.
7401 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7402 is_exception_class (cmethod->klass) && n <= 2 &&
7403 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7404 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7405 MonoInst *iargs [3];
7407 g_assert (!vtable_arg);
7409 sp -= n;
7411 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7412 switch (n) {
7413 case 0:
7414 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7415 break;
7416 case 1:
7417 iargs [1] = sp [0];
7418 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7419 break;
7420 case 2:
7421 iargs [1] = sp [0];
7422 iargs [2] = sp [1];
7423 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7424 break;
7425 default:
7426 g_assert_not_reached ();
7429 ip += 5;
7430 inline_costs += 5;
7431 break;
7434 /* move the args to allow room for 'this' in the first position */
7435 while (n--) {
7436 --sp;
7437 sp [1] = sp [0];
7440 /* check_call_signature () requires sp[0] to be set */
7441 this_ins.type = STACK_OBJ;
7442 sp [0] = &this_ins;
7443 if (check_call_signature (cfg, fsig, sp))
7444 UNVERIFIED;
7446 iargs [0] = NULL;
7448 if (mini_class_is_system_array (cmethod->klass)) {
7449 g_assert (!vtable_arg);
7451 *sp = emit_get_rgctx_method (cfg, context_used,
7452 cmethod, MONO_RGCTX_INFO_METHOD);
7454 /* Avoid varargs in the common case */
7455 if (fsig->param_count == 1)
7456 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7457 else if (fsig->param_count == 2)
7458 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7459 else if (fsig->param_count == 3)
7460 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7461 else
7462 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7463 } else if (cmethod->string_ctor) {
7464 g_assert (!context_used);
7465 g_assert (!vtable_arg);
7466 /* we simply pass a null pointer */
7467 EMIT_NEW_PCONST (cfg, *sp, NULL);
7468 /* now call the string ctor */
7469 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7470 } else {
7471 MonoInst* callvirt_this_arg = NULL;
7473 if (cmethod->klass->valuetype) {
7474 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7475 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7476 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7478 alloc = NULL;
7481 * The code generated by mini_emit_virtual_call () expects
7482 * iargs [0] to be a boxed instance, but luckily the vcall
7483 * will be transformed into a normal call there.
7485 } else if (context_used) {
7486 MonoInst *data;
7487 int rgctx_info;
7489 if (cfg->opt & MONO_OPT_SHARED)
7490 rgctx_info = MONO_RGCTX_INFO_KLASS;
7491 else
7492 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7493 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7495 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7496 *sp = alloc;
7497 } else {
7498 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7500 CHECK_TYPELOAD (cmethod->klass);
7503 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7504 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7505 * As a workaround, we call class cctors before allocating objects.
7507 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7508 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7509 if (cfg->verbose_level > 2)
7510 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7511 class_inits = g_slist_prepend (class_inits, vtable);
7514 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7515 *sp = alloc;
7517 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7519 if (alloc)
7520 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7522 /* Now call the actual ctor */
7523 /* Avoid virtual calls to ctors if possible */
7524 if (cmethod->klass->marshalbyref)
7525 callvirt_this_arg = sp [0];
7527 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7528 mono_method_check_inlining (cfg, cmethod) &&
7529 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7530 !g_list_find (dont_inline, cmethod)) {
7531 int costs;
7533 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7534 cfg->real_offset += 5;
7535 bblock = cfg->cbb;
7537 inline_costs += costs - 5;
7538 } else {
7539 INLINE_FAILURE;
7540 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7542 } else if (context_used &&
7543 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7544 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7545 MonoInst *cmethod_addr;
7547 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7548 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7550 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7551 } else {
7552 INLINE_FAILURE;
7553 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7554 callvirt_this_arg, NULL, vtable_arg);
7558 if (alloc == NULL) {
7559 /* Valuetype */
7560 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7561 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7562 *sp++= ins;
7564 else
7565 *sp++ = alloc;
7567 ip += 5;
7568 inline_costs += 5;
7569 break;
7571 case CEE_CASTCLASS:
7572 CHECK_STACK (1);
7573 --sp;
7574 CHECK_OPSIZE (5);
7575 token = read32 (ip + 1);
7576 klass = mini_get_class (method, token, generic_context);
7577 CHECK_TYPELOAD (klass);
7578 if (sp [0]->type != STACK_OBJ)
7579 UNVERIFIED;
7581 if (cfg->generic_sharing_context)
7582 context_used = mono_class_check_context_used (klass);
7584 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7585 MonoInst *args [2];
7587 /* obj */
7588 args [0] = *sp;
7590 /* klass */
7591 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7593 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7594 *sp ++ = ins;
7595 ip += 5;
7596 inline_costs += 2;
7597 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7598 MonoMethod *mono_castclass;
7599 MonoInst *iargs [1];
7600 int costs;
7602 mono_castclass = mono_marshal_get_castclass (klass);
7603 iargs [0] = sp [0];
7605 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7606 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7607 g_assert (costs > 0);
7609 ip += 5;
7610 cfg->real_offset += 5;
7611 bblock = cfg->cbb;
7613 *sp++ = iargs [0];
7615 inline_costs += costs;
7617 else {
7618 ins = handle_castclass (cfg, klass, *sp, context_used);
7619 CHECK_CFG_EXCEPTION;
7620 bblock = cfg->cbb;
7621 *sp ++ = ins;
7622 ip += 5;
7624 break;
7625 case CEE_ISINST: {
7626 CHECK_STACK (1);
7627 --sp;
7628 CHECK_OPSIZE (5);
7629 token = read32 (ip + 1);
7630 klass = mini_get_class (method, token, generic_context);
7631 CHECK_TYPELOAD (klass);
7632 if (sp [0]->type != STACK_OBJ)
7633 UNVERIFIED;
7635 if (cfg->generic_sharing_context)
7636 context_used = mono_class_check_context_used (klass);
7638 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7639 MonoInst *args [2];
7641 /* obj */
7642 args [0] = *sp;
7644 /* klass */
7645 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7647 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7648 sp++;
7649 ip += 5;
7650 inline_costs += 2;
7651 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7652 MonoMethod *mono_isinst;
7653 MonoInst *iargs [1];
7654 int costs;
7656 mono_isinst = mono_marshal_get_isinst (klass);
7657 iargs [0] = sp [0];
7659 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7660 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7661 g_assert (costs > 0);
7663 ip += 5;
7664 cfg->real_offset += 5;
7665 bblock = cfg->cbb;
7667 *sp++= iargs [0];
7669 inline_costs += costs;
7671 else {
7672 ins = handle_isinst (cfg, klass, *sp, context_used);
7673 CHECK_CFG_EXCEPTION;
7674 bblock = cfg->cbb;
7675 *sp ++ = ins;
7676 ip += 5;
7678 break;
7680 case CEE_UNBOX_ANY: {
7681 CHECK_STACK (1);
7682 --sp;
7683 CHECK_OPSIZE (5);
7684 token = read32 (ip + 1);
7685 klass = mini_get_class (method, token, generic_context);
7686 CHECK_TYPELOAD (klass);
7688 mono_save_token_info (cfg, image, token, klass);
7690 if (cfg->generic_sharing_context)
7691 context_used = mono_class_check_context_used (klass);
7693 if (generic_class_is_reference_type (cfg, klass)) {
7694 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7695 if (context_used) {
7696 MonoInst *iargs [2];
7698 /* obj */
7699 iargs [0] = *sp;
7700 /* klass */
7701 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7702 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7703 *sp ++ = ins;
7704 ip += 5;
7705 inline_costs += 2;
7706 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7707 MonoMethod *mono_castclass;
7708 MonoInst *iargs [1];
7709 int costs;
7711 mono_castclass = mono_marshal_get_castclass (klass);
7712 iargs [0] = sp [0];
7714 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7715 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7717 g_assert (costs > 0);
7719 ip += 5;
7720 cfg->real_offset += 5;
7721 bblock = cfg->cbb;
7723 *sp++ = iargs [0];
7724 inline_costs += costs;
7725 } else {
7726 ins = handle_castclass (cfg, klass, *sp, 0);
7727 CHECK_CFG_EXCEPTION;
7728 bblock = cfg->cbb;
7729 *sp ++ = ins;
7730 ip += 5;
7732 break;
7735 if (mono_class_is_nullable (klass)) {
7736 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7737 *sp++= ins;
7738 ip += 5;
7739 break;
7742 /* UNBOX */
7743 ins = handle_unbox (cfg, klass, sp, context_used);
7744 *sp = ins;
7746 ip += 5;
7748 /* LDOBJ */
7749 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7750 *sp++ = ins;
7752 inline_costs += 2;
7753 break;
7755 case CEE_BOX: {
7756 MonoInst *val;
7758 CHECK_STACK (1);
7759 --sp;
7760 val = *sp;
7761 CHECK_OPSIZE (5);
7762 token = read32 (ip + 1);
7763 klass = mini_get_class (method, token, generic_context);
7764 CHECK_TYPELOAD (klass);
7766 mono_save_token_info (cfg, image, token, klass);
7768 if (cfg->generic_sharing_context)
7769 context_used = mono_class_check_context_used (klass);
7771 if (generic_class_is_reference_type (cfg, klass)) {
7772 *sp++ = val;
7773 ip += 5;
7774 break;
7777 if (klass == mono_defaults.void_class)
7778 UNVERIFIED;
7779 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7780 UNVERIFIED;
7781 /* frequent check in generic code: box (struct), brtrue */
7782 if (!mono_class_is_nullable (klass) &&
7783 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7784 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7785 ip += 5;
7786 MONO_INST_NEW (cfg, ins, OP_BR);
7787 if (*ip == CEE_BRTRUE_S) {
7788 CHECK_OPSIZE (2);
7789 ip++;
7790 target = ip + 1 + (signed char)(*ip);
7791 ip++;
7792 } else {
7793 CHECK_OPSIZE (5);
7794 ip++;
7795 target = ip + 4 + (gint)(read32 (ip));
7796 ip += 4;
7798 GET_BBLOCK (cfg, tblock, target);
7799 link_bblock (cfg, bblock, tblock);
7800 ins->inst_target_bb = tblock;
7801 GET_BBLOCK (cfg, tblock, ip);
7803 * This leads to some inconsistency, since the two bblocks are
7804 * not really connected, but it is needed for handling stack
7805 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7806 * FIXME: This should only be needed if sp != stack_start, but that
7807 * doesn't work for some reason (test failure in mcs/tests on x86).
7809 link_bblock (cfg, bblock, tblock);
7810 if (sp != stack_start) {
7811 handle_stack_args (cfg, stack_start, sp - stack_start);
7812 sp = stack_start;
7813 CHECK_UNVERIFIABLE (cfg);
7815 MONO_ADD_INS (bblock, ins);
7816 start_new_bblock = 1;
7817 break;
7820 if (context_used) {
7821 MonoInst *data;
7822 int rgctx_info;
7824 if (cfg->opt & MONO_OPT_SHARED)
7825 rgctx_info = MONO_RGCTX_INFO_KLASS;
7826 else
7827 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7828 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7829 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7830 } else {
7831 *sp++ = handle_box (cfg, val, klass);
7834 CHECK_CFG_EXCEPTION;
7835 ip += 5;
7836 inline_costs += 1;
7837 break;
7839 case CEE_UNBOX: {
7840 CHECK_STACK (1);
7841 --sp;
7842 CHECK_OPSIZE (5);
7843 token = read32 (ip + 1);
7844 klass = mini_get_class (method, token, generic_context);
7845 CHECK_TYPELOAD (klass);
7847 mono_save_token_info (cfg, image, token, klass);
7849 if (cfg->generic_sharing_context)
7850 context_used = mono_class_check_context_used (klass);
7852 if (mono_class_is_nullable (klass)) {
7853 MonoInst *val;
7855 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7856 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7858 *sp++= ins;
7859 } else {
7860 ins = handle_unbox (cfg, klass, sp, context_used);
7861 *sp++ = ins;
7863 ip += 5;
7864 inline_costs += 2;
7865 break;
7867 case CEE_LDFLD:
7868 case CEE_LDFLDA:
7869 case CEE_STFLD: {
7870 MonoClassField *field;
7871 int costs;
7872 guint foffset;
7874 if (*ip == CEE_STFLD) {
7875 CHECK_STACK (2);
7876 sp -= 2;
7877 } else {
7878 CHECK_STACK (1);
7879 --sp;
7881 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7882 UNVERIFIED;
7883 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7884 UNVERIFIED;
7885 CHECK_OPSIZE (5);
7886 token = read32 (ip + 1);
7887 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7888 field = mono_method_get_wrapper_data (method, token);
7889 klass = field->parent;
7891 else {
7892 field = mono_field_from_token (image, token, &klass, generic_context);
7894 if (!field)
7895 goto load_error;
7896 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7897 FIELD_ACCESS_FAILURE;
7898 mono_class_init (klass);
7900 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7901 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7902 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7903 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7906 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7907 if (*ip == CEE_STFLD) {
7908 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7909 UNVERIFIED;
7910 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7911 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7912 MonoInst *iargs [5];
7914 iargs [0] = sp [0];
7915 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7916 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7917 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7918 field->offset);
7919 iargs [4] = sp [1];
7921 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7922 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7923 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7924 g_assert (costs > 0);
7926 cfg->real_offset += 5;
7927 bblock = cfg->cbb;
7929 inline_costs += costs;
7930 } else {
7931 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7933 } else {
7934 MonoInst *store;
7936 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7938 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7940 #if HAVE_WRITE_BARRIERS
7941 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7942 /* insert call to write barrier */
7943 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7944 MonoInst *iargs [2], *dummy_use;
7945 int dreg;
7947 dreg = alloc_preg (cfg);
7948 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7949 iargs [1] = sp [1];
7950 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7952 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7954 #endif
7956 store->flags |= ins_flag;
7958 ins_flag = 0;
7959 ip += 5;
7960 break;
7963 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7964 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7965 MonoInst *iargs [4];
7967 iargs [0] = sp [0];
7968 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7969 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7970 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7971 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7972 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7973 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7974 bblock = cfg->cbb;
7975 g_assert (costs > 0);
7977 cfg->real_offset += 5;
7979 *sp++ = iargs [0];
7981 inline_costs += costs;
7982 } else {
7983 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7984 *sp++ = ins;
7986 } else {
7987 if (sp [0]->type == STACK_VTYPE) {
7988 MonoInst *var;
7990 /* Have to compute the address of the variable */
7992 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7993 if (!var)
7994 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7995 else
7996 g_assert (var->klass == klass);
7998 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7999 sp [0] = ins;
8002 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8004 if (*ip == CEE_LDFLDA) {
8005 dreg = alloc_preg (cfg);
8007 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8008 ins->klass = mono_class_from_mono_type (field->type);
8009 ins->type = STACK_MP;
8010 *sp++ = ins;
8011 } else {
8012 MonoInst *load;
8014 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8015 load->flags |= ins_flag;
8016 load->flags |= MONO_INST_FAULT;
8017 *sp++ = load;
8020 ins_flag = 0;
8021 ip += 5;
8022 break;
8024 case CEE_LDSFLD:
8025 case CEE_LDSFLDA:
8026 case CEE_STSFLD: {
8027 MonoClassField *field;
8028 gpointer addr = NULL;
8029 gboolean is_special_static;
8031 CHECK_OPSIZE (5);
8032 token = read32 (ip + 1);
8034 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8035 field = mono_method_get_wrapper_data (method, token);
8036 klass = field->parent;
8038 else
8039 field = mono_field_from_token (image, token, &klass, generic_context);
8040 if (!field)
8041 goto load_error;
8042 mono_class_init (klass);
8043 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8044 FIELD_ACCESS_FAILURE;
8046 /* if the class is Critical then transparent code cannot access it's fields */
8047 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8048 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8051 * We can only support shared generic static
8052 * field access on architectures where the
8053 * trampoline code has been extended to handle
8054 * the generic class init.
8056 #ifndef MONO_ARCH_VTABLE_REG
8057 GENERIC_SHARING_FAILURE (*ip);
8058 #endif
8060 if (cfg->generic_sharing_context)
8061 context_used = mono_class_check_context_used (klass);
8063 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8065 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8066 * to be called here.
8068 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8069 mono_class_vtable (cfg->domain, klass);
8070 CHECK_TYPELOAD (klass);
8072 mono_domain_lock (cfg->domain);
8073 if (cfg->domain->special_static_fields)
8074 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8075 mono_domain_unlock (cfg->domain);
8077 is_special_static = mono_class_field_is_special_static (field);
8079 /* Generate IR to compute the field address */
8080 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8082 * Fast access to TLS data
8083 * Inline version of get_thread_static_data () in
8084 * threads.c.
8086 guint32 offset;
8087 int idx, static_data_reg, array_reg, dreg;
8088 MonoInst *thread_ins;
8090 // offset &= 0x7fffffff;
8091 // idx = (offset >> 24) - 1;
8092 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8094 thread_ins = mono_get_thread_intrinsic (cfg);
8095 MONO_ADD_INS (cfg->cbb, thread_ins);
8096 static_data_reg = alloc_ireg (cfg);
8097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8099 if (cfg->compile_aot) {
8100 int offset_reg, offset2_reg, idx_reg;
8102 /* For TLS variables, this will return the TLS offset */
8103 EMIT_NEW_SFLDACONST (cfg, ins, field);
8104 offset_reg = ins->dreg;
8105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8106 idx_reg = alloc_ireg (cfg);
8107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8110 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8111 array_reg = alloc_ireg (cfg);
8112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8113 offset2_reg = alloc_ireg (cfg);
8114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8115 dreg = alloc_ireg (cfg);
8116 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8117 } else {
8118 offset = (gsize)addr & 0x7fffffff;
8119 idx = (offset >> 24) - 1;
8121 array_reg = alloc_ireg (cfg);
8122 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8123 dreg = alloc_ireg (cfg);
8124 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8126 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8127 (cfg->compile_aot && is_special_static) ||
8128 (context_used && is_special_static)) {
8129 MonoInst *iargs [2];
8131 g_assert (field->parent);
8132 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8133 if (context_used) {
8134 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8135 field, MONO_RGCTX_INFO_CLASS_FIELD);
8136 } else {
8137 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8139 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8140 } else if (context_used) {
8141 MonoInst *static_data;
8144 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8145 method->klass->name_space, method->klass->name, method->name,
8146 depth, field->offset);
8149 if (mono_class_needs_cctor_run (klass, method)) {
8150 MonoCallInst *call;
8151 MonoInst *vtable;
8153 vtable = emit_get_rgctx_klass (cfg, context_used,
8154 klass, MONO_RGCTX_INFO_VTABLE);
8156 // FIXME: This doesn't work since it tries to pass the argument
8157 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8159 * The vtable pointer is always passed in a register regardless of
8160 * the calling convention, so assign it manually, and make a call
8161 * using a signature without parameters.
8163 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8164 #ifdef MONO_ARCH_VTABLE_REG
8165 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8166 cfg->uses_vtable_reg = TRUE;
8167 #else
8168 NOT_IMPLEMENTED;
8169 #endif
8173 * The pointer we're computing here is
8175 * super_info.static_data + field->offset
8177 static_data = emit_get_rgctx_klass (cfg, context_used,
8178 klass, MONO_RGCTX_INFO_STATIC_DATA);
8180 if (field->offset == 0) {
8181 ins = static_data;
8182 } else {
8183 int addr_reg = mono_alloc_preg (cfg);
8184 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8186 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8187 MonoInst *iargs [2];
8189 g_assert (field->parent);
8190 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8191 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8192 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8193 } else {
8194 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8196 CHECK_TYPELOAD (klass);
8197 if (!addr) {
8198 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8199 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8200 if (cfg->verbose_level > 2)
8201 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8202 class_inits = g_slist_prepend (class_inits, vtable);
8203 } else {
8204 if (cfg->run_cctors) {
8205 MonoException *ex;
8206 /* This makes so that inline cannot trigger */
8207 /* .cctors: too many apps depend on them */
8208 /* running with a specific order... */
8209 if (! vtable->initialized)
8210 INLINE_FAILURE;
8211 ex = mono_runtime_class_init_full (vtable, FALSE);
8212 if (ex) {
8213 set_exception_object (cfg, ex);
8214 goto exception_exit;
8218 addr = (char*)vtable->data + field->offset;
8220 if (cfg->compile_aot)
8221 EMIT_NEW_SFLDACONST (cfg, ins, field);
8222 else
8223 EMIT_NEW_PCONST (cfg, ins, addr);
8224 } else {
8225 MonoInst *iargs [1];
8226 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8227 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8231 /* Generate IR to do the actual load/store operation */
8233 if (*ip == CEE_LDSFLDA) {
8234 ins->klass = mono_class_from_mono_type (field->type);
8235 ins->type = STACK_PTR;
8236 *sp++ = ins;
8237 } else if (*ip == CEE_STSFLD) {
8238 MonoInst *store;
8239 CHECK_STACK (1);
8240 sp--;
8242 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8243 store->flags |= ins_flag;
8244 } else {
8245 gboolean is_const = FALSE;
8246 MonoVTable *vtable = NULL;
8248 if (!context_used) {
8249 vtable = mono_class_vtable (cfg->domain, klass);
8250 CHECK_TYPELOAD (klass);
8252 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8253 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8254 gpointer addr = (char*)vtable->data + field->offset;
8255 int ro_type = field->type->type;
8256 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8257 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8259 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8260 is_const = TRUE;
8261 switch (ro_type) {
8262 case MONO_TYPE_BOOLEAN:
8263 case MONO_TYPE_U1:
8264 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8265 sp++;
8266 break;
8267 case MONO_TYPE_I1:
8268 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8269 sp++;
8270 break;
8271 case MONO_TYPE_CHAR:
8272 case MONO_TYPE_U2:
8273 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8274 sp++;
8275 break;
8276 case MONO_TYPE_I2:
8277 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8278 sp++;
8279 break;
8280 break;
8281 case MONO_TYPE_I4:
8282 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8283 sp++;
8284 break;
8285 case MONO_TYPE_U4:
8286 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8287 sp++;
8288 break;
8289 #ifndef HAVE_MOVING_COLLECTOR
8290 case MONO_TYPE_I:
8291 case MONO_TYPE_U:
8292 case MONO_TYPE_STRING:
8293 case MONO_TYPE_OBJECT:
8294 case MONO_TYPE_CLASS:
8295 case MONO_TYPE_SZARRAY:
8296 case MONO_TYPE_PTR:
8297 case MONO_TYPE_FNPTR:
8298 case MONO_TYPE_ARRAY:
8299 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8300 type_to_eval_stack_type ((cfg), field->type, *sp);
8301 sp++;
8302 break;
8303 #endif
8304 case MONO_TYPE_I8:
8305 case MONO_TYPE_U8:
8306 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8307 sp++;
8308 break;
8309 case MONO_TYPE_R4:
8310 case MONO_TYPE_R8:
8311 case MONO_TYPE_VALUETYPE:
8312 default:
8313 is_const = FALSE;
8314 break;
8318 if (!is_const) {
8319 MonoInst *load;
8321 CHECK_STACK_OVF (1);
8323 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8324 load->flags |= ins_flag;
8325 ins_flag = 0;
8326 *sp++ = load;
8329 ins_flag = 0;
8330 ip += 5;
8331 break;
8333 case CEE_STOBJ:
8334 CHECK_STACK (2);
8335 sp -= 2;
8336 CHECK_OPSIZE (5);
8337 token = read32 (ip + 1);
8338 klass = mini_get_class (method, token, generic_context);
8339 CHECK_TYPELOAD (klass);
8340 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8341 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8342 ins_flag = 0;
8343 ip += 5;
8344 inline_costs += 1;
8345 break;
8348 * Array opcodes
8350 case CEE_NEWARR: {
8351 MonoInst *len_ins;
8352 const char *data_ptr;
8353 int data_size = 0;
8354 guint32 field_token;
8356 CHECK_STACK (1);
8357 --sp;
8359 CHECK_OPSIZE (5);
8360 token = read32 (ip + 1);
8362 klass = mini_get_class (method, token, generic_context);
8363 CHECK_TYPELOAD (klass);
8365 if (cfg->generic_sharing_context)
8366 context_used = mono_class_check_context_used (klass);
8368 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8369 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8370 ins->sreg1 = sp [0]->dreg;
8371 ins->type = STACK_I4;
8372 ins->dreg = alloc_ireg (cfg);
8373 MONO_ADD_INS (cfg->cbb, ins);
8374 *sp = mono_decompose_opcode (cfg, ins);
8377 if (context_used) {
8378 MonoInst *args [3];
8379 MonoClass *array_class = mono_array_class_get (klass, 1);
8380 /* FIXME: we cannot get a managed
8381 allocator because we can't get the
8382 open generic class's vtable. We
8383 have the same problem in
8384 handle_alloc_from_inst(). This
8385 needs to be solved so that we can
8386 have managed allocs of shared
8387 generic classes. */
8389 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8390 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8392 MonoMethod *managed_alloc = NULL;
8394 /* FIXME: Decompose later to help abcrem */
8396 /* vtable */
8397 args [0] = emit_get_rgctx_klass (cfg, context_used,
8398 array_class, MONO_RGCTX_INFO_VTABLE);
8399 /* array len */
8400 args [1] = sp [0];
8402 if (managed_alloc)
8403 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8404 else
8405 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8406 } else {
8407 if (cfg->opt & MONO_OPT_SHARED) {
8408 /* Decompose now to avoid problems with references to the domainvar */
8409 MonoInst *iargs [3];
8411 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8412 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8413 iargs [2] = sp [0];
8415 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8416 } else {
8417 /* Decompose later since it is needed by abcrem */
8418 MonoClass *array_type = mono_array_class_get (klass, 1);
8419 mono_class_vtable (cfg->domain, array_type);
8420 CHECK_TYPELOAD (array_type);
8422 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8423 ins->dreg = alloc_preg (cfg);
8424 ins->sreg1 = sp [0]->dreg;
8425 ins->inst_newa_class = klass;
8426 ins->type = STACK_OBJ;
8427 ins->klass = klass;
8428 MONO_ADD_INS (cfg->cbb, ins);
8429 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8430 cfg->cbb->has_array_access = TRUE;
8432 /* Needed so mono_emit_load_get_addr () gets called */
8433 mono_get_got_var (cfg);
8437 len_ins = sp [0];
8438 ip += 5;
8439 *sp++ = ins;
8440 inline_costs += 1;
8443 * we inline/optimize the initialization sequence if possible.
8444 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8445 * for small sizes open code the memcpy
8446 * ensure the rva field is big enough
8448 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8449 MonoMethod *memcpy_method = get_memcpy_method ();
8450 MonoInst *iargs [3];
8451 int add_reg = alloc_preg (cfg);
8453 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8454 if (cfg->compile_aot) {
8455 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8456 } else {
8457 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8459 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8460 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8461 ip += 11;
8464 break;
8466 case CEE_LDLEN:
8467 CHECK_STACK (1);
8468 --sp;
8469 if (sp [0]->type != STACK_OBJ)
8470 UNVERIFIED;
8472 dreg = alloc_preg (cfg);
8473 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8474 ins->dreg = alloc_preg (cfg);
8475 ins->sreg1 = sp [0]->dreg;
8476 ins->type = STACK_I4;
8477 MONO_ADD_INS (cfg->cbb, ins);
8478 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8479 cfg->cbb->has_array_access = TRUE;
8480 ip ++;
8481 *sp++ = ins;
8482 break;
8483 case CEE_LDELEMA:
8484 CHECK_STACK (2);
8485 sp -= 2;
8486 CHECK_OPSIZE (5);
8487 if (sp [0]->type != STACK_OBJ)
8488 UNVERIFIED;
8490 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8492 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8493 CHECK_TYPELOAD (klass);
8494 /* we need to make sure that this array is exactly the type it needs
8495 * to be for correctness. the wrappers are lax with their usage
8496 * so we need to ignore them here
8498 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8499 MonoClass *array_class = mono_array_class_get (klass, 1);
8500 mini_emit_check_array_type (cfg, sp [0], array_class);
8501 CHECK_TYPELOAD (array_class);
8504 readonly = FALSE;
8505 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8506 *sp++ = ins;
8507 ip += 5;
8508 break;
8509 case CEE_LDELEM:
8510 case CEE_LDELEM_I1:
8511 case CEE_LDELEM_U1:
8512 case CEE_LDELEM_I2:
8513 case CEE_LDELEM_U2:
8514 case CEE_LDELEM_I4:
8515 case CEE_LDELEM_U4:
8516 case CEE_LDELEM_I8:
8517 case CEE_LDELEM_I:
8518 case CEE_LDELEM_R4:
8519 case CEE_LDELEM_R8:
8520 case CEE_LDELEM_REF: {
8521 MonoInst *addr;
8523 CHECK_STACK (2);
8524 sp -= 2;
8526 if (*ip == CEE_LDELEM) {
8527 CHECK_OPSIZE (5);
8528 token = read32 (ip + 1);
8529 klass = mini_get_class (method, token, generic_context);
8530 CHECK_TYPELOAD (klass);
8531 mono_class_init (klass);
8533 else
8534 klass = array_access_to_klass (*ip);
8536 if (sp [0]->type != STACK_OBJ)
8537 UNVERIFIED;
8539 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8541 if (sp [1]->opcode == OP_ICONST) {
8542 int array_reg = sp [0]->dreg;
8543 int index_reg = sp [1]->dreg;
8544 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8546 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8547 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8548 } else {
8549 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8550 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8552 *sp++ = ins;
8553 if (*ip == CEE_LDELEM)
8554 ip += 5;
8555 else
8556 ++ip;
8557 break;
8559 case CEE_STELEM_I:
8560 case CEE_STELEM_I1:
8561 case CEE_STELEM_I2:
8562 case CEE_STELEM_I4:
8563 case CEE_STELEM_I8:
8564 case CEE_STELEM_R4:
8565 case CEE_STELEM_R8:
8566 case CEE_STELEM_REF:
8567 case CEE_STELEM: {
8568 MonoInst *addr;
8570 CHECK_STACK (3);
8571 sp -= 3;
8573 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8575 if (*ip == CEE_STELEM) {
8576 CHECK_OPSIZE (5);
8577 token = read32 (ip + 1);
8578 klass = mini_get_class (method, token, generic_context);
8579 CHECK_TYPELOAD (klass);
8580 mono_class_init (klass);
8582 else
8583 klass = array_access_to_klass (*ip);
8585 if (sp [0]->type != STACK_OBJ)
8586 UNVERIFIED;
8588 /* storing a NULL doesn't need any of the complex checks in stelemref */
8589 if (generic_class_is_reference_type (cfg, klass) &&
8590 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8591 MonoMethod* helper = mono_marshal_get_stelemref ();
8592 MonoInst *iargs [3];
8594 if (sp [0]->type != STACK_OBJ)
8595 UNVERIFIED;
8596 if (sp [2]->type != STACK_OBJ)
8597 UNVERIFIED;
8599 iargs [2] = sp [2];
8600 iargs [1] = sp [1];
8601 iargs [0] = sp [0];
8603 mono_emit_method_call (cfg, helper, iargs, NULL);
8604 } else {
8605 if (sp [1]->opcode == OP_ICONST) {
8606 int array_reg = sp [0]->dreg;
8607 int index_reg = sp [1]->dreg;
8608 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8610 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8611 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8612 } else {
8613 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8614 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8618 if (*ip == CEE_STELEM)
8619 ip += 5;
8620 else
8621 ++ip;
8622 inline_costs += 1;
8623 break;
8625 case CEE_CKFINITE: {
8626 CHECK_STACK (1);
8627 --sp;
8629 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8630 ins->sreg1 = sp [0]->dreg;
8631 ins->dreg = alloc_freg (cfg);
8632 ins->type = STACK_R8;
8633 MONO_ADD_INS (bblock, ins);
8635 *sp++ = mono_decompose_opcode (cfg, ins);
8637 ++ip;
8638 break;
8640 case CEE_REFANYVAL: {
8641 MonoInst *src_var, *src;
8643 int klass_reg = alloc_preg (cfg);
8644 int dreg = alloc_preg (cfg);
8646 CHECK_STACK (1);
8647 MONO_INST_NEW (cfg, ins, *ip);
8648 --sp;
8649 CHECK_OPSIZE (5);
8650 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8651 CHECK_TYPELOAD (klass);
8652 mono_class_init (klass);
8654 if (cfg->generic_sharing_context)
8655 context_used = mono_class_check_context_used (klass);
8657 // FIXME:
8658 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8659 if (!src_var)
8660 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8661 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8664 if (context_used) {
8665 MonoInst *klass_ins;
8667 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8668 klass, MONO_RGCTX_INFO_KLASS);
8670 // FIXME:
8671 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8672 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8673 } else {
8674 mini_emit_class_check (cfg, klass_reg, klass);
8676 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8677 ins->type = STACK_MP;
8678 *sp++ = ins;
8679 ip += 5;
8680 break;
8682 case CEE_MKREFANY: {
8683 MonoInst *loc, *addr;
8685 CHECK_STACK (1);
8686 MONO_INST_NEW (cfg, ins, *ip);
8687 --sp;
8688 CHECK_OPSIZE (5);
8689 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8690 CHECK_TYPELOAD (klass);
8691 mono_class_init (klass);
8693 if (cfg->generic_sharing_context)
8694 context_used = mono_class_check_context_used (klass);
8696 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8697 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8699 if (context_used) {
8700 MonoInst *const_ins;
8701 int type_reg = alloc_preg (cfg);
8703 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8707 } else if (cfg->compile_aot) {
8708 int const_reg = alloc_preg (cfg);
8709 int type_reg = alloc_preg (cfg);
8711 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8715 } else {
8716 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8721 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8722 ins->type = STACK_VTYPE;
8723 ins->klass = mono_defaults.typed_reference_class;
8724 *sp++ = ins;
8725 ip += 5;
8726 break;
8728 case CEE_LDTOKEN: {
8729 gpointer handle;
8730 MonoClass *handle_class;
8732 CHECK_STACK_OVF (1);
8734 CHECK_OPSIZE (5);
8735 n = read32 (ip + 1);
8737 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8738 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8739 handle = mono_method_get_wrapper_data (method, n);
8740 handle_class = mono_method_get_wrapper_data (method, n + 1);
8741 if (handle_class == mono_defaults.typehandle_class)
8742 handle = &((MonoClass*)handle)->byval_arg;
8744 else {
8745 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8747 if (!handle)
8748 goto load_error;
8749 mono_class_init (handle_class);
8750 if (cfg->generic_sharing_context) {
8751 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8752 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8753 /* This case handles ldtoken
8754 of an open type, like for
8755 typeof(Gen<>). */
8756 context_used = 0;
8757 } else if (handle_class == mono_defaults.typehandle_class) {
8758 /* If we get a MONO_TYPE_CLASS
8759 then we need to provide the
8760 open type, not an
8761 instantiation of it. */
8762 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8763 context_used = 0;
8764 else
8765 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8766 } else if (handle_class == mono_defaults.fieldhandle_class)
8767 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8768 else if (handle_class == mono_defaults.methodhandle_class)
8769 context_used = mono_method_check_context_used (handle);
8770 else
8771 g_assert_not_reached ();
8774 if ((cfg->opt & MONO_OPT_SHARED) &&
8775 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8776 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8777 MonoInst *addr, *vtvar, *iargs [3];
8778 int method_context_used;
8780 if (cfg->generic_sharing_context)
8781 method_context_used = mono_method_check_context_used (method);
8782 else
8783 method_context_used = 0;
8785 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8787 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8788 EMIT_NEW_ICONST (cfg, iargs [1], n);
8789 if (method_context_used) {
8790 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8791 method, MONO_RGCTX_INFO_METHOD);
8792 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8793 } else {
8794 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8795 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8797 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8801 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8802 } else {
8803 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8804 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8805 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8806 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8807 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8808 MonoClass *tclass = mono_class_from_mono_type (handle);
8810 mono_class_init (tclass);
8811 if (context_used) {
8812 ins = emit_get_rgctx_klass (cfg, context_used,
8813 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8814 } else if (cfg->compile_aot) {
8815 if (method->wrapper_type) {
8816 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8817 /* Special case for static synchronized wrappers */
8818 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8819 } else {
8820 /* FIXME: n is not a normal token */
8821 cfg->disable_aot = TRUE;
8822 EMIT_NEW_PCONST (cfg, ins, NULL);
8824 } else {
8825 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8827 } else {
8828 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8830 ins->type = STACK_OBJ;
8831 ins->klass = cmethod->klass;
8832 ip += 5;
8833 } else {
8834 MonoInst *addr, *vtvar;
8836 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8838 if (context_used) {
8839 if (handle_class == mono_defaults.typehandle_class) {
8840 ins = emit_get_rgctx_klass (cfg, context_used,
8841 mono_class_from_mono_type (handle),
8842 MONO_RGCTX_INFO_TYPE);
8843 } else if (handle_class == mono_defaults.methodhandle_class) {
8844 ins = emit_get_rgctx_method (cfg, context_used,
8845 handle, MONO_RGCTX_INFO_METHOD);
8846 } else if (handle_class == mono_defaults.fieldhandle_class) {
8847 ins = emit_get_rgctx_field (cfg, context_used,
8848 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8849 } else {
8850 g_assert_not_reached ();
8852 } else if (cfg->compile_aot) {
8853 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8854 } else {
8855 EMIT_NEW_PCONST (cfg, ins, handle);
8857 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8859 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8863 *sp++ = ins;
8864 ip += 5;
8865 break;
8867 case CEE_THROW:
8868 CHECK_STACK (1);
8869 MONO_INST_NEW (cfg, ins, OP_THROW);
8870 --sp;
8871 ins->sreg1 = sp [0]->dreg;
8872 ip++;
8873 bblock->out_of_line = TRUE;
8874 MONO_ADD_INS (bblock, ins);
8875 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8876 MONO_ADD_INS (bblock, ins);
8877 sp = stack_start;
8879 link_bblock (cfg, bblock, end_bblock);
8880 start_new_bblock = 1;
8881 break;
8882 case CEE_ENDFINALLY:
8883 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8884 MONO_ADD_INS (bblock, ins);
8885 ip++;
8886 start_new_bblock = 1;
8889 * Control will leave the method so empty the stack, otherwise
8890 * the next basic block will start with a nonempty stack.
8892 while (sp != stack_start) {
8893 sp--;
8895 break;
8896 case CEE_LEAVE:
8897 case CEE_LEAVE_S: {
8898 GList *handlers;
8900 if (*ip == CEE_LEAVE) {
8901 CHECK_OPSIZE (5);
8902 target = ip + 5 + (gint32)read32(ip + 1);
8903 } else {
8904 CHECK_OPSIZE (2);
8905 target = ip + 2 + (signed char)(ip [1]);
8908 /* empty the stack */
8909 while (sp != stack_start) {
8910 sp--;
8914 * If this leave statement is in a catch block, check for a
8915 * pending exception, and rethrow it if necessary.
8916 * We avoid doing this in runtime invoke wrappers, since those are called
8917 * by native code which expects the wrapper to catch all exceptions.
8919 for (i = 0; i < header->num_clauses; ++i) {
8920 MonoExceptionClause *clause = &header->clauses [i];
8923 * Use <= in the final comparison to handle clauses with multiple
8924 * leave statements, like in bug #78024.
8925 * The ordering of the exception clauses guarantees that we find the
8926 * innermost clause.
8928 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8929 MonoInst *exc_ins;
8930 MonoBasicBlock *dont_throw;
8933 MonoInst *load;
8935 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8938 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8940 NEW_BBLOCK (cfg, dont_throw);
8943 * Currently, we always rethrow the abort exception, despite the
8944 * fact that this is not correct. See thread6.cs for an example.
8945 * But propagating the abort exception is more important than
8946 * getting the semantics right.
8948 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8950 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8952 MONO_START_BB (cfg, dont_throw);
8953 bblock = cfg->cbb;
8957 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8958 GList *tmp;
8959 MonoExceptionClause *clause;
8961 for (tmp = handlers; tmp; tmp = tmp->next) {
8962 clause = tmp->data;
8963 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8964 g_assert (tblock);
8965 link_bblock (cfg, bblock, tblock);
8966 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8967 ins->inst_target_bb = tblock;
8968 ins->inst_eh_block = clause;
8969 MONO_ADD_INS (bblock, ins);
8970 bblock->has_call_handler = 1;
8971 if (COMPILE_LLVM (cfg)) {
8972 MonoBasicBlock *target_bb;
8975 * Link the finally bblock with the target, since it will
8976 * conceptually branch there.
8977 * FIXME: Have to link the bblock containing the endfinally.
8979 GET_BBLOCK (cfg, target_bb, target);
8980 link_bblock (cfg, tblock, target_bb);
8983 g_list_free (handlers);
8986 MONO_INST_NEW (cfg, ins, OP_BR);
8987 MONO_ADD_INS (bblock, ins);
8988 GET_BBLOCK (cfg, tblock, target);
8989 link_bblock (cfg, bblock, tblock);
8990 ins->inst_target_bb = tblock;
8991 start_new_bblock = 1;
8993 if (*ip == CEE_LEAVE)
8994 ip += 5;
8995 else
8996 ip += 2;
8998 break;
9002 * Mono specific opcodes
9004 case MONO_CUSTOM_PREFIX: {
9006 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9008 CHECK_OPSIZE (2);
9009 switch (ip [1]) {
9010 case CEE_MONO_ICALL: {
9011 gpointer func;
9012 MonoJitICallInfo *info;
9014 token = read32 (ip + 2);
9015 func = mono_method_get_wrapper_data (method, token);
9016 info = mono_find_jit_icall_by_addr (func);
9017 g_assert (info);
9019 CHECK_STACK (info->sig->param_count);
9020 sp -= info->sig->param_count;
9022 ins = mono_emit_jit_icall (cfg, info->func, sp);
9023 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9024 *sp++ = ins;
9026 ip += 6;
9027 inline_costs += 10 * num_calls++;
9029 break;
9031 case CEE_MONO_LDPTR: {
9032 gpointer ptr;
9034 CHECK_STACK_OVF (1);
9035 CHECK_OPSIZE (6);
9036 token = read32 (ip + 2);
9038 ptr = mono_method_get_wrapper_data (method, token);
9039 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9040 MonoJitICallInfo *callinfo;
9041 const char *icall_name;
9043 icall_name = method->name + strlen ("__icall_wrapper_");
9044 g_assert (icall_name);
9045 callinfo = mono_find_jit_icall_by_name (icall_name);
9046 g_assert (callinfo);
9048 if (ptr == callinfo->func) {
9049 /* Will be transformed into an AOTCONST later */
9050 EMIT_NEW_PCONST (cfg, ins, ptr);
9051 *sp++ = ins;
9052 ip += 6;
9053 break;
9056 /* FIXME: Generalize this */
9057 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9058 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9059 *sp++ = ins;
9060 ip += 6;
9061 break;
9063 EMIT_NEW_PCONST (cfg, ins, ptr);
9064 *sp++ = ins;
9065 ip += 6;
9066 inline_costs += 10 * num_calls++;
9067 /* Can't embed random pointers into AOT code */
9068 cfg->disable_aot = 1;
9069 break;
9071 case CEE_MONO_ICALL_ADDR: {
9072 MonoMethod *cmethod;
9073 gpointer ptr;
9075 CHECK_STACK_OVF (1);
9076 CHECK_OPSIZE (6);
9077 token = read32 (ip + 2);
9079 cmethod = mono_method_get_wrapper_data (method, token);
9081 if (cfg->compile_aot) {
9082 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9083 } else {
9084 ptr = mono_lookup_internal_call (cmethod);
9085 g_assert (ptr);
9086 EMIT_NEW_PCONST (cfg, ins, ptr);
9088 *sp++ = ins;
9089 ip += 6;
9090 break;
9092 case CEE_MONO_VTADDR: {
9093 MonoInst *src_var, *src;
9095 CHECK_STACK (1);
9096 --sp;
9098 // FIXME:
9099 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9100 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9101 *sp++ = src;
9102 ip += 2;
9103 break;
9105 case CEE_MONO_NEWOBJ: {
9106 MonoInst *iargs [2];
9108 CHECK_STACK_OVF (1);
9109 CHECK_OPSIZE (6);
9110 token = read32 (ip + 2);
9111 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9112 mono_class_init (klass);
9113 NEW_DOMAINCONST (cfg, iargs [0]);
9114 MONO_ADD_INS (cfg->cbb, iargs [0]);
9115 NEW_CLASSCONST (cfg, iargs [1], klass);
9116 MONO_ADD_INS (cfg->cbb, iargs [1]);
9117 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9118 ip += 6;
9119 inline_costs += 10 * num_calls++;
9120 break;
9122 case CEE_MONO_OBJADDR:
9123 CHECK_STACK (1);
9124 --sp;
9125 MONO_INST_NEW (cfg, ins, OP_MOVE);
9126 ins->dreg = alloc_preg (cfg);
9127 ins->sreg1 = sp [0]->dreg;
9128 ins->type = STACK_MP;
9129 MONO_ADD_INS (cfg->cbb, ins);
9130 *sp++ = ins;
9131 ip += 2;
9132 break;
9133 case CEE_MONO_LDNATIVEOBJ:
9135 * Similar to LDOBJ, but instead load the unmanaged
9136 * representation of the vtype to the stack.
9138 CHECK_STACK (1);
9139 CHECK_OPSIZE (6);
9140 --sp;
9141 token = read32 (ip + 2);
9142 klass = mono_method_get_wrapper_data (method, token);
9143 g_assert (klass->valuetype);
9144 mono_class_init (klass);
9147 MonoInst *src, *dest, *temp;
9149 src = sp [0];
9150 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9151 temp->backend.is_pinvoke = 1;
9152 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9153 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9155 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9156 dest->type = STACK_VTYPE;
9157 dest->klass = klass;
9159 *sp ++ = dest;
9160 ip += 6;
9162 break;
9163 case CEE_MONO_RETOBJ: {
9165 * Same as RET, but return the native representation of a vtype
9166 * to the caller.
9168 g_assert (cfg->ret);
9169 g_assert (mono_method_signature (method)->pinvoke);
9170 CHECK_STACK (1);
9171 --sp;
9173 CHECK_OPSIZE (6);
9174 token = read32 (ip + 2);
9175 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9177 if (!cfg->vret_addr) {
9178 g_assert (cfg->ret_var_is_local);
9180 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9181 } else {
9182 EMIT_NEW_RETLOADA (cfg, ins);
9184 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9186 if (sp != stack_start)
9187 UNVERIFIED;
9189 MONO_INST_NEW (cfg, ins, OP_BR);
9190 ins->inst_target_bb = end_bblock;
9191 MONO_ADD_INS (bblock, ins);
9192 link_bblock (cfg, bblock, end_bblock);
9193 start_new_bblock = 1;
9194 ip += 6;
9195 break;
9197 case CEE_MONO_CISINST:
9198 case CEE_MONO_CCASTCLASS: {
9199 int token;
9200 CHECK_STACK (1);
9201 --sp;
9202 CHECK_OPSIZE (6);
9203 token = read32 (ip + 2);
9204 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9205 if (ip [1] == CEE_MONO_CISINST)
9206 ins = handle_cisinst (cfg, klass, sp [0]);
9207 else
9208 ins = handle_ccastclass (cfg, klass, sp [0]);
9209 bblock = cfg->cbb;
9210 *sp++ = ins;
9211 ip += 6;
9212 break;
9214 case CEE_MONO_SAVE_LMF:
9215 case CEE_MONO_RESTORE_LMF:
9216 #ifdef MONO_ARCH_HAVE_LMF_OPS
9217 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9218 MONO_ADD_INS (bblock, ins);
9219 cfg->need_lmf_area = TRUE;
9220 #endif
9221 ip += 2;
9222 break;
9223 case CEE_MONO_CLASSCONST:
9224 CHECK_STACK_OVF (1);
9225 CHECK_OPSIZE (6);
9226 token = read32 (ip + 2);
9227 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9228 *sp++ = ins;
9229 ip += 6;
9230 inline_costs += 10 * num_calls++;
9231 break;
9232 case CEE_MONO_NOT_TAKEN:
9233 bblock->out_of_line = TRUE;
9234 ip += 2;
9235 break;
9236 case CEE_MONO_TLS:
9237 CHECK_STACK_OVF (1);
9238 CHECK_OPSIZE (6);
9239 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9240 ins->dreg = alloc_preg (cfg);
9241 ins->inst_offset = (gint32)read32 (ip + 2);
9242 ins->type = STACK_PTR;
9243 MONO_ADD_INS (bblock, ins);
9244 *sp++ = ins;
9245 ip += 6;
9246 break;
9247 case CEE_MONO_DYN_CALL: {
9248 MonoCallInst *call;
9250 /* It would be easier to call a trampoline, but that would put an
9251 * extra frame on the stack, confusing exception handling. So
9252 * implement it inline using an opcode for now.
9255 if (!cfg->dyn_call_var) {
9256 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9257 /* prevent it from being register allocated */
9258 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9261 /* Has to use a call inst since it local regalloc expects it */
9262 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9263 ins = (MonoInst*)call;
9264 sp -= 2;
9265 ins->sreg1 = sp [0]->dreg;
9266 ins->sreg2 = sp [1]->dreg;
9267 MONO_ADD_INS (bblock, ins);
9269 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9270 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9271 #endif
9273 ip += 2;
9274 inline_costs += 10 * num_calls++;
9276 break;
9278 default:
9279 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9280 break;
9282 break;
9285 case CEE_PREFIX1: {
9286 CHECK_OPSIZE (2);
9287 switch (ip [1]) {
9288 case CEE_ARGLIST: {
9289 /* somewhat similar to LDTOKEN */
9290 MonoInst *addr, *vtvar;
9291 CHECK_STACK_OVF (1);
9292 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9294 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9295 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9297 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9298 ins->type = STACK_VTYPE;
9299 ins->klass = mono_defaults.argumenthandle_class;
9300 *sp++ = ins;
9301 ip += 2;
9302 break;
9304 case CEE_CEQ:
9305 case CEE_CGT:
9306 case CEE_CGT_UN:
9307 case CEE_CLT:
9308 case CEE_CLT_UN: {
9309 MonoInst *cmp;
9310 CHECK_STACK (2);
9312 * The following transforms:
9313 * CEE_CEQ into OP_CEQ
9314 * CEE_CGT into OP_CGT
9315 * CEE_CGT_UN into OP_CGT_UN
9316 * CEE_CLT into OP_CLT
9317 * CEE_CLT_UN into OP_CLT_UN
9319 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9321 MONO_INST_NEW (cfg, ins, cmp->opcode);
9322 sp -= 2;
9323 cmp->sreg1 = sp [0]->dreg;
9324 cmp->sreg2 = sp [1]->dreg;
9325 type_from_op (cmp, sp [0], sp [1]);
9326 CHECK_TYPE (cmp);
9327 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9328 cmp->opcode = OP_LCOMPARE;
9329 else if (sp [0]->type == STACK_R8)
9330 cmp->opcode = OP_FCOMPARE;
9331 else
9332 cmp->opcode = OP_ICOMPARE;
9333 MONO_ADD_INS (bblock, cmp);
9334 ins->type = STACK_I4;
9335 ins->dreg = alloc_dreg (cfg, ins->type);
9336 type_from_op (ins, sp [0], sp [1]);
9338 if (cmp->opcode == OP_FCOMPARE) {
9340 * The backends expect the fceq opcodes to do the
9341 * comparison too.
9343 cmp->opcode = OP_NOP;
9344 ins->sreg1 = cmp->sreg1;
9345 ins->sreg2 = cmp->sreg2;
9347 MONO_ADD_INS (bblock, ins);
9348 *sp++ = ins;
9349 ip += 2;
9350 break;
9352 case CEE_LDFTN: {
9353 MonoInst *argconst;
9354 MonoMethod *cil_method;
9355 gboolean needs_static_rgctx_invoke;
9357 CHECK_STACK_OVF (1);
9358 CHECK_OPSIZE (6);
9359 n = read32 (ip + 2);
9360 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9361 if (!cmethod)
9362 goto load_error;
9363 mono_class_init (cmethod->klass);
9365 mono_save_token_info (cfg, image, n, cmethod);
9367 if (cfg->generic_sharing_context)
9368 context_used = mono_method_check_context_used (cmethod);
9370 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9372 cil_method = cmethod;
9373 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9374 METHOD_ACCESS_FAILURE;
9376 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9377 if (check_linkdemand (cfg, method, cmethod))
9378 INLINE_FAILURE;
9379 CHECK_CFG_EXCEPTION;
9380 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9381 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9385 * Optimize the common case of ldftn+delegate creation
9387 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9388 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9389 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9390 MonoMethod *invoke;
9391 int invoke_context_used = 0;
9393 invoke = mono_get_delegate_invoke (ctor_method->klass);
9394 if (!invoke || !mono_method_signature (invoke))
9395 goto load_error;
9397 if (cfg->generic_sharing_context)
9398 invoke_context_used = mono_method_check_context_used (invoke);
9400 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9401 /* FIXME: SGEN support */
9402 if (invoke_context_used == 0) {
9403 MonoInst *target_ins;
9405 ip += 6;
9406 if (cfg->verbose_level > 3)
9407 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9408 target_ins = sp [-1];
9409 sp --;
9410 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9411 CHECK_CFG_EXCEPTION;
9412 ip += 5;
9413 sp ++;
9414 break;
9416 #endif
9420 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9421 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9422 *sp++ = ins;
9424 ip += 6;
9425 inline_costs += 10 * num_calls++;
9426 break;
9428 case CEE_LDVIRTFTN: {
9429 MonoInst *args [2];
9431 CHECK_STACK (1);
9432 CHECK_OPSIZE (6);
9433 n = read32 (ip + 2);
9434 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9435 if (!cmethod)
9436 goto load_error;
9437 mono_class_init (cmethod->klass);
9439 if (cfg->generic_sharing_context)
9440 context_used = mono_method_check_context_used (cmethod);
9442 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9443 if (check_linkdemand (cfg, method, cmethod))
9444 INLINE_FAILURE;
9445 CHECK_CFG_EXCEPTION;
9446 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9447 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9450 --sp;
9451 args [0] = *sp;
9453 args [1] = emit_get_rgctx_method (cfg, context_used,
9454 cmethod, MONO_RGCTX_INFO_METHOD);
9456 if (context_used)
9457 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9458 else
9459 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9461 ip += 6;
9462 inline_costs += 10 * num_calls++;
9463 break;
9465 case CEE_LDARG:
9466 CHECK_STACK_OVF (1);
9467 CHECK_OPSIZE (4);
9468 n = read16 (ip + 2);
9469 CHECK_ARG (n);
9470 EMIT_NEW_ARGLOAD (cfg, ins, n);
9471 *sp++ = ins;
9472 ip += 4;
9473 break;
9474 case CEE_LDARGA:
9475 CHECK_STACK_OVF (1);
9476 CHECK_OPSIZE (4);
9477 n = read16 (ip + 2);
9478 CHECK_ARG (n);
9479 NEW_ARGLOADA (cfg, ins, n);
9480 MONO_ADD_INS (cfg->cbb, ins);
9481 *sp++ = ins;
9482 ip += 4;
9483 break;
9484 case CEE_STARG:
9485 CHECK_STACK (1);
9486 --sp;
9487 CHECK_OPSIZE (4);
9488 n = read16 (ip + 2);
9489 CHECK_ARG (n);
9490 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9491 UNVERIFIED;
9492 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9493 ip += 4;
9494 break;
9495 case CEE_LDLOC:
9496 CHECK_STACK_OVF (1);
9497 CHECK_OPSIZE (4);
9498 n = read16 (ip + 2);
9499 CHECK_LOCAL (n);
9500 EMIT_NEW_LOCLOAD (cfg, ins, n);
9501 *sp++ = ins;
9502 ip += 4;
9503 break;
9504 case CEE_LDLOCA: {
9505 unsigned char *tmp_ip;
9506 CHECK_STACK_OVF (1);
9507 CHECK_OPSIZE (4);
9508 n = read16 (ip + 2);
9509 CHECK_LOCAL (n);
9511 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9512 ip = tmp_ip;
9513 inline_costs += 1;
9514 break;
9517 EMIT_NEW_LOCLOADA (cfg, ins, n);
9518 *sp++ = ins;
9519 ip += 4;
9520 break;
9522 case CEE_STLOC:
9523 CHECK_STACK (1);
9524 --sp;
9525 CHECK_OPSIZE (4);
9526 n = read16 (ip + 2);
9527 CHECK_LOCAL (n);
9528 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9529 UNVERIFIED;
9530 emit_stloc_ir (cfg, sp, header, n);
9531 ip += 4;
9532 inline_costs += 1;
9533 break;
9534 case CEE_LOCALLOC:
9535 CHECK_STACK (1);
9536 --sp;
9537 if (sp != stack_start)
9538 UNVERIFIED;
9539 if (cfg->method != method)
9541 * Inlining this into a loop in a parent could lead to
9542 * stack overflows which is different behavior than the
9543 * non-inlined case, thus disable inlining in this case.
9545 goto inline_failure;
9547 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9548 ins->dreg = alloc_preg (cfg);
9549 ins->sreg1 = sp [0]->dreg;
9550 ins->type = STACK_PTR;
9551 MONO_ADD_INS (cfg->cbb, ins);
9553 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9554 if (init_locals)
9555 ins->flags |= MONO_INST_INIT;
9557 *sp++ = ins;
9558 ip += 2;
9559 break;
9560 case CEE_ENDFILTER: {
9561 MonoExceptionClause *clause, *nearest;
9562 int cc, nearest_num;
9564 CHECK_STACK (1);
9565 --sp;
9566 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9567 UNVERIFIED;
9568 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9569 ins->sreg1 = (*sp)->dreg;
9570 MONO_ADD_INS (bblock, ins);
9571 start_new_bblock = 1;
9572 ip += 2;
9574 nearest = NULL;
9575 nearest_num = 0;
9576 for (cc = 0; cc < header->num_clauses; ++cc) {
9577 clause = &header->clauses [cc];
9578 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9579 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9580 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9581 nearest = clause;
9582 nearest_num = cc;
9585 g_assert (nearest);
9586 if ((ip - header->code) != nearest->handler_offset)
9587 UNVERIFIED;
9589 break;
9591 case CEE_UNALIGNED_:
9592 ins_flag |= MONO_INST_UNALIGNED;
9593 /* FIXME: record alignment? we can assume 1 for now */
9594 CHECK_OPSIZE (3);
9595 ip += 3;
9596 break;
9597 case CEE_VOLATILE_:
9598 ins_flag |= MONO_INST_VOLATILE;
9599 ip += 2;
9600 break;
9601 case CEE_TAIL_:
9602 ins_flag |= MONO_INST_TAILCALL;
9603 cfg->flags |= MONO_CFG_HAS_TAIL;
9604 /* Can't inline tail calls at this time */
9605 inline_costs += 100000;
9606 ip += 2;
9607 break;
9608 case CEE_INITOBJ:
9609 CHECK_STACK (1);
9610 --sp;
9611 CHECK_OPSIZE (6);
9612 token = read32 (ip + 2);
9613 klass = mini_get_class (method, token, generic_context);
9614 CHECK_TYPELOAD (klass);
9615 if (generic_class_is_reference_type (cfg, klass))
9616 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9617 else
9618 mini_emit_initobj (cfg, *sp, NULL, klass);
9619 ip += 6;
9620 inline_costs += 1;
9621 break;
9622 case CEE_CONSTRAINED_:
9623 CHECK_OPSIZE (6);
9624 token = read32 (ip + 2);
9625 if (method->wrapper_type != MONO_WRAPPER_NONE)
9626 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9627 else
9628 constrained_call = mono_class_get_full (image, token, generic_context);
9629 CHECK_TYPELOAD (constrained_call);
9630 ip += 6;
9631 break;
9632 case CEE_CPBLK:
9633 case CEE_INITBLK: {
9634 MonoInst *iargs [3];
9635 CHECK_STACK (3);
9636 sp -= 3;
9638 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9639 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9640 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9641 /* emit_memset only works when val == 0 */
9642 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9643 } else {
9644 iargs [0] = sp [0];
9645 iargs [1] = sp [1];
9646 iargs [2] = sp [2];
9647 if (ip [1] == CEE_CPBLK) {
9648 MonoMethod *memcpy_method = get_memcpy_method ();
9649 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9650 } else {
9651 MonoMethod *memset_method = get_memset_method ();
9652 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9655 ip += 2;
9656 inline_costs += 1;
9657 break;
9659 case CEE_NO_:
9660 CHECK_OPSIZE (3);
9661 if (ip [2] & 0x1)
9662 ins_flag |= MONO_INST_NOTYPECHECK;
9663 if (ip [2] & 0x2)
9664 ins_flag |= MONO_INST_NORANGECHECK;
9665 /* we ignore the no-nullcheck for now since we
9666 * really do it explicitly only when doing callvirt->call
9668 ip += 3;
9669 break;
9670 case CEE_RETHROW: {
9671 MonoInst *load;
9672 int handler_offset = -1;
9674 for (i = 0; i < header->num_clauses; ++i) {
9675 MonoExceptionClause *clause = &header->clauses [i];
9676 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9677 handler_offset = clause->handler_offset;
9678 break;
9682 bblock->flags |= BB_EXCEPTION_UNSAFE;
9684 g_assert (handler_offset != -1);
9686 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9687 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9688 ins->sreg1 = load->dreg;
9689 MONO_ADD_INS (bblock, ins);
9690 sp = stack_start;
9691 link_bblock (cfg, bblock, end_bblock);
9692 start_new_bblock = 1;
9693 ip += 2;
9694 break;
9696 case CEE_SIZEOF: {
9697 guint32 align;
9698 int ialign;
9700 CHECK_STACK_OVF (1);
9701 CHECK_OPSIZE (6);
9702 token = read32 (ip + 2);
9703 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9704 MonoType *type = mono_type_create_from_typespec (image, token);
9705 token = mono_type_size (type, &ialign);
9706 } else {
9707 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9708 CHECK_TYPELOAD (klass);
9709 mono_class_init (klass);
9710 token = mono_class_value_size (klass, &align);
9712 EMIT_NEW_ICONST (cfg, ins, token);
9713 *sp++= ins;
9714 ip += 6;
9715 break;
9717 case CEE_REFANYTYPE: {
9718 MonoInst *src_var, *src;
9720 CHECK_STACK (1);
9721 --sp;
9723 // FIXME:
9724 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9725 if (!src_var)
9726 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9727 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9728 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9729 *sp++ = ins;
9730 ip += 2;
9731 break;
9733 case CEE_READONLY_:
9734 readonly = TRUE;
9735 ip += 2;
9736 break;
9738 case CEE_UNUSED56:
9739 case CEE_UNUSED57:
9740 case CEE_UNUSED70:
9741 case CEE_UNUSED:
9742 case CEE_UNUSED99:
9743 UNVERIFIED;
9745 default:
9746 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9747 UNVERIFIED;
9749 break;
9751 case CEE_UNUSED58:
9752 case CEE_UNUSED1:
9753 UNVERIFIED;
9755 default:
9756 g_warning ("opcode 0x%02x not handled", *ip);
9757 UNVERIFIED;
9760 if (start_new_bblock != 1)
9761 UNVERIFIED;
9763 bblock->cil_length = ip - bblock->cil_code;
9764 bblock->next_bb = end_bblock;
9766 if (cfg->method == method && cfg->domainvar) {
9767 MonoInst *store;
9768 MonoInst *get_domain;
9770 cfg->cbb = init_localsbb;
9772 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9773 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9775 else {
9776 get_domain->dreg = alloc_preg (cfg);
9777 MONO_ADD_INS (cfg->cbb, get_domain);
9779 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9780 MONO_ADD_INS (cfg->cbb, store);
9783 #ifdef TARGET_POWERPC
9784 if (cfg->compile_aot)
9785 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9786 mono_get_got_var (cfg);
9787 #endif
9789 if (cfg->method == method && cfg->got_var)
9790 mono_emit_load_got_addr (cfg);
9792 if (init_locals) {
9793 MonoInst *store;
9795 cfg->cbb = init_localsbb;
9796 cfg->ip = NULL;
9797 for (i = 0; i < header->num_locals; ++i) {
9798 MonoType *ptype = header->locals [i];
9799 int t = ptype->type;
9800 dreg = cfg->locals [i]->dreg;
9802 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9803 t = mono_class_enum_basetype (ptype->data.klass)->type;
9804 if (ptype->byref) {
9805 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9806 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9807 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9808 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9809 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9810 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9811 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9812 ins->type = STACK_R8;
9813 ins->inst_p0 = (void*)&r8_0;
9814 ins->dreg = alloc_dreg (cfg, STACK_R8);
9815 MONO_ADD_INS (init_localsbb, ins);
9816 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9817 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9818 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9819 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9820 } else {
9821 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9826 if (cfg->init_ref_vars && cfg->method == method) {
9827 /* Emit initialization for ref vars */
9828 // FIXME: Avoid duplication initialization for IL locals.
9829 for (i = 0; i < cfg->num_varinfo; ++i) {
9830 MonoInst *ins = cfg->varinfo [i];
9832 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9833 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9837 /* Add a sequence point for method entry/exit events */
9838 if (seq_points) {
9839 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9840 MONO_ADD_INS (init_localsbb, ins);
9841 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9842 MONO_ADD_INS (cfg->bb_exit, ins);
9845 cfg->ip = NULL;
9847 if (cfg->method == method) {
9848 MonoBasicBlock *bb;
9849 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9850 bb->region = mono_find_block_region (cfg, bb->real_offset);
9851 if (cfg->spvars)
9852 mono_create_spvar_for_region (cfg, bb->region);
9853 if (cfg->verbose_level > 2)
9854 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9858 g_slist_free (class_inits);
9859 dont_inline = g_list_remove (dont_inline, method);
9861 if (inline_costs < 0) {
9862 char *mname;
9864 /* Method is too large */
9865 mname = mono_method_full_name (method, TRUE);
9866 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9867 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9868 g_free (mname);
9869 mono_metadata_free_mh (header);
9870 mono_basic_block_free (original_bb);
9871 return -1;
9874 if ((cfg->verbose_level > 2) && (cfg->method == method))
9875 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9877 mono_metadata_free_mh (header);
9878 mono_basic_block_free (original_bb);
9879 return inline_costs;
9881 exception_exit:
9882 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9883 goto cleanup;
9885 inline_failure:
9886 goto cleanup;
9888 load_error:
9889 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9890 goto cleanup;
9892 unverified:
9893 set_exception_type_from_invalid_il (cfg, method, ip);
9894 goto cleanup;
9896 cleanup:
9897 g_slist_free (class_inits);
9898 mono_basic_block_free (original_bb);
9899 dont_inline = g_list_remove (dont_inline, method);
9900 mono_metadata_free_mh (header);
9901 return -1;
9904 static int
9905 store_membase_reg_to_store_membase_imm (int opcode)
9907 switch (opcode) {
9908 case OP_STORE_MEMBASE_REG:
9909 return OP_STORE_MEMBASE_IMM;
9910 case OP_STOREI1_MEMBASE_REG:
9911 return OP_STOREI1_MEMBASE_IMM;
9912 case OP_STOREI2_MEMBASE_REG:
9913 return OP_STOREI2_MEMBASE_IMM;
9914 case OP_STOREI4_MEMBASE_REG:
9915 return OP_STOREI4_MEMBASE_IMM;
9916 case OP_STOREI8_MEMBASE_REG:
9917 return OP_STOREI8_MEMBASE_IMM;
9918 default:
9919 g_assert_not_reached ();
9922 return -1;
9925 #endif /* DISABLE_JIT */
9928 mono_op_to_op_imm (int opcode)
9930 switch (opcode) {
9931 case OP_IADD:
9932 return OP_IADD_IMM;
9933 case OP_ISUB:
9934 return OP_ISUB_IMM;
9935 case OP_IDIV:
9936 return OP_IDIV_IMM;
9937 case OP_IDIV_UN:
9938 return OP_IDIV_UN_IMM;
9939 case OP_IREM:
9940 return OP_IREM_IMM;
9941 case OP_IREM_UN:
9942 return OP_IREM_UN_IMM;
9943 case OP_IMUL:
9944 return OP_IMUL_IMM;
9945 case OP_IAND:
9946 return OP_IAND_IMM;
9947 case OP_IOR:
9948 return OP_IOR_IMM;
9949 case OP_IXOR:
9950 return OP_IXOR_IMM;
9951 case OP_ISHL:
9952 return OP_ISHL_IMM;
9953 case OP_ISHR:
9954 return OP_ISHR_IMM;
9955 case OP_ISHR_UN:
9956 return OP_ISHR_UN_IMM;
9958 case OP_LADD:
9959 return OP_LADD_IMM;
9960 case OP_LSUB:
9961 return OP_LSUB_IMM;
9962 case OP_LAND:
9963 return OP_LAND_IMM;
9964 case OP_LOR:
9965 return OP_LOR_IMM;
9966 case OP_LXOR:
9967 return OP_LXOR_IMM;
9968 case OP_LSHL:
9969 return OP_LSHL_IMM;
9970 case OP_LSHR:
9971 return OP_LSHR_IMM;
9972 case OP_LSHR_UN:
9973 return OP_LSHR_UN_IMM;
9975 case OP_COMPARE:
9976 return OP_COMPARE_IMM;
9977 case OP_ICOMPARE:
9978 return OP_ICOMPARE_IMM;
9979 case OP_LCOMPARE:
9980 return OP_LCOMPARE_IMM;
9982 case OP_STORE_MEMBASE_REG:
9983 return OP_STORE_MEMBASE_IMM;
9984 case OP_STOREI1_MEMBASE_REG:
9985 return OP_STOREI1_MEMBASE_IMM;
9986 case OP_STOREI2_MEMBASE_REG:
9987 return OP_STOREI2_MEMBASE_IMM;
9988 case OP_STOREI4_MEMBASE_REG:
9989 return OP_STOREI4_MEMBASE_IMM;
9991 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9992 case OP_X86_PUSH:
9993 return OP_X86_PUSH_IMM;
9994 case OP_X86_COMPARE_MEMBASE_REG:
9995 return OP_X86_COMPARE_MEMBASE_IMM;
9996 #endif
9997 #if defined(TARGET_AMD64)
9998 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9999 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10000 #endif
10001 case OP_VOIDCALL_REG:
10002 return OP_VOIDCALL;
10003 case OP_CALL_REG:
10004 return OP_CALL;
10005 case OP_LCALL_REG:
10006 return OP_LCALL;
10007 case OP_FCALL_REG:
10008 return OP_FCALL;
10009 case OP_LOCALLOC:
10010 return OP_LOCALLOC_IMM;
10013 return -1;
10016 static int
10017 ldind_to_load_membase (int opcode)
10019 switch (opcode) {
10020 case CEE_LDIND_I1:
10021 return OP_LOADI1_MEMBASE;
10022 case CEE_LDIND_U1:
10023 return OP_LOADU1_MEMBASE;
10024 case CEE_LDIND_I2:
10025 return OP_LOADI2_MEMBASE;
10026 case CEE_LDIND_U2:
10027 return OP_LOADU2_MEMBASE;
10028 case CEE_LDIND_I4:
10029 return OP_LOADI4_MEMBASE;
10030 case CEE_LDIND_U4:
10031 return OP_LOADU4_MEMBASE;
10032 case CEE_LDIND_I:
10033 return OP_LOAD_MEMBASE;
10034 case CEE_LDIND_REF:
10035 return OP_LOAD_MEMBASE;
10036 case CEE_LDIND_I8:
10037 return OP_LOADI8_MEMBASE;
10038 case CEE_LDIND_R4:
10039 return OP_LOADR4_MEMBASE;
10040 case CEE_LDIND_R8:
10041 return OP_LOADR8_MEMBASE;
10042 default:
10043 g_assert_not_reached ();
10046 return -1;
10049 static int
10050 stind_to_store_membase (int opcode)
10052 switch (opcode) {
10053 case CEE_STIND_I1:
10054 return OP_STOREI1_MEMBASE_REG;
10055 case CEE_STIND_I2:
10056 return OP_STOREI2_MEMBASE_REG;
10057 case CEE_STIND_I4:
10058 return OP_STOREI4_MEMBASE_REG;
10059 case CEE_STIND_I:
10060 case CEE_STIND_REF:
10061 return OP_STORE_MEMBASE_REG;
10062 case CEE_STIND_I8:
10063 return OP_STOREI8_MEMBASE_REG;
10064 case CEE_STIND_R4:
10065 return OP_STORER4_MEMBASE_REG;
10066 case CEE_STIND_R8:
10067 return OP_STORER8_MEMBASE_REG;
10068 default:
10069 g_assert_not_reached ();
10072 return -1;
int
mono_load_membase_to_load_mem (int opcode)
{
	/*
	 * Return the absolute-address (LOAD_MEM) variant of a membase load,
	 * or -1 when the current target has no such instruction.
	 */
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
	/*
	 * Return an opcode which fuses OPCODE with a following STORE_OPCODE so
	 * the result is written directly to memory, or -1 when no such fused
	 * instruction exists on the current target.
	 */
#if defined(TARGET_X86)
	/* only pointer/int32 sized stores can be fused on x86 */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	/* a move into memory is simply the store itself */
	case OP_MOVE: return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* pointer, int32 and int64 sized stores can be fused on amd64 */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG && store_opcode != OP_STOREI8_MEMBASE_REG)
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR: return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	/* a move into memory is simply the store itself */
	case OP_MOVE: return OP_NOP;
	}
#endif

	return -1;
}
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
	/*
	 * Return an opcode which fuses the setcc OPCODE with a following byte
	 * sized STORE_OPCODE, writing the flag directly to memory, or -1 when
	 * no such fused instruction exists on the current target.
	 */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Was an implicit fallthrough into OP_CNE; harmless only because
		 * both arms test the same condition. Break explicitly. */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
	/*
	 * Return an opcode which fuses a preceding LOAD_OPCODE into the first
	 * source operand of OPCODE (operating straight on memory), or -1 when
	 * no such fused instruction exists on the current target.
	 */
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* only pointer/int32 sized loads can be fused on x86 */
	if (load_opcode != OP_LOAD_MEMBASE && load_opcode != OP_LOADI4_MEMBASE && load_opcode != OP_LOADU4_MEMBASE)
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if (load_opcode == OP_LOAD_MEMBASE || load_opcode == OP_LOADI8_MEMBASE)
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if (load_opcode == OP_LOADI4_MEMBASE || load_opcode == OP_LOADU4_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if (load_opcode == OP_LOAD_MEMBASE || load_opcode == OP_LOADI8_MEMBASE)
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if (load_opcode == OP_LOADI4_MEMBASE || load_opcode == OP_LOADU4_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Map OPCODE to a variant which reads its second source operand directly
 * from memory (a reg_membase opcode), given that the operand would otherwise
 * be loaded with LOAD_OPCODE. Returns -1 when the load cannot be folded.
 *
 * Fix: the AMD64 cases previously had no 'break' between them, so an opcode
 * whose load width did not match fell through into the next case. E.g.
 * OP_ISUB with an 8 byte load fell all the way to OP_LADD and returned
 * OP_AMD64_ADD_REG_MEMBASE — an add in place of a sub. Each case now breaks
 * to the common 'return -1' (no fusion) instead.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only full-width (32 bit) loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* The fold must match the operand width of the load: I4/U4 loads fold
	 * into 32 bit (I) opcodes, I8/full-word loads into 64 bit (L) opcodes. */
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		break;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		break;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		break;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		break;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		break;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		break;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		break;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		break;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		break;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse (-1) the conversion for opcodes
 * whose immediate form would be software-emulated on this architecture,
 * since emulation would defeat the point of the imm variant.
 */
static inline int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated on this target */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
10368 #ifndef DISABLE_JIT
/**
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
{
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/* vreg_to_bb [vreg]: 0 = not seen yet, (block_num + 1) = seen in exactly
	 * that bblock, -1 = seen in more than one bblock.
	 * NOTE(review): the element size here is sizeof (gint32*) although the
	 * array holds gint32 — over-allocates on 64 bit; harmless but looks
	 * unintended. TODO confirm. */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* This pass runs on the low-level IR; CIL opcodes must be gone */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Visit dreg (0) then sreg1..sreg3 (1..3) of this instruction */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;
				}

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);
					}

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
				}
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hard registers stay local to the bblock */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						/* Allocate a MonoInst variable matching the register type */
						switch (regtype) {
						case 'i':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();
						}
					}

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;
				}
			}
		}
	}

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
				/*
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						/* NOTE(review): both arms test SRC1/sreg1; the second arm
						 * presumably was meant to test SRC2/sreg2 — dead code
						 * (#if 0), but worth fixing if ever re-enabled. */
						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;
							}
						}

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;
					}

					if (spilled)
						break;
				}
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			}
			break;
		}
	}

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				/* Shift the live entry down over the dead ones and fix indices */
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
					var1->inst_c0 = pos;
				}
#endif
			}
			pos ++;
		}
	}
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
}
/**
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 *lvregs;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	/* Map the spec regtype character to the stack type used by alloc_dreg () */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Give the low/high component vregs (dreg+1 / dreg+2) their
				 * own stack slots inside the parent variable's slot. */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			}
			default:
				break;
			}
		}
	}
#endif

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: cached lvregs do not survive bblock
		 * boundaries since other predecessors may not have them loaded. */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					/* Turn the address-of into base + offset arithmetic */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				/* Swap dreg/sreg2 so the code below treats the base reg as a source;
				 * swapped back after the sreg processing (see 'if (store)' below). */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hard register: just substitute it */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft float keeps R8 values in integer register pairs */
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* Spill the two 32 bit halves separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* Turn the move into a direct store of its source */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							/* The instruction became a store: apply the same
							 * dreg/sreg2 swap as for stores above. */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}
			}

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hard register: just substitute it */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;
						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							/* A move from a stack var: load straight into the
							 * move's destination and drop the move itself. */
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* Reload the two 32 bit halves separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				/* Deferred from the DREG section: now that the sregs are done,
				 * remember this instruction's dreg as the lvreg cache entry. */
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
11106 * FIXME:
11107 * - use 'iadd' instead of 'int_add'
11108 * - handling ovf opcodes: decompose in method_to_ir.
11109 * - unify iregs/fregs
11110 * -> partly done, the missing parts are:
11111 * - a more complete unification would involve unifying the hregs as well, so
11112 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11113 * would no longer map to the machine hregs, so the code generators would need to
11114 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11115 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11116 * fp/non-fp branches speeds it up by about 15%.
11117 * - use sext/zext opcodes instead of shifts
11118 * - add OP_ICALL
11119 * - get rid of TEMPLOADs if possible and use vregs instead
11120 * - clean up usage of OP_P/OP_ opcodes
11121 * - cleanup usage of DUMMY_USE
11122 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11123 * stack
11124 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11125 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11126 * - make sure handle_stack_args () is called before the branch is emitted
11127 * - when the new IR is done, get rid of all unused stuff
11128 * - COMPARE/BEQ as separate instructions or unify them ?
11129 * - keeping them separate allows specialized compare instructions like
11130 * compare_imm, compare_membase
11131 * - most back ends unify fp compare+branch, fp compare+ceq
11132 * - integrate mono_save_args into inline_method
11133 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11134 * - handle long shift opts on 32 bit platforms somehow: they require
11135 * 3 sregs (2 for arg1 and 1 for arg2)
11136 * - make byref a 'normal' type.
11137 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11138 * variable if needed.
11139 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11140 * like inline_method.
11141 * - remove inlining restrictions
11142 * - fix LNEG and enable cfold of INEG
11143 * - generalize x86 optimizations like ldelema as a peephole optimization
11144 * - add store_mem_imm for amd64
11145 * - optimize the loading of the interruption flag in the managed->native wrappers
11146 * - avoid special handling of OP_NOP in passes
11147 * - move code inserting instructions into one function/macro.
11148 * - try a coalescing phase after liveness analysis
11149 * - add float -> vreg conversion + local optimizations on !x86
11150 * - figure out how to handle decomposed branches during optimizations, ie.
11151 * compare+branch, op_jump_table+op_br etc.
11152 * - promote RuntimeXHandles to vregs
11153 * - vtype cleanups:
11154 * - add a NEW_VARLOADA_VREG macro
11155 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11156 * accessing vtype fields.
11157 * - get rid of I8CONST on 64 bit platforms
11158 * - dealing with the increase in code size due to branches created during opcode
11159 * decomposition:
11160 * - use extended basic blocks
11161 * - all parts of the JIT
11162 * - handle_global_vregs () && local regalloc
11163 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11164 * - sources of increase in code size:
11165 * - vtypes
11166 * - long compares
11167 * - isinst and castclass
11168 * - lvregs not allocated to global registers even if used multiple times
11169 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11170 * meaningful.
11171 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11172 * - add all micro optimizations from the old JIT
11173 * - put tree optimizations into the deadce pass
11174 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11175 * specific function.
11176 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11177 * fcompare + branchCC.
11178 * - create a helper function for allocating a stack slot, taking into account
11179 * MONO_CFG_HAS_SPILLUP.
11180 * - merge r68207.
11181 * - merge the ia64 switch changes.
11182 * - optimize mono_regstate2_alloc_int/float.
11183 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11184 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11185 * parts of the tree could be separated by other instructions, killing the tree
11186 * arguments, or stores killing loads etc. Also, should we fold loads into other
11187 * instructions if the result of the load is used multiple times ?
11188 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11189 * - LAST MERGE: 108395.
11190 * - when returning vtypes in registers, generate IR and append it to the end of the
11191 * last bb instead of doing it in the epilog.
11192 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11197 NOTES
11198 -----
11200 - When to decompose opcodes:
11201 - earlier: this makes some optimizations hard to implement, since the low level IR
 11202	   no longer contains the necessary information. But it is easier to do.
11203 - later: harder to implement, enables more optimizations.
11204 - Branches inside bblocks:
11205 - created when decomposing complex opcodes.
11206 - branches to another bblock: harmless, but not tracked by the branch
11207 optimizations, so need to branch to a label at the start of the bblock.
11208 - branches to inside the same bblock: very problematic, trips up the local
 11209	   reg allocator. Can be fixed by splitting the current bblock, but that is a
11210 complex operation, since some local vregs can become global vregs etc.
11211 - Local/global vregs:
11212 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11213 local register allocator.
11214 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11215 structure, created by mono_create_var (). Assigned to hregs or the stack by
11216 the global register allocator.
11217 - When to do optimizations like alu->alu_imm:
11218 - earlier -> saves work later on since the IR will be smaller/simpler
11219 - later -> can work on more instructions
11220 - Handling of valuetypes:
11221 - When a vtype is pushed on the stack, a new temporary is created, an
11222 instruction computing its address (LDADDR) is emitted and pushed on
11223 the stack. Need to optimize cases when the vtype is used immediately as in
11224 argument passing, stloc etc.
11225 - Instead of the to_end stuff in the old JIT, simply call the function handling
11226 the values on the stack before emitting the last instruction of the bb.
11229 #endif /* DISABLE_JIT */