/*
 * Source: mono/mini/method-to-ir.c (mono-project.git)
 * Commit: "Disable v4 monitor.enter fastpath under aot and llvm"
 * Blob: dcfbb246eb4d2fd637e0ae883a513ebf3cab038c
 */
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
56 #include "mini.h"
57 #include "trace.h"
59 #include "ir-emit.h"
61 #include "jit-icalls.h"
62 #include "jit.h"
63 #include "debugger-agent.h"
/* Inliner heuristics: relative cost of a branch and max IL size to inline. */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of METHOD into CFG->METHOD (only for non-wrapper methods). */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of IR generation if a compile-time exception was recorded on CFG. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException for calling CIL_METHOD from METHOD and abort. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException for accessing FIELD from METHOD and abort. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/*
 * When generic sharing is active, mark OPCODE as unshareable and abort so the
 * method gets compiled again without sharing.
 */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit;	\
		}			\
	} while (0)

/* Record an OutOfMemoryException on CFG and abort IR generation. */
#define OUT_OF_MEMORY_FAILURE do {	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY);	\
		goto exception_exit;	\
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
130 #ifdef MINI_OP
131 #undef MINI_OP
132 #endif
133 #ifdef MINI_OP3
134 #undef MINI_OP3
135 #endif
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #define NONE ' '
139 #define IREG 'i'
140 #define FREG 'f'
141 #define VREG 'v'
142 #define XREG 'x'
143 #if SIZEOF_REGISTER == 8
144 #define LREG IREG
145 #else
146 #define LREG 'l'
147 #endif
148 /* keep in sync with the enum in mini.h */
149 const char
150 ins_info[] = {
151 #include "mini-ops.h"
153 #undef MINI_OP
154 #undef MINI_OP3
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
165 #undef MINI_OP
166 #undef MINI_OP3
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
170 (vi)->reg = -1; \
171 (vi)->idx = (id); \
172 } while (0)
174 void
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
182 guint32
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
188 guint32
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
194 guint32
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
200 guint32
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
211 guint32
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
222 guint32
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
233 guint32
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
240 else
241 return alloc_ireg (cfg);
244 guint
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
247 if (type->byref)
248 return OP_MOVE;
250 handle_enum:
251 switch (type->type) {
252 case MONO_TYPE_I1:
253 case MONO_TYPE_U1:
254 case MONO_TYPE_BOOLEAN:
255 return OP_MOVE;
256 case MONO_TYPE_I2:
257 case MONO_TYPE_U2:
258 case MONO_TYPE_CHAR:
259 return OP_MOVE;
260 case MONO_TYPE_I4:
261 case MONO_TYPE_U4:
262 return OP_MOVE;
263 case MONO_TYPE_I:
264 case MONO_TYPE_U:
265 case MONO_TYPE_PTR:
266 case MONO_TYPE_FNPTR:
267 return OP_MOVE;
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
273 return OP_MOVE;
274 case MONO_TYPE_I8:
275 case MONO_TYPE_U8:
276 #if SIZEOF_REGISTER == 8
277 return OP_MOVE;
278 #else
279 return OP_LMOVE;
280 #endif
281 case MONO_TYPE_R4:
282 return OP_FMOVE;
283 case MONO_TYPE_R8:
284 return OP_FMOVE;
285 case MONO_TYPE_VALUETYPE:
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
288 goto handle_enum;
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
291 return OP_XMOVE;
292 return OP_VMOVE;
293 case MONO_TYPE_TYPEDBYREF:
294 return OP_VMOVE;
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
297 goto handle_enum;
298 case MONO_TYPE_VAR:
299 case MONO_TYPE_MVAR:
300 g_assert (cfg->generic_sharing_context);
301 return OP_MOVE;
302 default:
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
305 return -1;
308 void
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
311 int i;
312 MonoInst *tree;
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
317 printf (", OUT: ");
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
320 printf (" ]\n");
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
325 void
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
339 * file.
341 #ifndef DISABLE_JIT
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
349 if (!(tblock)) { \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
355 } while (0)
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
366 } while (0)
367 #endif
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
373 MonoInst *widen; \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
378 } while (0)
379 #else
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
381 #endif
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
385 sp -= 2; \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
389 CHECK_TYPE (ins); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
395 } while (0)
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
399 sp--; \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
402 CHECK_TYPE (ins); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
406 } while (0)
408 #define ADD_BINCOND(next_block) do { \
409 MonoInst *cmp; \
410 sp -= 2; \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
415 CHECK_TYPE (cmp); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
425 } else { \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
437 } while (0)
439 /* *
440 * link_bblock: Links two basic blocks
442 * links two basic blocks in the control flow graph, the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * the control flow ends to after 'from'.
446 static void
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
450 int i, found;
452 #if 0
453 if (from->cil_code) {
454 if (to->cil_code)
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
456 else
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
458 } else {
459 if (to->cil_code)
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
461 else
462 printf ("edge from entry to exit\n");
464 #endif
466 found = FALSE;
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
469 found = TRUE;
470 break;
473 if (!found) {
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
478 newa [i] = to;
479 from->out_count++;
480 from->out_bb = newa;
483 found = FALSE;
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
486 found = TRUE;
487 break;
490 if (!found) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
495 newa [i] = from;
496 to->in_count++;
497 to->in_bb = newa;
501 void
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
513 * Returns:
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
521 static int
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
526 int i;
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
539 else
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
547 return -1;
550 static GList*
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
555 int i;
556 GList *res = NULL;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
566 return res;
569 static void
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
572 MonoInst *var;
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
575 if (var)
576 return;
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
585 MonoInst *
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
591 static MonoInst*
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
594 MonoInst *var;
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
597 if (var)
598 return var;
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
606 return var;
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
613 void
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
616 MonoClass *klass;
618 inst->klass = klass = mono_class_from_mono_type (type);
619 if (type->byref) {
620 inst->type = STACK_MP;
621 return;
624 handle_enum:
625 switch (type->type) {
626 case MONO_TYPE_VOID:
627 inst->type = STACK_INV;
628 return;
629 case MONO_TYPE_I1:
630 case MONO_TYPE_U1:
631 case MONO_TYPE_BOOLEAN:
632 case MONO_TYPE_I2:
633 case MONO_TYPE_U2:
634 case MONO_TYPE_CHAR:
635 case MONO_TYPE_I4:
636 case MONO_TYPE_U4:
637 inst->type = STACK_I4;
638 return;
639 case MONO_TYPE_I:
640 case MONO_TYPE_U:
641 case MONO_TYPE_PTR:
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
644 return;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
651 return;
652 case MONO_TYPE_I8:
653 case MONO_TYPE_U8:
654 inst->type = STACK_I8;
655 return;
656 case MONO_TYPE_R4:
657 case MONO_TYPE_R8:
658 inst->type = STACK_R8;
659 return;
660 case MONO_TYPE_VALUETYPE:
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
663 goto handle_enum;
664 } else {
665 inst->klass = klass;
666 inst->type = STACK_VTYPE;
667 return;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
672 return;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
675 goto handle_enum;
676 case MONO_TYPE_VAR :
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
680 * really a reference
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
684 return;
685 default:
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
693 static const char
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
705 static const char
706 neg_table [] = {
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
711 static const char
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
723 static const char
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
726 {0},
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
737 static const char
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
754 static const guint16
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
760 static const guint16
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
766 static const guint16
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
772 static const guint16
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
778 static const guint16
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
784 static const guint16
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
790 static const guint16
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
803 static void
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
806 switch (ins->opcode) {
807 /* binops */
808 case CEE_ADD:
809 case CEE_SUB:
810 case CEE_MUL:
811 case CEE_DIV:
812 case CEE_REM:
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
816 break;
817 case CEE_DIV_UN:
818 case CEE_REM_UN:
819 case CEE_AND:
820 case CEE_OR:
821 case CEE_XOR:
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
824 break;
825 case CEE_SHL:
826 case CEE_SHR:
827 case CEE_SHR_UN:
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
830 break;
831 case OP_COMPARE:
832 case OP_LCOMPARE:
833 case OP_ICOMPARE:
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
839 else
840 ins->opcode = OP_ICOMPARE;
841 break;
842 case OP_ICOMPARE_IMM:
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
846 break;
847 case CEE_BEQ:
848 case CEE_BGE:
849 case CEE_BGT:
850 case CEE_BLE:
851 case CEE_BLT:
852 case CEE_BNE_UN:
853 case CEE_BGE_UN:
854 case CEE_BGT_UN:
855 case CEE_BLE_UN:
856 case CEE_BLT_UN:
857 ins->opcode += beqops_op_map [src1->type];
858 break;
859 case OP_CEQ:
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
862 break;
863 case OP_CGT:
864 case OP_CGT_UN:
865 case OP_CLT:
866 case OP_CLT_UN:
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
869 break;
870 /* unops */
871 case CEE_NEG:
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
874 break;
875 case CEE_NOT:
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
878 else
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
881 break;
882 case CEE_CONV_I1:
883 case CEE_CONV_I2:
884 case CEE_CONV_I4:
885 case CEE_CONV_U4:
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
888 break;
889 case CEE_CONV_R_UN:
890 ins->type = STACK_R8;
891 switch (src1->type) {
892 case STACK_I4:
893 case STACK_PTR:
894 ins->opcode = OP_ICONV_TO_R_UN;
895 break;
896 case STACK_I8:
897 ins->opcode = OP_LCONV_TO_R_UN;
898 break;
900 break;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
909 break;
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
914 break;
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
923 break;
924 case CEE_CONV_U:
925 ins->type = STACK_PTR;
926 switch (src1->type) {
927 case STACK_I4:
928 ins->opcode = OP_ICONV_TO_U;
929 break;
930 case STACK_PTR:
931 case STACK_MP:
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
934 #else
935 ins->opcode = OP_MOVE;
936 #endif
937 break;
938 case STACK_I8:
939 ins->opcode = OP_LCONV_TO_U;
940 break;
941 case STACK_R8:
942 ins->opcode = OP_FCONV_TO_U;
943 break;
945 break;
946 case CEE_CONV_I8:
947 case CEE_CONV_U8:
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
950 break;
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
955 break;
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
960 break;
961 case CEE_CONV_R4:
962 case CEE_CONV_R8:
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
965 break;
966 case OP_CKFINITE:
967 ins->type = STACK_R8;
968 break;
969 case CEE_CONV_U2:
970 case CEE_CONV_U1:
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
973 break;
974 case CEE_CONV_I:
975 case CEE_CONV_OVF_I:
976 case CEE_CONV_OVF_U:
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
979 break;
980 case CEE_ADD_OVF:
981 case CEE_ADD_OVF_UN:
982 case CEE_MUL_OVF:
983 case CEE_MUL_OVF_UN:
984 case CEE_SUB_OVF:
985 case CEE_SUB_OVF_UN:
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
990 break;
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
993 break;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1001 break;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1004 break;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1008 break;
1009 default:
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1011 break;
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
1018 static const char
1019 ldind_type [] = {
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/*
 * Dead debug helper (compiled out): returns 1 if the eval-stack values in
 * ARGS are compatible with SIG, 0 otherwise.
 */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1087 * When we need a pointer to the current domain many times in a method, we
1088 * call mono_domain_get() once and we store the result in a local variable.
1089 * This function returns the variable that represents the MonoDomain*.
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
/*
 * mono_get_got_var:
 *
 *   The got_var contains the address of the Global Offset Table when AOT
 * compiling. Returns NULL on architectures which don't need an explicit
 * GOT variable, or when not compiling AOT.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
		return NULL;
	if (!cfg->got_var) {
		/* lazily create the variable on first use */
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	}
	return cfg->got_var;
#else
	return NULL;
#endif
}
1118 static MonoInst *
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
1132 static MonoType*
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1139 case STACK_MP:
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1143 default:
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
1146 return NULL;
/*
 * type_to_stack_type:
 *
 *   Map MonoType T to the evaluation-stack type the JIT uses for it
 * (STACK_I4, STACK_PTR, STACK_OBJ, ...). Enums are first reduced to their
 * underlying type by mono_type_get_underlying_type ().
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* generic instances are either value types or references */
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
1196 static MonoClass*
1197 array_access_to_klass (int opcode)
1199 switch (opcode) {
1200 case CEE_LDELEM_U1:
1201 return mono_defaults.byte_class;
1202 case CEE_LDELEM_U2:
1203 return mono_defaults.uint16_class;
1204 case CEE_LDELEM_I:
1205 case CEE_STELEM_I:
1206 return mono_defaults.int_class;
1207 case CEE_LDELEM_I1:
1208 case CEE_STELEM_I1:
1209 return mono_defaults.sbyte_class;
1210 case CEE_LDELEM_I2:
1211 case CEE_STELEM_I2:
1212 return mono_defaults.int16_class;
1213 case CEE_LDELEM_I4:
1214 case CEE_STELEM_I4:
1215 return mono_defaults.int32_class;
1216 case CEE_LDELEM_U4:
1217 return mono_defaults.uint32_class;
1218 case CEE_LDELEM_I8:
1219 case CEE_STELEM_I8:
1220 return mono_defaults.int64_class;
1221 case CEE_LDELEM_R4:
1222 case CEE_STELEM_R4:
1223 return mono_defaults.single_class;
1224 case CEE_LDELEM_R8:
1225 case CEE_STELEM_R8:
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1230 default:
1231 g_assert_not_reached ();
1233 return NULL;
/*
 * We try to share variables when possible: the same local is reused for the
 * same (stack slot, stack type) pair across basic-block boundaries, cached
 * in cfg->intvars. Stack types outside the shared set always get a fresh
 * variable.
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* index into the (slot x stack-type) cache */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
1269 static void
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1273 * Don't use this if a generic_context is set, since that means AOT can't
1274 * look up the method using just the image+token.
1275 * table == 0 means this is a reference made from a wrapper.
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * handle_stack_args:
 *
 *   This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First exit from this bblock: pick (or allocate) the out_stack vars. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				/* a successor already has vars assigned: share them */
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors that don't have an in_stack yet,
	 * and verify stack depth agreement at join points. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store the stack values into the shared temporaries. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */

	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1411 /* Emit code which loads interface_offsets [klass->interface_id]
1412 * The array is stored in memory before vtable.
1414 static void
1415 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1417 if (cfg->compile_aot) {
1418 int ioffset_reg = alloc_preg (cfg);
1419 int iid_reg = alloc_preg (cfg);
1421 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1425 else {
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit IR computing into INTF_BIT_REG a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap located at
 * BASE_REG + OFFSET.
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* The bitmap is stored compressed: test the bit through a JIT icall. */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* interface_id is unknown until load time: compute the byte index
		 * (iid >> 3) and the bit mask (1 << (iid & 7)) in emitted code. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* byte index and bit mask are compile-time constants */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	/* bitmap lives at MonoClass::interface_bitmap */
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}

/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	/* bitmap lives at MonoVTable::interface_bitmap */
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
1494 * Emit code which checks whenever the interface id of @klass is smaller than
1495 * than the value given by max_iid_reg.
1497 static void
1498 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1499 MonoBasicBlock *false_target)
1501 if (cfg->compile_aot) {
1502 int iid_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1506 else
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1508 if (false_target)
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1510 else
1511 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
		MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* load MonoVTable::max_interface_id, then delegate to the generic check */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}

/* Same as above, but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
		MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* load MonoClass::max_interface_id, then delegate to the generic check */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/*
 * Emit an isinst-style subclass test of the MonoClass in KLASS_REG against
 * KLASS using the supertypes table: branch to TRUE_TARGET on a match, to
 * FALSE_TARGET when the idepth check rules it out. KLASS_INS, when non-NULL,
 * holds the class to compare against at runtime instead of KLASS (used with
 * generic sharing).
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* idepth exceeds the default supertable size: verify the candidate's
		 * idepth at runtime before indexing into its supertypes table */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/* Non-shared variant of the above: compare directly against KLASS. */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}

/*
 * Emit an interface check against the vtable in VTABLE_REG: branch to
 * TRUE_TARGET when the interface bit for KLASS is set (or, when TRUE_TARGET
 * is NULL, throw InvalidCastException when it is not); branch to
 * FALSE_TARGET when the max-iid check fails.
 */
static void
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}

/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
static void
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_bit_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * Emit a check that the MonoClass in KLASS_REG equals KLASS (or the runtime
 * class in KLASS_INST when non-NULL), throwing InvalidCastException on
 * mismatch.
 */
static inline void
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
{
	if (klass_inst) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		/* class pointer must be patched in at load time */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}

/* Convenience wrapper: exact class check without a runtime class instance. */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}

/*
 * Compare the MonoClass in KLASS_REG against KLASS and branch to TARGET
 * using BRANCH_OP (no exception is thrown here).
 */
static inline void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);

/*
 * Emit a castclass check of the MonoClass in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Array classes check the rank
 * and then the element class (recursively via mini_emit_castclass);
 * non-array classes use the supertypes table. KLASS_INST, when non-NULL,
 * holds the class to compare at runtime (generic sharing). OBJ_REG == -1
 * skips the "is a vector" (bounds == NULL) check.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* the candidate's rank must match exactly */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* supertable may be shorter than klass->idepth: check at runtime */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}

/* Non-shared variant: castclass against KLASS with no runtime class instance. */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
1695 static void
1696 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1698 int val_reg;
1700 g_assert (val == 0);
1702 if (align == 0)
1703 align = 4;
1705 if ((size <= 4) && (size <= align)) {
1706 switch (size) {
1707 case 1:
1708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1709 return;
1710 case 2:
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1712 return;
1713 case 4:
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1715 return;
1716 #if SIZEOF_REGISTER == 8
1717 case 8:
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1719 return;
1720 #endif
1724 val_reg = alloc_preg (cfg);
1726 if (SIZEOF_REGISTER == 8)
1727 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1728 else
1729 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1731 if (align < 4) {
1732 /* This could be optimized further if neccesary */
1733 while (size >= 1) {
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1735 offset += 1;
1736 size -= 1;
1738 return;
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
1743 if (offset % 8) {
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1745 offset += 4;
1746 size -= 4;
1748 while (size >= 8) {
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1750 offset += 8;
1751 size -= 8;
1754 #endif
1756 while (size >= 4) {
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1758 offset += 4;
1759 size -= 4;
1761 while (size >= 2) {
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1763 offset += 2;
1764 size -= 2;
1766 while (size >= 1) {
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1768 offset += 1;
1769 size -= 1;
/*
 * mini_emit_memcpy:
 *
 *   Emit IR copying SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET,
 * walking in increasing address order. ALIGN is the guaranteed alignment of
 * both regions; 0 means the default of 4.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		/* byte-by-byte copy; size reaches 0, so the loops below are no-ops */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* copy 8 bytes at a time while possible */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* remaining tail: 4-, 2-, then 1-byte chunks */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
/*
 * ret_type_to_call_opcode:
 *
 *   Map the return type of a call to the matching call opcode family
 * (OP_CALL / OP_FCALL / OP_LCALL / OP_VCALL / OP_VOIDCALL), with the _REG
 * variant selected by CALLI and the VIRT variant selected by VIRT.
 * GSCTX is used to reduce shared generic types to their basic type.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums call like their underlying type */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* reduce to the open container class and retry */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* value types must match the exact class */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
2009 * Prepare arguments for passing to a function call.
2010 * Return a non-zero value if the arguments can't be passed to the given
2011 * signature.
2012 * The type checks are not yet complete and some conversions may need
2013 * casts on 32 or 64 bit architectures.
2015 * FIXME: implement this using target_type_is_incompatible ()
2017 static int
2018 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2020 MonoType *simple_type;
2021 int i;
2023 if (sig->hasthis) {
2024 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2025 return 1;
2026 args++;
2028 for (i = 0; i < sig->param_count; ++i) {
2029 if (sig->params [i]->byref) {
2030 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2031 return 1;
2032 continue;
2034 simple_type = sig->params [i];
2035 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2036 handle_enum:
2037 switch (simple_type->type) {
2038 case MONO_TYPE_VOID:
2039 return 1;
2040 continue;
2041 case MONO_TYPE_I1:
2042 case MONO_TYPE_U1:
2043 case MONO_TYPE_BOOLEAN:
2044 case MONO_TYPE_I2:
2045 case MONO_TYPE_U2:
2046 case MONO_TYPE_CHAR:
2047 case MONO_TYPE_I4:
2048 case MONO_TYPE_U4:
2049 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2050 return 1;
2051 continue;
2052 case MONO_TYPE_I:
2053 case MONO_TYPE_U:
2054 case MONO_TYPE_PTR:
2055 case MONO_TYPE_FNPTR:
2056 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2057 return 1;
2058 continue;
2059 case MONO_TYPE_CLASS:
2060 case MONO_TYPE_STRING:
2061 case MONO_TYPE_OBJECT:
2062 case MONO_TYPE_SZARRAY:
2063 case MONO_TYPE_ARRAY:
2064 if (args [i]->type != STACK_OBJ)
2065 return 1;
2066 continue;
2067 case MONO_TYPE_I8:
2068 case MONO_TYPE_U8:
2069 if (args [i]->type != STACK_I8)
2070 return 1;
2071 continue;
2072 case MONO_TYPE_R4:
2073 case MONO_TYPE_R8:
2074 if (args [i]->type != STACK_R8)
2075 return 1;
2076 continue;
2077 case MONO_TYPE_VALUETYPE:
2078 if (simple_type->data.klass->enumtype) {
2079 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2080 goto handle_enum;
2082 if (args [i]->type != STACK_VTYPE)
2083 return 1;
2084 continue;
2085 case MONO_TYPE_TYPEDBYREF:
2086 if (args [i]->type != STACK_VTYPE)
2087 return 1;
2088 continue;
2089 case MONO_TYPE_GENERICINST:
2090 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2091 goto handle_enum;
2093 default:
2094 g_error ("unknown type 0x%02x in check_call_signature",
2095 simple_type->type);
2098 return 0;
2101 static int
2102 callvirt_to_call (int opcode)
2104 switch (opcode) {
2105 case OP_CALLVIRT:
2106 return OP_CALL;
2107 case OP_VOIDCALLVIRT:
2108 return OP_VOIDCALL;
2109 case OP_FCALLVIRT:
2110 return OP_FCALL;
2111 case OP_VCALLVIRT:
2112 return OP_VCALL;
2113 case OP_LCALLVIRT:
2114 return OP_LCALL;
2115 default:
2116 g_assert_not_reached ();
2119 return -1;
2122 static int
2123 callvirt_to_call_membase (int opcode)
2125 switch (opcode) {
2126 case OP_CALLVIRT:
2127 return OP_CALL_MEMBASE;
2128 case OP_VOIDCALLVIRT:
2129 return OP_VOIDCALL_MEMBASE;
2130 case OP_FCALLVIRT:
2131 return OP_FCALL_MEMBASE;
2132 case OP_LCALLVIRT:
2133 return OP_LCALL_MEMBASE;
2134 case OP_VCALLVIRT:
2135 return OP_VCALL_MEMBASE;
2136 default:
2137 g_assert_not_reached ();
2140 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Pass the IMT/method argument to CALL. If IMT_ARG is non-NULL its value is
 * used, otherwise a constant for call->method is materialized (an AOT const
 * under AOT compilation). The LLVM path duplicates the non-LLVM logic because
 * it must record the register in call->imt_arg_reg as well.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#ifdef ENABLE_LLVM
		call->imt_arg_reg = method_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* Need this to keep the IMT arg alive */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* Architectures without a dedicated IMT register implement this themselves */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2198 static MonoJumpInfo *
2199 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2201 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2203 ji->ip.i = ip;
2204 ji->type = type;
2205 ji->data.target = target;
2207 return ji;
/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI/VIRTUAL/TAIL/RGCTX select the call flavour. Allocates the return
 * vreg (or the vtype return temp + OP_OUTARG_VTRETADDR), performs the
 * soft-float r8->r4 argument conversion where needed, and lets the backend
 * (or LLVM) lower the argument passing. The instruction is NOT added to a
 * basic block here; callers do that.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			/* Tail calls with a vtype return reuse the caller's vret address */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Reserve enough outgoing-argument stack space for this call */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
/*
 * set_rgctx_arg:
 *
 *   Arrange for the rgctx value in RGCTX_REG to be passed to CALL in the
 * architecture's dedicated rgctx register. Only valid on architectures
 * that define MONO_ARCH_RGCTX_REG.
 */
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
	/* LLVM needs to know the vreg explicitly as well */
	call->rgctx_arg_reg = rgctx_reg;
#endif
#else
	NOT_IMPLEMENTED;
#endif
}
2323 inline static MonoInst*
2324 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2326 MonoCallInst *call;
2327 int rgctx_reg = -1;
2329 if (rgctx_arg) {
2330 rgctx_reg = mono_alloc_preg (cfg);
2331 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2334 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2336 call->inst.sreg1 = addr->dreg;
2338 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2340 if (rgctx_arg)
2341 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2343 return (MonoInst*)call;
2346 static MonoInst*
2347 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2348 static MonoInst*
2349 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. THIS being
 * non-NULL makes the call virtual; IMT_ARG and RGCTX_ARG are the optional
 * interface-dispatch and generic-sharing arguments. Picks between direct
 * dispatch, sealed-method devirtualization, the delegate Invoke fastpath,
 * vtable/IMT dispatch, and remoting wrappers.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;
	int rgctx_reg = 0;

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	context_used = mono_method_check_context_used (method);

	/* Non-virtual calls on MarshalByRef (or object) instances may hit a proxy */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);

	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		/* gshared code resolves the remoting-check wrapper through the rgctx */
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr, NULL);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		/* Delegate Invoke fastpath: call through delegate->invoke_impl */
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MonoInst *dummy_use;

			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			/* We must emit a dummy use here because the delegate trampoline will
			replace the 'this' argument with the delegate target making this activation
			no longer a root for the delegate.
			This is an issue for delegates that target collectible code such as dynamic
			methods of GC'able assemblies.

			For a test case look into #667921.

			FIXME: a dummy use is not the best way to do it as the local register allocator
			will put it on a caller save register and spil it around the call.
			Ideally, we would either put it on a callee save register or only do the store part.
			 */
			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else {
			/* Real virtual dispatch through the vtable (or IMT for interfaces) */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

			vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
			if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
				slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
				if (mono_use_imt) {
					/* IMT slots live at negative offsets from the vtable */
					guint32 imt_slot = mono_method_get_imt_slot (method);
					emit_imt_argument (cfg, call, imt_arg);
					slot_reg = vtable_reg;
					call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
				}
#endif
				if (slot_reg == -1) {
					slot_reg = alloc_preg (cfg);
					mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
					call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
				}
			} else {
				slot_reg = vtable_reg;
				call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
				if (imt_arg) {
					/* Only generic virtual methods carry an imt_arg here */
					g_assert (mono_method_signature (method)->generic_param_count);
					emit_imt_argument (cfg, call, imt_arg);
				}
#endif
			}

			call->inst.sreg1 = slot_reg;
			call->virtual = TRUE;
		}
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2511 MonoInst*
2512 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2514 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2517 MonoInst*
2518 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2519 MonoInst **args)
2521 MonoCallInst *call;
2523 g_assert (sig);
2525 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2526 call->fptr = func;
2528 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2530 return (MonoInst*)call;
2533 MonoInst*
2534 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2536 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2538 g_assert (info);
2540 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2544 * mono_emit_abs_call:
2546 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2548 inline static MonoInst*
2549 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2550 MonoMethodSignature *sig, MonoInst **args)
2552 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2553 MonoInst *ins;
2556 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2557 * handle it.
2559 if (cfg->abs_patches == NULL)
2560 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2561 g_hash_table_insert (cfg->abs_patches, ji, ji);
2562 ins = mono_emit_native_call (cfg, ji, sig, args);
2563 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2564 return ins;
2567 static MonoInst*
2568 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2570 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2571 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2572 int widen_op = -1;
2575 * Native code might return non register sized integers
2576 * without initializing the upper bits.
2578 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2579 case OP_LOADI1_MEMBASE:
2580 widen_op = OP_ICONV_TO_I1;
2581 break;
2582 case OP_LOADU1_MEMBASE:
2583 widen_op = OP_ICONV_TO_U1;
2584 break;
2585 case OP_LOADI2_MEMBASE:
2586 widen_op = OP_ICONV_TO_I2;
2587 break;
2588 case OP_LOADU2_MEMBASE:
2589 widen_op = OP_ICONV_TO_U2;
2590 break;
2591 default:
2592 break;
2595 if (widen_op != -1) {
2596 int dreg = alloc_preg (cfg);
2597 MonoInst *widen;
2599 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2600 widen->type = ins->type;
2601 ins = widen;
2606 return ins;
2609 static MonoMethod*
2610 get_memcpy_method (void)
2612 static MonoMethod *memcpy_method = NULL;
2613 if (!memcpy_method) {
2614 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2615 if (!memcpy_method)
2616 g_error ("Old corlib found. Install a new one");
2618 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set one bit in *WB_BITMAP for every pointer-sized word of KLASS
 * (at byte OFFSET from the start) that holds a managed reference,
 * recursing into embedded valuetypes with references. Callers guarantee
 * the total size fits in 32 words, so the shift below cannot overflow.
 */
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		/* valuetype field offsets include the (absent) object header; strip it */
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
			/* references must be pointer-aligned */
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a reference store through PTR. The stored
 * value is VALUE, or the vreg VALUE_REG when VALUE is NULL. Picks, in order
 * of preference: the arch-specific card-table barrier opcode, an inline
 * card-table mark, or a call to the generic write-barrier method. No-op
 * unless the GC requested barriers (cfg->gen_write_barriers).
 */
static void
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
{
	int card_table_shift_bits;
	gpointer card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;
	gboolean has_card_table_wb = FALSE;

	if (!cfg->gen_write_barriers)
		return;

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
	has_card_table_wb = TRUE;
#endif

	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
		/* Single arch-lowered barrier instruction */
		MonoInst *wbarrier;

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		if (value)
			wbarrier->sreg2 = value->dreg;
		else
			wbarrier->sreg2 = value_reg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		/* Inline card marking: card_table [ptr >> shift] = 1 */
		int offset_reg = alloc_preg (cfg);
		int card_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
		} else {
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = card_table;
			ins->dreg = card_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		/* Fallback: call the GC-provided managed write barrier */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	/* Keep the stored value alive until after the barrier */
	if (value) {
		EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
	} else {
		MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
		dummy_use->sreg1 = value_reg;
		MONO_ADD_INS (cfg->cbb, dummy_use);
	}
}
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline valuetype copy of KLASS (SIZE bytes, alignment
 * ALIGN) from iargs[1] to iargs[0] that issues write barriers for the
 * reference-holding words. Returns FALSE when the shape is unsuitable
 * (under-aligned or larger than 32 words) so the caller can fall back;
 * copies larger than 5 words go through the bitmap icall instead of
 * being unrolled.
 */
static gboolean
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)
		return FALSE;

	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);

		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
		return TRUE;
	}

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	/* Copy word-by-word, barrier-ing each word flagged in the bitmap */
	while (size >= SIZEOF_VOID_P) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);

		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * NATIVE selects the native (marshalled) layout, which must contain no
 * references. When barriers are needed, prefers the inline wb-aware copy,
 * then the mono_value_copy icall; otherwise a plain memcpy (inline for
 * small sizes).
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [4];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

	/* if native is true there should be no references in the struct */
	if (cfg->gen_write_barriers && klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
				return;
			} else if (context_used) {
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					/* the icall needs the class's GC descriptor precomputed */
					mono_class_compute_gc_descriptor (klass);
				}
			}

			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
			return;
		}
	}

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
2865 static MonoMethod*
2866 get_memset_method (void)
2868 static MonoMethod *memset_method = NULL;
2869 if (!memset_method) {
2870 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2871 if (!memset_method)
2872 g_error ("Old corlib found. Install a new one");
2874 return memset_method;
2877 void
2878 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2880 MonoInst *iargs [3];
2881 int n;
2882 guint32 align;
2883 MonoMethod *memset_method;
2885 /* FIXME: Optimize this for the case when dest is an LDADDR */
2887 mono_class_init (klass);
2888 n = mono_class_value_size (klass, &align);
2890 if (n <= sizeof (gpointer) * 5) {
2891 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2893 else {
2894 memset_method = get_memset_method ();
2895 iargs [0] = dest;
2896 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2897 EMIT_NEW_ICONST (cfg, iargs [2], n);
2898 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD: the MRGCTX for
 * generic methods, the vtable for static/valuetype methods, or the vtable
 * read out of 'this' otherwise. Only valid when compiling with generic
 * sharing enabled.
 */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this = NULL;

	g_assert (cfg->generic_sharing_context);

	/* 'this' is only needed (and available) for non-static reference-type methods
	   that don't carry their own method context */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_ARGLOAD (cfg, this, 0);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		/* the MRGCTX was stored into a local in the prolog */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* the local actually holds an MRGCTX; load the vtable out of it */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg;

		/* read the vtable out of the 'this' object */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
2952 static MonoJumpInfoRgctxEntry *
2953 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2955 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2956 res->method = method;
2957 res->in_mrgctx = in_mrgctx;
2958 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2959 res->data->type = patch_type;
2960 res->data->data.target = patch_data;
2961 res->info_type = info_type;
2963 return res;
2966 static inline MonoInst*
2967 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2969 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2972 static MonoInst*
2973 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2974 MonoClass *klass, int rgctx_type)
2976 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2977 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2979 return emit_rgctx_fetch (cfg, rgctx, entry);
2983 * emit_get_rgctx_method:
2985 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2986 * normal constants, else emit a load from the rgctx.
2988 static MonoInst*
2989 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2990 MonoMethod *cmethod, int rgctx_type)
2992 if (!context_used) {
2993 MonoInst *ins;
2995 switch (rgctx_type) {
2996 case MONO_RGCTX_INFO_METHOD:
2997 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2998 return ins;
2999 case MONO_RGCTX_INFO_METHOD_RGCTX:
3000 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3001 return ins;
3002 default:
3003 g_assert_not_reached ();
3005 } else {
3006 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3007 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3009 return emit_rgctx_fetch (cfg, rgctx, entry);
3013 static MonoInst*
3014 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3015 MonoClassField *field, int rgctx_type)
3017 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3018 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3020 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing
 * the vtable (resolved through the rgctx when compiling shared code) in
 * the architecture's vtable register.
 *
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		/* a NULL vtable signals a load failure; caller re-checks klass */
		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (COMPILE_LLVM (cfg))
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
	else
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source class (read
 * from OBJ_REG's vtable) and the target KLASS into the per-thread
 * MonoJitTlsData (class_cast_from / class_cast_to), so a failing cast can
 * produce a detailed exception message. No-op unless better_cast_details is set.
 */
3059 static void
3060 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3062 if (mini_get_debug_options ()->better_cast_details) {
3063 int to_klass_reg = alloc_preg (cfg);
3064 int vtable_reg = alloc_preg (cfg);
3065 int klass_reg = alloc_preg (cfg);
3066 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3068 if (!tls_get) {
/* Requires a fast TLS intrinsic; bail out of the whole JIT if the arch lacks one. */
3069 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3070 exit (1);
3073 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the runtime class the object actually has. */
3074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3077 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3078 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Companion to save_cast_details (): emitted on the success path of a cast to
 * clear the recorded details so a later unrelated failure doesn't report stale
 * classes. No-op unless --debug=casts is enabled.
 */
3083 static void
3084 reset_cast_details (MonoCompile *cfg)
3086 /* Reset the variables holding the cast details */
3087 if (mini_get_debug_options ()->better_cast_details) {
3088 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3090 MONO_ADD_INS (cfg->cbb, tls_get);
3091 /* It is enough to reset the from field */
3092 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3097 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR verifying that OBJ is exactly an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stelem-style covariance
 * checks). The comparison strategy depends on the compilation mode:
 * MONO_OPT_SHARED compares klass pointers, shared generic code compares the
 * vtable fetched from the rgctx, and normal/AOT code compares vtable pointers.
 * On return the caller must check @array_class for load errors (see comment
 * above): vtable creation failure returns silently.
 */
3099 static void
3100 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3102 int vtable_reg = alloc_preg (cfg);
3103 int context_used = 0;
3105 if (cfg->generic_sharing_context)
3106 context_used = mono_class_check_context_used (array_class);
/* Record cast details first so a mismatch can be reported with both classes. */
3108 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also performs the implicit null check on obj. */
3110 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3112 if (cfg->opt & MONO_OPT_SHARED) {
/* Domain-shared code can't embed vtable pointers; compare MonoClass* instead. */
3113 int class_reg = alloc_preg (cfg);
3114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3115 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a relocatable CLASSCONST. */
3116 int klass_reg = alloc_preg (cfg);
3117 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3118 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3119 } else {
3120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3122 } else if (context_used) {
/* Shared generic code: the expected vtable comes from the rgctx. */
3123 MonoInst *vtable_ins;
3125 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3126 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3127 } else {
3128 if (cfg->compile_aot) {
3129 int vt_reg;
3130 MonoVTable *vtable;
3132 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* Load error; caller must check array_class. */
3133 return;
3134 vt_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3136 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3137 } else {
3138 MonoVTable *vtable;
3139 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3140 return;
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3145 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3147 reset_cast_details (cfg);
3151 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3152 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by emitting a call to the managed
 * Nullable<T>.Unbox helper. If context_used is non zero, shared generic code
 * is generated: the helper's address is fetched from the rgctx and called
 * indirectly. Returns the call instruction producing the unboxed value.
 */
3154 static MonoInst*
3155 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3157 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3159 if (context_used) {
3160 MonoInst *rgctx, *addr;
3162 /* FIXME: What if the class is shared? We might not
3163 have to get the address of the method from the
3164 RGCTX. */
3165 addr = emit_get_rgctx_method (cfg, context_used, method,
3166 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3168 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
/* Indirect call through the rgctx-resolved code address. */
3170 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3171 } else {
3172 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the IR for the 'unbox' opcode: type-check the boxed object on the
 * stack (sp [0]) against KLASS and compute the address of its payload
 * (obj + sizeof (MonoObject)). The checks throw InvalidCastException on
 * mismatch. Returns the address instruction (STACK_MP) pointing at the
 * unboxed value. Only non-array classes are supported (rank 0 is asserted).
 */
3176 static MonoInst*
3177 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3179 MonoInst *add;
3180 int obj_reg;
3181 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3182 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3183 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3184 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3186 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check on the boxed object. */
3187 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3188 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3190 /* FIXME: generics */
3191 g_assert (klass->rank == 0);
3193 // Check rank == 0
3194 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3195 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* The element_class comparison handles the enum/underlying-type equivalence
 * that unbox permits (e.g. unboxing an enum as its underlying int). */
3197 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3200 if (context_used) {
3201 MonoInst *element_class;
3203 /* This assertion is from the unboxcast insn */
3204 g_assert (klass->rank == 0);
/* Shared code: the expected element class comes from the rgctx. */
3206 element_class = emit_get_rgctx_klass (cfg, context_used,
3207 klass->element_class, MONO_RGCTX_INFO_KLASS);
3209 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3210 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3211 } else {
3212 save_cast_details (cfg, klass->element_class, obj_reg);
3213 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3214 reset_cast_details (cfg);
/* Address of the value payload: skip the MonoObject header. */
3217 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3218 MONO_ADD_INS (cfg->cbb, add);
3219 add->type = STACK_MP;
3220 add->klass = klass;
3222 return add;
3226 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of KLASS (FOR_BOX distinguishes box
 * allocations for the GC's managed allocators). Picks between several
 * strategies: rgctx-driven allocation for shared generic code, domain-shared
 * mono_object_new, a compact mscorlib helper for out-of-line AOT throw paths,
 * a GC managed allocator when available, or the generic allocation function.
 * Returns NULL and sets the cfg exception on error (see comment above).
 */
3228 static MonoInst*
3229 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3231 MonoInst *iargs [2];
3232 void *alloc_ftn;
3234 if (context_used) {
3235 MonoInst *data;
3236 int rgctx_info;
3237 MonoInst *iargs [2];
3240 FIXME: we cannot get managed_alloc here because we can't get
3241 the class's vtable (because it's not a closed class)
3243 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3244 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared code: fetch klass or vtable from the rgctx depending on opt mode. */
3247 if (cfg->opt & MONO_OPT_SHARED)
3248 rgctx_info = MONO_RGCTX_INFO_KLASS;
3249 else
3250 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3251 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3253 if (cfg->opt & MONO_OPT_SHARED) {
/* mono_object_new (domain, klass) — domain passed explicitly. */
3254 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3255 iargs [1] = data;
3256 alloc_ftn = mono_object_new;
3257 } else {
/* mono_object_new_specific (vtable) — vtable already carries the domain. */
3258 iargs [0] = data;
3259 alloc_ftn = mono_object_new_specific;
3262 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3265 if (cfg->opt & MONO_OPT_SHARED) {
3266 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3267 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3269 alloc_ftn = mono_object_new;
3270 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3271 /* This happens often in argument checking code, eg. throw new FooException... */
3272 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3273 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3274 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3275 } else {
3276 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3277 MonoMethod *managed_alloc = NULL;
3278 gboolean pass_lw;
3280 if (!vtable) {
/* Class load failure: report TYPE_LOAD through the cfg and abort. */
3281 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3282 cfg->exception_ptr = klass;
3283 return NULL;
3286 #ifndef MONO_CROSS_COMPILE
3287 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3288 #endif
3290 if (managed_alloc) {
/* Fast path: call the GC's inlined managed allocator method. */
3291 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3292 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3294 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3295 if (pass_lw) {
/* Some allocation fns take the instance size in pointer-sized words first. */
3296 guint32 lw = vtable->klass->instance_size;
3297 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3298 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3299 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3301 else {
3302 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3306 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3310 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR for the 'box' opcode: allocate a boxed object of KLASS and store
 * VAL into its payload. Nullable<T> is special-cased through the managed
 * Nullable<T>.Box helper (rgctx-resolved under generic sharing).
 * Returns NULL and sets the cfg exception on error (see comment above).
 */
3312 static MonoInst*
3313 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3315 MonoInst *alloc, *ins;
3317 if (mono_class_is_nullable (klass)) {
3318 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3320 if (context_used) {
3321 /* FIXME: What if the class is shared? We might not
3322 have to get the method address from the RGCTX. */
3323 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3324 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3325 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3327 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3328 } else {
3329 return mono_emit_method_call (cfg, method, &val, NULL);
3333 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3334 if (!alloc)
/* handle_alloc already set the cfg exception. */
3335 return NULL;
/* Copy the value into the box's payload, just past the MonoObject header. */
3337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3339 return alloc;
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Returns TRUE if KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one covariant/contravariant type parameter that is
 * instantiated with a reference type. Such casts need the variance-aware
 * isinst/castclass-with-cache helpers rather than simple pointer checks.
 */
3343 static gboolean
3344 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3346 int i;
3347 MonoGenericContainer *container;
3348 MonoGenericInst *ginst;
3350 if (klass->generic_class) {
/* Closed generic instance: inspect its actual type arguments. */
3351 container = klass->generic_class->container_class->generic_container;
3352 ginst = klass->generic_class->context.class_inst;
3353 } else if (klass->generic_container && context_used) {
/* Open generic in shared code: inspect the container's own parameters. */
3354 container = klass->generic_container;
3355 ginst = container->context.class_inst;
3356 } else {
3357 return FALSE;
3360 for (i = 0; i < container->type_argc; ++i) {
3361 MonoType *type;
/* Only variant (in/out) parameters can make the cast variance-sensitive. */
3362 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3363 continue;
3364 type = ginst->type_argv [i];
3365 if (mini_type_is_reference (cfg, type))
3366 return TRUE;
3368 return FALSE;
/*
 * is_complex_isinst:
 *
 *   TRUE when a cast against KLASS cannot be lowered to the simple inline
 * vtable/klass comparisons and must go through an icall (interfaces, arrays,
 * nullables, MBR proxies, sealed classes, type variables). Note the leading
 * 'TRUE ||' currently forces the complex path unconditionally — the inline
 * fast path is disabled (see the FIXME below).
 */
3371 // FIXME: This doesn't work yet (class libs tests fail?)
3372 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3375 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode: verify SRC is null or an instance of
 * KLASS, throwing InvalidCastException otherwise, and return SRC unchanged.
 * Shared generic code with variant reference arguments goes through the
 * castclass-with-cache managed helper; other shared casts use an icall
 * (is_complex_isinst currently always forces this). Non-shared code emits
 * inline interface-bitmap or klass-hierarchy checks.
 * Returns NULL and sets the cfg exception on error (see comment above).
 */
3377 static MonoInst*
3378 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3380 MonoBasicBlock *is_null_bb;
3381 int obj_reg = src->dreg;
3382 int vtable_reg = alloc_preg (cfg);
3383 MonoInst *klass_inst = NULL;
3385 if (context_used) {
3386 MonoInst *args [3];
3388 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
/* Variance-sensitive cast: use the managed helper with a per-site cache. */
3389 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3390 MonoInst *cache_ins;
3392 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3394 /* obj */
3395 args [0] = src;
3397 /* klass - it's the second element of the cache entry*/
3398 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3400 /* cache */
3401 args [2] = cache_ins;
3403 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3406 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3408 if (is_complex_isinst (klass)) {
3409 /* Complex case, handle by an icall */
3411 /* obj */
3412 args [0] = src;
3414 /* klass */
3415 args [1] = klass_inst;
3417 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3418 } else {
3419 /* Simple case, handled by the code below */
/* castclass succeeds trivially on null references. */
3423 NEW_BBLOCK (cfg, is_null_bb);
3425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3428 save_cast_details (cfg, klass, obj_reg);
3430 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3432 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3433 } else {
3434 int klass_reg = alloc_preg (cfg);
3436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* A sealed class can be matched with one equality check: no subclass exists. */
3438 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3439 /* the remoting code is broken, access the class for now */
3440 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3441 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3442 if (!vt) {
3443 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3444 cfg->exception_ptr = klass;
3445 return NULL;
3447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3448 } else {
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3452 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3453 } else {
/* General case: walk the class hierarchy (handled by the helper). */
3454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3455 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3459 MONO_START_BB (cfg, is_null_bb);
3461 reset_cast_details (cfg);
3463 return src;
3467 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' opcode: returns SRC if it is null or an instance
 * of KLASS, otherwise NULL — no exception is thrown. Like handle_castclass,
 * shared generic code with variant reference arguments uses the
 * isinst-with-cache managed helper and other shared casts use an icall
 * (is_complex_isinst currently always forces this); otherwise inline checks
 * are emitted. Control-flow layout: res_reg is preassigned to obj_reg, the
 * false path overwrites it with 0, and is_null_bb is the shared success path.
 * Returns NULL and sets the cfg exception on error (see comment above).
 */
3469 static MonoInst*
3470 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3472 MonoInst *ins;
3473 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3474 int obj_reg = src->dreg;
3475 int vtable_reg = alloc_preg (cfg);
3476 int res_reg = alloc_ireg_ref (cfg);
3477 MonoInst *klass_inst = NULL;
3479 if (context_used) {
3480 MonoInst *args [3];
3482 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
/* Variance-sensitive test: managed helper with a per-site cache. */
3483 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3484 MonoInst *cache_ins;
3486 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3488 /* obj */
3489 args [0] = src;
3491 /* klass - it's the second element of the cache entry*/
3492 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3494 /* cache */
3495 args [2] = cache_ins;
3497 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3500 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3502 if (is_complex_isinst (klass)) {
3503 /* Complex case, handle by an icall */
3505 /* obj */
3506 args [0] = src;
3508 /* klass */
3509 args [1] = klass_inst;
3511 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3512 } else {
3513 /* Simple case, the code below can handle it */
3517 NEW_BBLOCK (cfg, is_null_bb);
3518 NEW_BBLOCK (cfg, false_bb);
3519 NEW_BBLOCK (cfg, end_bb);
3521 /* Do the assignment at the beginning, so the other assignment can be if converted */
3522 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3523 ins->type = STACK_OBJ;
3524 ins->klass = klass;
/* Null input: result stays equal to the (null) input. */
3526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3529 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3531 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3532 g_assert (!context_used);
3533 /* the is_null_bb target simply copies the input register to the output */
3534 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3535 } else {
3536 int klass_reg = alloc_preg (cfg);
3538 if (klass->rank) {
/* Array target: compare rank first, then the element (cast) class. */
3539 int rank_reg = alloc_preg (cfg);
3540 int eclass_reg = alloc_preg (cfg);
3542 g_assert (!context_used);
3543 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* The next branches special-case object[]/Enum[]/enum element types, which
 * have extra assignment-compatibility rules for array covariance. */
3548 if (klass->cast_class == mono_defaults.object_class) {
3549 int parent_reg = alloc_preg (cfg);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3551 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3552 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3554 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3555 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3556 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3558 } else if (klass->cast_class == mono_defaults.enum_class) {
3559 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3561 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3562 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3563 } else {
3564 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3565 /* Check that the object is a vector too */
3566 int bounds_reg = alloc_preg (cfg);
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3572 /* the is_null_bb target simply copies the input register to the output */
3573 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3575 } else if (mono_class_is_nullable (klass)) {
3576 g_assert (!context_used);
3577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3578 /* the is_null_bb target simply copies the input register to the output */
3579 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3580 } else {
3581 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3582 g_assert (!context_used);
3583 /* the remoting code is broken, access the class for now */
3584 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3585 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3586 if (!vt) {
3587 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3588 cfg->exception_ptr = klass;
3589 return NULL;
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3592 } else {
3593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3598 } else {
3599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3600 /* the is_null_bb target simply copies the input register to the output */
3601 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is a null reference. */
3606 MONO_START_BB (cfg, false_bb);
3608 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3609 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3611 MONO_START_BB (cfg, is_null_bb);
3613 MONO_START_BB (cfg, end_bb);
3615 return ins;
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal OP_CISINST opcode, used where a tri-state answer
 * is needed instead of isinst's object/null result (remoting-aware checks).
 * The transparent-proxy branches exist because a proxy's real type may only be
 * determinable at runtime through its custom type info.
 */
3618 static MonoInst*
3619 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3621 /* This opcode takes as input an object reference and a class, and returns:
3622 0) if the object is an instance of the class,
3623 1) if the object is not instance of the class,
3624 2) if the object is a proxy whose type cannot be determined */
3626 MonoInst *ins;
3627 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3628 int obj_reg = src->dreg;
3629 int dreg = alloc_ireg (cfg);
3630 int tmp_reg;
3631 int klass_reg = alloc_preg (cfg);
3633 NEW_BBLOCK (cfg, true_bb);
3634 NEW_BBLOCK (cfg, false_bb);
3635 NEW_BBLOCK (cfg, false2_bb);
3636 NEW_BBLOCK (cfg, end_bb);
3637 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null object: "not an instance" (result 1). */
3639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3642 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3643 NEW_BBLOCK (cfg, interface_fail_bb);
3645 tmp_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface bitmap check; on failure, see whether the object is a proxy. */
3647 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3648 MONO_START_BB (cfg, interface_fail_bb);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3651 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info: type undecidable at JIT time (result 2). */
3653 tmp_reg = alloc_preg (cfg);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3655 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3657 } else {
3658 tmp_reg = alloc_preg (cfg);
3659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3662 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remoted class behind the proxy instead. */
3663 tmp_reg = alloc_preg (cfg);
3664 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3667 tmp_reg = alloc_preg (cfg);
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3672 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3675 MONO_START_BB (cfg, no_proxy_bb);
3677 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3680 MONO_START_BB (cfg, false_bb);
3682 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3685 MONO_START_BB (cfg, false2_bb);
3687 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3690 MONO_START_BB (cfg, true_bb);
3692 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3694 MONO_START_BB (cfg, end_bb);
3696 /* FIXME: */
/* Dummy ICONST to give the result a defining instruction for the caller. */
3697 MONO_INST_NEW (cfg, ins, OP_ICONST);
3698 ins->dreg = dreg;
3699 ins->type = STACK_I4;
3701 return ins;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal OP_CCASTCLASS opcode, the remoting-aware variant
 * of castclass: instead of returning the object it returns a status code and
 * defers undecidable proxy casts to runtime (result 1), throwing
 * InvalidCastException directly for definite mismatches.
 */
3704 static MonoInst*
3705 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3707 /* This opcode takes as input an object reference and a class, and returns:
3708 0) if the object is an instance of the class,
3709 1) if the object is a proxy whose type cannot be determined
3710 an InvalidCastException exception is thrown otherwhise*/
3712 MonoInst *ins;
3713 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3714 int obj_reg = src->dreg;
3715 int dreg = alloc_ireg (cfg);
3716 int tmp_reg = alloc_preg (cfg);
3717 int klass_reg = alloc_preg (cfg);
3719 NEW_BBLOCK (cfg, end_bb);
3720 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always casts successfully (result 0). */
3722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3725 save_cast_details (cfg, klass, obj_reg);
3727 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3728 NEW_BBLOCK (cfg, interface_fail_bb);
3730 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3731 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3732 MONO_START_BB (cfg, interface_fail_bb);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy either: definite mismatch, throws inside the check helper. */
3735 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3737 tmp_reg = alloc_preg (cfg);
3738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3740 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: leave the decision to the runtime (result 1). */
3742 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3743 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3745 } else {
3746 NEW_BBLOCK (cfg, no_proxy_bb);
3748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3750 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: substitute the remoted class for the hierarchy test. */
3752 tmp_reg = alloc_preg (cfg);
3753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3756 tmp_reg = alloc_preg (cfg);
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3761 NEW_BBLOCK (cfg, fail_1_bb);
3763 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3765 MONO_START_BB (cfg, fail_1_bb);
3767 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3770 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: regular castclass check (throws on mismatch). */
3772 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3775 MONO_START_BB (cfg, ok_result_bb);
3777 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3779 MONO_START_BB (cfg, end_bb);
3781 /* FIXME: */
/* Dummy ICONST to give the result a defining instruction for the caller. */
3782 MONO_INST_NEW (cfg, ins, OP_ICONST);
3783 ins->dreg = dreg;
3784 ins->type = STACK_I4;
3786 return ins;
3790 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the body of mono_delegate_ctor (): allocate the delegate of KLASS,
 * store its target object and MonoMethod, optionally reserve a per-domain
 * code slot so the delegate trampoline can cache the compiled code address,
 * and install the delegate invoke trampoline. Write barriers are emitted for
 * the reference stores when the GC requires them.
 * Returns NULL and sets the cfg exception on error (see comment above).
 */
3792 static G_GNUC_UNUSED MonoInst*
3793 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3795 MonoInst *ptr;
3796 int dreg;
3797 gpointer *trampoline;
3798 MonoInst *obj, *method_ins, *tramp_ins;
3799 MonoDomain *domain;
3800 guint8 **code_slot;
3802 obj = handle_alloc (cfg, klass, FALSE, 0);
3803 if (!obj)
/* handle_alloc already set the cfg exception. */
3804 return NULL;
3806 /* Inline the contents of mono_delegate_ctor */
3808 /* Set target field */
3809 /* Optimize away setting of NULL target */
3810 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3812 if (cfg->gen_write_barriers) {
3813 dreg = alloc_preg (cfg);
3814 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3815 emit_write_barrier (cfg, ptr, target, 0);
3819 /* Set method field */
3820 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3822 if (cfg->gen_write_barriers) {
3823 dreg = alloc_preg (cfg);
3824 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3825 emit_write_barrier (cfg, ptr, method_ins, 0);
3828 * To avoid looking up the compiled code belonging to the target method
3829 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3830 * store it, and we fill it after the method has been compiled.
3832 if (!cfg->compile_aot && !method->dynamic) {
3833 MonoInst *code_slot_ins;
3835 if (context_used) {
3836 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3837 } else {
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
3838 domain = mono_domain_get ();
3839 mono_domain_lock (domain);
3840 if (!domain_jit_info (domain)->method_code_hash)
3841 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3842 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3843 if (!code_slot) {
3844 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3845 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3847 mono_domain_unlock (domain);
3849 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3854 /* Set invoke_impl field */
3855 if (cfg->compile_aot) {
/* AOT: the trampoline address must go through a patchable constant. */
3856 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3857 } else {
3858 trampoline = mono_create_delegate_trampoline (klass);
3859 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3863 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3865 return obj;
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newobj' on a multi-dimensional array ctor of
 * the given RANK, with the dimension arguments in SP. Goes through the
 * vararg mono_array_new_va icall wrapper, which disables LLVM compilation
 * for this method and marks the cfg as containing varargs.
 */
3868 static MonoInst*
3869 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3871 MonoJitICallInfo *info;
3873 /* Need to register the icall so it gets an icall wrapper */
3874 info = mono_get_array_new_va_icall (rank);
3876 cfg->flags |= MONO_CFG_HAS_VARARGS;
3878 /* mono_array_new_va () needs a vararg calling convention */
3879 cfg->disable_llvm = TRUE;
3881 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3882 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable that has not been materialized yet,
 * emit OP_LOAD_GOTADDR at the very start of the entry bblock to initialize it,
 * and add a dummy use in the exit bblock so the variable stays live for the
 * whole method. Idempotent: returns immediately once allocated.
 */
3885 static void
3886 mono_emit_load_got_addr (MonoCompile *cfg)
3888 MonoInst *getaddr, *dummy_use;
3890 if (!cfg->got_var || cfg->got_var_allocated)
3891 return;
3893 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3894 getaddr->dreg = cfg->got_var->dreg;
3896 /* Add it to the start of the first bblock */
/* Prepend by hand: it must execute before any instruction already emitted. */
3897 if (cfg->bb_entry->code) {
3898 getaddr->next = cfg->bb_entry->code;
3899 cfg->bb_entry->code = getaddr;
3901 else
3902 MONO_ADD_INS (cfg->bb_entry, getaddr);
3904 cfg->got_var_allocated = TRUE;
3907 * Add a dummy use to keep the got_var alive, since real uses might
3908 * only be generated by the back ends.
3909 * Add it to end_bblock, so the variable's lifetime covers the whole
3910 * method.
3911 * It would be better to make the usage of the got var explicit in all
3912 * cases when the backend needs it (i.e. calls, throw etc.), so this
3913 * wouldn't be needed.
3915 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3916 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size a method may have to be considered for inlining.
 * Initialized lazily in mono_method_check_inlining () from the
 * MONO_INLINELIMIT env var, falling back to INLINE_LENGTH_LIMIT. */
3919 static int inline_limit;
3920 static gboolean inline_limit_inited;
3922 static gboolean
3923 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3925 MonoMethodHeaderSummary header;
3926 MonoVTable *vtable;
3927 #ifdef MONO_ARCH_SOFT_FLOAT
3928 MonoMethodSignature *sig = mono_method_signature (method);
3929 int i;
3930 #endif
3932 if (cfg->generic_sharing_context)
3933 return FALSE;
3935 if (cfg->inline_depth > 10)
3936 return FALSE;
3938 #ifdef MONO_ARCH_HAVE_LMF_OPS
3939 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3940 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3941 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3942 return TRUE;
3943 #endif
3946 if (!mono_method_get_header_summary (method, &header))
3947 return FALSE;
3949 /*runtime, icall and pinvoke are checked by summary call*/
3950 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3951 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3952 (method->klass->marshalbyref) ||
3953 header.has_clauses)
3954 return FALSE;
3956 /* also consider num_locals? */
3957 /* Do the size check early to avoid creating vtables */
3958 if (!inline_limit_inited) {
3959 if (getenv ("MONO_INLINELIMIT"))
3960 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3961 else
3962 inline_limit = INLINE_LENGTH_LIMIT;
3963 inline_limit_inited = TRUE;
3965 if (header.code_size >= inline_limit)
3966 return FALSE;
3969 * if we can initialize the class of the method right away, we do,
3970 * otherwise we don't allow inlining if the class needs initialization,
3971 * since it would mean inserting a call to mono_runtime_class_init()
3972 * inside the inlined code
3974 if (!(cfg->opt & MONO_OPT_SHARED)) {
3975 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3976 if (cfg->run_cctors && method->klass->has_cctor) {
3977 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3978 if (!method->klass->runtime_info)
3979 /* No vtable created yet */
3980 return FALSE;
3981 vtable = mono_class_vtable (cfg->domain, method->klass);
3982 if (!vtable)
3983 return FALSE;
3984 /* This makes so that inline cannot trigger */
3985 /* .cctors: too many apps depend on them */
3986 /* running with a specific order... */
3987 if (! vtable->initialized)
3988 return FALSE;
3989 mono_runtime_class_init (vtable);
3991 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3992 if (!method->klass->runtime_info)
3993 /* No vtable created yet */
3994 return FALSE;
3995 vtable = mono_class_vtable (cfg->domain, method->klass);
3996 if (!vtable)
3997 return FALSE;
3998 if (!vtable->initialized)
3999 return FALSE;
4001 } else {
4003 * If we're compiling for shared code
4004 * the cctor will need to be run at aot method load time, for example,
4005 * or at the end of the compilation of the inlining method.
4007 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4008 return FALSE;
4012 * CAS - do not inline methods with declarative security
4013 * Note: this has to be before any possible return TRUE;
4015 if (mono_method_has_declsec (method))
4016 return FALSE;
4018 #ifdef MONO_ARCH_SOFT_FLOAT
4019 /* FIXME: */
4020 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4021 return FALSE;
4022 for (i = 0; i < sig->param_count; ++i)
4023 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4024 return FALSE;
4025 #endif
4027 return TRUE;
4030 static gboolean
4031 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4033 if (vtable->initialized && !cfg->compile_aot)
4034 return FALSE;
4036 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4037 return FALSE;
4039 if (!mono_class_needs_cctor_run (vtable->klass, method))
4040 return FALSE;
4042 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4043 /* The initialization is already done before the method is called */
4044 return FALSE;
4046 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have class KLASS.  A bounds check is emitted
 * when BCHECK is TRUE.  Returns the address instruction (type STACK_MP).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		/* Widen the 32 bit index to pointer width. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* Narrow a 64 bit index to the 32 bit register width. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes can be addressed with a single LEA. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	add_reg = alloc_ireg_mp (cfg);

	/* Generic case: &vector [0] + index * element_size. */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (INDEX_INS1, INDEX_INS2) of
 * the two-dimensional array ARR, adjusting both indexes by their lower
 * bounds and range-checking each dimension.  Depends on OP_PMUL, so it is
 * only compiled when the arch does not emulate mul/div.
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx1 = index1 - lower_bound; require realidx1 < length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = &vector [0] + ((realidx1 * length2) + realidx2) * size */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
4167 static MonoInst*
4168 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4170 int rank;
4171 MonoInst *addr;
4172 MonoMethod *addr_method;
4173 int element_size;
4175 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4177 if (rank == 1)
4178 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4180 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4181 /* emit_ldelema_2 depends on OP_LMUL */
4182 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4183 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4185 #endif
4187 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4188 addr_method = mono_marshal_get_array_address (rank, element_size);
4189 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4191 return addr;
4194 static MonoBreakPolicy
4195 always_insert_breakpoint (MonoMethod *method)
4197 return MONO_BREAK_POLICY_ALWAYS;
4200 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4203 * mono_set_break_policy:
4204 * policy_callback: the new callback function
4206 * Allow embedders to decide wherther to actually obey breakpoint instructions
4207 * (both break IL instructions and Debugger.Break () method calls), for example
4208 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4209 * untrusted or semi-trusted code.
4211 * @policy_callback will be called every time a break point instruction needs to
4212 * be inserted with the method argument being the method that calls Debugger.Break()
4213 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4214 * if it wants the breakpoint to not be effective in the given method.
4215 * #MONO_BREAK_POLICY_ALWAYS is the default.
4217 void
4218 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4220 if (policy_callback)
4221 break_policy_func = policy_callback;
4222 else
4223 break_policy_func = always_insert_breakpoint;
4226 static gboolean
4227 should_insert_brekpoint (MonoMethod *method) {
4228 switch (break_policy_func (method)) {
4229 case MONO_BREAK_POLICY_ALWAYS:
4230 return TRUE;
4231 case MONO_BREAK_POLICY_NEVER:
4232 return FALSE;
4233 case MONO_BREAK_POLICY_ON_DBG:
4234 return mono_debug_using_mono_debugger ();
4235 default:
4236 g_warning ("Incorrect value returned from break policy callback");
4237 return FALSE;
4241 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4242 static MonoInst*
4243 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4245 MonoInst *addr, *store, *load;
4246 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4248 /* the bounds check is already done by the callers */
4249 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4250 if (is_set) {
4251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4252 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4253 } else {
4254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4255 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4257 return store;
4260 static MonoInst*
4261 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4263 MonoInst *ins = NULL;
4264 #ifdef MONO_ARCH_SIMD_INTRINSICS
4265 if (cfg->opt & MONO_OPT_SIMD) {
4266 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4267 if (ins)
4268 return ins;
4270 #endif
4272 return ins;
/*
 * mini_emit_inst_for_method:
 *
 *   Replace a call to CMETHOD with inlined IR when it is one of the known
 * intrinsics (String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger/Environment, Math).  Returns the instruction
 * producing the call's result, or NULL if no intrinsic applies and the
 * call must be emitted normally.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			/* chars are 2 bytes: addr = str + (index << 1) + offsetof (chars) */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			/* obj->vtable->type */
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			/* Multiplicative hash of the object address; only valid with a
			 * non-moving GC since the address must stay stable. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor () is empty: replace the call with a nop. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			/* A NULL bounds pointer identifies a szarray. */
			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			/* The rank is cached in the vtable. */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {

		/* FIXME this should be integrated to the check below once we support the trampoline version */
#if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		/* Monitor.Enter (obj, ref lockTaken) -- the v4 two-argument overload. */
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
			MonoMethod *fast_method = NULL;

			/*FIXME fix LLVM and AOT support*/
			if (COMPILE_LLVM (cfg) || cfg->compile_aot)
				return NULL;

			/* Avoid infinite recursion */
			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
				return NULL;

			fast_method = mono_monitor_get_fast_path (cmethod);
			if (!fast_method)
				return NULL;

			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
		}
#endif

#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				/* The trampoline takes the object in a fixed register. */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				/* Increment == atomic add of the constant 1. */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				/* Decrement == atomic add of the constant -1. */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					 (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			/* Storing a reference needs a write barrier with some GCs. */
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* Debugger.Break () honours the installed break policy. */
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	/* Give the arch backend a last chance to intrinsify the call. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
4728 * This entry point could be used later for arbitrary method
4729 * redirection.
4731 inline static MonoInst*
4732 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4733 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4735 if (method->klass == mono_defaults.string_class) {
4736 /* managed string allocation support */
4737 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4738 MonoInst *iargs [2];
4739 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4740 MonoMethod *managed_alloc = NULL;
4742 g_assert (vtable); /*Should not fail since it System.String*/
4743 #ifndef MONO_CROSS_COMPILE
4744 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4745 #endif
4746 if (!managed_alloc)
4747 return NULL;
4748 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4749 iargs [1] = args [0];
4750 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4753 return NULL;
4756 static void
4757 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4759 MonoInst *store, *temp;
4760 int i;
4762 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4763 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4766 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4767 * would be different than the MonoInst's used to represent arguments, and
4768 * the ldelema implementation can't deal with that.
4769 * Solution: When ldelema is used on an inline argument, create a var for
4770 * it, emit ldelema on that var, and emit the saving code below in
4771 * inline_method () if needed.
4773 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4774 cfg->args [i] = temp;
4775 /* This uses cfg->args [i] which is set by the preceeding line */
4776 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4777 store->cil_code = sp [0]->cil_code;
4778 sp++;
/* Non-zero: enable restricting inlining by callee/caller name prefix via the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT env vars (debugging aid). */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: allow inlining of CALLED_METHOD only if its full name
 * begins with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env
 * var.  The prefix is cached on first use; an empty prefix allows all.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *called_method_name;
	gboolean matches;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : (char *) "";
	}

	/* No prefix configured: no restriction. */
	if (limit [0] == '\0')
		return TRUE;

	called_method_name = mono_method_full_name (called_method, TRUE);
	//matches = strncmp (called_method_name, limit, strlen (limit)) <= 0;
	matches = strncmp (called_method_name, limit, strlen (limit)) == 0;
	g_free (called_method_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: allow inlining into CALLER_METHOD only if its full name
 * begins with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env
 * var.  The prefix is cached on first use; an empty prefix allows all.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *caller_method_name;
	gboolean matches;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string != NULL ? limit_string : (char *) "";
	}

	/* No prefix configured: no restriction. */
	if (limit [0] == '\0')
		return TRUE;

	caller_method_name = mono_method_full_name (caller_method, TRUE);
	//matches = strncmp (caller_method_name, limit, strlen (limit)) <= 0;
	matches = strncmp (caller_method_name, limit, strlen (limit)) == 0;
	g_free (caller_method_name);

	return matches;
}
#endif
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the call site IP of the method being compiled.
 * SP points at the call arguments already on the evaluation stack; on success
 * the callee's return value (if any) is pushed through *SP.  REAL_OFFSET is
 * the IL offset of the call site and DONT_INLINE the chain of methods already
 * being inlined (recursion stopper, passed through to mono_method_to_ir ()).
 * When INLINE_ALWAYS is set, loader errors are surfaced as cfg exceptions
 * instead of being swallowed.
 *
 * Returns the positive cost of the inlined code on success, 0 on failure
 * (in which case the caller falls back to emitting a normal call).
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		/* Only surface the loader error when the caller insists on inlining. */
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/*
	 * Save the per-method compilation state that mono_method_to_ir ()
	 * overwrites while translating the callee; restored below.
	 */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	/* Restore the state saved above. */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* NOTE(review): the cost cap 60 here is distinct from INLINE_LENGTH_LIMIT. */
	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}

		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
5050 * Some of these comments may well be out-of-date.
5051 * Design decisions: we do a single pass over the IL code (and we do bblock
5052 * splitting/merging in the few cases when it's required: a back jump to an IL
5053 * address that was not already seen as bblock starting point).
5054 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5055 * Complex operations are decomposed in simpler ones right away. We need to let the
5056 * arch-specific code peek and poke inside this process somehow (except when the
5057 * optimizations can take advantage of the full semantic info of coarse opcodes).
5058 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5059 * MonoInst->opcode initially is the IL opcode or some simplification of that
5060 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5061 * opcode with value bigger than OP_LAST.
5062 * At this point the IR can be handed over to an interpreter, a dumb code generator
5063 * or to the optimizing code generator that will translate it to SSA form.
5065 * Profiling directed optimizations.
5066 * We may compile by default with few or no optimizations and instrument the code
5067 * or the user may indicate what methods to optimize the most either in a config file
5068 * or through repeated runs where the compiler applies offline the optimizations to
5069 * each method and then decides if it was worth it.
/*
 * Operand/stack validation helpers used throughout mono_method_to_ir ():
 * each macro bails out through the function's `unverified' / `load_error'
 * handling (via the UNVERIFIED / LOAD_ERROR macros) when its check fails.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
5084 static gboolean
5085 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5087 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5089 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) of HEADER and create a basic block
 * (via GET_BBLOCK) at every branch target and at every instruction
 * following a branch.  Blocks containing a CEE_THROW are flagged
 * out_of_line so they can be moved out of the hot path.
 *
 * Returns 0 on success; on malformed IL returns 1 with *POS set to the
 * offending instruction (reached through the UNVERIFIED macro, which
 * jumps to the `unverified' label below).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance IP by the operand size; branches also register targets. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* switch targets are relative to the end of the whole instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
5185 static inline MonoMethod *
5186 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5188 MonoMethod *method;
5190 if (m->wrapper_type != MONO_WRAPPER_NONE)
5191 return mono_method_get_wrapper_data (m, token);
5193 method = mono_get_method_full (m->klass->image, token, klass, context);
5195 return method;
5198 static inline MonoMethod *
5199 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5201 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5203 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5204 return NULL;
5206 return method;
5209 static inline MonoClass*
5210 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5212 MonoClass *klass;
5214 if (method->wrapper_type != MONO_WRAPPER_NONE)
5215 klass = mono_method_get_wrapper_data (method, token);
5216 else
5217 klass = mono_class_get_full (method->klass->image, token, context);
5218 if (klass)
5219 mono_class_init (klass);
5220 return klass;
5224 * Returns TRUE if the JIT should abort inlining because "callee"
5225 * is influenced by security attributes.
5227 static
5228 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5230 guint32 result;
5232 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5233 return TRUE;
5236 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5237 if (result == MONO_JIT_SECURITY_OK)
5238 return FALSE;
5240 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5241 /* Generate code to throw a SecurityException before the actual call/link */
5242 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5243 MonoInst *args [2];
5245 NEW_ICONST (cfg, args [0], 4);
5246 NEW_METHODCONST (cfg, args [1], caller);
5247 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5248 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5249 /* don't hide previous results */
5250 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5251 cfg->exception_data = result;
5252 return TRUE;
5255 return FALSE;
5258 static MonoMethod*
5259 throw_exception (void)
5261 static MonoMethod *method = NULL;
5263 if (!method) {
5264 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5265 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5267 g_assert (method);
5268 return method;
5271 static void
5272 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5274 MonoMethod *thrower = throw_exception ();
5275 MonoInst *args [1];
5277 EMIT_NEW_PCONST (cfg, args [0], ex);
5278 mono_emit_method_call (cfg, thrower, args, NULL);
5282 * Return the original method is a wrapper is specified. We can only access
5283 * the custom attributes from the original method.
5285 static MonoMethod*
5286 get_original_method (MonoMethod *method)
5288 if (method->wrapper_type == MONO_WRAPPER_NONE)
5289 return method;
5291 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5292 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5293 return NULL;
5295 /* in other cases we need to find the original method */
5296 return mono_marshal_method_from_wrapper (method);
5299 static void
5300 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5301 MonoBasicBlock *bblock, unsigned char *ip)
5303 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5304 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5305 if (ex)
5306 emit_throw_exception (cfg, ex);
5309 static void
5310 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5311 MonoBasicBlock *bblock, unsigned char *ip)
5313 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5314 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5315 if (ex)
5316 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size (in *OUT_SIZE;
 * the field token is returned in *OUT_FIELD_TOKEN).  For AOT compilation
 * the field RVA is returned instead of a data pointer, so the lookup can
 * be redone at load time.  Returns NULL when the pattern does not match
 * or the element type cannot be handled directly.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* the call target must really be RuntimeHelpers.InitializeArray from corlib */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		/* element size by underlying type */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		/* fallthrough: R8 shares the 8-byte case unless ARM_FPU_FPA bailed above */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* the initializer blob must be at least as large as the array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5403 static void
5404 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5406 char *method_fname = mono_method_full_name (method, TRUE);
5407 char *method_code;
5408 MonoMethodHeader *header = mono_method_get_header (method);
5410 if (header->code_size == 0)
5411 method_code = g_strdup ("method body is empty.");
5412 else
5413 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5415 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5416 g_free (method_fname);
5417 g_free (method_code);
5418 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Make the compilation fail by attaching a caller-supplied managed
 * exception object to CFG.  cfg->exception_ptr is registered as a single
 * GC root before the object is stored into it.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
5429 static gboolean
5430 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5432 return mini_type_is_reference (cfg, &klass->byval_arg);
5435 static void
5436 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5438 MonoInst *ins;
5439 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5440 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5441 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5442 /* Optimize reg-reg moves away */
5444 * Can't optimize other opcodes, since sp[0] might point to
5445 * the last ins of a decomposed opcode.
5447 sp [0]->dreg = (cfg)->locals [n]->dreg;
5448 } else {
5449 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations so try to get rid of it in common
 * cases: when the ldloca (of operand width SIZE bytes) is immediately
 * followed by an initobj in the same bblock, emit the initialization
 * directly into the local and return the IP past the initobj so the
 * caller can skip both instructions.  Returns NULL when the pattern does
 * not apply (or the initobj type fails to load — CHECK_TYPELOAD jumps to
 * the load_error label below).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* decode the local index; SIZE selects ldloca.s vs ldloca encoding */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			/* other types (e.g. generic params) are not handled here */
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5493 static gboolean
5494 is_exception_class (MonoClass *class)
5496 while (class) {
5497 if (class == mono_defaults.exception_class)
5498 return TRUE;
5499 class = class->parent;
5501 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The answer is computed once per
 * assembly and cached (published with a memory barrier so concurrent
 * readers see a fully written result).
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;	/* NOTE(review): assigned below but never read */
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			/* custom attribute blobs start with the 0x0001 prolog */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments: the second one is IsJITOptimizerDisabled */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
/*
 * is_supported_tail_call:
 *
 *   Return TRUE if the call from METHOD to CMETHOD (with call signature
 * FSIG) can be compiled as a tail call on this configuration.  Starts
 * from an arch-specific (or signature-equality) baseline and then vetoes
 * cases where the callee could observe the caller's dead stack frame.
 */
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		static int count = 0;
		count ++;
		if (getenv ("COUNT")) {
			if (count == atoi (getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
			if (count > atoi (getenv ("COUNT")))
				supported_tail_call = FALSE;
		}
	}
#endif

	return supported_tail_call;
}
5611 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5612 * it to the thread local value based on the tls_offset field. Every other kind of access to
5613 * the field causes an assert.
5615 static gboolean
5616 is_magic_tls_access (MonoClassField *field)
5618 if (strcmp (field->name, "tlsdata"))
5619 return FALSE;
5620 if (strcmp (field->parent->name, "ThreadLocal`1"))
5621 return FALSE;
5622 return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
static MonoInst*
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
{
	MonoInst *addr;
	int static_data_reg, array_reg, dreg;
	int offset2_reg, idx_reg;
	// inlined access to the tls data
	// idx = (offset >> 24) - 1;
	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
	static_data_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
	idx_reg = alloc_ireg (cfg);
	/* idx = (offset >> 24) - 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	/* scale idx by the pointer size to index the static_data array */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
	array_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	offset2_reg = alloc_ireg (cfg);
	/* offset2 = offset & 0xffffff (byte offset inside the chunk) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
	dreg = alloc_ireg (cfg);
	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
	return addr;
}
/*
 * create_magic_tls_access:
 *
 *   Redirect access to the tlsdata field to the tls var given by the
 * tls_offset field.  The computed address is cached per-method in
 * *CACHED_TLS_ADDR (a temp variable), so repeated accesses reload the
 * temp instead of recomputing the address.
 */
static MonoInst*
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
{
	MonoInst *load, *addr, *temp, *store, *thread_ins;
	MonoClassField *offset_field;

	if (*cached_tls_addr) {
		/* fast path: reload the previously computed address */
		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
		return addr;
	}
	thread_ins = mono_get_thread_intrinsic (cfg);
	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");

	/* load the tls_offset value from the ThreadLocal`1 instance */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
	if (thread_ins) {
		MONO_ADD_INS (cfg->cbb, thread_ins);
	} else {
		/* no arch intrinsic for the current thread: fall back to an icall */
		MonoMethod *thread_method;
		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
	}
	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
	addr->klass = mono_class_from_mono_type (tls_field->type);
	addr->type = STACK_MP;
	/* cache the address in a temp for subsequent accesses in this method */
	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);

	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
	return addr;
}
5691 * mono_method_to_ir:
5693 * Translate the .net IL into linear IR.
5696 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5697 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5698 guint inline_offset, gboolean is_virtual_call)
5700 MonoError error;
5701 MonoInst *ins, **sp, **stack_start;
5702 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5703 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5704 MonoMethod *cmethod, *method_definition;
5705 MonoInst **arg_array;
5706 MonoMethodHeader *header;
5707 MonoImage *image;
5708 guint32 token, ins_flag;
5709 MonoClass *klass;
5710 MonoClass *constrained_call = NULL;
5711 unsigned char *ip, *end, *target, *err_pos;
5712 static double r8_0 = 0.0;
5713 MonoMethodSignature *sig;
5714 MonoGenericContext *generic_context = NULL;
5715 MonoGenericContainer *generic_container = NULL;
5716 MonoType **param_types;
5717 int i, n, start_new_bblock, dreg;
5718 int num_calls = 0, inline_costs = 0;
5719 int breakpoint_id = 0;
5720 guint num_args;
5721 MonoBoolean security, pinvoke;
5722 MonoSecurityManager* secman = NULL;
5723 MonoDeclSecurityActions actions;
5724 GSList *class_inits = NULL;
5725 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5726 int context_used;
5727 gboolean init_locals, seq_points, skip_dead_blocks;
5728 gboolean disable_inline;
5729 MonoInst *cached_tls_addr = NULL;
5731 disable_inline = is_jit_optimizer_disabled (method);
5733 /* serialization and xdomain stuff may need access to private fields and methods */
5734 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5735 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5736 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5737 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5738 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5739 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5741 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5743 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5744 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5745 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5746 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5747 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5749 image = method->klass->image;
5750 header = mono_method_get_header (method);
5751 if (!header) {
5752 MonoLoaderError *error;
5754 if ((error = mono_loader_get_last_error ())) {
5755 mono_cfg_set_exception (cfg, error->exception_type);
5756 } else {
5757 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5758 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5760 goto exception_exit;
5762 generic_container = mono_method_get_generic_container (method);
5763 sig = mono_method_signature (method);
5764 num_args = sig->hasthis + sig->param_count;
5765 ip = (unsigned char*)header->code;
5766 cfg->cil_start = ip;
5767 end = ip + header->code_size;
5768 cfg->stat_cil_code_size += header->code_size;
5769 init_locals = header->init_locals;
5771 seq_points = cfg->gen_seq_points && cfg->method == method;
5774 * Methods without init_locals set could cause asserts in various passes
5775 * (#497220).
5777 init_locals = TRUE;
5779 method_definition = method;
5780 while (method_definition->is_inflated) {
5781 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5782 method_definition = imethod->declaring;
5785 /* SkipVerification is not allowed if core-clr is enabled */
5786 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5787 dont_verify = TRUE;
5788 dont_verify_stloc = TRUE;
5791 if (mono_debug_using_mono_debugger ())
5792 cfg->keep_cil_nops = TRUE;
5794 if (sig->is_inflated)
5795 generic_context = mono_method_get_context (method);
5796 else if (generic_container)
5797 generic_context = &generic_container->context;
5798 cfg->generic_context = generic_context;
5800 if (!cfg->generic_sharing_context)
5801 g_assert (!sig->has_type_parameters);
5803 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5804 g_assert (method->is_inflated);
5805 g_assert (mono_method_get_context (method)->method_inst);
5807 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5808 g_assert (sig->generic_param_count);
5810 if (cfg->method == method) {
5811 cfg->real_offset = 0;
5812 } else {
5813 cfg->real_offset = inline_offset;
5816 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5817 cfg->cil_offset_to_bb_len = header->code_size;
5819 cfg->current_method = method;
5821 if (cfg->verbose_level > 2)
5822 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5824 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5825 if (sig->hasthis)
5826 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5827 for (n = 0; n < sig->param_count; ++n)
5828 param_types [n + sig->hasthis] = sig->params [n];
5829 cfg->arg_types = param_types;
5831 dont_inline = g_list_prepend (dont_inline, method);
5832 if (cfg->method == method) {
5834 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5835 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5837 /* ENTRY BLOCK */
5838 NEW_BBLOCK (cfg, start_bblock);
5839 cfg->bb_entry = start_bblock;
5840 start_bblock->cil_code = NULL;
5841 start_bblock->cil_length = 0;
5842 #if defined(__native_client_codegen__)
5843 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5844 ins->dreg = alloc_dreg (cfg, STACK_I4);
5845 MONO_ADD_INS (start_bblock, ins);
5846 #endif
5848 /* EXIT BLOCK */
5849 NEW_BBLOCK (cfg, end_bblock);
5850 cfg->bb_exit = end_bblock;
5851 end_bblock->cil_code = NULL;
5852 end_bblock->cil_length = 0;
5853 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5854 g_assert (cfg->num_bblocks == 2);
5856 arg_array = cfg->args;
5858 if (header->num_clauses) {
5859 cfg->spvars = g_hash_table_new (NULL, NULL);
5860 cfg->exvars = g_hash_table_new (NULL, NULL);
5862 /* handle exception clauses */
5863 for (i = 0; i < header->num_clauses; ++i) {
5864 MonoBasicBlock *try_bb;
5865 MonoExceptionClause *clause = &header->clauses [i];
5866 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5867 try_bb->real_offset = clause->try_offset;
5868 try_bb->try_start = TRUE;
5869 try_bb->region = ((i + 1) << 8) | clause->flags;
5870 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5871 tblock->real_offset = clause->handler_offset;
5872 tblock->flags |= BB_EXCEPTION_HANDLER;
5874 link_bblock (cfg, try_bb, tblock);
5876 if (*(ip + clause->handler_offset) == CEE_POP)
5877 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5879 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5880 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5881 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5882 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5883 MONO_ADD_INS (tblock, ins);
5885 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5886 /* finally clauses already have a seq point */
5887 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5888 MONO_ADD_INS (tblock, ins);
5891 /* todo: is a fault block unsafe to optimize? */
5892 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5893 tblock->flags |= BB_EXCEPTION_UNSAFE;
5897 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5898 while (p < end) {
5899 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5901 /* catch and filter blocks get the exception object on the stack */
5902 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5903 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5904 MonoInst *dummy_use;
5906 /* mostly like handle_stack_args (), but just sets the input args */
5907 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5908 tblock->in_scount = 1;
5909 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5910 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5913 * Add a dummy use for the exvar so its liveness info will be
5914 * correct.
5916 cfg->cbb = tblock;
5917 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5919 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5920 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5921 tblock->flags |= BB_EXCEPTION_HANDLER;
5922 tblock->real_offset = clause->data.filter_offset;
5923 tblock->in_scount = 1;
5924 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5925 /* The filter block shares the exvar with the handler block */
5926 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5927 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5928 MONO_ADD_INS (tblock, ins);
5932 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5933 clause->data.catch_class &&
5934 cfg->generic_sharing_context &&
5935 mono_class_check_context_used (clause->data.catch_class)) {
5937 * In shared generic code with catch
5938 * clauses containing type variables
5939 * the exception handling code has to
5940 * be able to get to the rgctx.
5941 * Therefore we have to make sure that
5942 * the vtable/mrgctx argument (for
5943 * static or generic methods) or the
5944 * "this" argument (for non-static
5945 * methods) are live.
5947 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5948 mini_method_get_context (method)->method_inst ||
5949 method->klass->valuetype) {
5950 mono_get_vtable_var (cfg);
5951 } else {
5952 MonoInst *dummy_use;
5954 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5958 } else {
5959 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5960 cfg->cbb = start_bblock;
5961 cfg->args = arg_array;
5962 mono_save_args (cfg, sig, inline_args);
5965 /* FIRST CODE BLOCK */
5966 NEW_BBLOCK (cfg, bblock);
5967 bblock->cil_code = ip;
5968 cfg->cbb = bblock;
5969 cfg->ip = ip;
5971 ADD_BBLOCK (cfg, bblock);
5973 if (cfg->method == method) {
5974 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5975 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5976 MONO_INST_NEW (cfg, ins, OP_BREAK);
5977 MONO_ADD_INS (bblock, ins);
5981 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5982 secman = mono_security_manager_get_methods ();
5984 security = (secman && mono_method_has_declsec (method));
5985 /* at this point having security doesn't mean we have any code to generate */
5986 if (security && (cfg->method == method)) {
5987 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5988 * And we do not want to enter the next section (with allocation) if we
5989 * have nothing to generate */
5990 security = mono_declsec_get_demands (method, &actions);
5993 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5994 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5995 if (pinvoke) {
5996 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5997 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5998 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6000 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6001 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6002 pinvoke = FALSE;
6004 if (custom)
6005 mono_custom_attrs_free (custom);
6007 if (pinvoke) {
6008 custom = mono_custom_attrs_from_class (wrapped->klass);
6009 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6010 pinvoke = FALSE;
6012 if (custom)
6013 mono_custom_attrs_free (custom);
6015 } else {
6016 /* not a P/Invoke after all */
6017 pinvoke = FALSE;
6021 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6022 /* we use a separate basic block for the initialization code */
6023 NEW_BBLOCK (cfg, init_localsbb);
6024 cfg->bb_init = init_localsbb;
6025 init_localsbb->real_offset = cfg->real_offset;
6026 start_bblock->next_bb = init_localsbb;
6027 init_localsbb->next_bb = bblock;
6028 link_bblock (cfg, start_bblock, init_localsbb);
6029 link_bblock (cfg, init_localsbb, bblock);
6031 cfg->cbb = init_localsbb;
6032 } else {
6033 start_bblock->next_bb = bblock;
6034 link_bblock (cfg, start_bblock, bblock);
6037 /* at this point we know, if security is TRUE, that some code needs to be generated */
6038 if (security && (cfg->method == method)) {
6039 MonoInst *args [2];
6041 cfg->stat_cas_demand_generation++;
6043 if (actions.demand.blob) {
6044 /* Add code for SecurityAction.Demand */
6045 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6046 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6047 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6048 mono_emit_method_call (cfg, secman->demand, args, NULL);
6050 if (actions.noncasdemand.blob) {
6051 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6052 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6053 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6054 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6055 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6056 mono_emit_method_call (cfg, secman->demand, args, NULL);
6058 if (actions.demandchoice.blob) {
6059 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6060 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6061 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6062 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6063 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6067 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6068 if (pinvoke) {
6069 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6072 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6073 /* check if this is native code, e.g. an icall or a p/invoke */
6074 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6075 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6076 if (wrapped) {
6077 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6078 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6080 /* if this ia a native call then it can only be JITted from platform code */
6081 if ((icall || pinvk) && method->klass && method->klass->image) {
6082 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6083 MonoException *ex = icall ? mono_get_exception_security () :
6084 mono_get_exception_method_access ();
6085 emit_throw_exception (cfg, ex);
6092 if (header->code_size == 0)
6093 UNVERIFIED;
6095 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6096 ip = err_pos;
6097 UNVERIFIED;
6100 if (cfg->method == method)
6101 mono_debug_init_method (cfg, bblock, breakpoint_id);
6103 for (n = 0; n < header->num_locals; ++n) {
6104 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6105 UNVERIFIED;
6107 class_inits = NULL;
6109 /* We force the vtable variable here for all shared methods
6110 for the possibility that they might show up in a stack
6111 trace where their exact instantiation is needed. */
6112 if (cfg->generic_sharing_context && method == cfg->method) {
6113 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6114 mini_method_get_context (method)->method_inst ||
6115 method->klass->valuetype) {
6116 mono_get_vtable_var (cfg);
6117 } else {
6118 /* FIXME: Is there a better way to do this?
6119 We need the variable live for the duration
6120 of the whole method. */
6121 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6125 /* add a check for this != NULL to inlined methods */
6126 if (is_virtual_call) {
6127 MonoInst *arg_ins;
6129 NEW_ARGLOAD (cfg, arg_ins, 0);
6130 MONO_ADD_INS (cfg->cbb, arg_ins);
6131 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6134 skip_dead_blocks = !dont_verify;
6135 if (skip_dead_blocks) {
6136 original_bb = bb = mono_basic_block_split (method, &error);
6137 if (!mono_error_ok (&error)) {
6138 mono_error_cleanup (&error);
6139 UNVERIFIED;
6141 g_assert (bb);
6144 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6145 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6147 ins_flag = 0;
6148 start_new_bblock = 0;
6149 cfg->cbb = bblock;
6150 while (ip < end) {
6151 if (cfg->method == method)
6152 cfg->real_offset = ip - header->code;
6153 else
6154 cfg->real_offset = inline_offset;
6155 cfg->ip = ip;
6157 context_used = 0;
6159 if (start_new_bblock) {
6160 bblock->cil_length = ip - bblock->cil_code;
6161 if (start_new_bblock == 2) {
6162 g_assert (ip == tblock->cil_code);
6163 } else {
6164 GET_BBLOCK (cfg, tblock, ip);
6166 bblock->next_bb = tblock;
6167 bblock = tblock;
6168 cfg->cbb = bblock;
6169 start_new_bblock = 0;
6170 for (i = 0; i < bblock->in_scount; ++i) {
6171 if (cfg->verbose_level > 3)
6172 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6173 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6174 *sp++ = ins;
6176 if (class_inits)
6177 g_slist_free (class_inits);
6178 class_inits = NULL;
6179 } else {
6180 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6181 link_bblock (cfg, bblock, tblock);
6182 if (sp != stack_start) {
6183 handle_stack_args (cfg, stack_start, sp - stack_start);
6184 sp = stack_start;
6185 CHECK_UNVERIFIABLE (cfg);
6187 bblock->next_bb = tblock;
6188 bblock = tblock;
6189 cfg->cbb = bblock;
6190 for (i = 0; i < bblock->in_scount; ++i) {
6191 if (cfg->verbose_level > 3)
6192 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6193 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6194 *sp++ = ins;
6196 g_slist_free (class_inits);
6197 class_inits = NULL;
6201 if (skip_dead_blocks) {
6202 int ip_offset = ip - header->code;
6204 if (ip_offset == bb->end)
6205 bb = bb->next;
6207 if (bb->dead) {
6208 int op_size = mono_opcode_size (ip, end);
6209 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6211 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6213 if (ip_offset + op_size == bb->end) {
6214 MONO_INST_NEW (cfg, ins, OP_NOP);
6215 MONO_ADD_INS (bblock, ins);
6216 start_new_bblock = 1;
6219 ip += op_size;
6220 continue;
6224 * Sequence points are points where the debugger can place a breakpoint.
6225 * Currently, we generate these automatically at points where the IL
6226 * stack is empty.
6228 if (seq_points && sp == stack_start) {
6229 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6230 MONO_ADD_INS (cfg->cbb, ins);
6233 bblock->real_offset = cfg->real_offset;
6235 if ((cfg->method == method) && cfg->coverage_info) {
6236 guint32 cil_offset = ip - header->code;
6237 cfg->coverage_info->data [cil_offset].cil_code = ip;
6239 /* TODO: Use an increment here */
6240 #if defined(TARGET_X86)
6241 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6242 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6243 ins->inst_imm = 1;
6244 MONO_ADD_INS (cfg->cbb, ins);
6245 #else
6246 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6247 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6248 #endif
6251 if (cfg->verbose_level > 3)
6252 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6254 switch (*ip) {
6255 case CEE_NOP:
6256 if (cfg->keep_cil_nops)
6257 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6258 else
6259 MONO_INST_NEW (cfg, ins, OP_NOP);
6260 ip++;
6261 MONO_ADD_INS (bblock, ins);
6262 break;
6263 case CEE_BREAK:
6264 if (should_insert_brekpoint (cfg->method))
6265 MONO_INST_NEW (cfg, ins, OP_BREAK);
6266 else
6267 MONO_INST_NEW (cfg, ins, OP_NOP);
6268 ip++;
6269 MONO_ADD_INS (bblock, ins);
6270 break;
6271 case CEE_LDARG_0:
6272 case CEE_LDARG_1:
6273 case CEE_LDARG_2:
6274 case CEE_LDARG_3:
6275 CHECK_STACK_OVF (1);
6276 n = (*ip)-CEE_LDARG_0;
6277 CHECK_ARG (n);
6278 EMIT_NEW_ARGLOAD (cfg, ins, n);
6279 ip++;
6280 *sp++ = ins;
6281 break;
6282 case CEE_LDLOC_0:
6283 case CEE_LDLOC_1:
6284 case CEE_LDLOC_2:
6285 case CEE_LDLOC_3:
6286 CHECK_STACK_OVF (1);
6287 n = (*ip)-CEE_LDLOC_0;
6288 CHECK_LOCAL (n);
6289 EMIT_NEW_LOCLOAD (cfg, ins, n);
6290 ip++;
6291 *sp++ = ins;
6292 break;
6293 case CEE_STLOC_0:
6294 case CEE_STLOC_1:
6295 case CEE_STLOC_2:
6296 case CEE_STLOC_3: {
6297 CHECK_STACK (1);
6298 n = (*ip)-CEE_STLOC_0;
6299 CHECK_LOCAL (n);
6300 --sp;
6301 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6302 UNVERIFIED;
6303 emit_stloc_ir (cfg, sp, header, n);
6304 ++ip;
6305 inline_costs += 1;
6306 break;
6308 case CEE_LDARG_S:
6309 CHECK_OPSIZE (2);
6310 CHECK_STACK_OVF (1);
6311 n = ip [1];
6312 CHECK_ARG (n);
6313 EMIT_NEW_ARGLOAD (cfg, ins, n);
6314 *sp++ = ins;
6315 ip += 2;
6316 break;
6317 case CEE_LDARGA_S:
6318 CHECK_OPSIZE (2);
6319 CHECK_STACK_OVF (1);
6320 n = ip [1];
6321 CHECK_ARG (n);
6322 NEW_ARGLOADA (cfg, ins, n);
6323 MONO_ADD_INS (cfg->cbb, ins);
6324 *sp++ = ins;
6325 ip += 2;
6326 break;
6327 case CEE_STARG_S:
6328 CHECK_OPSIZE (2);
6329 CHECK_STACK (1);
6330 --sp;
6331 n = ip [1];
6332 CHECK_ARG (n);
6333 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6334 UNVERIFIED;
6335 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6336 ip += 2;
6337 break;
6338 case CEE_LDLOC_S:
6339 CHECK_OPSIZE (2);
6340 CHECK_STACK_OVF (1);
6341 n = ip [1];
6342 CHECK_LOCAL (n);
6343 EMIT_NEW_LOCLOAD (cfg, ins, n);
6344 *sp++ = ins;
6345 ip += 2;
6346 break;
6347 case CEE_LDLOCA_S: {
6348 unsigned char *tmp_ip;
6349 CHECK_OPSIZE (2);
6350 CHECK_STACK_OVF (1);
6351 CHECK_LOCAL (ip [1]);
6353 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6354 ip = tmp_ip;
6355 inline_costs += 1;
6356 break;
6359 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6360 *sp++ = ins;
6361 ip += 2;
6362 break;
6364 case CEE_STLOC_S:
6365 CHECK_OPSIZE (2);
6366 CHECK_STACK (1);
6367 --sp;
6368 CHECK_LOCAL (ip [1]);
6369 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6370 UNVERIFIED;
6371 emit_stloc_ir (cfg, sp, header, ip [1]);
6372 ip += 2;
6373 inline_costs += 1;
6374 break;
6375 case CEE_LDNULL:
6376 CHECK_STACK_OVF (1);
6377 EMIT_NEW_PCONST (cfg, ins, NULL);
6378 ins->type = STACK_OBJ;
6379 ++ip;
6380 *sp++ = ins;
6381 break;
6382 case CEE_LDC_I4_M1:
6383 CHECK_STACK_OVF (1);
6384 EMIT_NEW_ICONST (cfg, ins, -1);
6385 ++ip;
6386 *sp++ = ins;
6387 break;
6388 case CEE_LDC_I4_0:
6389 case CEE_LDC_I4_1:
6390 case CEE_LDC_I4_2:
6391 case CEE_LDC_I4_3:
6392 case CEE_LDC_I4_4:
6393 case CEE_LDC_I4_5:
6394 case CEE_LDC_I4_6:
6395 case CEE_LDC_I4_7:
6396 case CEE_LDC_I4_8:
6397 CHECK_STACK_OVF (1);
6398 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6399 ++ip;
6400 *sp++ = ins;
6401 break;
6402 case CEE_LDC_I4_S:
6403 CHECK_OPSIZE (2);
6404 CHECK_STACK_OVF (1);
6405 ++ip;
6406 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6407 ++ip;
6408 *sp++ = ins;
6409 break;
6410 case CEE_LDC_I4:
6411 CHECK_OPSIZE (5);
6412 CHECK_STACK_OVF (1);
6413 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6414 ip += 5;
6415 *sp++ = ins;
6416 break;
6417 case CEE_LDC_I8:
6418 CHECK_OPSIZE (9);
6419 CHECK_STACK_OVF (1);
6420 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6421 ins->type = STACK_I8;
6422 ins->dreg = alloc_dreg (cfg, STACK_I8);
6423 ++ip;
6424 ins->inst_l = (gint64)read64 (ip);
6425 MONO_ADD_INS (bblock, ins);
6426 ip += 8;
6427 *sp++ = ins;
6428 break;
6429 case CEE_LDC_R4: {
6430 float *f;
6431 gboolean use_aotconst = FALSE;
6433 #ifdef TARGET_POWERPC
6434 /* FIXME: Clean this up */
6435 if (cfg->compile_aot)
6436 use_aotconst = TRUE;
6437 #endif
6439 /* FIXME: we should really allocate this only late in the compilation process */
6440 f = mono_domain_alloc (cfg->domain, sizeof (float));
6441 CHECK_OPSIZE (5);
6442 CHECK_STACK_OVF (1);
6444 if (use_aotconst) {
6445 MonoInst *cons;
6446 int dreg;
6448 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6450 dreg = alloc_freg (cfg);
6451 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6452 ins->type = STACK_R8;
6453 } else {
6454 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6455 ins->type = STACK_R8;
6456 ins->dreg = alloc_dreg (cfg, STACK_R8);
6457 ins->inst_p0 = f;
6458 MONO_ADD_INS (bblock, ins);
6460 ++ip;
6461 readr4 (ip, f);
6462 ip += 4;
6463 *sp++ = ins;
6464 break;
6466 case CEE_LDC_R8: {
6467 double *d;
6468 gboolean use_aotconst = FALSE;
6470 #ifdef TARGET_POWERPC
6471 /* FIXME: Clean this up */
6472 if (cfg->compile_aot)
6473 use_aotconst = TRUE;
6474 #endif
6476 /* FIXME: we should really allocate this only late in the compilation process */
6477 d = mono_domain_alloc (cfg->domain, sizeof (double));
6478 CHECK_OPSIZE (9);
6479 CHECK_STACK_OVF (1);
6481 if (use_aotconst) {
6482 MonoInst *cons;
6483 int dreg;
6485 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6487 dreg = alloc_freg (cfg);
6488 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6489 ins->type = STACK_R8;
6490 } else {
6491 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6492 ins->type = STACK_R8;
6493 ins->dreg = alloc_dreg (cfg, STACK_R8);
6494 ins->inst_p0 = d;
6495 MONO_ADD_INS (bblock, ins);
6497 ++ip;
6498 readr8 (ip, d);
6499 ip += 8;
6500 *sp++ = ins;
6501 break;
6503 case CEE_DUP: {
6504 MonoInst *temp, *store;
6505 CHECK_STACK (1);
6506 CHECK_STACK_OVF (1);
6507 sp--;
6508 ins = *sp;
6510 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6511 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6513 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6514 *sp++ = ins;
6516 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6517 *sp++ = ins;
6519 ++ip;
6520 inline_costs += 2;
6521 break;
6523 case CEE_POP:
6524 CHECK_STACK (1);
6525 ip++;
6526 --sp;
6528 #ifdef TARGET_X86
6529 if (sp [0]->type == STACK_R8)
6530 /* we need to pop the value from the x86 FP stack */
6531 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6532 #endif
6533 break;
6534 case CEE_JMP: {
6535 MonoCallInst *call;
6537 INLINE_FAILURE;
6539 CHECK_OPSIZE (5);
6540 if (stack_start != sp)
6541 UNVERIFIED;
6542 token = read32 (ip + 1);
6543 /* FIXME: check the signature matches */
6544 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6546 if (!cmethod || mono_loader_get_last_error ())
6547 LOAD_ERROR;
6549 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6550 GENERIC_SHARING_FAILURE (CEE_JMP);
6552 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6553 CHECK_CFG_EXCEPTION;
6555 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6557 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6558 int i, n;
6560 /* Handle tail calls similarly to calls */
6561 n = fsig->param_count + fsig->hasthis;
6563 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6564 call->method = cmethod;
6565 call->tail_call = TRUE;
6566 call->signature = mono_method_signature (cmethod);
6567 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6568 call->inst.inst_p0 = cmethod;
6569 for (i = 0; i < n; ++i)
6570 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6572 mono_arch_emit_call (cfg, call);
6573 MONO_ADD_INS (bblock, (MonoInst*)call);
6575 #else
6576 for (i = 0; i < num_args; ++i)
6577 /* Prevent arguments from being optimized away */
6578 arg_array [i]->flags |= MONO_INST_VOLATILE;
6580 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6581 ins = (MonoInst*)call;
6582 ins->inst_p0 = cmethod;
6583 MONO_ADD_INS (bblock, ins);
6584 #endif
6586 ip += 5;
6587 start_new_bblock = 1;
6588 break;
6590 case CEE_CALLI:
6591 case CEE_CALL:
6592 case CEE_CALLVIRT: {
6593 MonoInst *addr = NULL;
6594 MonoMethodSignature *fsig = NULL;
6595 int array_rank = 0;
6596 int virtual = *ip == CEE_CALLVIRT;
6597 int calli = *ip == CEE_CALLI;
6598 gboolean pass_imt_from_rgctx = FALSE;
6599 MonoInst *imt_arg = NULL;
6600 gboolean pass_vtable = FALSE;
6601 gboolean pass_mrgctx = FALSE;
6602 MonoInst *vtable_arg = NULL;
6603 gboolean check_this = FALSE;
6604 gboolean supported_tail_call = FALSE;
6606 CHECK_OPSIZE (5);
6607 token = read32 (ip + 1);
6609 if (calli) {
6610 cmethod = NULL;
6611 CHECK_STACK (1);
6612 --sp;
6613 addr = *sp;
6614 if (method->wrapper_type != MONO_WRAPPER_NONE)
6615 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6616 else
6617 fsig = mono_metadata_parse_signature (image, token);
6619 n = fsig->param_count + fsig->hasthis;
6621 if (method->dynamic && fsig->pinvoke) {
6622 MonoInst *args [3];
6625 * This is a call through a function pointer using a pinvoke
6626 * signature. Have to create a wrapper and call that instead.
6627 * FIXME: This is very slow, need to create a wrapper at JIT time
6628 * instead based on the signature.
6630 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6631 EMIT_NEW_PCONST (cfg, args [1], fsig);
6632 args [2] = addr;
6633 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6635 } else {
6636 MonoMethod *cil_method;
6638 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6639 if (constrained_call && cfg->verbose_level > 2)
6640 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6641 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6642 cil_method = cmethod;
6643 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6644 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6645 cfg->generic_sharing_context)) {
6646 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6648 } else if (constrained_call) {
6649 if (cfg->verbose_level > 2)
6650 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6652 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6654 * This is needed since get_method_constrained can't find
6655 * the method in klass representing a type var.
6656 * The type var is guaranteed to be a reference type in this
6657 * case.
6659 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6660 cil_method = cmethod;
6661 g_assert (!cmethod->klass->valuetype);
6662 } else {
6663 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6665 } else {
6666 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6667 cil_method = cmethod;
6670 if (!cmethod || mono_loader_get_last_error ())
6671 LOAD_ERROR;
6672 if (!dont_verify && !cfg->skip_visibility) {
6673 MonoMethod *target_method = cil_method;
6674 if (method->is_inflated) {
6675 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6677 if (!mono_method_can_access_method (method_definition, target_method) &&
6678 !mono_method_can_access_method (method, cil_method))
6679 METHOD_ACCESS_FAILURE;
6682 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6683 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6685 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6686 /* MS.NET seems to silently convert this to a callvirt */
6687 virtual = 1;
6691 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6692 * converts to a callvirt.
6694 * tests/bug-515884.il is an example of this behavior
6696 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6697 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6698 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6699 virtual = 1;
6702 if (!cmethod->klass->inited)
6703 if (!mono_class_init (cmethod->klass))
6704 LOAD_ERROR;
6706 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6707 mini_class_is_system_array (cmethod->klass)) {
6708 array_rank = cmethod->klass->rank;
6709 fsig = mono_method_signature (cmethod);
6710 } else {
6711 fsig = mono_method_signature (cmethod);
6713 if (!fsig)
6714 LOAD_ERROR;
6716 if (fsig->pinvoke) {
6717 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6718 check_for_pending_exc, FALSE);
6719 fsig = mono_method_signature (wrapper);
6720 } else if (constrained_call) {
6721 fsig = mono_method_signature (cmethod);
6722 } else {
6723 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6727 mono_save_token_info (cfg, image, token, cil_method);
6729 n = fsig->param_count + fsig->hasthis;
6731 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6732 if (check_linkdemand (cfg, method, cmethod))
6733 INLINE_FAILURE;
6734 CHECK_CFG_EXCEPTION;
6737 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6738 g_assert_not_reached ();
6741 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6742 UNVERIFIED;
6744 if (!cfg->generic_sharing_context && cmethod)
6745 g_assert (!mono_method_check_context_used (cmethod));
6747 CHECK_STACK (n);
6749 //g_assert (!virtual || fsig->hasthis);
6751 sp -= n;
6753 if (constrained_call) {
6755 * We have the `constrained.' prefix opcode.
6757 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6759 * The type parameter is instantiated as a valuetype,
6760 * but that type doesn't override the method we're
6761 * calling, so we need to box `this'.
6763 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6764 ins->klass = constrained_call;
6765 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6766 CHECK_CFG_EXCEPTION;
6767 } else if (!constrained_call->valuetype) {
6768 int dreg = alloc_ireg_ref (cfg);
6771 * The type parameter is instantiated as a reference
6772 * type. We have a managed pointer on the stack, so
6773 * we need to dereference it here.
6775 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6776 ins->type = STACK_OBJ;
6777 sp [0] = ins;
6778 } else if (cmethod->klass->valuetype)
6779 virtual = 0;
6780 constrained_call = NULL;
6783 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6784 UNVERIFIED;
6787 * If the callee is a shared method, then its static cctor
6788 * might not get called after the call was patched.
6790 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6791 emit_generic_class_init (cfg, cmethod->klass);
6792 CHECK_TYPELOAD (cmethod->klass);
6795 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6796 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6797 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6798 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6799 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6802 * Pass vtable iff target method might
6803 * be shared, which means that sharing
6804 * is enabled for its class and its
6805 * context is sharable (and it's not a
6806 * generic method).
6808 if (sharing_enabled && context_sharable &&
6809 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6810 pass_vtable = TRUE;
6813 if (cmethod && mini_method_get_context (cmethod) &&
6814 mini_method_get_context (cmethod)->method_inst) {
6815 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6816 MonoGenericContext *context = mini_method_get_context (cmethod);
6817 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6819 g_assert (!pass_vtable);
6821 if (sharing_enabled && context_sharable)
6822 pass_mrgctx = TRUE;
6825 if (cfg->generic_sharing_context && cmethod) {
6826 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6828 context_used = mono_method_check_context_used (cmethod);
6830 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6831 /* Generic method interface
6832 calls are resolved via a
6833 helper function and don't
6834 need an imt. */
6835 if (!cmethod_context || !cmethod_context->method_inst)
6836 pass_imt_from_rgctx = TRUE;
6840 * If a shared method calls another
6841 * shared method then the caller must
6842 * have a generic sharing context
6843 * because the magic trampoline
6844 * requires it. FIXME: We shouldn't
6845 * have to force the vtable/mrgctx
6846 * variable here. Instead there
6847 * should be a flag in the cfg to
6848 * request a generic sharing context.
6850 if (context_used &&
6851 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6852 mono_get_vtable_var (cfg);
6855 if (pass_vtable) {
6856 if (context_used) {
6857 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6858 } else {
6859 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6861 CHECK_TYPELOAD (cmethod->klass);
6862 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6866 if (pass_mrgctx) {
6867 g_assert (!vtable_arg);
6869 if (!cfg->compile_aot) {
6871 * emit_get_rgctx_method () calls mono_class_vtable () so check
6872 * for type load errors before.
6874 mono_class_setup_vtable (cmethod->klass);
6875 CHECK_TYPELOAD (cmethod->klass);
6878 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6880 /* !marshalbyref is needed to properly handle generic methods + remoting */
6881 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6882 MONO_METHOD_IS_FINAL (cmethod)) &&
6883 !cmethod->klass->marshalbyref) {
6884 if (virtual)
6885 check_this = TRUE;
6886 virtual = 0;
6890 if (pass_imt_from_rgctx) {
6891 g_assert (!pass_vtable);
6892 g_assert (cmethod);
6894 imt_arg = emit_get_rgctx_method (cfg, context_used,
6895 cmethod, MONO_RGCTX_INFO_METHOD);
6898 if (check_this)
6899 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6901 /* Calling virtual generic methods */
6902 if (cmethod && virtual &&
6903 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6904 !(MONO_METHOD_IS_FINAL (cmethod) &&
6905 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6906 mono_method_signature (cmethod)->generic_param_count) {
6907 MonoInst *this_temp, *this_arg_temp, *store;
6908 MonoInst *iargs [4];
6910 g_assert (mono_method_signature (cmethod)->is_inflated);
6912 /* Prevent inlining of methods that contain indirect calls */
6913 INLINE_FAILURE;
6915 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6916 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6917 g_assert (!imt_arg);
6918 if (!context_used)
6919 g_assert (cmethod->is_inflated);
6920 imt_arg = emit_get_rgctx_method (cfg, context_used,
6921 cmethod, MONO_RGCTX_INFO_METHOD);
6922 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6923 } else
6924 #endif
6926 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6927 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6928 MONO_ADD_INS (bblock, store);
6930 /* FIXME: This should be a managed pointer */
6931 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6933 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6934 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6935 cmethod, MONO_RGCTX_INFO_METHOD);
6936 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6937 addr = mono_emit_jit_icall (cfg,
6938 mono_helper_compile_generic_method, iargs);
6940 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6942 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6945 if (!MONO_TYPE_IS_VOID (fsig->ret))
6946 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6948 CHECK_CFG_EXCEPTION;
6950 ip += 5;
6951 ins_flag = 0;
6952 break;
6956 * Implement a workaround for the inherent races involved in locking:
6957 * Monitor.Enter ()
6958 * try {
6959 * } finally {
6960 * Monitor.Exit ()
6962 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6963 * try block, the Exit () won't be executed, see:
6964 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6965 * To work around this, we extend such try blocks to include the last x bytes
6966 * of the Monitor.Enter () call.
6968 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6969 MonoBasicBlock *tbb;
6971 GET_BBLOCK (cfg, tbb, ip + 5);
6973 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6974 * from Monitor.Enter like ArgumentNullException.
6976 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6977 /* Mark this bblock as needing to be extended */
6978 tbb->extend_try_block = TRUE;
6982 /* Conversion to a JIT intrinsic */
6983 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6984 bblock = cfg->cbb;
6985 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6986 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6987 *sp = ins;
6988 sp++;
6991 CHECK_CFG_EXCEPTION;
6993 ip += 5;
6994 ins_flag = 0;
6995 break;
6998 /* Inlining */
6999 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7000 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7001 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7002 !g_list_find (dont_inline, cmethod)) {
7003 int costs;
7004 gboolean always = FALSE;
7006 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7007 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7008 /* Prevent inlining of methods that call wrappers */
7009 INLINE_FAILURE;
7010 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7011 always = TRUE;
7014 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7015 ip += 5;
7016 cfg->real_offset += 5;
7017 bblock = cfg->cbb;
7019 if (!MONO_TYPE_IS_VOID (fsig->ret))
7020 /* *sp is already set by inline_method */
7021 sp++;
7023 inline_costs += costs;
7024 ins_flag = 0;
7025 break;
7029 inline_costs += 10 * num_calls++;
7031 /* Tail recursion elimination */
7032 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7033 gboolean has_vtargs = FALSE;
7034 int i;
7036 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7037 INLINE_FAILURE;
7039 /* keep it simple */
7040 for (i = fsig->param_count - 1; i >= 0; i--) {
7041 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7042 has_vtargs = TRUE;
7045 if (!has_vtargs) {
7046 for (i = 0; i < n; ++i)
7047 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7048 MONO_INST_NEW (cfg, ins, OP_BR);
7049 MONO_ADD_INS (bblock, ins);
7050 tblock = start_bblock->out_bb [0];
7051 link_bblock (cfg, bblock, tblock);
7052 ins->inst_target_bb = tblock;
7053 start_new_bblock = 1;
7055 /* skip the CEE_RET, too */
7056 if (ip_in_bb (cfg, bblock, ip + 5))
7057 ip += 6;
7058 else
7059 ip += 5;
7061 ins_flag = 0;
7062 break;
7066 /* Generic sharing */
7067 /* FIXME: only do this for generic methods if
7068 they are not shared! */
7069 if (context_used && !imt_arg && !array_rank &&
7070 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7071 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7072 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7073 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7074 INLINE_FAILURE;
7076 g_assert (cfg->generic_sharing_context && cmethod);
7077 g_assert (!addr);
7080 * We are compiling a call to a
7081 * generic method from shared code,
7082 * which means that we have to look up
7083 * the method in the rgctx and do an
7084 * indirect call.
7086 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7089 /* Indirect calls */
7090 if (addr) {
7091 g_assert (!imt_arg);
7093 if (*ip == CEE_CALL)
7094 g_assert (context_used);
7095 else if (*ip == CEE_CALLI)
7096 g_assert (!vtable_arg);
7097 else
7098 /* FIXME: what the hell is this??? */
7099 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7100 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7102 /* Prevent inlining of methods with indirect calls */
7103 INLINE_FAILURE;
7105 if (vtable_arg) {
7106 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7107 } else {
7108 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7110 * Instead of emitting an indirect call, emit a direct call
7111 * with the contents of the aotconst as the patch info.
7113 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7114 NULLIFY_INS (addr);
7115 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7116 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7117 NULLIFY_INS (addr);
7118 } else {
7119 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7122 if (!MONO_TYPE_IS_VOID (fsig->ret))
7123 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7125 CHECK_CFG_EXCEPTION;
7127 ip += 5;
7128 ins_flag = 0;
7129 break;
7132 /* Array methods */
7133 if (array_rank) {
7134 MonoInst *addr;
7136 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7137 MonoInst *val = sp [fsig->param_count];
7139 if (val->type == STACK_OBJ) {
7140 MonoInst *iargs [2];
7142 iargs [0] = sp [0];
7143 iargs [1] = val;
7145 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7148 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7149 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7150 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7151 emit_write_barrier (cfg, addr, val, 0);
7152 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7153 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7155 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7157 *sp++ = ins;
7158 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7159 if (!cmethod->klass->element_class->valuetype && !readonly)
7160 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7161 CHECK_TYPELOAD (cmethod->klass);
7163 readonly = FALSE;
7164 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7165 *sp++ = addr;
7166 } else {
7167 g_assert_not_reached ();
7170 CHECK_CFG_EXCEPTION;
7172 ip += 5;
7173 ins_flag = 0;
7174 break;
7177 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7178 if (ins) {
7179 if (!MONO_TYPE_IS_VOID (fsig->ret))
7180 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7182 CHECK_CFG_EXCEPTION;
7184 ip += 5;
7185 ins_flag = 0;
7186 break;
7189 /* Tail prefix / tail call optimization */
7191 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7192 /* FIXME: runtime generic context pointer for jumps? */
7193 /* FIXME: handle this for generic sharing eventually */
7194 supported_tail_call = cmethod &&
7195 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7196 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7197 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7199 if (supported_tail_call) {
7200 MonoCallInst *call;
7202 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7203 INLINE_FAILURE;
7205 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7207 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7208 /* Handle tail calls similarly to calls */
7209 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7210 #else
7211 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7212 call->tail_call = TRUE;
7213 call->method = cmethod;
7214 call->signature = mono_method_signature (cmethod);
7217 * We implement tail calls by storing the actual arguments into the
7218 * argument variables, then emitting a CEE_JMP.
7220 for (i = 0; i < n; ++i) {
7221 /* Prevent argument from being register allocated */
7222 arg_array [i]->flags |= MONO_INST_VOLATILE;
7223 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7225 #endif
7227 ins = (MonoInst*)call;
7228 ins->inst_p0 = cmethod;
7229 ins->inst_p1 = arg_array [0];
7230 MONO_ADD_INS (bblock, ins);
7231 link_bblock (cfg, bblock, end_bblock);
7232 start_new_bblock = 1;
7234 CHECK_CFG_EXCEPTION;
7236 ip += 5;
7237 ins_flag = 0;
7239 // FIXME: Eliminate unreachable epilogs
7242 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7243 * only reachable from this call.
7245 GET_BBLOCK (cfg, tblock, ip);
7246 if (tblock == bblock || tblock->in_count == 0)
7247 ip += 1;
7248 break;
7251 /* Common call */
7252 INLINE_FAILURE;
7253 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7254 imt_arg, vtable_arg);
7256 if (!MONO_TYPE_IS_VOID (fsig->ret))
7257 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7259 CHECK_CFG_EXCEPTION;
7261 ip += 5;
7262 ins_flag = 0;
7263 break;
7265 case CEE_RET:
7266 if (cfg->method != method) {
7267 /* return from inlined method */
7269 * If in_count == 0, that means the ret is unreachable due to
7270 * being preceeded by a throw. In that case, inline_method () will
7271 * handle setting the return value
7272 * (test case: test_0_inline_throw ()).
7274 if (return_var && cfg->cbb->in_count) {
7275 MonoInst *store;
7276 CHECK_STACK (1);
7277 --sp;
7278 //g_assert (returnvar != -1);
7279 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7280 cfg->ret_var_set = TRUE;
7282 } else {
7283 if (cfg->ret) {
7284 MonoType *ret_type = mono_method_signature (method)->ret;
7286 if (seq_points) {
7288 * Place a seq point here too even through the IL stack is not
7289 * empty, so a step over on
7290 * call <FOO>
7291 * ret
7292 * will work correctly.
7294 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7295 MONO_ADD_INS (cfg->cbb, ins);
7298 g_assert (!return_var);
7299 CHECK_STACK (1);
7300 --sp;
7302 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7303 UNVERIFIED;
7305 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7306 MonoInst *ret_addr;
7308 if (!cfg->vret_addr) {
7309 MonoInst *ins;
7311 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7312 } else {
7313 EMIT_NEW_RETLOADA (cfg, ret_addr);
7315 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7316 ins->klass = mono_class_from_mono_type (ret_type);
7318 } else {
7319 #ifdef MONO_ARCH_SOFT_FLOAT
7320 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7321 MonoInst *iargs [1];
7322 MonoInst *conv;
7324 iargs [0] = *sp;
7325 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7326 mono_arch_emit_setret (cfg, method, conv);
7327 } else {
7328 mono_arch_emit_setret (cfg, method, *sp);
7330 #else
7331 mono_arch_emit_setret (cfg, method, *sp);
7332 #endif
7336 if (sp != stack_start)
7337 UNVERIFIED;
7338 MONO_INST_NEW (cfg, ins, OP_BR);
7339 ip++;
7340 ins->inst_target_bb = end_bblock;
7341 MONO_ADD_INS (bblock, ins);
7342 link_bblock (cfg, bblock, end_bblock);
7343 start_new_bblock = 1;
7344 break;
7345 case CEE_BR_S:
7346 CHECK_OPSIZE (2);
7347 MONO_INST_NEW (cfg, ins, OP_BR);
7348 ip++;
7349 target = ip + 1 + (signed char)(*ip);
7350 ++ip;
7351 GET_BBLOCK (cfg, tblock, target);
7352 link_bblock (cfg, bblock, tblock);
7353 ins->inst_target_bb = tblock;
7354 if (sp != stack_start) {
7355 handle_stack_args (cfg, stack_start, sp - stack_start);
7356 sp = stack_start;
7357 CHECK_UNVERIFIABLE (cfg);
7359 MONO_ADD_INS (bblock, ins);
7360 start_new_bblock = 1;
7361 inline_costs += BRANCH_COST;
7362 break;
7363 case CEE_BEQ_S:
7364 case CEE_BGE_S:
7365 case CEE_BGT_S:
7366 case CEE_BLE_S:
7367 case CEE_BLT_S:
7368 case CEE_BNE_UN_S:
7369 case CEE_BGE_UN_S:
7370 case CEE_BGT_UN_S:
7371 case CEE_BLE_UN_S:
7372 case CEE_BLT_UN_S:
7373 CHECK_OPSIZE (2);
7374 CHECK_STACK (2);
7375 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7376 ip++;
7377 target = ip + 1 + *(signed char*)ip;
7378 ip++;
7380 ADD_BINCOND (NULL);
7382 sp = stack_start;
7383 inline_costs += BRANCH_COST;
7384 break;
7385 case CEE_BR:
7386 CHECK_OPSIZE (5);
7387 MONO_INST_NEW (cfg, ins, OP_BR);
7388 ip++;
7390 target = ip + 4 + (gint32)read32(ip);
7391 ip += 4;
7392 GET_BBLOCK (cfg, tblock, target);
7393 link_bblock (cfg, bblock, tblock);
7394 ins->inst_target_bb = tblock;
7395 if (sp != stack_start) {
7396 handle_stack_args (cfg, stack_start, sp - stack_start);
7397 sp = stack_start;
7398 CHECK_UNVERIFIABLE (cfg);
7401 MONO_ADD_INS (bblock, ins);
7403 start_new_bblock = 1;
7404 inline_costs += BRANCH_COST;
7405 break;
7406 case CEE_BRFALSE_S:
7407 case CEE_BRTRUE_S:
7408 case CEE_BRFALSE:
7409 case CEE_BRTRUE: {
7410 MonoInst *cmp;
7411 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7412 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7413 guint32 opsize = is_short ? 1 : 4;
7415 CHECK_OPSIZE (opsize);
7416 CHECK_STACK (1);
7417 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7418 UNVERIFIED;
7419 ip ++;
7420 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7421 ip += opsize;
7423 sp--;
7425 GET_BBLOCK (cfg, tblock, target);
7426 link_bblock (cfg, bblock, tblock);
7427 GET_BBLOCK (cfg, tblock, ip);
7428 link_bblock (cfg, bblock, tblock);
7430 if (sp != stack_start) {
7431 handle_stack_args (cfg, stack_start, sp - stack_start);
7432 CHECK_UNVERIFIABLE (cfg);
7435 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7436 cmp->sreg1 = sp [0]->dreg;
7437 type_from_op (cmp, sp [0], NULL);
7438 CHECK_TYPE (cmp);
7440 #if SIZEOF_REGISTER == 4
7441 if (cmp->opcode == OP_LCOMPARE_IMM) {
7442 /* Convert it to OP_LCOMPARE */
7443 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7444 ins->type = STACK_I8;
7445 ins->dreg = alloc_dreg (cfg, STACK_I8);
7446 ins->inst_l = 0;
7447 MONO_ADD_INS (bblock, ins);
7448 cmp->opcode = OP_LCOMPARE;
7449 cmp->sreg2 = ins->dreg;
7451 #endif
7452 MONO_ADD_INS (bblock, cmp);
7454 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7455 type_from_op (ins, sp [0], NULL);
7456 MONO_ADD_INS (bblock, ins);
7457 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7458 GET_BBLOCK (cfg, tblock, target);
7459 ins->inst_true_bb = tblock;
7460 GET_BBLOCK (cfg, tblock, ip);
7461 ins->inst_false_bb = tblock;
7462 start_new_bblock = 2;
7464 sp = stack_start;
7465 inline_costs += BRANCH_COST;
7466 break;
7468 case CEE_BEQ:
7469 case CEE_BGE:
7470 case CEE_BGT:
7471 case CEE_BLE:
7472 case CEE_BLT:
7473 case CEE_BNE_UN:
7474 case CEE_BGE_UN:
7475 case CEE_BGT_UN:
7476 case CEE_BLE_UN:
7477 case CEE_BLT_UN:
7478 CHECK_OPSIZE (5);
7479 CHECK_STACK (2);
7480 MONO_INST_NEW (cfg, ins, *ip);
7481 ip++;
7482 target = ip + 4 + (gint32)read32(ip);
7483 ip += 4;
7485 ADD_BINCOND (NULL);
7487 sp = stack_start;
7488 inline_costs += BRANCH_COST;
7489 break;
7490 case CEE_SWITCH: {
7491 MonoInst *src1;
7492 MonoBasicBlock **targets;
7493 MonoBasicBlock *default_bblock;
7494 MonoJumpInfoBBTable *table;
7495 int offset_reg = alloc_preg (cfg);
7496 int target_reg = alloc_preg (cfg);
7497 int table_reg = alloc_preg (cfg);
7498 int sum_reg = alloc_preg (cfg);
7499 gboolean use_op_switch;
7501 CHECK_OPSIZE (5);
7502 CHECK_STACK (1);
7503 n = read32 (ip + 1);
7504 --sp;
7505 src1 = sp [0];
7506 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7507 UNVERIFIED;
7509 ip += 5;
7510 CHECK_OPSIZE (n * sizeof (guint32));
7511 target = ip + n * sizeof (guint32);
7513 GET_BBLOCK (cfg, default_bblock, target);
7514 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7516 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7517 for (i = 0; i < n; ++i) {
7518 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7519 targets [i] = tblock;
7520 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7521 ip += 4;
7524 if (sp != stack_start) {
7526 * Link the current bb with the targets as well, so handle_stack_args
7527 * will set their in_stack correctly.
7529 link_bblock (cfg, bblock, default_bblock);
7530 for (i = 0; i < n; ++i)
7531 link_bblock (cfg, bblock, targets [i]);
7533 handle_stack_args (cfg, stack_start, sp - stack_start);
7534 sp = stack_start;
7535 CHECK_UNVERIFIABLE (cfg);
7538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7540 bblock = cfg->cbb;
7542 for (i = 0; i < n; ++i)
7543 link_bblock (cfg, bblock, targets [i]);
7545 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7546 table->table = targets;
7547 table->table_size = n;
7549 use_op_switch = FALSE;
7550 #ifdef TARGET_ARM
7551 /* ARM implements SWITCH statements differently */
7552 /* FIXME: Make it use the generic implementation */
7553 if (!cfg->compile_aot)
7554 use_op_switch = TRUE;
7555 #endif
7557 if (COMPILE_LLVM (cfg))
7558 use_op_switch = TRUE;
7560 cfg->cbb->has_jump_table = 1;
7562 if (use_op_switch) {
7563 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7564 ins->sreg1 = src1->dreg;
7565 ins->inst_p0 = table;
7566 ins->inst_many_bb = targets;
7567 ins->klass = GUINT_TO_POINTER (n);
7568 MONO_ADD_INS (cfg->cbb, ins);
7569 } else {
7570 if (sizeof (gpointer) == 8)
7571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7572 else
7573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7575 #if SIZEOF_REGISTER == 8
7576 /* The upper word might not be zero, and we add it to a 64 bit address later */
7577 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7578 #endif
7580 if (cfg->compile_aot) {
7581 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7582 } else {
7583 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7584 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7585 ins->inst_p0 = table;
7586 ins->dreg = table_reg;
7587 MONO_ADD_INS (cfg->cbb, ins);
7590 /* FIXME: Use load_memindex */
7591 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7593 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7595 start_new_bblock = 1;
7596 inline_costs += (BRANCH_COST * 2);
7597 break;
7599 case CEE_LDIND_I1:
7600 case CEE_LDIND_U1:
7601 case CEE_LDIND_I2:
7602 case CEE_LDIND_U2:
7603 case CEE_LDIND_I4:
7604 case CEE_LDIND_U4:
7605 case CEE_LDIND_I8:
7606 case CEE_LDIND_I:
7607 case CEE_LDIND_R4:
7608 case CEE_LDIND_R8:
7609 case CEE_LDIND_REF:
7610 CHECK_STACK (1);
7611 --sp;
7613 switch (*ip) {
7614 case CEE_LDIND_R4:
7615 case CEE_LDIND_R8:
7616 dreg = alloc_freg (cfg);
7617 break;
7618 case CEE_LDIND_I8:
7619 dreg = alloc_lreg (cfg);
7620 break;
7621 case CEE_LDIND_REF:
7622 dreg = alloc_ireg_ref (cfg);
7623 break;
7624 default:
7625 dreg = alloc_preg (cfg);
7628 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7629 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7630 ins->flags |= ins_flag;
7631 ins_flag = 0;
7632 MONO_ADD_INS (bblock, ins);
7633 *sp++ = ins;
7634 ++ip;
7635 break;
7636 case CEE_STIND_REF:
7637 case CEE_STIND_I1:
7638 case CEE_STIND_I2:
7639 case CEE_STIND_I4:
7640 case CEE_STIND_I8:
7641 case CEE_STIND_R4:
7642 case CEE_STIND_R8:
7643 case CEE_STIND_I:
7644 CHECK_STACK (2);
7645 sp -= 2;
7647 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7648 ins->flags |= ins_flag;
7649 ins_flag = 0;
7650 MONO_ADD_INS (bblock, ins);
7652 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7653 emit_write_barrier (cfg, sp [0], sp [1], -1);
7655 inline_costs += 1;
7656 ++ip;
7657 break;
7659 case CEE_MUL:
7660 CHECK_STACK (2);
7662 MONO_INST_NEW (cfg, ins, (*ip));
7663 sp -= 2;
7664 ins->sreg1 = sp [0]->dreg;
7665 ins->sreg2 = sp [1]->dreg;
7666 type_from_op (ins, sp [0], sp [1]);
7667 CHECK_TYPE (ins);
7668 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7670 /* Use the immediate opcodes if possible */
7671 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7672 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7673 if (imm_opcode != -1) {
7674 ins->opcode = imm_opcode;
7675 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7676 ins->sreg2 = -1;
7678 sp [1]->opcode = OP_NOP;
7682 MONO_ADD_INS ((cfg)->cbb, (ins));
7684 *sp++ = mono_decompose_opcode (cfg, ins);
7685 ip++;
7686 break;
7687 case CEE_ADD:
7688 case CEE_SUB:
7689 case CEE_DIV:
7690 case CEE_DIV_UN:
7691 case CEE_REM:
7692 case CEE_REM_UN:
7693 case CEE_AND:
7694 case CEE_OR:
7695 case CEE_XOR:
7696 case CEE_SHL:
7697 case CEE_SHR:
7698 case CEE_SHR_UN:
7699 CHECK_STACK (2);
7701 MONO_INST_NEW (cfg, ins, (*ip));
7702 sp -= 2;
7703 ins->sreg1 = sp [0]->dreg;
7704 ins->sreg2 = sp [1]->dreg;
7705 type_from_op (ins, sp [0], sp [1]);
7706 CHECK_TYPE (ins);
7707 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7708 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7710 /* FIXME: Pass opcode to is_inst_imm */
7712 /* Use the immediate opcodes if possible */
7713 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7714 int imm_opcode;
7716 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7717 if (imm_opcode != -1) {
7718 ins->opcode = imm_opcode;
7719 if (sp [1]->opcode == OP_I8CONST) {
7720 #if SIZEOF_REGISTER == 8
7721 ins->inst_imm = sp [1]->inst_l;
7722 #else
7723 ins->inst_ls_word = sp [1]->inst_ls_word;
7724 ins->inst_ms_word = sp [1]->inst_ms_word;
7725 #endif
7727 else
7728 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7729 ins->sreg2 = -1;
7731 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7732 if (sp [1]->next == NULL)
7733 sp [1]->opcode = OP_NOP;
7736 MONO_ADD_INS ((cfg)->cbb, (ins));
7738 *sp++ = mono_decompose_opcode (cfg, ins);
7739 ip++;
7740 break;
7741 case CEE_NEG:
7742 case CEE_NOT:
7743 case CEE_CONV_I1:
7744 case CEE_CONV_I2:
7745 case CEE_CONV_I4:
7746 case CEE_CONV_R4:
7747 case CEE_CONV_R8:
7748 case CEE_CONV_U4:
7749 case CEE_CONV_I8:
7750 case CEE_CONV_U8:
7751 case CEE_CONV_OVF_I8:
7752 case CEE_CONV_OVF_U8:
7753 case CEE_CONV_R_UN:
7754 CHECK_STACK (1);
7756 /* Special case this earlier so we have long constants in the IR */
7757 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7758 int data = sp [-1]->inst_c0;
7759 sp [-1]->opcode = OP_I8CONST;
7760 sp [-1]->type = STACK_I8;
7761 #if SIZEOF_REGISTER == 8
7762 if ((*ip) == CEE_CONV_U8)
7763 sp [-1]->inst_c0 = (guint32)data;
7764 else
7765 sp [-1]->inst_c0 = data;
7766 #else
7767 sp [-1]->inst_ls_word = data;
7768 if ((*ip) == CEE_CONV_U8)
7769 sp [-1]->inst_ms_word = 0;
7770 else
7771 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7772 #endif
7773 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7775 else {
7776 ADD_UNOP (*ip);
7778 ip++;
7779 break;
7780 case CEE_CONV_OVF_I4:
7781 case CEE_CONV_OVF_I1:
7782 case CEE_CONV_OVF_I2:
7783 case CEE_CONV_OVF_I:
7784 case CEE_CONV_OVF_U:
7785 CHECK_STACK (1);
7787 if (sp [-1]->type == STACK_R8) {
7788 ADD_UNOP (CEE_CONV_OVF_I8);
7789 ADD_UNOP (*ip);
7790 } else {
7791 ADD_UNOP (*ip);
7793 ip++;
7794 break;
7795 case CEE_CONV_OVF_U1:
7796 case CEE_CONV_OVF_U2:
7797 case CEE_CONV_OVF_U4:
7798 CHECK_STACK (1);
7800 if (sp [-1]->type == STACK_R8) {
7801 ADD_UNOP (CEE_CONV_OVF_U8);
7802 ADD_UNOP (*ip);
7803 } else {
7804 ADD_UNOP (*ip);
7806 ip++;
7807 break;
7808 case CEE_CONV_OVF_I1_UN:
7809 case CEE_CONV_OVF_I2_UN:
7810 case CEE_CONV_OVF_I4_UN:
7811 case CEE_CONV_OVF_I8_UN:
7812 case CEE_CONV_OVF_U1_UN:
7813 case CEE_CONV_OVF_U2_UN:
7814 case CEE_CONV_OVF_U4_UN:
7815 case CEE_CONV_OVF_U8_UN:
7816 case CEE_CONV_OVF_I_UN:
7817 case CEE_CONV_OVF_U_UN:
7818 case CEE_CONV_U2:
7819 case CEE_CONV_U1:
7820 case CEE_CONV_I:
7821 case CEE_CONV_U:
7822 CHECK_STACK (1);
7823 ADD_UNOP (*ip);
7824 CHECK_CFG_EXCEPTION;
7825 ip++;
7826 break;
7827 case CEE_ADD_OVF:
7828 case CEE_ADD_OVF_UN:
7829 case CEE_MUL_OVF:
7830 case CEE_MUL_OVF_UN:
7831 case CEE_SUB_OVF:
7832 case CEE_SUB_OVF_UN:
7833 CHECK_STACK (2);
7834 ADD_BINOP (*ip);
7835 ip++;
7836 break;
7837 case CEE_CPOBJ:
7838 CHECK_OPSIZE (5);
7839 CHECK_STACK (2);
7840 token = read32 (ip + 1);
7841 klass = mini_get_class (method, token, generic_context);
7842 CHECK_TYPELOAD (klass);
7843 sp -= 2;
7844 if (generic_class_is_reference_type (cfg, klass)) {
7845 MonoInst *store, *load;
7846 int dreg = alloc_ireg_ref (cfg);
7848 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7849 load->flags |= ins_flag;
7850 MONO_ADD_INS (cfg->cbb, load);
7852 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7853 store->flags |= ins_flag;
7854 MONO_ADD_INS (cfg->cbb, store);
7856 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7857 emit_write_barrier (cfg, sp [0], sp [1], -1);
7858 } else {
7859 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7861 ins_flag = 0;
7862 ip += 5;
7863 break;
7864 case CEE_LDOBJ: {
7865 int loc_index = -1;
7866 int stloc_len = 0;
7868 CHECK_OPSIZE (5);
7869 CHECK_STACK (1);
7870 --sp;
7871 token = read32 (ip + 1);
7872 klass = mini_get_class (method, token, generic_context);
7873 CHECK_TYPELOAD (klass);
7875 /* Optimize the common ldobj+stloc combination */
7876 switch (ip [5]) {
7877 case CEE_STLOC_S:
7878 loc_index = ip [6];
7879 stloc_len = 2;
7880 break;
7881 case CEE_STLOC_0:
7882 case CEE_STLOC_1:
7883 case CEE_STLOC_2:
7884 case CEE_STLOC_3:
7885 loc_index = ip [5] - CEE_STLOC_0;
7886 stloc_len = 1;
7887 break;
7888 default:
7889 break;
7892 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7893 CHECK_LOCAL (loc_index);
7895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7896 ins->dreg = cfg->locals [loc_index]->dreg;
7897 ip += 5;
7898 ip += stloc_len;
7899 break;
7902 /* Optimize the ldobj+stobj combination */
7903 /* The reference case ends up being a load+store anyway */
7904 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7905 CHECK_STACK (1);
7907 sp --;
7909 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7911 ip += 5 + 5;
7912 ins_flag = 0;
7913 break;
7916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7917 *sp++ = ins;
7919 ip += 5;
7920 ins_flag = 0;
7921 inline_costs += 1;
7922 break;
/*
 * CEE_LDSTR: push a string literal. The emission strategy depends on
 * the compilation mode:
 *  - dynamic-method wrappers carry the MonoString directly in the
 *    wrapper data;
 *  - other wrappers carry a raw char* and build the string via icall;
 *  - MONO_OPT_SHARED / out-of-line blocks resolve the token at run time
 *    through ldstr helpers (avoids per-domain constants);
 *  - AOT emits a patchable constant; plain JIT interns the string at
 *    compile time and embeds the pointer.
 */
7924 case CEE_LDSTR:
7925 CHECK_STACK_OVF (1);
7926 CHECK_OPSIZE (5);
7927 n = read32 (ip + 1);
7929 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
/* Wrapper data already holds the MonoString object. */
7930 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7931 ins->type = STACK_OBJ;
7932 *sp = ins;
7934 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7935 MonoInst *iargs [1];
/* Wrapper data holds a raw string; construct a MonoString at run time. */
7937 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7938 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7939 } else {
7940 if (cfg->opt & MONO_OPT_SHARED) {
7941 MonoInst *iargs [3];
7943 if (cfg->compile_aot) {
/* Remember the token so the AOT compiler can emit its image data. */
7944 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7946 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7947 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7948 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7949 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
/* Intern eagerly at compile time too - presumably to surface OOM /
 * interning errors early; TODO confirm intent. */
7950 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7951 } else {
7952 if (bblock->out_of_line) {
7953 MonoInst *iargs [2];
7955 if (image == mono_defaults.corlib) {
7957 * Avoid relocations in AOT and save some space by using a
7958 * version of helper_ldstr specialized to mscorlib.
7960 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7961 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7962 } else {
7963 /* Avoid creating the string object */
7964 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7965 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7966 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7969 else
7970 if (cfg->compile_aot) {
/* AOT: emit a constant patched to the interned string at load time. */
7971 NEW_LDSTRCONST (cfg, ins, image, n);
7972 *sp = ins;
7973 MONO_ADD_INS (bblock, ins);
7975 else {
/* Plain JIT: intern now and bake the object pointer into the code. */
7976 NEW_PCONST (cfg, ins, NULL);
7977 ins->type = STACK_OBJ;
7978 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7979 if (!ins->inst_p0)
7980 OUT_OF_MEMORY_FAILURE;
7982 *sp = ins;
7983 MONO_ADD_INS (bblock, ins);
7988 sp++;
7989 ip += 5;
7990 break;
/*
 * CEE_NEWOBJ: allocate an object and invoke its constructor.
 * Special paths: a compact icall form for corlib exception ctors in
 * out-of-line (cold) blocks, icalls for System.Array ctors, a NULL
 * 'this' for string ctors, stack-allocated temp for value types, and
 * rgctx-based allocation/invocation under generic sharing. The ctor
 * call itself may be intrinsified, inlined, or emitted as a direct or
 * indirect (calli) call.
 */
7991 case CEE_NEWOBJ: {
7992 MonoInst *iargs [2];
7993 MonoMethodSignature *fsig;
7994 MonoInst this_ins;
7995 MonoInst *alloc;
7996 MonoInst *vtable_arg = NULL;
7998 CHECK_OPSIZE (5);
7999 token = read32 (ip + 1);
8000 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8001 if (!cmethod || mono_loader_get_last_error ())
8002 LOAD_ERROR;
8003 fsig = mono_method_get_signature (cmethod, image, token);
8004 if (!fsig)
8005 LOAD_ERROR;
8007 mono_save_token_info (cfg, image, token, cmethod);
8009 if (!mono_class_init (cmethod->klass))
8010 LOAD_ERROR;
8012 if (cfg->generic_sharing_context)
8013 context_used = mono_method_check_context_used (cmethod);
/* CAS / CoreCLR security checks for calling the ctor. */
8015 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8016 if (check_linkdemand (cfg, method, cmethod))
8017 INLINE_FAILURE;
8018 CHECK_CFG_EXCEPTION;
8019 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8020 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* Shared generic code may need to run the target class cctor first. */
8023 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8024 emit_generic_class_init (cfg, cmethod->klass);
8025 CHECK_TYPELOAD (cmethod->klass);
/* Shared valuetype ctors receive an extra hidden vtable/rgctx arg. */
8028 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8029 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8030 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8031 mono_class_vtable (cfg->domain, cmethod->klass);
8032 CHECK_TYPELOAD (cmethod->klass);
8034 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8035 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8036 } else {
8037 if (context_used) {
8038 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8039 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8040 } else {
8041 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8043 CHECK_TYPELOAD (cmethod->klass);
8044 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8049 n = fsig->param_count;
8050 CHECK_STACK (n);
8053 * Generate smaller code for the common newobj <exception> instruction in
8054 * argument checking code.
/* Cold-path corlib exception with 0-2 string args: one icall replaces
 * alloc + ctor call. */
8056 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8057 is_exception_class (cmethod->klass) && n <= 2 &&
8058 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8059 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8060 MonoInst *iargs [3];
8062 g_assert (!vtable_arg);
8064 sp -= n;
8066 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8067 switch (n) {
8068 case 0:
8069 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8070 break;
8071 case 1:
8072 iargs [1] = sp [0];
8073 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8074 break;
8075 case 2:
8076 iargs [1] = sp [0];
8077 iargs [2] = sp [1];
8078 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8079 break;
8080 default:
8081 g_assert_not_reached ();
8084 ip += 5;
8085 inline_costs += 5;
8086 break;
8089 /* move the args to allow room for 'this' in the first position */
8090 while (n--) {
8091 --sp;
8092 sp [1] = sp [0];
8095 /* check_call_signature () requires sp[0] to be set */
/* this_ins is a stack dummy standing in for the not-yet-allocated
 * receiver; it is never emitted. */
8096 this_ins.type = STACK_OBJ;
8097 sp [0] = &this_ins;
8098 if (check_call_signature (cfg, fsig, sp))
8099 UNVERIFIED;
8101 iargs [0] = NULL;
8103 if (mini_class_is_system_array (cmethod->klass)) {
8104 g_assert (!vtable_arg);
8106 *sp = emit_get_rgctx_method (cfg, context_used,
8107 cmethod, MONO_RGCTX_INFO_METHOD);
8109 /* Avoid varargs in the common case */
8110 if (fsig->param_count == 1)
8111 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8112 else if (fsig->param_count == 2)
8113 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8114 else if (fsig->param_count == 3)
8115 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8116 else
8117 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8118 } else if (cmethod->string_ctor) {
8119 g_assert (!context_used);
8120 g_assert (!vtable_arg);
8121 /* we simply pass a null pointer */
8122 EMIT_NEW_PCONST (cfg, *sp, NULL);
8123 /* now call the string ctor */
8124 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8125 } else {
8126 MonoInst* callvirt_this_arg = NULL;
8128 if (cmethod->klass->valuetype) {
/* Value type: zero a stack temp and pass its address as 'this';
 * alloc == NULL marks the valuetype path for the epilogue below. */
8129 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8130 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8131 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8133 alloc = NULL;
8136 * The code generated by mini_emit_virtual_call () expects
8137 * iargs [0] to be a boxed instance, but luckily the vcall
8138 * will be transformed into a normal call there.
8140 } else if (context_used) {
8141 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8142 *sp = alloc;
8143 } else {
8144 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8146 CHECK_TYPELOAD (cmethod->klass);
8149 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8150 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8151 * As a workaround, we call class cctors before allocating objects.
8153 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8154 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8155 if (cfg->verbose_level > 2)
8156 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8157 class_inits = g_slist_prepend (class_inits, vtable);
8160 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8161 *sp = alloc;
8163 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8165 if (alloc)
8166 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8168 /* Now call the actual ctor */
8169 /* Avoid virtual calls to ctors if possible */
8170 if (cmethod->klass->marshalbyref)
8171 callvirt_this_arg = sp [0];
8174 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8175 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8176 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8177 *sp = ins;
8178 sp++;
8181 CHECK_CFG_EXCEPTION;
8182 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8183 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8184 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8185 !g_list_find (dont_inline, cmethod)) {
8186 int costs;
8188 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8189 cfg->real_offset += 5;
8190 bblock = cfg->cbb;
8192 inline_costs += costs - 5;
8193 } else {
8194 INLINE_FAILURE;
8195 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8197 } else if (context_used &&
8198 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8199 !mono_class_generic_sharing_enabled (cmethod->klass))) {
/* Non-sharable ctor under sharing: fetch its code pointer from the
 * rgctx and call indirectly. */
8200 MonoInst *cmethod_addr;
8202 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8203 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8205 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8206 } else {
8207 INLINE_FAILURE;
8208 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8209 callvirt_this_arg, NULL, vtable_arg);
/* Push the result: reload the temp for value types, else the object. */
8213 if (alloc == NULL) {
8214 /* Valuetype */
8215 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8216 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8217 *sp++= ins;
8219 else
8220 *sp++ = alloc;
8222 ip += 5;
8223 inline_costs += 5;
8224 break;
/*
 * CEE_CASTCLASS: checked downcast; throws InvalidCastException on
 * failure. Three strategies: a cached castclass wrapper for classes
 * with reference-variant generic arguments, an inlined marshal wrapper
 * for MarshalByRef/interface targets, and the generic handle_castclass
 * IR sequence otherwise.
 */
8226 case CEE_CASTCLASS:
8227 CHECK_STACK (1);
8228 --sp;
8229 CHECK_OPSIZE (5);
8230 token = read32 (ip + 1);
8231 klass = mini_get_class (method, token, generic_context);
8232 CHECK_TYPELOAD (klass);
8233 if (sp [0]->type != STACK_OBJ)
8234 UNVERIFIED;
8236 if (cfg->generic_sharing_context)
8237 context_used = mono_class_check_context_used (klass);
8239 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8240 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8241 MonoInst *args [3];
8243 /* obj */
8244 args [0] = *sp;
8246 /* klass */
8247 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8249 /* inline cache*/
/* The cache slot memoizes the last successful source klass. */
8250 if (cfg->compile_aot)
8251 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8252 else
8253 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8255 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8256 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8257 ip += 5;
8258 inline_costs += 2;
8259 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8260 MonoMethod *mono_castclass;
8261 MonoInst *iargs [1];
8262 int costs;
8264 mono_castclass = mono_marshal_get_castclass (klass);
8265 iargs [0] = sp [0];
/* Force-inline the wrapper (last arg TRUE). */
8267 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8268 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8269 CHECK_CFG_EXCEPTION;
8270 g_assert (costs > 0);
8272 ip += 5;
8273 cfg->real_offset += 5;
8274 bblock = cfg->cbb;
8276 *sp++ = iargs [0];
8278 inline_costs += costs;
8280 else {
8281 ins = handle_castclass (cfg, klass, *sp, context_used);
8282 CHECK_CFG_EXCEPTION;
8283 bblock = cfg->cbb;
8284 *sp ++ = ins;
8285 ip += 5;
8287 break;
/*
 * CEE_ISINST: type test; pushes the object or NULL (never throws).
 * Mirrors CEE_CASTCLASS structurally: cached-wrapper path for
 * reference-variant generic arguments, inlined marshal wrapper for
 * MarshalByRef/interface targets, handle_isinst otherwise.
 */
8288 case CEE_ISINST: {
8289 CHECK_STACK (1);
8290 --sp;
8291 CHECK_OPSIZE (5);
8292 token = read32 (ip + 1);
8293 klass = mini_get_class (method, token, generic_context);
8294 CHECK_TYPELOAD (klass);
8295 if (sp [0]->type != STACK_OBJ)
8296 UNVERIFIED;
8298 if (cfg->generic_sharing_context)
8299 context_used = mono_class_check_context_used (klass);
8301 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8302 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8303 MonoInst *args [3];
8305 /* obj */
8306 args [0] = *sp;
8308 /* klass */
8309 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8311 /* inline cache*/
8312 if (cfg->compile_aot)
8313 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8314 else
8315 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8317 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8318 ip += 5;
8319 inline_costs += 2;
8320 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8321 MonoMethod *mono_isinst;
8322 MonoInst *iargs [1];
8323 int costs;
8325 mono_isinst = mono_marshal_get_isinst (klass);
8326 iargs [0] = sp [0];
8328 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8329 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8330 CHECK_CFG_EXCEPTION;
8331 g_assert (costs > 0);
8333 ip += 5;
8334 cfg->real_offset += 5;
8335 bblock = cfg->cbb;
8337 *sp++= iargs [0];
8339 inline_costs += costs;
8341 else {
8342 ins = handle_isinst (cfg, klass, *sp, context_used);
8343 CHECK_CFG_EXCEPTION;
8344 bblock = cfg->cbb;
8345 *sp ++ = ins;
8346 ip += 5;
8348 break;
/*
 * CEE_UNBOX_ANY: for reference types this degenerates to castclass
 * (the duplicated code below tracks the CEE_CASTCLASS paths - see the
 * FIXME); for nullables it calls the unbox-nullable helper; for other
 * value types it is unbox followed by an ldobj of the unboxed payload.
 */
8350 case CEE_UNBOX_ANY: {
8351 CHECK_STACK (1);
8352 --sp;
8353 CHECK_OPSIZE (5);
8354 token = read32 (ip + 1);
8355 klass = mini_get_class (method, token, generic_context);
8356 CHECK_TYPELOAD (klass);
8358 mono_save_token_info (cfg, image, token, klass);
8360 if (cfg->generic_sharing_context)
8361 context_used = mono_class_check_context_used (klass);
8363 if (generic_class_is_reference_type (cfg, klass)) {
8364 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8365 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8366 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8367 MonoInst *args [3];
8369 /* obj */
8370 args [0] = *sp;
8372 /* klass */
8373 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8375 /* inline cache*/
8376 /*FIXME AOT support*/
8377 if (cfg->compile_aot)
8378 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8379 else
8380 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8382 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8383 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8384 ip += 5;
8385 inline_costs += 2;
8386 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8387 MonoMethod *mono_castclass;
8388 MonoInst *iargs [1];
8389 int costs;
8391 mono_castclass = mono_marshal_get_castclass (klass);
8392 iargs [0] = sp [0];
8394 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8395 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8396 CHECK_CFG_EXCEPTION;
8397 g_assert (costs > 0);
8399 ip += 5;
8400 cfg->real_offset += 5;
8401 bblock = cfg->cbb;
8403 *sp++ = iargs [0];
8404 inline_costs += costs;
8405 } else {
8406 ins = handle_castclass (cfg, klass, *sp, context_used);
8407 CHECK_CFG_EXCEPTION;
8408 bblock = cfg->cbb;
8409 *sp ++ = ins;
8410 ip += 5;
8412 break;
8415 if (mono_class_is_nullable (klass)) {
8416 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8417 *sp++= ins;
8418 ip += 5;
8419 break;
8422 /* UNBOX */
8423 ins = handle_unbox (cfg, klass, sp, context_used);
8424 *sp = ins;
8426 ip += 5;
8428 /* LDOBJ */
/* Load the unboxed value from the interior pointer produced above. */
8429 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8430 *sp++ = ins;
8432 inline_costs += 2;
8433 break;
/*
 * CEE_BOX: box a value type. Reference types are a no-op (the value is
 * already an object). Contains the box+brtrue/brfalse peephole used in
 * generic code: since boxing a non-nullable value type always yields a
 * non-null reference, the branch outcome is known at compile time and
 * the allocation is skipped entirely.
 */
8435 case CEE_BOX: {
8436 MonoInst *val;
8438 CHECK_STACK (1);
8439 --sp;
8440 val = *sp;
8441 CHECK_OPSIZE (5);
8442 token = read32 (ip + 1);
8443 klass = mini_get_class (method, token, generic_context);
8444 CHECK_TYPELOAD (klass);
8446 mono_save_token_info (cfg, image, token, klass);
8448 if (cfg->generic_sharing_context)
8449 context_used = mono_class_check_context_used (klass);
8451 if (generic_class_is_reference_type (cfg, klass)) {
8452 *sp++ = val;
8453 ip += 5;
8454 break;
8457 if (klass == mono_defaults.void_class)
8458 UNVERIFIED;
8459 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8460 UNVERIFIED;
8461 /* frequent check in generic code: box (struct), brtrue */
8463 // FIXME: LLVM can't handle the inconsistent bb linking
8464 if (!mono_class_is_nullable (klass) &&
8465 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8466 (ip [5] == CEE_BRTRUE ||
8467 ip [5] == CEE_BRTRUE_S ||
8468 ip [5] == CEE_BRFALSE ||
8469 ip [5] == CEE_BRFALSE_S)) {
8470 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8471 int dreg;
8472 MonoBasicBlock *true_bb, *false_bb;
8474 ip += 5;
8476 if (cfg->verbose_level > 3) {
8477 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8478 printf ("<box+brtrue opt>\n");
/* Decode the branch target from the short or long form. */
8481 switch (*ip) {
8482 case CEE_BRTRUE_S:
8483 case CEE_BRFALSE_S:
8484 CHECK_OPSIZE (2);
8485 ip++;
8486 target = ip + 1 + (signed char)(*ip);
8487 ip++;
8488 break;
8489 case CEE_BRTRUE:
8490 case CEE_BRFALSE:
8491 CHECK_OPSIZE (5);
8492 ip++;
8493 target = ip + 4 + (gint)(read32 (ip));
8494 ip += 4;
8495 break;
8496 default:
8497 g_assert_not_reached ();
8501 * We need to link both bblocks, since it is needed for handling stack
8502 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8503 * Branching to only one of them would lead to inconsistencies, so
8504 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8506 GET_BBLOCK (cfg, true_bb, target);
8507 GET_BBLOCK (cfg, false_bb, ip);
8509 mono_link_bblock (cfg, cfg->cbb, true_bb);
8510 mono_link_bblock (cfg, cfg->cbb, false_bb);
8512 if (sp != stack_start) {
8513 handle_stack_args (cfg, stack_start, sp - stack_start);
8514 sp = stack_start;
8515 CHECK_UNVERIFIABLE (cfg);
8518 if (COMPILE_LLVM (cfg)) {
/* LLVM needs a real conditional ending the block; emit a constant
 * compare that later branch opts fold away. */
8519 dreg = alloc_ireg (cfg);
8520 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8523 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8524 } else {
8525 /* The JIT can't eliminate the iconst+compare */
8526 MONO_INST_NEW (cfg, ins, OP_BR);
8527 ins->inst_target_bb = is_true ? true_bb : false_bb;
8528 MONO_ADD_INS (cfg->cbb, ins);
8531 start_new_bblock = 1;
8532 break;
/* Generic path: actually allocate and box. */
8535 *sp++ = handle_box (cfg, val, klass, context_used);
8537 CHECK_CFG_EXCEPTION;
8538 ip += 5;
8539 inline_costs += 1;
8540 break;
/*
 * CEE_UNBOX: push a managed pointer to the value payload of a boxed
 * object. Nullables have no boxed payload of their own type, so they
 * are unboxed to a temp and the temp's address is pushed instead.
 */
8542 case CEE_UNBOX: {
8543 CHECK_STACK (1);
8544 --sp;
8545 CHECK_OPSIZE (5);
8546 token = read32 (ip + 1);
8547 klass = mini_get_class (method, token, generic_context);
8548 CHECK_TYPELOAD (klass);
8550 mono_save_token_info (cfg, image, token, klass);
8552 if (cfg->generic_sharing_context)
8553 context_used = mono_class_check_context_used (klass);
8555 if (mono_class_is_nullable (klass)) {
8556 MonoInst *val;
8558 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
/* Take the address of the resulting Nullable<T> temp. */
8559 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8561 *sp++= ins;
8562 } else {
8563 ins = handle_unbox (cfg, klass, sp, context_used);
8564 *sp++ = ins;
8566 ip += 5;
8567 inline_costs += 2;
8568 break;
/*
 * CEE_LDFLD / CEE_LDFLDA / CEE_STFLD: instance field access.
 * MarshalByRef/ContextBound objects go through remoting wrappers (the
 * field may live in another appdomain); magic TLS fields get dedicated
 * access code; everything else becomes a null-checked load/store at
 * the field offset, with a GC write barrier on reference stores.
 */
8570 case CEE_LDFLD:
8571 case CEE_LDFLDA:
8572 case CEE_STFLD: {
8573 MonoClassField *field;
8574 int costs;
8575 guint foffset;
8577 if (*ip == CEE_STFLD) {
8578 CHECK_STACK (2);
8579 sp -= 2;
8580 } else {
8581 CHECK_STACK (1);
8582 --sp;
8584 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8585 UNVERIFIED;
8586 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8587 UNVERIFIED;
8588 CHECK_OPSIZE (5);
8589 token = read32 (ip + 1);
8590 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8591 field = mono_method_get_wrapper_data (method, token);
8592 klass = field->parent;
8594 else {
8595 field = mono_field_from_token (image, token, &klass, generic_context);
8597 if (!field)
8598 LOAD_ERROR;
8599 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8600 FIELD_ACCESS_FAILURE;
8601 mono_class_init (klass);
/* Magic TLS fields may only be accessed via ldflda. */
8603 if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
8604 UNVERIFIED;
8605 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8606 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8607 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8608 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
/* Value types have no object header, so the field offset is adjusted. */
8611 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8612 if (*ip == CEE_STFLD) {
8613 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8614 UNVERIFIED;
/* Remoting path: a MarshalByRef receiver that is not provably 'this'
 * must store through the transparent-proxy-aware wrapper. */
8615 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8616 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8617 MonoInst *iargs [5];
8619 iargs [0] = sp [0];
8620 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8621 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8622 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8623 field->offset);
8624 iargs [4] = sp [1];
8626 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8627 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8628 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8629 CHECK_CFG_EXCEPTION;
8630 g_assert (costs > 0);
8632 cfg->real_offset += 5;
8633 bblock = cfg->cbb;
8635 inline_costs += costs;
8636 } else {
8637 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8639 } else {
8640 MonoInst *store;
8642 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8644 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
/* Stores through a local's address (OP_LDADDR) cannot fault. */
8645 if (sp [0]->opcode != OP_LDADDR)
8646 store->flags |= MONO_INST_FAULT;
/* Reference store into the heap: notify the GC unless storing NULL. */
8648 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8649 /* insert call to write barrier */
8650 MonoInst *ptr;
8651 int dreg;
8653 dreg = alloc_ireg_mp (cfg);
8654 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8655 emit_write_barrier (cfg, ptr, sp [1], -1);
8658 store->flags |= ins_flag;
8660 ins_flag = 0;
8661 ip += 5;
8662 break;
/* LDFLD / LDFLDA from here on. Remoting path first, as for stfld. */
8665 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8666 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8667 MonoInst *iargs [4];
8669 iargs [0] = sp [0];
8670 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8671 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8672 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8673 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8674 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8675 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8676 CHECK_CFG_EXCEPTION;
8677 bblock = cfg->cbb;
8678 g_assert (costs > 0);
8680 cfg->real_offset += 5;
8682 *sp++ = iargs [0];
8684 inline_costs += costs;
8685 } else {
8686 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8687 *sp++ = ins;
8689 } else {
8690 if (sp [0]->type == STACK_VTYPE) {
8691 MonoInst *var;
8693 /* Have to compute the address of the variable */
8695 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8696 if (!var)
8697 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8698 else
8699 g_assert (var->klass == klass);
8701 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8702 sp [0] = ins;
8705 if (*ip == CEE_LDFLDA) {
8706 if (is_magic_tls_access (field)) {
8707 ins = sp [0];
8708 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8709 } else {
/* ldflda on an object must null-check eagerly: the resulting
 * managed pointer may escape the null-checkable context. */
8710 if (sp [0]->type == STACK_OBJ) {
8711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8712 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8715 dreg = alloc_ireg_mp (cfg);
8717 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8718 ins->klass = mono_class_from_mono_type (field->type);
8719 ins->type = STACK_MP;
8720 *sp++ = ins;
8722 } else {
8723 MonoInst *load;
8725 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8727 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8728 load->flags |= ins_flag;
8729 if (sp [0]->opcode != OP_LDADDR)
8730 load->flags |= MONO_INST_FAULT;
8731 *sp++ = load;
8734 ins_flag = 0;
8735 ip += 5;
8736 break;
/*
 * CEE_LDSFLD / CEE_LDSFLDA / CEE_STSFLD: static field access.
 * First computes the field address by one of several strategies
 * (inlined TLS fast path for special-static fields, runtime-helper
 * icalls under MONO_OPT_SHARED or AOT, rgctx lookup under generic
 * sharing, or a direct pointer into vtable->data), then performs the
 * requested load/store. Read-only initialized fields of simple types
 * are constant-folded at compile time (see the ro_type switch).
 */
8738 case CEE_LDSFLD:
8739 case CEE_LDSFLDA:
8740 case CEE_STSFLD: {
8741 MonoClassField *field;
8742 gpointer addr = NULL;
8743 gboolean is_special_static;
8744 MonoType *ftype;
8746 CHECK_OPSIZE (5);
8747 token = read32 (ip + 1);
8749 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8750 field = mono_method_get_wrapper_data (method, token);
8751 klass = field->parent;
8753 else
8754 field = mono_field_from_token (image, token, &klass, generic_context);
8755 if (!field)
8756 LOAD_ERROR;
8757 mono_class_init (klass);
8758 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8759 FIELD_ACCESS_FAILURE;
8761 /* if the class is Critical then transparent code cannot access it's fields */
8762 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8763 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8766 * We can only support shared generic static
8767 * field access on architectures where the
8768 * trampoline code has been extended to handle
8769 * the generic class init.
8771 #ifndef MONO_ARCH_VTABLE_REG
8772 GENERIC_SHARING_FAILURE (*ip);
8773 #endif
8775 if (cfg->generic_sharing_context)
8776 context_used = mono_class_check_context_used (klass);
8778 ftype = mono_field_get_type (field);
8780 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8782 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8783 * to be called here.
8785 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8786 mono_class_vtable (cfg->domain, klass);
8787 CHECK_TYPELOAD (klass);
/* For special-static (thread/context-static) fields, 'addr' is the
 * encoded offset cookie rather than a real address. */
8789 mono_domain_lock (cfg->domain);
8790 if (cfg->domain->special_static_fields)
8791 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8792 mono_domain_unlock (cfg->domain);
8794 is_special_static = mono_class_field_is_special_static (field);
8796 /* Generate IR to compute the field address */
/* High bit clear in the cookie means thread-static (as opposed to
 * context-static) - presumably; TODO confirm the encoding. */
8797 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8799 * Fast access to TLS data
8800 * Inline version of get_thread_static_data () in
8801 * threads.c.
8803 guint32 offset;
8804 int idx, static_data_reg, array_reg, dreg;
8805 MonoInst *thread_ins;
8807 // offset &= 0x7fffffff;
8808 // idx = (offset >> 24) - 1;
8809 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8811 thread_ins = mono_get_thread_intrinsic (cfg);
8812 MONO_ADD_INS (cfg->cbb, thread_ins);
8813 static_data_reg = alloc_ireg (cfg);
8814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8816 if (cfg->compile_aot) {
8817 int offset_reg, offset2_reg, idx_reg;
8819 /* For TLS variables, this will return the TLS offset */
/* AOT cannot bake the offset in; decode it at run time with the
 * same idx/offset arithmetic as the JIT path below. */
8820 EMIT_NEW_SFLDACONST (cfg, ins, field);
8821 offset_reg = ins->dreg;
8822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8823 idx_reg = alloc_ireg (cfg);
8824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8827 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8828 array_reg = alloc_ireg (cfg);
8829 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8830 offset2_reg = alloc_ireg (cfg);
8831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8832 dreg = alloc_ireg (cfg);
8833 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8834 } else {
8835 offset = (gsize)addr & 0x7fffffff;
8836 idx = (offset >> 24) - 1;
8838 array_reg = alloc_ireg (cfg);
8839 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8840 dreg = alloc_ireg (cfg);
8841 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8843 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8844 (cfg->compile_aot && is_special_static) ||
8845 (context_used && is_special_static)) {
8846 MonoInst *iargs [2];
8848 g_assert (field->parent);
8849 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8850 if (context_used) {
8851 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8852 field, MONO_RGCTX_INFO_CLASS_FIELD);
8853 } else {
8854 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8856 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8857 } else if (context_used) {
8858 MonoInst *static_data;
8861 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8862 method->klass->name_space, method->klass->name, method->name,
8863 depth, field->offset);
8866 if (mono_class_needs_cctor_run (klass, method))
8867 emit_generic_class_init (cfg, klass);
8870 * The pointer we're computing here is
8872 * super_info.static_data + field->offset
8874 static_data = emit_get_rgctx_klass (cfg, context_used,
8875 klass, MONO_RGCTX_INFO_STATIC_DATA);
8877 if (field->offset == 0) {
8878 ins = static_data;
8879 } else {
8880 int addr_reg = mono_alloc_preg (cfg);
8881 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8883 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8884 MonoInst *iargs [2];
8886 g_assert (field->parent);
8887 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8888 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8889 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8890 } else {
8891 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8893 CHECK_TYPELOAD (klass);
8894 if (!addr) {
/* Ensure the class cctor runs: either emit a class-init trampoline
 * call (once per vtable per method) or run it right now. */
8895 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8896 if (!(g_slist_find (class_inits, vtable))) {
8897 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8898 if (cfg->verbose_level > 2)
8899 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8900 class_inits = g_slist_prepend (class_inits, vtable);
8902 } else {
8903 if (cfg->run_cctors) {
8904 MonoException *ex;
8905 /* This makes so that inline cannot trigger */
8906 /* .cctors: too many apps depend on them */
8907 /* running with a specific order... */
8908 if (! vtable->initialized)
8909 INLINE_FAILURE;
8910 ex = mono_runtime_class_init_full (vtable, FALSE);
8911 if (ex) {
8912 set_exception_object (cfg, ex);
8913 goto exception_exit;
8917 addr = (char*)vtable->data + field->offset;
8919 if (cfg->compile_aot)
8920 EMIT_NEW_SFLDACONST (cfg, ins, field);
8921 else
8922 EMIT_NEW_PCONST (cfg, ins, addr);
8923 } else {
/* Special-static field without a usable fast path: resolve the
 * cookie through the generic helper icall. */
8924 MonoInst *iargs [1];
8925 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8926 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8930 /* Generate IR to do the actual load/store operation */
8932 if (*ip == CEE_LDSFLDA) {
8933 ins->klass = mono_class_from_mono_type (ftype);
8934 ins->type = STACK_PTR;
8935 *sp++ = ins;
8936 } else if (*ip == CEE_STSFLD) {
8937 MonoInst *store;
8938 CHECK_STACK (1);
8939 sp--;
8941 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8942 store->flags |= ins_flag;
8943 } else {
8944 gboolean is_const = FALSE;
8945 MonoVTable *vtable = NULL;
8947 if (!context_used) {
8948 vtable = mono_class_vtable (cfg->domain, klass);
8949 CHECK_TYPELOAD (klass);
/* Initialized read-only field in non-shared, non-AOT code: fold the
 * current value into a compile-time constant. */
8951 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8952 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8953 gpointer addr = (char*)vtable->data + field->offset;
8954 int ro_type = ftype->type;
8955 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8956 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8958 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8959 is_const = TRUE;
8960 switch (ro_type) {
8961 case MONO_TYPE_BOOLEAN:
8962 case MONO_TYPE_U1:
8963 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8964 sp++;
8965 break;
8966 case MONO_TYPE_I1:
8967 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8968 sp++;
8969 break;
8970 case MONO_TYPE_CHAR:
8971 case MONO_TYPE_U2:
8972 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8973 sp++;
8974 break;
8975 case MONO_TYPE_I2:
8976 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8977 sp++;
8978 break;
8979 break;
8980 case MONO_TYPE_I4:
8981 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8982 sp++;
8983 break;
8984 case MONO_TYPE_U4:
8985 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8986 sp++;
8987 break;
8988 case MONO_TYPE_I:
8989 case MONO_TYPE_U:
8990 case MONO_TYPE_PTR:
8991 case MONO_TYPE_FNPTR:
8992 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8993 type_to_eval_stack_type ((cfg), field->type, *sp);
8994 sp++;
8995 break;
8996 case MONO_TYPE_STRING:
8997 case MONO_TYPE_OBJECT:
8998 case MONO_TYPE_CLASS:
8999 case MONO_TYPE_SZARRAY:
9000 case MONO_TYPE_ARRAY:
9001 if (!mono_gc_is_moving ()) {
9002 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9003 type_to_eval_stack_type ((cfg), field->type, *sp);
9004 sp++;
9005 } else {
9006 is_const = FALSE;
9008 break;
9009 case MONO_TYPE_I8:
9010 case MONO_TYPE_U8:
9011 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9012 sp++;
9013 break;
9014 case MONO_TYPE_R4:
9015 case MONO_TYPE_R8:
9016 case MONO_TYPE_VALUETYPE:
9017 default:
9018 is_const = FALSE;
9019 break;
9023 if (!is_const) {
9024 MonoInst *load;
9026 CHECK_STACK_OVF (1);
9028 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9029 load->flags |= ins_flag;
9030 ins_flag = 0;
9031 *sp++ = load;
9034 ins_flag = 0;
9035 ip += 5;
9036 break;
9038 case CEE_STOBJ:
9039 CHECK_STACK (2);
9040 sp -= 2;
9041 CHECK_OPSIZE (5);
9042 token = read32 (ip + 1);
9043 klass = mini_get_class (method, token, generic_context);
9044 CHECK_TYPELOAD (klass);
9045 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9046 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9047 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9048 generic_class_is_reference_type (cfg, klass)) {
9049 /* insert call to write barrier */
9050 emit_write_barrier (cfg, sp [0], sp [1], -1);
9052 ins_flag = 0;
9053 ip += 5;
9054 inline_costs += 1;
9055 break;
9058 * Array opcodes
9060 case CEE_NEWARR: {
9061 MonoInst *len_ins;
9062 const char *data_ptr;
9063 int data_size = 0;
9064 guint32 field_token;
9066 CHECK_STACK (1);
9067 --sp;
9069 CHECK_OPSIZE (5);
9070 token = read32 (ip + 1);
9072 klass = mini_get_class (method, token, generic_context);
9073 CHECK_TYPELOAD (klass);
9075 if (cfg->generic_sharing_context)
9076 context_used = mono_class_check_context_used (klass);
9078 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9079 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9080 ins->sreg1 = sp [0]->dreg;
9081 ins->type = STACK_I4;
9082 ins->dreg = alloc_ireg (cfg);
9083 MONO_ADD_INS (cfg->cbb, ins);
9084 *sp = mono_decompose_opcode (cfg, ins);
9087 if (context_used) {
9088 MonoInst *args [3];
9089 MonoClass *array_class = mono_array_class_get (klass, 1);
9090 /* FIXME: we cannot get a managed
9091 allocator because we can't get the
9092 open generic class's vtable. We
9093 have the same problem in
9094 handle_alloc(). This
9095 needs to be solved so that we can
9096 have managed allocs of shared
9097 generic classes. */
9099 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9100 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9102 MonoMethod *managed_alloc = NULL;
9104 /* FIXME: Decompose later to help abcrem */
9106 /* vtable */
9107 args [0] = emit_get_rgctx_klass (cfg, context_used,
9108 array_class, MONO_RGCTX_INFO_VTABLE);
9109 /* array len */
9110 args [1] = sp [0];
9112 if (managed_alloc)
9113 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9114 else
9115 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9116 } else {
9117 if (cfg->opt & MONO_OPT_SHARED) {
9118 /* Decompose now to avoid problems with references to the domainvar */
9119 MonoInst *iargs [3];
9121 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9122 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9123 iargs [2] = sp [0];
9125 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9126 } else {
9127 /* Decompose later since it is needed by abcrem */
9128 MonoClass *array_type = mono_array_class_get (klass, 1);
9129 mono_class_vtable (cfg->domain, array_type);
9130 CHECK_TYPELOAD (array_type);
9132 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9133 ins->dreg = alloc_ireg_ref (cfg);
9134 ins->sreg1 = sp [0]->dreg;
9135 ins->inst_newa_class = klass;
9136 ins->type = STACK_OBJ;
9137 ins->klass = klass;
9138 MONO_ADD_INS (cfg->cbb, ins);
9139 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9140 cfg->cbb->has_array_access = TRUE;
9142 /* Needed so mono_emit_load_get_addr () gets called */
9143 mono_get_got_var (cfg);
9147 len_ins = sp [0];
9148 ip += 5;
9149 *sp++ = ins;
9150 inline_costs += 1;
9153 * we inline/optimize the initialization sequence if possible.
9154 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9155 * for small sizes open code the memcpy
9156 * ensure the rva field is big enough
9158 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9159 MonoMethod *memcpy_method = get_memcpy_method ();
9160 MonoInst *iargs [3];
9161 int add_reg = alloc_ireg_mp (cfg);
9163 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9164 if (cfg->compile_aot) {
9165 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9166 } else {
9167 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9169 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9170 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9171 ip += 11;
9174 break;
9176 case CEE_LDLEN:
9177 CHECK_STACK (1);
9178 --sp;
9179 if (sp [0]->type != STACK_OBJ)
9180 UNVERIFIED;
9182 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9183 ins->dreg = alloc_preg (cfg);
9184 ins->sreg1 = sp [0]->dreg;
9185 ins->type = STACK_I4;
9186 /* This flag will be inherited by the decomposition */
9187 ins->flags |= MONO_INST_FAULT;
9188 MONO_ADD_INS (cfg->cbb, ins);
9189 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9190 cfg->cbb->has_array_access = TRUE;
9191 ip ++;
9192 *sp++ = ins;
9193 break;
9194 case CEE_LDELEMA:
9195 CHECK_STACK (2);
9196 sp -= 2;
9197 CHECK_OPSIZE (5);
9198 if (sp [0]->type != STACK_OBJ)
9199 UNVERIFIED;
9201 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9203 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9204 CHECK_TYPELOAD (klass);
9205 /* we need to make sure that this array is exactly the type it needs
9206 * to be for correctness. the wrappers are lax with their usage
9207 * so we need to ignore them here
9209 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9210 MonoClass *array_class = mono_array_class_get (klass, 1);
9211 mini_emit_check_array_type (cfg, sp [0], array_class);
9212 CHECK_TYPELOAD (array_class);
9215 readonly = FALSE;
9216 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9217 *sp++ = ins;
9218 ip += 5;
9219 break;
9220 case CEE_LDELEM:
9221 case CEE_LDELEM_I1:
9222 case CEE_LDELEM_U1:
9223 case CEE_LDELEM_I2:
9224 case CEE_LDELEM_U2:
9225 case CEE_LDELEM_I4:
9226 case CEE_LDELEM_U4:
9227 case CEE_LDELEM_I8:
9228 case CEE_LDELEM_I:
9229 case CEE_LDELEM_R4:
9230 case CEE_LDELEM_R8:
9231 case CEE_LDELEM_REF: {
9232 MonoInst *addr;
9234 CHECK_STACK (2);
9235 sp -= 2;
9237 if (*ip == CEE_LDELEM) {
9238 CHECK_OPSIZE (5);
9239 token = read32 (ip + 1);
9240 klass = mini_get_class (method, token, generic_context);
9241 CHECK_TYPELOAD (klass);
9242 mono_class_init (klass);
9244 else
9245 klass = array_access_to_klass (*ip);
9247 if (sp [0]->type != STACK_OBJ)
9248 UNVERIFIED;
9250 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9252 if (sp [1]->opcode == OP_ICONST) {
9253 int array_reg = sp [0]->dreg;
9254 int index_reg = sp [1]->dreg;
9255 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9257 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9258 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9259 } else {
9260 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9261 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9263 *sp++ = ins;
9264 if (*ip == CEE_LDELEM)
9265 ip += 5;
9266 else
9267 ++ip;
9268 break;
9270 case CEE_STELEM_I:
9271 case CEE_STELEM_I1:
9272 case CEE_STELEM_I2:
9273 case CEE_STELEM_I4:
9274 case CEE_STELEM_I8:
9275 case CEE_STELEM_R4:
9276 case CEE_STELEM_R8:
9277 case CEE_STELEM_REF:
9278 case CEE_STELEM: {
9279 MonoInst *addr;
9281 CHECK_STACK (3);
9282 sp -= 3;
9284 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9286 if (*ip == CEE_STELEM) {
9287 CHECK_OPSIZE (5);
9288 token = read32 (ip + 1);
9289 klass = mini_get_class (method, token, generic_context);
9290 CHECK_TYPELOAD (klass);
9291 mono_class_init (klass);
9293 else
9294 klass = array_access_to_klass (*ip);
9296 if (sp [0]->type != STACK_OBJ)
9297 UNVERIFIED;
9299 /* storing a NULL doesn't need any of the complex checks in stelemref */
9300 if (generic_class_is_reference_type (cfg, klass) &&
9301 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9302 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9303 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9304 MonoInst *iargs [3];
9306 if (!helper->slot)
9307 mono_class_setup_vtable (obj_array);
9308 g_assert (helper->slot);
9310 if (sp [0]->type != STACK_OBJ)
9311 UNVERIFIED;
9312 if (sp [2]->type != STACK_OBJ)
9313 UNVERIFIED;
9315 iargs [2] = sp [2];
9316 iargs [1] = sp [1];
9317 iargs [0] = sp [0];
9319 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9320 } else {
9321 if (sp [1]->opcode == OP_ICONST) {
9322 int array_reg = sp [0]->dreg;
9323 int index_reg = sp [1]->dreg;
9324 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9326 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9327 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9328 } else {
9329 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9330 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9334 if (*ip == CEE_STELEM)
9335 ip += 5;
9336 else
9337 ++ip;
9338 inline_costs += 1;
9339 break;
9341 case CEE_CKFINITE: {
9342 CHECK_STACK (1);
9343 --sp;
9345 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9346 ins->sreg1 = sp [0]->dreg;
9347 ins->dreg = alloc_freg (cfg);
9348 ins->type = STACK_R8;
9349 MONO_ADD_INS (bblock, ins);
9351 *sp++ = mono_decompose_opcode (cfg, ins);
9353 ++ip;
9354 break;
9356 case CEE_REFANYVAL: {
9357 MonoInst *src_var, *src;
9359 int klass_reg = alloc_preg (cfg);
9360 int dreg = alloc_preg (cfg);
9362 CHECK_STACK (1);
9363 MONO_INST_NEW (cfg, ins, *ip);
9364 --sp;
9365 CHECK_OPSIZE (5);
9366 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9367 CHECK_TYPELOAD (klass);
9368 mono_class_init (klass);
9370 if (cfg->generic_sharing_context)
9371 context_used = mono_class_check_context_used (klass);
9373 // FIXME:
9374 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9375 if (!src_var)
9376 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9377 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9380 if (context_used) {
9381 MonoInst *klass_ins;
9383 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9384 klass, MONO_RGCTX_INFO_KLASS);
9386 // FIXME:
9387 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9388 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9389 } else {
9390 mini_emit_class_check (cfg, klass_reg, klass);
9392 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9393 ins->type = STACK_MP;
9394 *sp++ = ins;
9395 ip += 5;
9396 break;
9398 case CEE_MKREFANY: {
9399 MonoInst *loc, *addr;
9401 CHECK_STACK (1);
9402 MONO_INST_NEW (cfg, ins, *ip);
9403 --sp;
9404 CHECK_OPSIZE (5);
9405 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9406 CHECK_TYPELOAD (klass);
9407 mono_class_init (klass);
9409 if (cfg->generic_sharing_context)
9410 context_used = mono_class_check_context_used (klass);
9412 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9413 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9415 if (context_used) {
9416 MonoInst *const_ins;
9417 int type_reg = alloc_preg (cfg);
9419 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9420 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9422 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9423 } else if (cfg->compile_aot) {
9424 int const_reg = alloc_preg (cfg);
9425 int type_reg = alloc_preg (cfg);
9427 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9428 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9430 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9431 } else {
9432 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9433 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9435 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9437 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9438 ins->type = STACK_VTYPE;
9439 ins->klass = mono_defaults.typed_reference_class;
9440 *sp++ = ins;
9441 ip += 5;
9442 break;
9444 case CEE_LDTOKEN: {
9445 gpointer handle;
9446 MonoClass *handle_class;
9448 CHECK_STACK_OVF (1);
9450 CHECK_OPSIZE (5);
9451 n = read32 (ip + 1);
9453 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9454 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9455 handle = mono_method_get_wrapper_data (method, n);
9456 handle_class = mono_method_get_wrapper_data (method, n + 1);
9457 if (handle_class == mono_defaults.typehandle_class)
9458 handle = &((MonoClass*)handle)->byval_arg;
9460 else {
9461 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9463 if (!handle)
9464 LOAD_ERROR;
9465 mono_class_init (handle_class);
9466 if (cfg->generic_sharing_context) {
9467 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9468 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9469 /* This case handles ldtoken
9470 of an open type, like for
9471 typeof(Gen<>). */
9472 context_used = 0;
9473 } else if (handle_class == mono_defaults.typehandle_class) {
9474 /* If we get a MONO_TYPE_CLASS
9475 then we need to provide the
9476 open type, not an
9477 instantiation of it. */
9478 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9479 context_used = 0;
9480 else
9481 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9482 } else if (handle_class == mono_defaults.fieldhandle_class)
9483 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9484 else if (handle_class == mono_defaults.methodhandle_class)
9485 context_used = mono_method_check_context_used (handle);
9486 else
9487 g_assert_not_reached ();
9490 if ((cfg->opt & MONO_OPT_SHARED) &&
9491 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9492 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9493 MonoInst *addr, *vtvar, *iargs [3];
9494 int method_context_used;
9496 if (cfg->generic_sharing_context)
9497 method_context_used = mono_method_check_context_used (method);
9498 else
9499 method_context_used = 0;
9501 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9503 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9504 EMIT_NEW_ICONST (cfg, iargs [1], n);
9505 if (method_context_used) {
9506 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9507 method, MONO_RGCTX_INFO_METHOD);
9508 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9509 } else {
9510 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9511 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9513 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9515 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9517 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9518 } else {
9519 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9520 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9521 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9522 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9523 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9524 MonoClass *tclass = mono_class_from_mono_type (handle);
9526 mono_class_init (tclass);
9527 if (context_used) {
9528 ins = emit_get_rgctx_klass (cfg, context_used,
9529 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9530 } else if (cfg->compile_aot) {
9531 if (method->wrapper_type) {
9532 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9533 /* Special case for static synchronized wrappers */
9534 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9535 } else {
9536 /* FIXME: n is not a normal token */
9537 cfg->disable_aot = TRUE;
9538 EMIT_NEW_PCONST (cfg, ins, NULL);
9540 } else {
9541 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9543 } else {
9544 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9546 ins->type = STACK_OBJ;
9547 ins->klass = cmethod->klass;
9548 ip += 5;
9549 } else {
9550 MonoInst *addr, *vtvar;
9552 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9554 if (context_used) {
9555 if (handle_class == mono_defaults.typehandle_class) {
9556 ins = emit_get_rgctx_klass (cfg, context_used,
9557 mono_class_from_mono_type (handle),
9558 MONO_RGCTX_INFO_TYPE);
9559 } else if (handle_class == mono_defaults.methodhandle_class) {
9560 ins = emit_get_rgctx_method (cfg, context_used,
9561 handle, MONO_RGCTX_INFO_METHOD);
9562 } else if (handle_class == mono_defaults.fieldhandle_class) {
9563 ins = emit_get_rgctx_field (cfg, context_used,
9564 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9565 } else {
9566 g_assert_not_reached ();
9568 } else if (cfg->compile_aot) {
9569 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9570 } else {
9571 EMIT_NEW_PCONST (cfg, ins, handle);
9573 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9574 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9575 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9579 *sp++ = ins;
9580 ip += 5;
9581 break;
9583 case CEE_THROW:
9584 CHECK_STACK (1);
9585 MONO_INST_NEW (cfg, ins, OP_THROW);
9586 --sp;
9587 ins->sreg1 = sp [0]->dreg;
9588 ip++;
9589 bblock->out_of_line = TRUE;
9590 MONO_ADD_INS (bblock, ins);
9591 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9592 MONO_ADD_INS (bblock, ins);
9593 sp = stack_start;
9595 link_bblock (cfg, bblock, end_bblock);
9596 start_new_bblock = 1;
9597 break;
9598 case CEE_ENDFINALLY:
9599 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9600 MONO_ADD_INS (bblock, ins);
9601 ip++;
9602 start_new_bblock = 1;
9605 * Control will leave the method so empty the stack, otherwise
9606 * the next basic block will start with a nonempty stack.
9608 while (sp != stack_start) {
9609 sp--;
9611 break;
9612 case CEE_LEAVE:
9613 case CEE_LEAVE_S: {
9614 GList *handlers;
9616 if (*ip == CEE_LEAVE) {
9617 CHECK_OPSIZE (5);
9618 target = ip + 5 + (gint32)read32(ip + 1);
9619 } else {
9620 CHECK_OPSIZE (2);
9621 target = ip + 2 + (signed char)(ip [1]);
9624 /* empty the stack */
9625 while (sp != stack_start) {
9626 sp--;
9630 * If this leave statement is in a catch block, check for a
9631 * pending exception, and rethrow it if necessary.
9632 * We avoid doing this in runtime invoke wrappers, since those are called
9633 * by native code which excepts the wrapper to catch all exceptions.
9635 for (i = 0; i < header->num_clauses; ++i) {
9636 MonoExceptionClause *clause = &header->clauses [i];
9639 * Use <= in the final comparison to handle clauses with multiple
9640 * leave statements, like in bug #78024.
9641 * The ordering of the exception clauses guarantees that we find the
9642 * innermost clause.
9644 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9645 MonoInst *exc_ins;
9646 MonoBasicBlock *dont_throw;
9649 MonoInst *load;
9651 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9654 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9656 NEW_BBLOCK (cfg, dont_throw);
9659 * Currently, we always rethrow the abort exception, despite the
9660 * fact that this is not correct. See thread6.cs for an example.
9661 * But propagating the abort exception is more important than
9662 * getting the sematics right.
9664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9666 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9668 MONO_START_BB (cfg, dont_throw);
9669 bblock = cfg->cbb;
9673 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9674 GList *tmp;
9675 MonoExceptionClause *clause;
9677 for (tmp = handlers; tmp; tmp = tmp->next) {
9678 clause = tmp->data;
9679 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9680 g_assert (tblock);
9681 link_bblock (cfg, bblock, tblock);
9682 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9683 ins->inst_target_bb = tblock;
9684 ins->inst_eh_block = clause;
9685 MONO_ADD_INS (bblock, ins);
9686 bblock->has_call_handler = 1;
9687 if (COMPILE_LLVM (cfg)) {
9688 MonoBasicBlock *target_bb;
9691 * Link the finally bblock with the target, since it will
9692 * conceptually branch there.
9693 * FIXME: Have to link the bblock containing the endfinally.
9695 GET_BBLOCK (cfg, target_bb, target);
9696 link_bblock (cfg, tblock, target_bb);
9699 g_list_free (handlers);
9702 MONO_INST_NEW (cfg, ins, OP_BR);
9703 MONO_ADD_INS (bblock, ins);
9704 GET_BBLOCK (cfg, tblock, target);
9705 link_bblock (cfg, bblock, tblock);
9706 ins->inst_target_bb = tblock;
9707 start_new_bblock = 1;
9709 if (*ip == CEE_LEAVE)
9710 ip += 5;
9711 else
9712 ip += 2;
9714 break;
9718 * Mono specific opcodes
9720 case MONO_CUSTOM_PREFIX: {
9722 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9724 CHECK_OPSIZE (2);
9725 switch (ip [1]) {
9726 case CEE_MONO_ICALL: {
9727 gpointer func;
9728 MonoJitICallInfo *info;
9730 token = read32 (ip + 2);
9731 func = mono_method_get_wrapper_data (method, token);
9732 info = mono_find_jit_icall_by_addr (func);
9733 g_assert (info);
9735 CHECK_STACK (info->sig->param_count);
9736 sp -= info->sig->param_count;
9738 ins = mono_emit_jit_icall (cfg, info->func, sp);
9739 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9740 *sp++ = ins;
9742 ip += 6;
9743 inline_costs += 10 * num_calls++;
9745 break;
9747 case CEE_MONO_LDPTR: {
9748 gpointer ptr;
9750 CHECK_STACK_OVF (1);
9751 CHECK_OPSIZE (6);
9752 token = read32 (ip + 2);
9754 ptr = mono_method_get_wrapper_data (method, token);
9755 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9756 MonoJitICallInfo *callinfo;
9757 const char *icall_name;
9759 icall_name = method->name + strlen ("__icall_wrapper_");
9760 g_assert (icall_name);
9761 callinfo = mono_find_jit_icall_by_name (icall_name);
9762 g_assert (callinfo);
9764 if (ptr == callinfo->func) {
9765 /* Will be transformed into an AOTCONST later */
9766 EMIT_NEW_PCONST (cfg, ins, ptr);
9767 *sp++ = ins;
9768 ip += 6;
9769 break;
9772 /* FIXME: Generalize this */
9773 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9774 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9775 *sp++ = ins;
9776 ip += 6;
9777 break;
9779 EMIT_NEW_PCONST (cfg, ins, ptr);
9780 *sp++ = ins;
9781 ip += 6;
9782 inline_costs += 10 * num_calls++;
9783 /* Can't embed random pointers into AOT code */
9784 cfg->disable_aot = 1;
9785 break;
9787 case CEE_MONO_ICALL_ADDR: {
9788 MonoMethod *cmethod;
9789 gpointer ptr;
9791 CHECK_STACK_OVF (1);
9792 CHECK_OPSIZE (6);
9793 token = read32 (ip + 2);
9795 cmethod = mono_method_get_wrapper_data (method, token);
9797 if (cfg->compile_aot) {
9798 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9799 } else {
9800 ptr = mono_lookup_internal_call (cmethod);
9801 g_assert (ptr);
9802 EMIT_NEW_PCONST (cfg, ins, ptr);
9804 *sp++ = ins;
9805 ip += 6;
9806 break;
9808 case CEE_MONO_VTADDR: {
9809 MonoInst *src_var, *src;
9811 CHECK_STACK (1);
9812 --sp;
9814 // FIXME:
9815 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9816 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9817 *sp++ = src;
9818 ip += 2;
9819 break;
9821 case CEE_MONO_NEWOBJ: {
9822 MonoInst *iargs [2];
9824 CHECK_STACK_OVF (1);
9825 CHECK_OPSIZE (6);
9826 token = read32 (ip + 2);
9827 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9828 mono_class_init (klass);
9829 NEW_DOMAINCONST (cfg, iargs [0]);
9830 MONO_ADD_INS (cfg->cbb, iargs [0]);
9831 NEW_CLASSCONST (cfg, iargs [1], klass);
9832 MONO_ADD_INS (cfg->cbb, iargs [1]);
9833 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9834 ip += 6;
9835 inline_costs += 10 * num_calls++;
9836 break;
9838 case CEE_MONO_OBJADDR:
9839 CHECK_STACK (1);
9840 --sp;
9841 MONO_INST_NEW (cfg, ins, OP_MOVE);
9842 ins->dreg = alloc_ireg_mp (cfg);
9843 ins->sreg1 = sp [0]->dreg;
9844 ins->type = STACK_MP;
9845 MONO_ADD_INS (cfg->cbb, ins);
9846 *sp++ = ins;
9847 ip += 2;
9848 break;
9849 case CEE_MONO_LDNATIVEOBJ:
9851 * Similar to LDOBJ, but instead load the unmanaged
9852 * representation of the vtype to the stack.
9854 CHECK_STACK (1);
9855 CHECK_OPSIZE (6);
9856 --sp;
9857 token = read32 (ip + 2);
9858 klass = mono_method_get_wrapper_data (method, token);
9859 g_assert (klass->valuetype);
9860 mono_class_init (klass);
9863 MonoInst *src, *dest, *temp;
9865 src = sp [0];
9866 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9867 temp->backend.is_pinvoke = 1;
9868 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9869 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9871 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9872 dest->type = STACK_VTYPE;
9873 dest->klass = klass;
9875 *sp ++ = dest;
9876 ip += 6;
9878 break;
9879 case CEE_MONO_RETOBJ: {
9881 * Same as RET, but return the native representation of a vtype
9882 * to the caller.
9884 g_assert (cfg->ret);
9885 g_assert (mono_method_signature (method)->pinvoke);
9886 CHECK_STACK (1);
9887 --sp;
9889 CHECK_OPSIZE (6);
9890 token = read32 (ip + 2);
9891 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9893 if (!cfg->vret_addr) {
9894 g_assert (cfg->ret_var_is_local);
9896 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9897 } else {
9898 EMIT_NEW_RETLOADA (cfg, ins);
9900 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9902 if (sp != stack_start)
9903 UNVERIFIED;
9905 MONO_INST_NEW (cfg, ins, OP_BR);
9906 ins->inst_target_bb = end_bblock;
9907 MONO_ADD_INS (bblock, ins);
9908 link_bblock (cfg, bblock, end_bblock);
9909 start_new_bblock = 1;
9910 ip += 6;
9911 break;
9913 case CEE_MONO_CISINST:
9914 case CEE_MONO_CCASTCLASS: {
9915 int token;
9916 CHECK_STACK (1);
9917 --sp;
9918 CHECK_OPSIZE (6);
9919 token = read32 (ip + 2);
9920 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9921 if (ip [1] == CEE_MONO_CISINST)
9922 ins = handle_cisinst (cfg, klass, sp [0]);
9923 else
9924 ins = handle_ccastclass (cfg, klass, sp [0]);
9925 bblock = cfg->cbb;
9926 *sp++ = ins;
9927 ip += 6;
9928 break;
9930 case CEE_MONO_SAVE_LMF:
9931 case CEE_MONO_RESTORE_LMF:
9932 #ifdef MONO_ARCH_HAVE_LMF_OPS
9933 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9934 MONO_ADD_INS (bblock, ins);
9935 cfg->need_lmf_area = TRUE;
9936 #endif
9937 ip += 2;
9938 break;
9939 case CEE_MONO_CLASSCONST:
9940 CHECK_STACK_OVF (1);
9941 CHECK_OPSIZE (6);
9942 token = read32 (ip + 2);
9943 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9944 *sp++ = ins;
9945 ip += 6;
9946 inline_costs += 10 * num_calls++;
9947 break;
9948 case CEE_MONO_NOT_TAKEN:
9949 bblock->out_of_line = TRUE;
9950 ip += 2;
9951 break;
9952 case CEE_MONO_TLS:
9953 CHECK_STACK_OVF (1);
9954 CHECK_OPSIZE (6);
9955 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9956 ins->dreg = alloc_preg (cfg);
9957 ins->inst_offset = (gint32)read32 (ip + 2);
9958 ins->type = STACK_PTR;
9959 MONO_ADD_INS (bblock, ins);
9960 *sp++ = ins;
9961 ip += 6;
9962 break;
9963 case CEE_MONO_DYN_CALL: {
9964 MonoCallInst *call;
9966 /* It would be easier to call a trampoline, but that would put an
9967 * extra frame on the stack, confusing exception handling. So
9968 * implement it inline using an opcode for now.
9971 if (!cfg->dyn_call_var) {
9972 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9973 /* prevent it from being register allocated */
9974 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9977 /* Has to use a call inst since it local regalloc expects it */
9978 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9979 ins = (MonoInst*)call;
9980 sp -= 2;
9981 ins->sreg1 = sp [0]->dreg;
9982 ins->sreg2 = sp [1]->dreg;
9983 MONO_ADD_INS (bblock, ins);
9985 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9986 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9987 #endif
9989 ip += 2;
9990 inline_costs += 10 * num_calls++;
9992 break;
9994 default:
9995 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9996 break;
9998 break;
10001 case CEE_PREFIX1: {
10002 CHECK_OPSIZE (2);
10003 switch (ip [1]) {
10004 case CEE_ARGLIST: {
10005 /* somewhat similar to LDTOKEN */
10006 MonoInst *addr, *vtvar;
10007 CHECK_STACK_OVF (1);
10008 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10010 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10011 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10013 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10014 ins->type = STACK_VTYPE;
10015 ins->klass = mono_defaults.argumenthandle_class;
10016 *sp++ = ins;
10017 ip += 2;
10018 break;
10020 case CEE_CEQ:
10021 case CEE_CGT:
10022 case CEE_CGT_UN:
10023 case CEE_CLT:
10024 case CEE_CLT_UN: {
10025 MonoInst *cmp;
10026 CHECK_STACK (2);
10028 * The following transforms:
10029 * CEE_CEQ into OP_CEQ
10030 * CEE_CGT into OP_CGT
10031 * CEE_CGT_UN into OP_CGT_UN
10032 * CEE_CLT into OP_CLT
10033 * CEE_CLT_UN into OP_CLT_UN
10035 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10037 MONO_INST_NEW (cfg, ins, cmp->opcode);
10038 sp -= 2;
10039 cmp->sreg1 = sp [0]->dreg;
10040 cmp->sreg2 = sp [1]->dreg;
10041 type_from_op (cmp, sp [0], sp [1]);
10042 CHECK_TYPE (cmp);
10043 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10044 cmp->opcode = OP_LCOMPARE;
10045 else if (sp [0]->type == STACK_R8)
10046 cmp->opcode = OP_FCOMPARE;
10047 else
10048 cmp->opcode = OP_ICOMPARE;
10049 MONO_ADD_INS (bblock, cmp);
10050 ins->type = STACK_I4;
10051 ins->dreg = alloc_dreg (cfg, ins->type);
10052 type_from_op (ins, sp [0], sp [1]);
10054 if (cmp->opcode == OP_FCOMPARE) {
10056 * The backends expect the fceq opcodes to do the
10057 * comparison too.
10059 cmp->opcode = OP_NOP;
10060 ins->sreg1 = cmp->sreg1;
10061 ins->sreg2 = cmp->sreg2;
10063 MONO_ADD_INS (bblock, ins);
10064 *sp++ = ins;
10065 ip += 2;
10066 break;
10068 case CEE_LDFTN: {
10069 MonoInst *argconst;
10070 MonoMethod *cil_method;
10072 CHECK_STACK_OVF (1);
10073 CHECK_OPSIZE (6);
10074 n = read32 (ip + 2);
10075 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10076 if (!cmethod || mono_loader_get_last_error ())
10077 LOAD_ERROR;
10078 mono_class_init (cmethod->klass);
10080 mono_save_token_info (cfg, image, n, cmethod);
10082 if (cfg->generic_sharing_context)
10083 context_used = mono_method_check_context_used (cmethod);
10085 cil_method = cmethod;
10086 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10087 METHOD_ACCESS_FAILURE;
10089 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10090 if (check_linkdemand (cfg, method, cmethod))
10091 INLINE_FAILURE;
10092 CHECK_CFG_EXCEPTION;
10093 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10094 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10098 * Optimize the common case of ldftn+delegate creation
10100 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10101 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10102 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10103 MonoInst *target_ins;
10104 MonoMethod *invoke;
10105 int invoke_context_used = 0;
10107 invoke = mono_get_delegate_invoke (ctor_method->klass);
10108 if (!invoke || !mono_method_signature (invoke))
10109 LOAD_ERROR;
10111 if (cfg->generic_sharing_context)
10112 invoke_context_used = mono_method_check_context_used (invoke);
10114 target_ins = sp [-1];
10116 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10117 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10119 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10120 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10121 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10123 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10127 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10128 /* FIXME: SGEN support */
10129 if (invoke_context_used == 0) {
10130 ip += 6;
10131 if (cfg->verbose_level > 3)
10132 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10133 sp --;
10134 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10135 CHECK_CFG_EXCEPTION;
10136 ip += 5;
10137 sp ++;
10138 break;
10140 #endif
10144 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10145 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10146 *sp++ = ins;
10148 ip += 6;
10149 inline_costs += 10 * num_calls++;
10150 break;
10152 case CEE_LDVIRTFTN: {
10153 MonoInst *args [2];
10155 CHECK_STACK (1);
10156 CHECK_OPSIZE (6);
10157 n = read32 (ip + 2);
10158 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10159 if (!cmethod || mono_loader_get_last_error ())
10160 LOAD_ERROR;
10161 mono_class_init (cmethod->klass);
10163 if (cfg->generic_sharing_context)
10164 context_used = mono_method_check_context_used (cmethod);
10166 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10167 if (check_linkdemand (cfg, method, cmethod))
10168 INLINE_FAILURE;
10169 CHECK_CFG_EXCEPTION;
10170 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10171 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10174 --sp;
10175 args [0] = *sp;
10177 args [1] = emit_get_rgctx_method (cfg, context_used,
10178 cmethod, MONO_RGCTX_INFO_METHOD);
10180 if (context_used)
10181 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10182 else
10183 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10185 ip += 6;
10186 inline_costs += 10 * num_calls++;
10187 break;
10189 case CEE_LDARG:
10190 CHECK_STACK_OVF (1);
10191 CHECK_OPSIZE (4);
10192 n = read16 (ip + 2);
10193 CHECK_ARG (n);
10194 EMIT_NEW_ARGLOAD (cfg, ins, n);
10195 *sp++ = ins;
10196 ip += 4;
10197 break;
10198 case CEE_LDARGA:
10199 CHECK_STACK_OVF (1);
10200 CHECK_OPSIZE (4);
10201 n = read16 (ip + 2);
10202 CHECK_ARG (n);
10203 NEW_ARGLOADA (cfg, ins, n);
10204 MONO_ADD_INS (cfg->cbb, ins);
10205 *sp++ = ins;
10206 ip += 4;
10207 break;
10208 case CEE_STARG:
10209 CHECK_STACK (1);
10210 --sp;
10211 CHECK_OPSIZE (4);
10212 n = read16 (ip + 2);
10213 CHECK_ARG (n);
10214 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10215 UNVERIFIED;
10216 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10217 ip += 4;
10218 break;
10219 case CEE_LDLOC:
10220 CHECK_STACK_OVF (1);
10221 CHECK_OPSIZE (4);
10222 n = read16 (ip + 2);
10223 CHECK_LOCAL (n);
10224 EMIT_NEW_LOCLOAD (cfg, ins, n);
10225 *sp++ = ins;
10226 ip += 4;
10227 break;
10228 case CEE_LDLOCA: {
10229 unsigned char *tmp_ip;
10230 CHECK_STACK_OVF (1);
10231 CHECK_OPSIZE (4);
10232 n = read16 (ip + 2);
10233 CHECK_LOCAL (n);
10235 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10236 ip = tmp_ip;
10237 inline_costs += 1;
10238 break;
10241 EMIT_NEW_LOCLOADA (cfg, ins, n);
10242 *sp++ = ins;
10243 ip += 4;
10244 break;
10246 case CEE_STLOC:
10247 CHECK_STACK (1);
10248 --sp;
10249 CHECK_OPSIZE (4);
10250 n = read16 (ip + 2);
10251 CHECK_LOCAL (n);
10252 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10253 UNVERIFIED;
10254 emit_stloc_ir (cfg, sp, header, n);
10255 ip += 4;
10256 inline_costs += 1;
10257 break;
10258 case CEE_LOCALLOC:
10259 CHECK_STACK (1);
10260 --sp;
10261 if (sp != stack_start)
10262 UNVERIFIED;
10263 if (cfg->method != method)
10265 * Inlining this into a loop in a parent could lead to
10266 * stack overflows which is different behavior than the
10267 * non-inlined case, thus disable inlining in this case.
10269 goto inline_failure;
10271 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10272 ins->dreg = alloc_preg (cfg);
10273 ins->sreg1 = sp [0]->dreg;
10274 ins->type = STACK_PTR;
10275 MONO_ADD_INS (cfg->cbb, ins);
10277 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10278 if (init_locals)
10279 ins->flags |= MONO_INST_INIT;
10281 *sp++ = ins;
10282 ip += 2;
10283 break;
10284 case CEE_ENDFILTER: {
10285 MonoExceptionClause *clause, *nearest;
10286 int cc, nearest_num;
10288 CHECK_STACK (1);
10289 --sp;
10290 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10291 UNVERIFIED;
10292 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10293 ins->sreg1 = (*sp)->dreg;
10294 MONO_ADD_INS (bblock, ins);
10295 start_new_bblock = 1;
10296 ip += 2;
10298 nearest = NULL;
10299 nearest_num = 0;
10300 for (cc = 0; cc < header->num_clauses; ++cc) {
10301 clause = &header->clauses [cc];
10302 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10303 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10304 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10305 nearest = clause;
10306 nearest_num = cc;
10309 g_assert (nearest);
10310 if ((ip - header->code) != nearest->handler_offset)
10311 UNVERIFIED;
10313 break;
10315 case CEE_UNALIGNED_:
10316 ins_flag |= MONO_INST_UNALIGNED;
10317 /* FIXME: record alignment? we can assume 1 for now */
10318 CHECK_OPSIZE (3);
10319 ip += 3;
10320 break;
10321 case CEE_VOLATILE_:
10322 ins_flag |= MONO_INST_VOLATILE;
10323 ip += 2;
10324 break;
10325 case CEE_TAIL_:
10326 ins_flag |= MONO_INST_TAILCALL;
10327 cfg->flags |= MONO_CFG_HAS_TAIL;
10328 /* Can't inline tail calls at this time */
10329 inline_costs += 100000;
10330 ip += 2;
10331 break;
10332 case CEE_INITOBJ:
10333 CHECK_STACK (1);
10334 --sp;
10335 CHECK_OPSIZE (6);
10336 token = read32 (ip + 2);
10337 klass = mini_get_class (method, token, generic_context);
10338 CHECK_TYPELOAD (klass);
10339 if (generic_class_is_reference_type (cfg, klass))
10340 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10341 else
10342 mini_emit_initobj (cfg, *sp, NULL, klass);
10343 ip += 6;
10344 inline_costs += 1;
10345 break;
10346 case CEE_CONSTRAINED_:
10347 CHECK_OPSIZE (6);
10348 token = read32 (ip + 2);
10349 if (method->wrapper_type != MONO_WRAPPER_NONE)
10350 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10351 else
10352 constrained_call = mono_class_get_full (image, token, generic_context);
10353 CHECK_TYPELOAD (constrained_call);
10354 ip += 6;
10355 break;
10356 case CEE_CPBLK:
10357 case CEE_INITBLK: {
10358 MonoInst *iargs [3];
10359 CHECK_STACK (3);
10360 sp -= 3;
10362 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10363 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10364 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10365 /* emit_memset only works when val == 0 */
10366 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10367 } else {
10368 iargs [0] = sp [0];
10369 iargs [1] = sp [1];
10370 iargs [2] = sp [2];
10371 if (ip [1] == CEE_CPBLK) {
10372 MonoMethod *memcpy_method = get_memcpy_method ();
10373 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10374 } else {
10375 MonoMethod *memset_method = get_memset_method ();
10376 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10379 ip += 2;
10380 inline_costs += 1;
10381 break;
10383 case CEE_NO_:
10384 CHECK_OPSIZE (3);
10385 if (ip [2] & 0x1)
10386 ins_flag |= MONO_INST_NOTYPECHECK;
10387 if (ip [2] & 0x2)
10388 ins_flag |= MONO_INST_NORANGECHECK;
10389 /* we ignore the no-nullcheck for now since we
10390 * really do it explicitly only when doing callvirt->call
10392 ip += 3;
10393 break;
10394 case CEE_RETHROW: {
10395 MonoInst *load;
10396 int handler_offset = -1;
10398 for (i = 0; i < header->num_clauses; ++i) {
10399 MonoExceptionClause *clause = &header->clauses [i];
10400 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10401 handler_offset = clause->handler_offset;
10402 break;
10406 bblock->flags |= BB_EXCEPTION_UNSAFE;
10408 g_assert (handler_offset != -1);
10410 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10411 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10412 ins->sreg1 = load->dreg;
10413 MONO_ADD_INS (bblock, ins);
10415 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10416 MONO_ADD_INS (bblock, ins);
10418 sp = stack_start;
10419 link_bblock (cfg, bblock, end_bblock);
10420 start_new_bblock = 1;
10421 ip += 2;
10422 break;
10424 case CEE_SIZEOF: {
10425 guint32 align;
10426 int ialign;
10428 CHECK_STACK_OVF (1);
10429 CHECK_OPSIZE (6);
10430 token = read32 (ip + 2);
10431 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10432 MonoType *type = mono_type_create_from_typespec (image, token);
10433 token = mono_type_size (type, &ialign);
10434 } else {
10435 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10436 CHECK_TYPELOAD (klass);
10437 mono_class_init (klass);
10438 token = mono_class_value_size (klass, &align);
10440 EMIT_NEW_ICONST (cfg, ins, token);
10441 *sp++= ins;
10442 ip += 6;
10443 break;
10445 case CEE_REFANYTYPE: {
10446 MonoInst *src_var, *src;
10448 CHECK_STACK (1);
10449 --sp;
10451 // FIXME:
10452 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10453 if (!src_var)
10454 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10455 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10456 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10457 *sp++ = ins;
10458 ip += 2;
10459 break;
10461 case CEE_READONLY_:
10462 readonly = TRUE;
10463 ip += 2;
10464 break;
10466 case CEE_UNUSED56:
10467 case CEE_UNUSED57:
10468 case CEE_UNUSED70:
10469 case CEE_UNUSED:
10470 case CEE_UNUSED99:
10471 UNVERIFIED;
10473 default:
10474 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10475 UNVERIFIED;
10477 break;
10479 case CEE_UNUSED58:
10480 case CEE_UNUSED1:
10481 UNVERIFIED;
10483 default:
10484 g_warning ("opcode 0x%02x not handled", *ip);
10485 UNVERIFIED;
10488 if (start_new_bblock != 1)
10489 UNVERIFIED;
10491 bblock->cil_length = ip - bblock->cil_code;
10492 bblock->next_bb = end_bblock;
10494 if (cfg->method == method && cfg->domainvar) {
10495 MonoInst *store;
10496 MonoInst *get_domain;
10498 cfg->cbb = init_localsbb;
10500 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10501 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10503 else {
10504 get_domain->dreg = alloc_preg (cfg);
10505 MONO_ADD_INS (cfg->cbb, get_domain);
10507 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10508 MONO_ADD_INS (cfg->cbb, store);
10511 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10512 if (cfg->compile_aot)
10513 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10514 mono_get_got_var (cfg);
10515 #endif
10517 if (cfg->method == method && cfg->got_var)
10518 mono_emit_load_got_addr (cfg);
10520 if (init_locals) {
10521 MonoInst *store;
10523 cfg->cbb = init_localsbb;
10524 cfg->ip = NULL;
10525 for (i = 0; i < header->num_locals; ++i) {
10526 MonoType *ptype = header->locals [i];
10527 int t = ptype->type;
10528 dreg = cfg->locals [i]->dreg;
10530 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10531 t = mono_class_enum_basetype (ptype->data.klass)->type;
10532 if (ptype->byref) {
10533 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10534 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10535 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10536 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10537 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10538 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10539 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10540 ins->type = STACK_R8;
10541 ins->inst_p0 = (void*)&r8_0;
10542 ins->dreg = alloc_dreg (cfg, STACK_R8);
10543 MONO_ADD_INS (init_localsbb, ins);
10544 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10545 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10546 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10547 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10548 } else {
10549 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10554 if (cfg->init_ref_vars && cfg->method == method) {
10555 /* Emit initialization for ref vars */
10556 // FIXME: Avoid duplication initialization for IL locals.
10557 for (i = 0; i < cfg->num_varinfo; ++i) {
10558 MonoInst *ins = cfg->varinfo [i];
10560 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10561 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10565 /* Add a sequence point for method entry/exit events */
10566 if (seq_points) {
10567 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10568 MONO_ADD_INS (init_localsbb, ins);
10569 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10570 MONO_ADD_INS (cfg->bb_exit, ins);
10573 cfg->ip = NULL;
10575 if (cfg->method == method) {
10576 MonoBasicBlock *bb;
10577 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10578 bb->region = mono_find_block_region (cfg, bb->real_offset);
10579 if (cfg->spvars)
10580 mono_create_spvar_for_region (cfg, bb->region);
10581 if (cfg->verbose_level > 2)
10582 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10586 g_slist_free (class_inits);
10587 dont_inline = g_list_remove (dont_inline, method);
10589 if (inline_costs < 0) {
10590 char *mname;
10592 /* Method is too large */
10593 mname = mono_method_full_name (method, TRUE);
10594 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10595 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10596 g_free (mname);
10597 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10598 mono_basic_block_free (original_bb);
10599 return -1;
10602 if ((cfg->verbose_level > 2) && (cfg->method == method))
10603 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10605 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10606 mono_basic_block_free (original_bb);
10607 return inline_costs;
10609 exception_exit:
10610 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10611 goto cleanup;
10613 inline_failure:
10614 goto cleanup;
10616 load_error:
10617 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10618 goto cleanup;
10620 unverified:
10621 set_exception_type_from_invalid_il (cfg, method, ip);
10622 goto cleanup;
10624 cleanup:
10625 g_slist_free (class_inits);
10626 mono_basic_block_free (original_bb);
10627 dont_inline = g_list_remove (dont_inline, method);
10628 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10629 return -1;
10632 static int
10633 store_membase_reg_to_store_membase_imm (int opcode)
10635 switch (opcode) {
10636 case OP_STORE_MEMBASE_REG:
10637 return OP_STORE_MEMBASE_IMM;
10638 case OP_STOREI1_MEMBASE_REG:
10639 return OP_STOREI1_MEMBASE_IMM;
10640 case OP_STOREI2_MEMBASE_REG:
10641 return OP_STOREI2_MEMBASE_IMM;
10642 case OP_STOREI4_MEMBASE_REG:
10643 return OP_STOREI4_MEMBASE_IMM;
10644 case OP_STOREI8_MEMBASE_REG:
10645 return OP_STOREI8_MEMBASE_IMM;
10646 default:
10647 g_assert_not_reached ();
10650 return -1;
10653 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate as its second
 * operand instead of a register, or -1 if no immediate variant exists.
 */
int
mono_op_to_op_imm (int opcode)
{
	switch (opcode) {
	/* 32 bit integer arithmetic/logic/shift ops */
	case OP_IADD:
		return OP_IADD_IMM;
	case OP_ISUB:
		return OP_ISUB_IMM;
	case OP_IDIV:
		return OP_IDIV_IMM;
	case OP_IDIV_UN:
		return OP_IDIV_UN_IMM;
	case OP_IREM:
		return OP_IREM_IMM;
	case OP_IREM_UN:
		return OP_IREM_UN_IMM;
	case OP_IMUL:
		return OP_IMUL_IMM;
	case OP_IAND:
		return OP_IAND_IMM;
	case OP_IOR:
		return OP_IOR_IMM;
	case OP_IXOR:
		return OP_IXOR_IMM;
	case OP_ISHL:
		return OP_ISHL_IMM;
	case OP_ISHR:
		return OP_ISHR_IMM;
	case OP_ISHR_UN:
		return OP_ISHR_UN_IMM;

	/* 64 bit ops; note there are no imm forms for long mul/div/rem here */
	case OP_LADD:
		return OP_LADD_IMM;
	case OP_LSUB:
		return OP_LSUB_IMM;
	case OP_LAND:
		return OP_LAND_IMM;
	case OP_LOR:
		return OP_LOR_IMM;
	case OP_LXOR:
		return OP_LXOR_IMM;
	case OP_LSHL:
		return OP_LSHL_IMM;
	case OP_LSHR:
		return OP_LSHR_IMM;
	case OP_LSHR_UN:
		return OP_LSHR_UN_IMM;

	/* compares */
	case OP_COMPARE:
		return OP_COMPARE_IMM;
	case OP_ICOMPARE:
		return OP_ICOMPARE_IMM;
	case OP_LCOMPARE:
		return OP_LCOMPARE_IMM;

	/* stores */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;

#if defined(TARGET_X86) || defined (TARGET_AMD64)
	case OP_X86_PUSH:
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
	/* indirect calls: the "immediate" form is a direct call */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
	case OP_CALL_REG:
		return OP_CALL;
	case OP_LCALL_REG:
		return OP_LCALL;
	case OP_FCALL_REG:
		return OP_FCALL;
	case OP_LOCALLOC:
		return OP_LOCALLOC_IMM;
	}

	return -1;
}
10744 static int
10745 ldind_to_load_membase (int opcode)
10747 switch (opcode) {
10748 case CEE_LDIND_I1:
10749 return OP_LOADI1_MEMBASE;
10750 case CEE_LDIND_U1:
10751 return OP_LOADU1_MEMBASE;
10752 case CEE_LDIND_I2:
10753 return OP_LOADI2_MEMBASE;
10754 case CEE_LDIND_U2:
10755 return OP_LOADU2_MEMBASE;
10756 case CEE_LDIND_I4:
10757 return OP_LOADI4_MEMBASE;
10758 case CEE_LDIND_U4:
10759 return OP_LOADU4_MEMBASE;
10760 case CEE_LDIND_I:
10761 return OP_LOAD_MEMBASE;
10762 case CEE_LDIND_REF:
10763 return OP_LOAD_MEMBASE;
10764 case CEE_LDIND_I8:
10765 return OP_LOADI8_MEMBASE;
10766 case CEE_LDIND_R4:
10767 return OP_LOADR4_MEMBASE;
10768 case CEE_LDIND_R8:
10769 return OP_LOADR8_MEMBASE;
10770 default:
10771 g_assert_not_reached ();
10774 return -1;
10777 static int
10778 stind_to_store_membase (int opcode)
10780 switch (opcode) {
10781 case CEE_STIND_I1:
10782 return OP_STOREI1_MEMBASE_REG;
10783 case CEE_STIND_I2:
10784 return OP_STOREI2_MEMBASE_REG;
10785 case CEE_STIND_I4:
10786 return OP_STOREI4_MEMBASE_REG;
10787 case CEE_STIND_I:
10788 case CEE_STIND_REF:
10789 return OP_STORE_MEMBASE_REG;
10790 case CEE_STIND_I8:
10791 return OP_STOREI8_MEMBASE_REG;
10792 case CEE_STIND_R4:
10793 return OP_STORER4_MEMBASE_REG;
10794 case CEE_STIND_R8:
10795 return OP_STORER8_MEMBASE_REG;
10796 default:
10797 g_assert_not_reached ();
10800 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-membase opcode to a load from an absolute address, or return
 * -1 if the architecture has no such opcode.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (opcode == OP_LOAD_MEMBASE)
		return OP_LOAD_MEM;
	if (opcode == OP_LOADU1_MEMBASE)
		return OP_LOADU1_MEM;
	if (opcode == OP_LOADU2_MEMBASE)
		return OP_LOADU2_MEM;
	if (opcode == OP_LOADI4_MEMBASE)
		return OP_LOADI4_MEM;
	if (opcode == OP_LOADU4_MEMBASE)
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	if (opcode == OP_LOADI8_MEMBASE)
		return OP_LOADI8_MEM;
#endif
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which folds OPCODE and a following store (STORE_OPCODE)
 * of its result into one read-modify-write memory instruction, or -1 if the
 * combination cannot be folded on this architecture.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* only full-word stores can be folded */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* storing a plain move means the store alone already does the work */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* only full-word (32/64 bit) stores can be folded */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* storing a plain move means the store alone already does the work */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which folds a setcc (OPCODE) with a following 1 byte
 * store (STORE_OPCODE) into a single setcc-to-memory instruction, or -1 if
 * no folding is possible.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Explicit break: the original fell through into OP_CNE, which was
		 * benign only because the guard was identical. */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which folds OPCODE with the load (LOAD_OPCODE) of its
 * first source operand into a single reg-memory instruction, or -1 if the
 * combination cannot be folded on this architecture.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* only full-word loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* under ilp32, OP_LOAD_MEMBASE is 4 bytes wide, so only fold the 8 byte load */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		/* under ilp32, a pointer-sized load pairs with the 32 bit compare */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode which folds OPCODE with the load (LOAD_OPCODE) of its
 * second source operand into a single reg-memory instruction, or -1 if the
 * combination cannot be folded on this architecture.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* only full-word loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* under ilp32, the pointer-sized OP_LOAD_MEMBASE is 4 bytes wide and
	 * belongs with the 32 bit ops; otherwise it is 8 bytes wide */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse (return -1) the conversion for
 * opcodes which this architecture emulates in software, since the emulation
 * helpers only exist for the register forms.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* long shifts are emulated on 32 bit targets without native support */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* integer division/remainder is emulated on this target */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
11106 #ifndef DISABLE_JIT
11109 * mono_handle_global_vregs:
11111 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11112 * for them.
11114 void
11115 mono_handle_global_vregs (MonoCompile *cfg)
11117 gint32 *vreg_to_bb;
11118 MonoBasicBlock *bb;
11119 int i, pos;
/*
 * vreg_to_bb encoding (see the loop below):
 *   0             -> vreg not seen yet
 *   block_num + 1 -> vreg seen only in that bblock so far
 *   -1            -> vreg seen in more than one bblock (made global)
 */
/* NOTE(review): this allocates sizeof (gint32*) per entry plus one extra
 * byte; looks like sizeof (gint32) * (cfg->next_vreg + 1) was intended.
 * Over-allocation is harmless but worth confirming. */
11121 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11123 #ifdef MONO_ARCH_SIMD_INTRINSICS
11124 if (cfg->uses_simd_intrinsics)
11125 mono_simd_simplify_indirection (cfg);
11126 #endif
11128 /* Find local vregs used in more than one bb */
11129 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11130 MonoInst *ins = bb->code;
11131 int block_num = bb->block_num;
11133 if (cfg->verbose_level > 2)
11134 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11136 cfg->cbb = bb;
11137 for (; ins; ins = ins->next) {
11138 const char *spec = INS_INFO (ins->opcode);
11139 int regtype = 0, regindex;
11140 gint32 prev_bb;
11142 if (G_UNLIKELY (cfg->verbose_level > 2))
11143 mono_print_ins (ins);
11145 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk all four register slots of the instruction (dreg, sreg1..3)
 * as described by the opcode's spec string; ' ' means slot unused. */
11147 for (regindex = 0; regindex < 4; regindex ++) {
11148 int vreg = 0;
11150 if (regindex == 0) {
11151 regtype = spec [MONO_INST_DEST];
11152 if (regtype == ' ')
11153 continue;
11154 vreg = ins->dreg;
11155 } else if (regindex == 1) {
11156 regtype = spec [MONO_INST_SRC1];
11157 if (regtype == ' ')
11158 continue;
11159 vreg = ins->sreg1;
11160 } else if (regindex == 2) {
11161 regtype = spec [MONO_INST_SRC2];
11162 if (regtype == ' ')
11163 continue;
11164 vreg = ins->sreg2;
11165 } else if (regindex == 3) {
11166 regtype = spec [MONO_INST_SRC3];
11167 if (regtype == ' ')
11168 continue;
11169 vreg = ins->sreg3;
11172 #if SIZEOF_REGISTER == 4
11173 /* In the LLVM case, the long opcodes are not decomposed */
11174 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11176 * Since some instructions reference the original long vreg,
11177 * and some reference the two component vregs, it is quite hard
11178 * to determine when it needs to be global. So be conservative.
11180 if (!get_vreg_to_inst (cfg, vreg)) {
11181 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11183 if (cfg->verbose_level > 2)
11184 printf ("LONG VREG R%d made global.\n", vreg);
11188 * Make the component vregs volatile since the optimizations can
11189 * get confused otherwise.
11191 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11192 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11194 #endif
11196 g_assert (vreg != -1);
11198 prev_bb = vreg_to_bb [vreg];
11199 if (prev_bb == 0) {
11200 /* 0 is a valid block num */
11201 vreg_to_bb [vreg] = block_num + 1;
11202 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables */
11203 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11204 continue;
11206 if (!get_vreg_to_inst (cfg, vreg)) {
11207 if (G_UNLIKELY (cfg->verbose_level > 2))
11208 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the matching stack type for the vreg */
11210 switch (regtype) {
11211 case 'i':
11212 if (vreg_is_ref (cfg, vreg))
11213 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11214 else
11215 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11216 break;
11217 case 'l':
11218 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11219 break;
11220 case 'f':
11221 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11222 break;
11223 case 'v':
11224 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11225 break;
11226 default:
11227 g_assert_not_reached ();
11231 /* Flag as having been used in more than one bb */
11232 vreg_to_bb [vreg] = -1;
11238 /* If a variable is used in only one bblock, convert it into a local vreg */
11239 for (i = 0; i < cfg->num_varinfo; i++) {
11240 MonoInst *var = cfg->varinfo [i];
11241 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11243 switch (var->type) {
11244 case STACK_I4:
11245 case STACK_OBJ:
11246 case STACK_PTR:
11247 case STACK_MP:
11248 case STACK_VTYPE:
11249 #if SIZEOF_REGISTER == 8
11250 case STACK_I8:
11251 #endif
11252 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11253 /* Enabling this screws up the fp stack on x86 */
11254 case STACK_R8:
11255 #endif
11256 /* Arguments are implicitly global */
11257 /* Putting R4 vars into registers doesn't work currently */
11258 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11260 * Make sure that the variable's liveness interval doesn't contain a call, since
11261 * that would cause the lvreg to be spilled, making the whole optimization
11262 * useless.
11264 /* This is too slow for JIT compilation */
11265 #if 0
11266 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11267 MonoInst *ins;
11268 int def_index, call_index, ins_index;
11269 gboolean spilled = FALSE;
11271 def_index = -1;
11272 call_index = -1;
11273 ins_index = 0;
11274 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11275 const char *spec = INS_INFO (ins->opcode);
11277 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11278 def_index = ins_index;
/* NOTE(review): the second clause below repeats the SRC1 check;
 * it looks like it was meant to test SRC2/sreg2 — dead (#if 0)
 * code, but worth fixing if ever re-enabled. */
11280 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11281 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11282 if (call_index > def_index) {
11283 spilled = TRUE;
11284 break;
11288 if (MONO_IS_CALL (ins))
11289 call_index = ins_index;
11291 ins_index ++;
11294 if (spilled)
11295 break;
11297 #endif
11299 if (G_UNLIKELY (cfg->verbose_level > 2))
11300 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11301 var->flags |= MONO_INST_IS_DEAD;
11302 cfg->vreg_to_inst [var->dreg] = NULL;
11304 break;
11309 * Compress the varinfo and vars tables so the liveness computation is faster and
11310 * takes up less space.
11312 pos = 0;
11313 for (i = 0; i < cfg->num_varinfo; ++i) {
11314 MonoInst *var = cfg->varinfo [i];
11315 if (pos < i && cfg->locals_start == i)
11316 cfg->locals_start = pos;
11317 if (!(var->flags & MONO_INST_IS_DEAD)) {
11318 if (pos < i) {
11319 cfg->varinfo [pos] = cfg->varinfo [i];
11320 cfg->varinfo [pos]->inst_c0 = pos;
11321 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11322 cfg->vars [pos].idx = pos;
11323 #if SIZEOF_REGISTER == 4
11324 if (cfg->varinfo [pos]->type == STACK_I8) {
11325 /* Modify the two component vars too */
11326 MonoInst *var1;
11328 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11329 var1->inst_c0 = pos;
11330 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11331 var1->inst_c0 = pos;
11333 #endif
11335 pos ++;
11338 cfg->num_varinfo = pos;
11339 if (cfg->locals_start > cfg->num_varinfo)
11340 cfg->locals_start = cfg->num_varinfo;
11344 * mono_spill_global_vars:
11346 * Generate spill code for variables which are not allocated to registers,
11347 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11348 * code is generated which could be optimized by the local optimization passes.
11350 void
11351 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11353 MonoBasicBlock *bb;
11354 char spec2 [16];
11355 int orig_next_vreg;
11356 guint32 *vreg_to_lvreg;
11357 guint32 *lvregs;
11358 guint32 i, lvregs_len;
11359 gboolean dest_has_lvreg = FALSE;
11360 guint32 stacktypes [128];
11361 MonoInst **live_range_start, **live_range_end;
11362 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11364 *need_local_opts = FALSE;
11366 memset (spec2, 0, sizeof (spec2));
/* Map spec regtype characters to stack types used by alloc_dreg () */
11368 /* FIXME: Move this function to mini.c */
11369 stacktypes ['i'] = STACK_PTR;
11370 stacktypes ['l'] = STACK_I8;
11371 stacktypes ['f'] = STACK_R8;
11372 #ifdef MONO_ARCH_SIMD_INTRINSICS
11373 stacktypes ['x'] = STACK_VTYPE;
11374 #endif
11376 #if SIZEOF_REGISTER == 4
11377 /* Create MonoInsts for longs */
11378 for (i = 0; i < cfg->num_varinfo; i++) {
11379 MonoInst *ins = cfg->varinfo [i];
11381 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11382 switch (ins->type) {
11383 case STACK_R8:
11384 case STACK_I8: {
11385 MonoInst *tree;
11387 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11388 break;
11390 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit component vregs stack slots overlapping
 * the 64-bit variable's slot (ls/ms word offsets). */
11392 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11393 g_assert (tree);
11394 tree->opcode = OP_REGOFFSET;
11395 tree->inst_basereg = ins->inst_basereg;
11396 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11398 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11399 g_assert (tree);
11400 tree->opcode = OP_REGOFFSET;
11401 tree->inst_basereg = ins->inst_basereg;
11402 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11403 break;
11405 default:
11406 break;
11410 #endif
11412 if (cfg->compute_gc_maps) {
11413 /* registers need liveness info even for !non refs */
11414 for (i = 0; i < cfg->num_varinfo; i++) {
11415 MonoInst *ins = cfg->varinfo [i];
11417 if (ins->opcode == OP_REGVAR)
11418 ins->flags |= MONO_INST_GC_TRACK;
11422 /* FIXME: widening and truncation */
11425 * As an optimization, when a variable allocated to the stack is first loaded into
11426 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11427 * the variable again.
/*
 * vreg_to_lvreg [vreg] caches the lvreg a stack variable was last loaded
 * into; lvregs[]/lvregs_len record which cache entries are live so they can
 * be cleared cheaply (at bblock boundaries and across calls).
 * lvregs is a fixed 1024-entry list; overflow is guarded by g_asserts below.
 */
11429 orig_next_vreg = cfg->next_vreg;
11430 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11431 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11432 lvregs_len = 0;
11435 * These arrays contain the first and last instructions accessing a given
11436 * variable.
11437 * Since we emit bblocks in the same order we process them here, and we
11438 * don't split live ranges, these will precisely describe the live range of
11439 * the variable, i.e. the instruction range where a valid value can be found
11440 * in the variables location.
11441 * The live range is computed using the liveness info computed by the liveness pass.
11442 * We can't use vmv->range, since that is an abstract live range, and we need
11443 * one which is instruction precise.
11444 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11446 /* FIXME: Only do this if debugging info is requested */
11447 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11448 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11449 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11450 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11452 /* Add spill loads/stores */
11453 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11454 MonoInst *ins;
11456 if (cfg->verbose_level > 2)
11457 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11459 /* Clear vreg_to_lvreg array */
11460 for (i = 0; i < lvregs_len; i++)
11461 vreg_to_lvreg [lvregs [i]] = 0;
11462 lvregs_len = 0;
11464 cfg->cbb = bb;
11465 MONO_BB_FOR_EACH_INS (bb, ins) {
11466 const char *spec = INS_INFO (ins->opcode);
11467 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11468 gboolean store, no_lvreg;
11469 int sregs [MONO_MAX_SRC_REGS];
11471 if (G_UNLIKELY (cfg->verbose_level > 2))
11472 mono_print_ins (ins);
11474 if (ins->opcode == OP_NOP)
11475 continue;
11478 * We handle LDADDR here as well, since it can only be decomposed
11479 * when variable addresses are known.
11481 if (ins->opcode == OP_LDADDR) {
11482 MonoInst *var = ins->inst_p0;
11484 if (var->opcode == OP_VTARG_ADDR) {
11485 /* Happens on SPARC/S390 where vtypes are passed by reference */
11486 MonoInst *vtaddr = var->inst_left;
11487 if (vtaddr->opcode == OP_REGVAR) {
11488 ins->opcode = OP_MOVE;
11489 ins->sreg1 = vtaddr->dreg;
11491 else if (var->inst_left->opcode == OP_REGOFFSET) {
11492 ins->opcode = OP_LOAD_MEMBASE;
11493 ins->inst_basereg = vtaddr->inst_basereg;
11494 ins->inst_offset = vtaddr->inst_offset;
11495 } else
11496 NOT_IMPLEMENTED;
11497 } else {
11498 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack variable: basereg + offset */
11500 ins->opcode = OP_ADD_IMM;
11501 ins->sreg1 = var->inst_basereg;
11502 ins->inst_imm = var->inst_offset;
11505 *need_local_opts = TRUE;
11506 spec = INS_INFO (ins->opcode);
/* All CIL-level opcodes must have been lowered by now */
11509 if (ins->opcode < MONO_CEE_LAST) {
11510 mono_print_ins (ins);
11511 g_assert_not_reached ();
11515 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11516 * src register.
11517 * FIXME:
11519 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 and build a spec that treats the base reg as a
 * source; swapped back after the sregs are processed (see 'store'). */
11520 tmp_reg = ins->dreg;
11521 ins->dreg = ins->sreg2;
11522 ins->sreg2 = tmp_reg;
11523 store = TRUE;
11525 spec2 [MONO_INST_DEST] = ' ';
11526 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11527 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11528 spec2 [MONO_INST_SRC3] = ' ';
11529 spec = spec2;
11530 } else if (MONO_IS_STORE_MEMINDEX (ins))
11531 g_assert_not_reached ();
11532 else
11533 store = FALSE;
11534 no_lvreg = FALSE;
11536 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11537 printf ("\t %.3s %d", spec, ins->dreg);
11538 num_sregs = mono_inst_get_src_registers (ins, sregs);
11539 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11540 printf (" %d", sregs [srcindex]);
11541 printf ("\n");
11544 /***************/
11545 /* DREG */
11546 /***************/
11547 regtype = spec [MONO_INST_DEST];
11548 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11549 prev_dreg = -1;
11551 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11552 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11553 MonoInst *store_ins;
11554 int store_opcode;
11555 MonoInst *def_ins = ins;
11556 int dreg = ins->dreg; /* The original vreg */
11558 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11560 if (var->opcode == OP_REGVAR) {
/* Variable lives in a register: just rewrite the dreg */
11561 ins->dreg = var->dreg;
11562 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11564 * Instead of emitting a load+store, use a _membase opcode.
11566 g_assert (var->opcode == OP_REGOFFSET);
11567 if (ins->opcode == OP_MOVE) {
11568 NULLIFY_INS (ins);
11569 def_ins = NULL;
11570 } else {
11571 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11572 ins->inst_basereg = var->inst_basereg;
11573 ins->inst_offset = var->inst_offset;
11574 ins->dreg = -1;
11576 spec = INS_INFO (ins->opcode);
11577 } else {
11578 guint32 lvreg;
11580 g_assert (var->opcode == OP_REGOFFSET);
11582 prev_dreg = ins->dreg;
11584 /* Invalidate any previous lvreg for this vreg */
11585 vreg_to_lvreg [ins->dreg] = 0;
11587 lvreg = 0;
11589 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11590 regtype = 'l';
11591 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg, then store it back
 * to the variable's stack slot. */
11594 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11596 if (regtype == 'l') {
11597 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11598 mono_bblock_insert_after_ins (bb, ins, store_ins);
11599 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11600 mono_bblock_insert_after_ins (bb, ins, store_ins);
11601 def_ins = store_ins;
11603 else {
11604 g_assert (store_opcode != OP_STOREV_MEMBASE);
11606 /* Try to fuse the store into the instruction itself */
11607 /* FIXME: Add more instructions */
11608 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11609 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11610 ins->inst_imm = ins->inst_c0;
11611 ins->inst_destbasereg = var->inst_basereg;
11612 ins->inst_offset = var->inst_offset;
11613 spec = INS_INFO (ins->opcode);
11614 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11615 ins->opcode = store_opcode;
11616 ins->inst_destbasereg = var->inst_basereg;
11617 ins->inst_offset = var->inst_offset;
11619 no_lvreg = TRUE;
/* Turned into a store: swap dreg/sreg2 and switch to a
 * store-style spec, like the MONO_IS_STORE_MEMBASE path. */
11621 tmp_reg = ins->dreg;
11622 ins->dreg = ins->sreg2;
11623 ins->sreg2 = tmp_reg;
11624 store = TRUE;
11626 spec2 [MONO_INST_DEST] = ' ';
11627 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11628 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11629 spec2 [MONO_INST_SRC3] = ' ';
11630 spec = spec2;
11631 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11632 // FIXME: The backends expect the base reg to be in inst_basereg
11633 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11634 ins->dreg = -1;
11635 ins->inst_basereg = var->inst_basereg;
11636 ins->inst_offset = var->inst_offset;
11637 spec = INS_INFO (ins->opcode);
11638 } else {
11639 /* printf ("INS: "); mono_print_ins (ins); */
11640 /* Create a store instruction */
11641 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11643 /* Insert it after the instruction */
11644 mono_bblock_insert_after_ins (bb, ins, store_ins);
11646 def_ins = store_ins;
11649 * We can't assign ins->dreg to var->dreg here, since the
11650 * sregs could use it. So set a flag, and do it after
11651 * the sregs.
11653 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11654 dest_has_lvreg = TRUE;
11659 if (def_ins && !live_range_start [dreg]) {
11660 live_range_start [dreg] = def_ins;
11661 live_range_start_bb [dreg] = bb;
11664 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11665 MonoInst *tmp;
11667 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11668 tmp->inst_c1 = dreg;
11669 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11673 /************/
11674 /* SREGS */
11675 /************/
11676 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* Scan all 3 sreg slots; unused slots have spec ' ' and sreg -1
 * (asserted below), so this is safe regardless of num_sregs. */
11677 for (srcindex = 0; srcindex < 3; ++srcindex) {
11678 regtype = spec [MONO_INST_SRC1 + srcindex];
11679 sreg = sregs [srcindex];
11681 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11682 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11683 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11684 MonoInst *use_ins = ins;
11685 MonoInst *load_ins;
11686 guint32 load_opcode;
11688 if (var->opcode == OP_REGVAR) {
11689 sregs [srcindex] = var->dreg;
11690 //mono_inst_set_src_registers (ins, sregs);
11691 live_range_end [sreg] = use_ins;
11692 live_range_end_bb [sreg] = bb;
11694 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11695 MonoInst *tmp;
11697 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11698 /* var->dreg is a hreg */
11699 tmp->inst_c1 = sreg;
11700 mono_bblock_insert_after_ins (bb, ins, tmp);
11703 continue;
11706 g_assert (var->opcode == OP_REGOFFSET);
11708 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11710 g_assert (load_opcode != OP_LOADV_MEMBASE);
11712 if (vreg_to_lvreg [sreg]) {
11713 g_assert (vreg_to_lvreg [sreg] != -1);
11715 /* The variable is already loaded to an lvreg */
11716 if (G_UNLIKELY (cfg->verbose_level > 2))
11717 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11718 sregs [srcindex] = vreg_to_lvreg [sreg];
11719 //mono_inst_set_src_registers (ins, sregs);
11720 continue;
11723 /* Try to fuse the load into the instruction */
11724 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11725 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11726 sregs [0] = var->inst_basereg;
11727 //mono_inst_set_src_registers (ins, sregs);
11728 ins->inst_offset = var->inst_offset;
11729 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11730 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11731 sregs [1] = var->inst_basereg;
11732 //mono_inst_set_src_registers (ins, sregs);
11733 ins->inst_offset = var->inst_offset;
11734 } else {
11735 if (MONO_IS_REAL_MOVE (ins)) {
/* A plain move: load directly into the move's dreg and
 * nop out the move itself. */
11736 ins->opcode = OP_NOP;
11737 sreg = ins->dreg;
11738 } else {
11739 //printf ("%d ", srcindex); mono_print_ins (ins);
11741 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11743 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11744 if (var->dreg == prev_dreg) {
11746 * sreg refers to the value loaded by the load
11747 * emitted below, but we need to use ins->dreg
11748 * since it refers to the store emitted earlier.
11750 sreg = ins->dreg;
11752 g_assert (sreg != -1);
11753 vreg_to_lvreg [var->dreg] = sreg;
11754 g_assert (lvregs_len < 1024);
11755 lvregs [lvregs_len ++] = var->dreg;
11759 sregs [srcindex] = sreg;
11760 //mono_inst_set_src_registers (ins, sregs);
11762 if (regtype == 'l') {
11763 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11764 mono_bblock_insert_before_ins (bb, ins, load_ins);
11765 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11766 mono_bblock_insert_before_ins (bb, ins, load_ins);
11767 use_ins = load_ins;
11769 else {
11770 #if SIZEOF_REGISTER == 4
11771 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11772 #endif
11773 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11774 mono_bblock_insert_before_ins (bb, ins, load_ins);
11775 use_ins = load_ins;
11779 if (var->dreg < orig_next_vreg) {
11780 live_range_end [var->dreg] = use_ins;
11781 live_range_end_bb [var->dreg] = bb;
11784 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11785 MonoInst *tmp;
11787 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11788 tmp->inst_c1 = var->dreg;
11789 mono_bblock_insert_after_ins (bb, ins, tmp);
11793 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section (see comment there): record the
 * dest lvreg now that the sregs have been processed. */
11795 if (dest_has_lvreg) {
11796 g_assert (ins->dreg != -1);
11797 vreg_to_lvreg [prev_dreg] = ins->dreg;
11798 g_assert (lvregs_len < 1024);
11799 lvregs [lvregs_len ++] = prev_dreg;
11800 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above */
11803 if (store) {
11804 tmp_reg = ins->dreg;
11805 ins->dreg = ins->sreg2;
11806 ins->sreg2 = tmp_reg;
/* Calls clobber the lvreg cache, so flush it */
11809 if (MONO_IS_CALL (ins)) {
11810 /* Clear vreg_to_lvreg array */
11811 for (i = 0; i < lvregs_len; i++)
11812 vreg_to_lvreg [lvregs [i]] = 0;
11813 lvregs_len = 0;
11814 } else if (ins->opcode == OP_NOP) {
11815 ins->dreg = -1;
11816 MONO_INST_NULLIFY_SREGS (ins);
11819 if (cfg->verbose_level > 2)
11820 mono_print_ins_index (1, ins);
11823 /* Extend the live range based on the liveness info */
11824 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11825 for (i = 0; i < cfg->num_varinfo; i ++) {
11826 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11828 if (vreg_is_volatile (cfg, vi->vreg))
11829 /* The liveness info is incomplete */
11830 continue;
11832 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11833 /* Live from at least the first ins of this bb */
11834 live_range_start [vi->vreg] = bb->code;
11835 live_range_start_bb [vi->vreg] = bb;
11838 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11839 /* Live at least until the last ins of this bb */
11840 live_range_end [vi->vreg] = bb->last_ins;
11841 live_range_end_bb [vi->vreg] = bb;
11847 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11849 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11850 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11852 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11853 for (i = 0; i < cfg->num_varinfo; ++i) {
11854 int vreg = MONO_VARINFO (cfg, i)->vreg;
11855 MonoInst *ins;
11857 if (live_range_start [vreg]) {
11858 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11859 ins->inst_c0 = i;
11860 ins->inst_c1 = vreg;
11861 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11863 if (live_range_end [vreg]) {
11864 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11865 ins->inst_c0 = i;
11866 ins->inst_c1 = vreg;
11867 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11868 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11869 else
11870 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11874 #endif
11876 g_free (live_range_start);
11877 g_free (live_range_end);
11878 g_free (live_range_start_bb);
11879 g_free (live_range_end_bb);
11883 * FIXME:
11884 * - use 'iadd' instead of 'int_add'
11885 * - handling ovf opcodes: decompose in method_to_ir.
11886 * - unify iregs/fregs
11887 * -> partly done, the missing parts are:
11888 * - a more complete unification would involve unifying the hregs as well, so
11889 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11890 * would no longer map to the machine hregs, so the code generators would need to
11891 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11892 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11893 * fp/non-fp branches speeds it up by about 15%.
11894 * - use sext/zext opcodes instead of shifts
11895 * - add OP_ICALL
11896 * - get rid of TEMPLOADs if possible and use vregs instead
11897 * - clean up usage of OP_P/OP_ opcodes
11898 * - cleanup usage of DUMMY_USE
11899 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11900 * stack
11901 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11902 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11903 * - make sure handle_stack_args () is called before the branch is emitted
11904 * - when the new IR is done, get rid of all unused stuff
11905 * - COMPARE/BEQ as separate instructions or unify them ?
11906 * - keeping them separate allows specialized compare instructions like
11907 * compare_imm, compare_membase
11908 * - most back ends unify fp compare+branch, fp compare+ceq
11909 * - integrate mono_save_args into inline_method
11910 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11911 * - handle long shift opts on 32 bit platforms somehow: they require
11912 * 3 sregs (2 for arg1 and 1 for arg2)
11913 * - make byref a 'normal' type.
11914 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11915 * variable if needed.
11916 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11917 * like inline_method.
11918 * - remove inlining restrictions
11919 * - fix LNEG and enable cfold of INEG
11920 * - generalize x86 optimizations like ldelema as a peephole optimization
11921 * - add store_mem_imm for amd64
11922 * - optimize the loading of the interruption flag in the managed->native wrappers
11923 * - avoid special handling of OP_NOP in passes
11924 * - move code inserting instructions into one function/macro.
11925 * - try a coalescing phase after liveness analysis
11926 * - add float -> vreg conversion + local optimizations on !x86
11927 * - figure out how to handle decomposed branches during optimizations, ie.
11928 * compare+branch, op_jump_table+op_br etc.
11929 * - promote RuntimeXHandles to vregs
11930 * - vtype cleanups:
11931 * - add a NEW_VARLOADA_VREG macro
11932 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11933 * accessing vtype fields.
11934 * - get rid of I8CONST on 64 bit platforms
11935 * - dealing with the increase in code size due to branches created during opcode
11936 * decomposition:
11937 * - use extended basic blocks
11938 * - all parts of the JIT
11939 * - handle_global_vregs () && local regalloc
11940 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11941 * - sources of increase in code size:
11942 * - vtypes
11943 * - long compares
11944 * - isinst and castclass
11945 * - lvregs not allocated to global registers even if used multiple times
11946 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11947 * meaningful.
11948 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11949 * - add all micro optimizations from the old JIT
11950 * - put tree optimizations into the deadce pass
11951 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11952 * specific function.
11953 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11954 * fcompare + branchCC.
11955 * - create a helper function for allocating a stack slot, taking into account
11956 * MONO_CFG_HAS_SPILLUP.
11957 * - merge r68207.
11958 * - merge the ia64 switch changes.
11959 * - optimize mono_regstate2_alloc_int/float.
11960 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11961 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11962 * parts of the tree could be separated by other instructions, killing the tree
11963 * arguments, or stores killing loads etc. Also, should we fold loads into other
11964 * instructions if the result of the load is used multiple times ?
11965 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11966 * - LAST MERGE: 108395.
11967 * - when returning vtypes in registers, generate IR and append it to the end of the
11968 * last bb instead of doing it in the epilog.
11969 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11974 NOTES
11975 -----
11977 - When to decompose opcodes:
11978 - earlier: this makes some optimizations hard to implement, since the low level IR
11979 * no longer contains the necessary information. But it is easier to do.
11980 - later: harder to implement, enables more optimizations.
11981 - Branches inside bblocks:
11982 - created when decomposing complex opcodes.
11983 - branches to another bblock: harmless, but not tracked by the branch
11984 optimizations, so need to branch to a label at the start of the bblock.
11985 - branches to inside the same bblock: very problematic, trips up the local
11986 * reg allocator. Can be fixed by splitting the current bblock, but that is a
11987 complex operation, since some local vregs can become global vregs etc.
11988 - Local/global vregs:
11989 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11990 local register allocator.
11991 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11992 structure, created by mono_create_var (). Assigned to hregs or the stack by
11993 the global register allocator.
11994 - When to do optimizations like alu->alu_imm:
11995 - earlier -> saves work later on since the IR will be smaller/simpler
11996 - later -> can work on more instructions
11997 - Handling of valuetypes:
11998 - When a vtype is pushed on the stack, a new temporary is created, an
11999 instruction computing its address (LDADDR) is emitted and pushed on
12000 the stack. Need to optimize cases when the vtype is used immediately as in
12001 argument passing, stloc etc.
12002 - Instead of the to_end stuff in the old JIT, simply call the function handling
12003 the values on the stack before emitting the last instruction of the bb.
12006 #endif /* DISABLE_JIT */