/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *	Paolo Molaro (lupus@ximian.com)
 *	Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/utils/mono-compiler.h>
56 #include <mono/utils/mono-memory-model.h>
57 #include <mono/metadata/mono-basic-block.h>
64 #include "jit-icalls.h"
66 #include "debugger-agent.h"
68 #define BRANCH_COST 10
69 #define INLINE_LENGTH_LIMIT 20
70 #define INLINE_FAILURE do {\
71 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
74 #define CHECK_CFG_EXCEPTION do {\
75 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
78 #define METHOD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
81 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
82 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (cil_method_fname); \
85 goto exception_exit; \
87 #define FIELD_ACCESS_FAILURE do { \
88 char *method_fname = mono_method_full_name (method, TRUE); \
89 char *field_fname = mono_field_full_name (field); \
90 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
91 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
92 g_free (method_fname); \
93 g_free (field_fname); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 if (cfg->generic_sharing_context) { \
98 if (cfg->verbose_level > 2) \
99 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
100 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
101 goto exception_exit; \
104 #define OUT_OF_MEMORY_FAILURE do { \
105 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
106 goto exception_exit; \
108 /* Determine whenever 'ins' represents a load of the 'this' argument */
109 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

/* Map a generic opcode to its immediate-operand form (with/without emulation). */
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
117 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
118 void mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
);
119 void mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
);
121 /* helper methods signatures */
122 static MonoMethodSignature
*helper_sig_class_init_trampoline
= NULL
;
123 static MonoMethodSignature
*helper_sig_domain_get
= NULL
;
124 static MonoMethodSignature
*helper_sig_generic_class_init_trampoline
= NULL
;
125 static MonoMethodSignature
*helper_sig_generic_class_init_trampoline_llvm
= NULL
;
126 static MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
= NULL
;
127 static MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
= NULL
;
128 static MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline_llvm
= NULL
;
131 * Instruction metadata
139 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
140 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
146 #if SIZEOF_REGISTER == 8
151 /* keep in sync with the enum in mini.h */
154 #include "mini-ops.h"
159 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
160 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
162 * This should contain the index of the last sreg + 1. This is not the same
163 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
165 const gint8 ins_sreg_counts
[] = {
166 #include "mini-ops.h"
171 #define MONO_INIT_VARINFO(vi,id) do { \
172 (vi)->range.first_use.pos.bid = 0xffff; \
178 mono_inst_set_src_registers (MonoInst
*ins
, int *regs
)
180 ins
->sreg1
= regs
[0];
181 ins
->sreg2
= regs
[1];
182 ins
->sreg3
= regs
[2];
186 mono_alloc_ireg (MonoCompile
*cfg
)
188 return alloc_ireg (cfg
);
192 mono_alloc_freg (MonoCompile
*cfg
)
194 return alloc_freg (cfg
);
198 mono_alloc_preg (MonoCompile
*cfg
)
200 return alloc_preg (cfg
);
204 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
206 return alloc_dreg (cfg
, stack_type
);
210 * mono_alloc_ireg_ref:
212 * Allocate an IREG, and mark it as holding a GC ref.
215 mono_alloc_ireg_ref (MonoCompile
*cfg
)
217 return alloc_ireg_ref (cfg
);
221 * mono_alloc_ireg_mp:
223 * Allocate an IREG, and mark it as holding a managed pointer.
226 mono_alloc_ireg_mp (MonoCompile
*cfg
)
228 return alloc_ireg_mp (cfg
);
232 * mono_alloc_ireg_copy:
234 * Allocate an IREG with the same GC type as VREG.
237 mono_alloc_ireg_copy (MonoCompile
*cfg
, guint32 vreg
)
239 if (vreg_is_ref (cfg
, vreg
))
240 return alloc_ireg_ref (cfg
);
241 else if (vreg_is_mp (cfg
, vreg
))
242 return alloc_ireg_mp (cfg
);
244 return alloc_ireg (cfg
);
248 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
254 switch (type
->type
) {
257 case MONO_TYPE_BOOLEAN
:
269 case MONO_TYPE_FNPTR
:
271 case MONO_TYPE_CLASS
:
272 case MONO_TYPE_STRING
:
273 case MONO_TYPE_OBJECT
:
274 case MONO_TYPE_SZARRAY
:
275 case MONO_TYPE_ARRAY
:
279 #if SIZEOF_REGISTER == 8
288 case MONO_TYPE_VALUETYPE
:
289 if (type
->data
.klass
->enumtype
) {
290 type
= mono_class_enum_basetype (type
->data
.klass
);
293 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
296 case MONO_TYPE_TYPEDBYREF
:
298 case MONO_TYPE_GENERICINST
:
299 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
303 g_assert (cfg
->generic_sharing_context
);
306 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
312 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
317 printf ("\n%s %d: [IN: ", msg
, bb
->block_num
);
318 for (i
= 0; i
< bb
->in_count
; ++i
)
319 printf (" BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
321 for (i
= 0; i
< bb
->out_count
; ++i
)
322 printf (" BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
324 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
325 mono_print_ins_index (-1, tree
);
329 mono_create_helper_signatures (void)
331 helper_sig_domain_get
= mono_create_icall_signature ("ptr");
332 helper_sig_class_init_trampoline
= mono_create_icall_signature ("void");
333 helper_sig_generic_class_init_trampoline
= mono_create_icall_signature ("void");
334 helper_sig_generic_class_init_trampoline_llvm
= mono_create_icall_signature ("void ptr");
335 helper_sig_rgctx_lazy_fetch_trampoline
= mono_create_icall_signature ("ptr ptr");
336 helper_sig_monitor_enter_exit_trampoline
= mono_create_icall_signature ("void");
337 helper_sig_monitor_enter_exit_trampoline_llvm
= mono_create_icall_signature ("void object");
341 * Can't put this at the beginning, since other files reference stuff from this
346 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
348 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
350 #define GET_BBLOCK(cfg,tblock,ip) do { \
351 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
353 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
354 NEW_BBLOCK (cfg, (tblock)); \
355 (tblock)->cil_code = (ip); \
356 ADD_BBLOCK (cfg, (tblock)); \
360 #if defined(TARGET_X86) || defined(TARGET_AMD64)
361 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
362 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
363 (dest)->dreg = alloc_ireg_mp ((cfg)); \
364 (dest)->sreg1 = (sr1); \
365 (dest)->sreg2 = (sr2); \
366 (dest)->inst_imm = (imm); \
367 (dest)->backend.shift_amount = (shift); \
368 MONO_ADD_INS ((cfg)->cbb, (dest)); \
372 #if SIZEOF_REGISTER == 8
373 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
374 /* FIXME: Need to add many more cases */ \
375 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
377 int dr = alloc_preg (cfg); \
378 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
379 (ins)->sreg2 = widen->dreg; \
383 #define ADD_WIDEN_OP(ins, arg1, arg2)
386 #define ADD_BINOP(op) do { \
387 MONO_INST_NEW (cfg, ins, (op)); \
389 ins->sreg1 = sp [0]->dreg; \
390 ins->sreg2 = sp [1]->dreg; \
391 type_from_op (ins, sp [0], sp [1]); \
393 /* Have to insert a widening op */ \
394 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
395 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
396 MONO_ADD_INS ((cfg)->cbb, (ins)); \
397 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
400 #define ADD_UNOP(op) do { \
401 MONO_INST_NEW (cfg, ins, (op)); \
403 ins->sreg1 = sp [0]->dreg; \
404 type_from_op (ins, sp [0], NULL); \
406 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
407 MONO_ADD_INS ((cfg)->cbb, (ins)); \
408 *sp++ = mono_decompose_opcode (cfg, ins); \
411 #define ADD_BINCOND(next_block) do { \
414 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
415 cmp->sreg1 = sp [0]->dreg; \
416 cmp->sreg2 = sp [1]->dreg; \
417 type_from_op (cmp, sp [0], sp [1]); \
419 type_from_op (ins, sp [0], sp [1]); \
420 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
421 GET_BBLOCK (cfg, tblock, target); \
422 link_bblock (cfg, bblock, tblock); \
423 ins->inst_true_bb = tblock; \
424 if ((next_block)) { \
425 link_bblock (cfg, bblock, (next_block)); \
426 ins->inst_false_bb = (next_block); \
427 start_new_bblock = 1; \
429 GET_BBLOCK (cfg, tblock, ip); \
430 link_bblock (cfg, bblock, tblock); \
431 ins->inst_false_bb = tblock; \
432 start_new_bblock = 2; \
434 if (sp != stack_start) { \
435 handle_stack_args (cfg, stack_start, sp - stack_start); \
436 CHECK_UNVERIFIABLE (cfg); \
438 MONO_ADD_INS (bblock, cmp); \
439 MONO_ADD_INS (bblock, ins); \
443 * link_bblock: Links two basic blocks
445 * links two basic blocks in the control flow graph, the 'from'
446 * argument is the starting block and the 'to' argument is the block
447 * the control flow ends to after 'from'.
450 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
452 MonoBasicBlock
**newa
;
456 if (from
->cil_code
) {
458 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
460 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
463 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
465 printf ("edge from entry to exit\n");
470 for (i
= 0; i
< from
->out_count
; ++i
) {
471 if (to
== from
->out_bb
[i
]) {
477 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
478 for (i
= 0; i
< from
->out_count
; ++i
) {
479 newa
[i
] = from
->out_bb
[i
];
487 for (i
= 0; i
< to
->in_count
; ++i
) {
488 if (from
== to
->in_bb
[i
]) {
494 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
495 for (i
= 0; i
< to
->in_count
; ++i
) {
496 newa
[i
] = to
->in_bb
[i
];
505 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
507 link_bblock (cfg
, from
, to
);
511 * mono_find_block_region:
513 * We mark each basic block with a region ID. We use that to avoid BB
514 * optimizations when blocks are in different regions.
517 * A region token that encodes where this region is, and information
518 * about the clause owner for this block.
520 * The region encodes the try/catch/filter clause that owns this block
521 * as well as the type. -1 is a special value that represents a block
522 * that is in none of try/catch/filter.
525 mono_find_block_region (MonoCompile
*cfg
, int offset
)
527 MonoMethodHeader
*header
= cfg
->header
;
528 MonoExceptionClause
*clause
;
531 for (i
= 0; i
< header
->num_clauses
; ++i
) {
532 clause
= &header
->clauses
[i
];
533 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
534 (offset
< (clause
->handler_offset
)))
535 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
537 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
538 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
539 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
540 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
541 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
543 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
546 if (MONO_OFFSET_IN_CLAUSE (clause
, offset
))
547 return ((i
+ 1) << 8) | clause
->flags
;
554 mono_find_final_block (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *target
, int type
)
556 MonoMethodHeader
*header
= cfg
->header
;
557 MonoExceptionClause
*clause
;
561 for (i
= 0; i
< header
->num_clauses
; ++i
) {
562 clause
= &header
->clauses
[i
];
563 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
564 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
565 if (clause
->flags
== type
)
566 res
= g_list_append (res
, clause
);
573 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
577 var
= g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
581 var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
582 /* prevent it from being register allocated */
583 var
->flags
|= MONO_INST_INDIRECT
;
585 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
589 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
591 return g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
595 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
599 var
= g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
603 var
= mono_compile_create_var (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
);
604 /* prevent it from being register allocated */
605 var
->flags
|= MONO_INST_INDIRECT
;
607 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
613 * Returns the type used in the eval stack when @type is loaded.
614 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
617 type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
621 inst
->klass
= klass
= mono_class_from_mono_type (type
);
623 inst
->type
= STACK_MP
;
628 switch (type
->type
) {
630 inst
->type
= STACK_INV
;
634 case MONO_TYPE_BOOLEAN
:
640 inst
->type
= STACK_I4
;
645 case MONO_TYPE_FNPTR
:
646 inst
->type
= STACK_PTR
;
648 case MONO_TYPE_CLASS
:
649 case MONO_TYPE_STRING
:
650 case MONO_TYPE_OBJECT
:
651 case MONO_TYPE_SZARRAY
:
652 case MONO_TYPE_ARRAY
:
653 inst
->type
= STACK_OBJ
;
657 inst
->type
= STACK_I8
;
661 inst
->type
= STACK_R8
;
663 case MONO_TYPE_VALUETYPE
:
664 if (type
->data
.klass
->enumtype
) {
665 type
= mono_class_enum_basetype (type
->data
.klass
);
669 inst
->type
= STACK_VTYPE
;
672 case MONO_TYPE_TYPEDBYREF
:
673 inst
->klass
= mono_defaults
.typed_reference_class
;
674 inst
->type
= STACK_VTYPE
;
676 case MONO_TYPE_GENERICINST
:
677 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
680 case MONO_TYPE_MVAR
:
681 /* FIXME: all the arguments must be references for now,
682 * later look inside cfg and see if the arg num is
685 g_assert (cfg
->generic_sharing_context
);
686 inst
->type
= STACK_OBJ
;
689 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
694 * The following tables are used to quickly validate the IL code in type_from_op ().
697 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
698 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
699 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
700 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
701 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
702 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
703 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
704 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
705 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
710 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
713 /* reduce the size of this table */
715 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
716 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
717 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
718 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
719 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
720 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
721 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
722 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
723 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
727 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
728 /* Inv i L p F & O vt */
730 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
731 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
732 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
733 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
734 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
735 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
736 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
739 /* reduce the size of this table */
741 shift_table
[STACK_MAX
] [STACK_MAX
] = {
742 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
743 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
744 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
745 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
746 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
747 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
748 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
749 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
753 * Tables to map from the non-specific opcode to the matching
754 * type-specific opcode.
756 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
758 binops_op_map
[STACK_MAX
] = {
759 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
762 /* handles from CEE_NEG to CEE_CONV_U8 */
764 unops_op_map
[STACK_MAX
] = {
765 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
768 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
770 ovfops_op_map
[STACK_MAX
] = {
771 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
774 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
776 ovf2ops_op_map
[STACK_MAX
] = {
777 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
780 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
782 ovf3ops_op_map
[STACK_MAX
] = {
783 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
786 /* handles from CEE_BEQ to CEE_BLT_UN */
788 beqops_op_map
[STACK_MAX
] = {
789 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
792 /* handles from CEE_CEQ to CEE_CLT_UN */
794 ceqops_op_map
[STACK_MAX
] = {
795 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
799 * Sets ins->type (the type on the eval stack) according to the
800 * type of the opcode and the arguments to it.
801 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
803 * FIXME: this function sets ins->type unconditionally in some cases, but
804 * it should set it to invalid for some types (a conv.x on an object)
807 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
809 switch (ins
->opcode
) {
816 /* FIXME: check unverifiable args for STACK_MP */
817 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
818 ins
->opcode
+= binops_op_map
[ins
->type
];
825 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
826 ins
->opcode
+= binops_op_map
[ins
->type
];
831 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
832 ins
->opcode
+= binops_op_map
[ins
->type
];
837 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
838 if ((src1
->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
839 ins
->opcode
= OP_LCOMPARE
;
840 else if (src1
->type
== STACK_R8
)
841 ins
->opcode
= OP_FCOMPARE
;
843 ins
->opcode
= OP_ICOMPARE
;
845 case OP_ICOMPARE_IMM
:
846 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
847 if ((src1
->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
848 ins
->opcode
= OP_LCOMPARE_IMM
;
860 ins
->opcode
+= beqops_op_map
[src1
->type
];
863 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
864 ins
->opcode
+= ceqops_op_map
[src1
->type
];
870 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
871 ins
->opcode
+= ceqops_op_map
[src1
->type
];
875 ins
->type
= neg_table
[src1
->type
];
876 ins
->opcode
+= unops_op_map
[ins
->type
];
879 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
880 ins
->type
= src1
->type
;
882 ins
->type
= STACK_INV
;
883 ins
->opcode
+= unops_op_map
[ins
->type
];
889 ins
->type
= STACK_I4
;
890 ins
->opcode
+= unops_op_map
[src1
->type
];
893 ins
->type
= STACK_R8
;
894 switch (src1
->type
) {
897 ins
->opcode
= OP_ICONV_TO_R_UN
;
900 ins
->opcode
= OP_LCONV_TO_R_UN
;
904 case CEE_CONV_OVF_I1
:
905 case CEE_CONV_OVF_U1
:
906 case CEE_CONV_OVF_I2
:
907 case CEE_CONV_OVF_U2
:
908 case CEE_CONV_OVF_I4
:
909 case CEE_CONV_OVF_U4
:
910 ins
->type
= STACK_I4
;
911 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
913 case CEE_CONV_OVF_I_UN
:
914 case CEE_CONV_OVF_U_UN
:
915 ins
->type
= STACK_PTR
;
916 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
918 case CEE_CONV_OVF_I1_UN
:
919 case CEE_CONV_OVF_I2_UN
:
920 case CEE_CONV_OVF_I4_UN
:
921 case CEE_CONV_OVF_U1_UN
:
922 case CEE_CONV_OVF_U2_UN
:
923 case CEE_CONV_OVF_U4_UN
:
924 ins
->type
= STACK_I4
;
925 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
928 ins
->type
= STACK_PTR
;
929 switch (src1
->type
) {
931 ins
->opcode
= OP_ICONV_TO_U
;
935 #if SIZEOF_VOID_P == 8
936 ins
->opcode
= OP_LCONV_TO_U
;
938 ins
->opcode
= OP_MOVE
;
942 ins
->opcode
= OP_LCONV_TO_U
;
945 ins
->opcode
= OP_FCONV_TO_U
;
951 ins
->type
= STACK_I8
;
952 ins
->opcode
+= unops_op_map
[src1
->type
];
954 case CEE_CONV_OVF_I8
:
955 case CEE_CONV_OVF_U8
:
956 ins
->type
= STACK_I8
;
957 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
959 case CEE_CONV_OVF_U8_UN
:
960 case CEE_CONV_OVF_I8_UN
:
961 ins
->type
= STACK_I8
;
962 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
966 ins
->type
= STACK_R8
;
967 ins
->opcode
+= unops_op_map
[src1
->type
];
970 ins
->type
= STACK_R8
;
974 ins
->type
= STACK_I4
;
975 ins
->opcode
+= ovfops_op_map
[src1
->type
];
980 ins
->type
= STACK_PTR
;
981 ins
->opcode
+= ovfops_op_map
[src1
->type
];
989 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
990 ins
->opcode
+= ovfops_op_map
[src1
->type
];
991 if (ins
->type
== STACK_R8
)
992 ins
->type
= STACK_INV
;
994 case OP_LOAD_MEMBASE
:
995 ins
->type
= STACK_PTR
;
997 case OP_LOADI1_MEMBASE
:
998 case OP_LOADU1_MEMBASE
:
999 case OP_LOADI2_MEMBASE
:
1000 case OP_LOADU2_MEMBASE
:
1001 case OP_LOADI4_MEMBASE
:
1002 case OP_LOADU4_MEMBASE
:
1003 ins
->type
= STACK_PTR
;
1005 case OP_LOADI8_MEMBASE
:
1006 ins
->type
= STACK_I8
;
1008 case OP_LOADR4_MEMBASE
:
1009 case OP_LOADR8_MEMBASE
:
1010 ins
->type
= STACK_R8
;
1013 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
1017 if (ins
->type
== STACK_MP
)
1018 ins
->klass
= mono_defaults
.object_class
;
1023 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
1029 param_table
[STACK_MAX
] [STACK_MAX
] = {
1034 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
1038 switch (args
->type
) {
1048 for (i
= 0; i
< sig
->param_count
; ++i
) {
1049 switch (args
[i
].type
) {
1053 if (!sig
->params
[i
]->byref
)
1057 if (sig
->params
[i
]->byref
)
1059 switch (sig
->params
[i
]->type
) {
1060 case MONO_TYPE_CLASS
:
1061 case MONO_TYPE_STRING
:
1062 case MONO_TYPE_OBJECT
:
1063 case MONO_TYPE_SZARRAY
:
1064 case MONO_TYPE_ARRAY
:
1071 if (sig
->params
[i
]->byref
)
1073 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1082 /*if (!param_table [args [i].type] [sig->params [i]->type])
1090 * When we need a pointer to the current domain many times in a method, we
1091 * call mono_domain_get() once and we store the result in a local variable.
1092 * This function returns the variable that represents the MonoDomain*.
1094 inline static MonoInst
*
1095 mono_get_domainvar (MonoCompile
*cfg
)
1097 if (!cfg
->domainvar
)
1098 cfg
->domainvar
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1099 return cfg
->domainvar
;
1103 * The got_var contains the address of the Global Offset Table when AOT
1107 mono_get_got_var (MonoCompile
*cfg
)
1109 #ifdef MONO_ARCH_NEED_GOT_VAR
1110 if (!cfg
->compile_aot
)
1112 if (!cfg
->got_var
) {
1113 cfg
->got_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1115 return cfg
->got_var
;
1122 mono_get_vtable_var (MonoCompile
*cfg
)
1124 g_assert (cfg
->generic_sharing_context
);
1126 if (!cfg
->rgctx_var
) {
1127 cfg
->rgctx_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1128 /* force the var to be stack allocated */
1129 cfg
->rgctx_var
->flags
|= MONO_INST_INDIRECT
;
1132 return cfg
->rgctx_var
;
1136 type_from_stack_type (MonoInst
*ins
) {
1137 switch (ins
->type
) {
1138 case STACK_I4
: return &mono_defaults
.int32_class
->byval_arg
;
1139 case STACK_I8
: return &mono_defaults
.int64_class
->byval_arg
;
1140 case STACK_PTR
: return &mono_defaults
.int_class
->byval_arg
;
1141 case STACK_R8
: return &mono_defaults
.double_class
->byval_arg
;
1143 return &ins
->klass
->this_arg
;
1144 case STACK_OBJ
: return &mono_defaults
.object_class
->byval_arg
;
1145 case STACK_VTYPE
: return &ins
->klass
->byval_arg
;
1147 g_error ("stack type %d to monotype not handled\n", ins
->type
);
1152 static G_GNUC_UNUSED
int
1153 type_to_stack_type (MonoType
*t
)
1155 t
= mono_type_get_underlying_type (t
);
1159 case MONO_TYPE_BOOLEAN
:
1162 case MONO_TYPE_CHAR
:
1169 case MONO_TYPE_FNPTR
:
1171 case MONO_TYPE_CLASS
:
1172 case MONO_TYPE_STRING
:
1173 case MONO_TYPE_OBJECT
:
1174 case MONO_TYPE_SZARRAY
:
1175 case MONO_TYPE_ARRAY
:
1183 case MONO_TYPE_VALUETYPE
:
1184 case MONO_TYPE_TYPEDBYREF
:
1186 case MONO_TYPE_GENERICINST
:
1187 if (mono_type_generic_inst_is_valuetype (t
))
1193 g_assert_not_reached ();
1200 array_access_to_klass (int opcode
)
1204 return mono_defaults
.byte_class
;
1206 return mono_defaults
.uint16_class
;
1209 return mono_defaults
.int_class
;
1212 return mono_defaults
.sbyte_class
;
1215 return mono_defaults
.int16_class
;
1218 return mono_defaults
.int32_class
;
1220 return mono_defaults
.uint32_class
;
1223 return mono_defaults
.int64_class
;
1226 return mono_defaults
.single_class
;
1229 return mono_defaults
.double_class
;
1230 case CEE_LDELEM_REF
:
1231 case CEE_STELEM_REF
:
1232 return mono_defaults
.object_class
;
1234 g_assert_not_reached ();
1240 * We try to share variables when possible
1243 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1248 /* inlining can result in deeper stacks */
1249 if (slot
>= cfg
->header
->max_stack
)
1250 return mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1252 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1254 switch (ins
->type
) {
1261 if ((vnum
= cfg
->intvars
[pos
]))
1262 return cfg
->varinfo
[vnum
];
1263 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1264 cfg
->intvars
[pos
] = res
->inst_c0
;
1267 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1273 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1276 * Don't use this if a generic_context is set, since that means AOT can't
1277 * look up the method using just the image+token.
1278 * table == 0 means this is a reference made from a wrapper.
1280 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
1281 MonoJumpInfoToken
*jump_info_token
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1282 jump_info_token
->image
= image
;
1283 jump_info_token
->token
= token
;
1284 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1289 * This function is called to handle items that are left on the evaluation stack
1290 * at basic block boundaries. What happens is that we save the values to local variables
1291 * and we reload them later when first entering the target basic block (with the
1292 * handle_loaded_temps () function).
1293 * A single joint point will use the same variables (stored in the array bb->out_stack or
1294 * bb->in_stack, if the basic block is before or after the joint point).
1296 * This function needs to be called _before_ emitting the last instruction of
1297 * the bb (i.e. before emitting a branch).
1298 * If the stack merge fails at a join point, cfg->unverifiable is set.
1301 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1304 MonoBasicBlock
*bb
= cfg
->cbb
;
1305 MonoBasicBlock
*outb
;
1306 MonoInst
*inst
, **locals
;
1311 if (cfg
->verbose_level
> 3)
1312 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
1313 if (!bb
->out_scount
) {
1314 bb
->out_scount
= count
;
1315 //printf ("bblock %d has out:", bb->block_num);
1317 for (i
= 0; i
< bb
->out_count
; ++i
) {
1318 outb
= bb
->out_bb
[i
];
1319 /* exception handlers are linked, but they should not be considered for stack args */
1320 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1322 //printf (" %d", outb->block_num);
1323 if (outb
->in_stack
) {
1325 bb
->out_stack
= outb
->in_stack
;
1331 bb
->out_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1332 for (i
= 0; i
< count
; ++i
) {
1334 * try to reuse temps already allocated for this purpouse, if they occupy the same
1335 * stack slot and if they are of the same type.
1336 * This won't cause conflicts since if 'local' is used to
1337 * store one of the values in the in_stack of a bblock, then
1338 * the same variable will be used for the same outgoing stack
1340 * This doesn't work when inlining methods, since the bblocks
1341 * in the inlined methods do not inherit their in_stack from
1342 * the bblock they are inlined to. See bug #58863 for an
1345 if (cfg
->inlined_method
)
1346 bb
->out_stack
[i
] = mono_compile_create_var (cfg
, type_from_stack_type (sp
[i
]), OP_LOCAL
);
1348 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
1353 for (i
= 0; i
< bb
->out_count
; ++i
) {
1354 outb
= bb
->out_bb
[i
];
1355 /* exception handlers are linked, but they should not be considered for stack args */
1356 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1358 if (outb
->in_scount
) {
1359 if (outb
->in_scount
!= bb
->out_scount
) {
1360 cfg
->unverifiable
= TRUE
;
1363 continue; /* check they are the same locals */
1365 outb
->in_scount
= count
;
1366 outb
->in_stack
= bb
->out_stack
;
1369 locals
= bb
->out_stack
;
1371 for (i
= 0; i
< count
; ++i
) {
1372 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1373 inst
->cil_code
= sp
[i
]->cil_code
;
1374 sp
[i
] = locals
[i
];
1375 if (cfg
->verbose_level
> 3)
1376 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1380 * It is possible that the out bblocks already have in_stack assigned, and
1381 * the in_stacks differ. In this case, we will store to all the different
1388 /* Find a bblock which has a different in_stack */
1390 while (bindex
< bb
->out_count
) {
1391 outb
= bb
->out_bb
[bindex
];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1397 if (outb
->in_stack
!= locals
) {
1398 for (i
= 0; i
< count
; ++i
) {
1399 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1400 inst
->cil_code
= sp
[i
]->cil_code
;
1401 sp
[i
] = locals
[i
];
1402 if (cfg
->verbose_level
> 3)
1403 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1405 locals
= outb
->in_stack
;
1414 /* Emit code which loads interface_offsets [klass->interface_id]
1415 * The array is stored in memory before vtable.
1418 mini_emit_load_intf_reg_vtable (MonoCompile
*cfg
, int intf_reg
, int vtable_reg
, MonoClass
*klass
)
1420 if (cfg
->compile_aot
) {
1421 int ioffset_reg
= alloc_preg (cfg
);
1422 int iid_reg
= alloc_preg (cfg
);
1424 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_ADJUSTED_IID
);
1425 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ioffset_reg
, iid_reg
, vtable_reg
);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, ioffset_reg
, 0);
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, vtable_reg
, -((klass
->interface_id
+ 1) * SIZEOF_VOID_P
));
1434 mini_emit_interface_bitmap_check (MonoCompile
*cfg
, int intf_bit_reg
, int base_reg
, int offset
, MonoClass
*klass
)
1436 int ibitmap_reg
= alloc_preg (cfg
);
1437 #ifdef COMPRESSED_INTERFACE_BITMAP
1439 MonoInst
*res
, *ins
;
1440 NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, ibitmap_reg
, base_reg
, offset
);
1441 MONO_ADD_INS (cfg
->cbb
, ins
);
1443 if (cfg
->compile_aot
)
1444 EMIT_NEW_AOTCONST (cfg
, args
[1], MONO_PATCH_INFO_IID
, klass
);
1446 EMIT_NEW_ICONST (cfg
, args
[1], klass
->interface_id
);
1447 res
= mono_emit_jit_icall (cfg
, mono_class_interface_match
, args
);
1448 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, intf_bit_reg
, res
->dreg
);
1450 int ibitmap_byte_reg
= alloc_preg (cfg
);
1452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, base_reg
, offset
);
1454 if (cfg
->compile_aot
) {
1455 int iid_reg
= alloc_preg (cfg
);
1456 int shifted_iid_reg
= alloc_preg (cfg
);
1457 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1458 int masked_iid_reg
= alloc_preg (cfg
);
1459 int iid_one_bit_reg
= alloc_preg (cfg
);
1460 int iid_bit_reg
= alloc_preg (cfg
);
1461 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1463 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, masked_iid_reg
, iid_reg
, 7);
1466 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1467 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1468 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1471 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1477 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1478 * stored in "klass_reg" implements the interface "klass".
1481 mini_emit_load_intf_bit_reg_class (MonoCompile
*cfg
, int intf_bit_reg
, int klass_reg
, MonoClass
*klass
)
1483 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, interface_bitmap
), klass
);
1487 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1488 * stored in "vtable_reg" implements the interface "klass".
1491 mini_emit_load_intf_bit_reg_vtable (MonoCompile
*cfg
, int intf_bit_reg
, int vtable_reg
, MonoClass
*klass
)
1493 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, interface_bitmap
), klass
);
1497 * Emit code which checks whenever the interface id of @klass is smaller than
1498 * than the value given by max_iid_reg.
1501 mini_emit_max_iid_check (MonoCompile
*cfg
, int max_iid_reg
, MonoClass
*klass
,
1502 MonoBasicBlock
*false_target
)
1504 if (cfg
->compile_aot
) {
1505 int iid_reg
= alloc_preg (cfg
);
1506 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1507 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, max_iid_reg
, iid_reg
);
1510 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, max_iid_reg
, klass
->interface_id
);
1512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1514 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1517 /* Same as above, but obtains max_iid from a vtable */
1519 mini_emit_max_iid_check_vtable (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
,
1520 MonoBasicBlock
*false_target
)
1522 int max_iid_reg
= alloc_preg (cfg
);
1524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, max_interface_id
));
1525 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1528 /* Same as above, but obtains max_iid from a klass */
1530 mini_emit_max_iid_check_class (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
,
1531 MonoBasicBlock
*false_target
)
1533 int max_iid_reg
= alloc_preg (cfg
);
1535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, max_interface_id
));
1536 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1540 mini_emit_isninst_cast_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_ins
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1542 int idepth_reg
= alloc_preg (cfg
);
1543 int stypes_reg
= alloc_preg (cfg
);
1544 int stype
= alloc_preg (cfg
);
1546 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1547 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1548 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1554 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, klass_ins
->dreg
);
1555 } else if (cfg
->compile_aot
) {
1556 int const_reg
= alloc_preg (cfg
);
1557 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1558 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, const_reg
);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, stype
, klass
);
1562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, true_target
);
1566 mini_emit_isninst_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1568 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, NULL
, false_target
, true_target
);
1572 mini_emit_iface_cast (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1574 int intf_reg
= alloc_preg (cfg
);
1576 mini_emit_max_iid_check_vtable (cfg
, vtable_reg
, klass
, false_target
);
1577 mini_emit_load_intf_bit_reg_vtable (cfg
, intf_reg
, vtable_reg
, klass
);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_reg
, 0);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1582 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1586 * Variant of the above that takes a register to the class, not the vtable.
1589 mini_emit_iface_class_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1591 int intf_bit_reg
= alloc_preg (cfg
);
1593 mini_emit_max_iid_check_class (cfg
, klass_reg
, klass
, false_target
);
1594 mini_emit_load_intf_bit_reg_class (cfg
, intf_bit_reg
, klass_reg
, klass
);
1595 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_bit_reg
, 0);
1597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1599 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1603 mini_emit_class_check_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
)
1606 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_inst
->dreg
);
1607 } else if (cfg
->compile_aot
) {
1608 int const_reg
= alloc_preg (cfg
);
1609 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1610 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1614 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1618 mini_emit_class_check (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
)
1620 return mini_emit_class_check_inst (cfg
, klass_reg
, klass
, NULL
);
1624 mini_emit_class_check_branch (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, int branch_op
, MonoBasicBlock
*target
)
1626 if (cfg
->compile_aot
) {
1627 int const_reg
= alloc_preg (cfg
);
1628 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1629 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1631 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, branch_op
, target
);
1637 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
);
1640 mini_emit_castclass_inst (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
, MonoBasicBlock
*object_is_null
)
1643 int rank_reg
= alloc_preg (cfg
);
1644 int eclass_reg
= alloc_preg (cfg
);
1646 g_assert (!klass_inst
);
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, rank
));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
1649 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1650 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
1652 if (klass
->cast_class
== mono_defaults
.object_class
) {
1653 int parent_reg
= alloc_preg (cfg
);
1654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
1655 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, object_is_null
);
1656 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1657 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
1658 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, object_is_null
);
1659 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1660 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
1661 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1662 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
1663 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, NULL
, NULL
);
1665 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1666 mini_emit_castclass (cfg
, -1, eclass_reg
, klass
->cast_class
, object_is_null
);
1669 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
) && (obj_reg
!= -1)) {
1670 /* Check that the object is a vector too */
1671 int bounds_reg
= alloc_preg (cfg
);
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
1673 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
1674 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1677 int idepth_reg
= alloc_preg (cfg
);
1678 int stypes_reg
= alloc_preg (cfg
);
1679 int stype
= alloc_preg (cfg
);
1681 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1683 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1684 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1688 mini_emit_class_check_inst (cfg
, stype
, klass
, klass_inst
);
1693 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
)
1695 return mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, NULL
, object_is_null
);
1699 mini_emit_memset (MonoCompile
*cfg
, int destreg
, int offset
, int size
, int val
, int align
)
1703 g_assert (val
== 0);
1708 if ((size
<= 4) && (size
<= align
)) {
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, destreg
, offset
, val
);
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI2_MEMBASE_IMM
, destreg
, offset
, val
);
1717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI4_MEMBASE_IMM
, destreg
, offset
, val
);
1719 #if SIZEOF_REGISTER == 8
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI8_MEMBASE_IMM
, destreg
, offset
, val
);
1727 val_reg
= alloc_preg (cfg
);
1729 if (SIZEOF_REGISTER
== 8)
1730 MONO_EMIT_NEW_I8CONST (cfg
, val_reg
, val
);
1732 MONO_EMIT_NEW_ICONST (cfg
, val_reg
, val
);
1735 /* This could be optimized further if neccesary */
1737 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1744 #if !NO_UNALIGNED_ACCESS
1745 if (SIZEOF_REGISTER
== 8) {
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1752 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, offset
, val_reg
);
1760 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, val_reg
);
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1777 mini_emit_memcpy (MonoCompile
*cfg
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
1784 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1785 g_assert (size
< 10000);
1788 /* This could be optimized further if neccesary */
1790 cur_reg
= alloc_preg (cfg
);
1791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1792 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1799 #if !NO_UNALIGNED_ACCESS
1800 if (SIZEOF_REGISTER
== 8) {
1802 cur_reg
= alloc_preg (cfg
);
1803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI8_MEMBASE
, cur_reg
, srcreg
, soffset
);
1804 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1813 cur_reg
= alloc_preg (cfg
);
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, cur_reg
, srcreg
, soffset
);
1815 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1821 cur_reg
= alloc_preg (cfg
);
1822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, cur_reg
, srcreg
, soffset
);
1823 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1829 cur_reg
= alloc_preg (cfg
);
1830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1831 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1839 ret_type_to_call_opcode (MonoType
*type
, int calli
, int virt
, MonoGenericSharingContext
*gsctx
)
1842 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1845 type
= mini_get_basic_type_from_generic (gsctx
, type
);
1846 switch (type
->type
) {
1847 case MONO_TYPE_VOID
:
1848 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALLVIRT
: OP_VOIDCALL
;
1851 case MONO_TYPE_BOOLEAN
:
1854 case MONO_TYPE_CHAR
:
1857 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1861 case MONO_TYPE_FNPTR
:
1862 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1863 case MONO_TYPE_CLASS
:
1864 case MONO_TYPE_STRING
:
1865 case MONO_TYPE_OBJECT
:
1866 case MONO_TYPE_SZARRAY
:
1867 case MONO_TYPE_ARRAY
:
1868 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1871 return calli
? OP_LCALL_REG
: virt
? OP_LCALLVIRT
: OP_LCALL
;
1874 return calli
? OP_FCALL_REG
: virt
? OP_FCALLVIRT
: OP_FCALL
;
1875 case MONO_TYPE_VALUETYPE
:
1876 if (type
->data
.klass
->enumtype
) {
1877 type
= mono_class_enum_basetype (type
->data
.klass
);
1880 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1881 case MONO_TYPE_TYPEDBYREF
:
1882 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1883 case MONO_TYPE_GENERICINST
:
1884 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
1887 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
1893 * target_type_is_incompatible:
1894 * @cfg: MonoCompile context
1896 * Check that the item @arg on the evaluation stack can be stored
1897 * in the target type (can be a local, or field, etc).
1898 * The cfg arg can be used to check if we need verification or just
1901 * Returns: non-0 value if arg can't be stored on a target.
1904 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1906 MonoType
*simple_type
;
1909 if (target
->byref
) {
1910 /* FIXME: check that the pointed to types match */
1911 if (arg
->type
== STACK_MP
)
1912 return arg
->klass
!= mono_class_from_mono_type (target
);
1913 if (arg
->type
== STACK_PTR
)
1918 simple_type
= mono_type_get_underlying_type (target
);
1919 switch (simple_type
->type
) {
1920 case MONO_TYPE_VOID
:
1924 case MONO_TYPE_BOOLEAN
:
1927 case MONO_TYPE_CHAR
:
1930 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1934 /* STACK_MP is needed when setting pinned locals */
1935 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1940 case MONO_TYPE_FNPTR
:
1942 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1943 * in native int. (#688008).
1945 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1948 case MONO_TYPE_CLASS
:
1949 case MONO_TYPE_STRING
:
1950 case MONO_TYPE_OBJECT
:
1951 case MONO_TYPE_SZARRAY
:
1952 case MONO_TYPE_ARRAY
:
1953 if (arg
->type
!= STACK_OBJ
)
1955 /* FIXME: check type compatibility */
1959 if (arg
->type
!= STACK_I8
)
1964 if (arg
->type
!= STACK_R8
)
1967 case MONO_TYPE_VALUETYPE
:
1968 if (arg
->type
!= STACK_VTYPE
)
1970 klass
= mono_class_from_mono_type (simple_type
);
1971 if (klass
!= arg
->klass
)
1974 case MONO_TYPE_TYPEDBYREF
:
1975 if (arg
->type
!= STACK_VTYPE
)
1977 klass
= mono_class_from_mono_type (simple_type
);
1978 if (klass
!= arg
->klass
)
1981 case MONO_TYPE_GENERICINST
:
1982 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
1983 if (arg
->type
!= STACK_VTYPE
)
1985 klass
= mono_class_from_mono_type (simple_type
);
1986 if (klass
!= arg
->klass
)
1990 if (arg
->type
!= STACK_OBJ
)
1992 /* FIXME: check type compatibility */
1996 case MONO_TYPE_MVAR
:
1997 /* FIXME: all the arguments must be references for now,
1998 * later look inside cfg and see if the arg num is
1999 * really a reference
2001 g_assert (cfg
->generic_sharing_context
);
2002 if (arg
->type
!= STACK_OBJ
)
2006 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
2012 * Prepare arguments for passing to a function call.
2013 * Return a non-zero value if the arguments can't be passed to the given
2015 * The type checks are not yet complete and some conversions may need
2016 * casts on 32 or 64 bit architectures.
2018 * FIXME: implement this using target_type_is_incompatible ()
2021 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
2023 MonoType
*simple_type
;
2027 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
2031 for (i
= 0; i
< sig
->param_count
; ++i
) {
2032 if (sig
->params
[i
]->byref
) {
2033 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
2037 simple_type
= sig
->params
[i
];
2038 simple_type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, simple_type
);
2040 switch (simple_type
->type
) {
2041 case MONO_TYPE_VOID
:
2046 case MONO_TYPE_BOOLEAN
:
2049 case MONO_TYPE_CHAR
:
2052 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
2058 case MONO_TYPE_FNPTR
:
2059 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
2062 case MONO_TYPE_CLASS
:
2063 case MONO_TYPE_STRING
:
2064 case MONO_TYPE_OBJECT
:
2065 case MONO_TYPE_SZARRAY
:
2066 case MONO_TYPE_ARRAY
:
2067 if (args
[i
]->type
!= STACK_OBJ
)
2072 if (args
[i
]->type
!= STACK_I8
)
2077 if (args
[i
]->type
!= STACK_R8
)
2080 case MONO_TYPE_VALUETYPE
:
2081 if (simple_type
->data
.klass
->enumtype
) {
2082 simple_type
= mono_class_enum_basetype (simple_type
->data
.klass
);
2085 if (args
[i
]->type
!= STACK_VTYPE
)
2088 case MONO_TYPE_TYPEDBYREF
:
2089 if (args
[i
]->type
!= STACK_VTYPE
)
2092 case MONO_TYPE_GENERICINST
:
2093 simple_type
= &simple_type
->data
.generic_class
->container_class
->byval_arg
;
2097 g_error ("unknown type 0x%02x in check_call_signature",
2105 callvirt_to_call (int opcode
)
2110 case OP_VOIDCALLVIRT
:
2119 g_assert_not_reached ();
2126 callvirt_to_call_membase (int opcode
)
2130 return OP_CALL_MEMBASE
;
2131 case OP_VOIDCALLVIRT
:
2132 return OP_VOIDCALL_MEMBASE
;
2134 return OP_FCALL_MEMBASE
;
2136 return OP_LCALL_MEMBASE
;
2138 return OP_VCALL_MEMBASE
;
2140 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Load the IMT argument (the target method, or @imt_arg when supplied) into
 * a vreg and attach it to @call — either in MONO_ARCH_IMT_REG or via the
 * arch-specific hook.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		/* NOTE(review): imt_arg_reg assignment guard reconstructed — confirm the
		 * field's #ifdef in mini.h. */
		call->imt_arg_reg = method_reg;
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2201 static MonoJumpInfo
*
2202 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2204 MonoJumpInfo
*ji
= mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2208 ji
->data
.target
= target
;
2213 inline static MonoCallInst
*
2214 mono_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
2215 MonoInst
**args
, int calli
, int virtual, int tail
, int rgctx
)
2218 #ifdef MONO_ARCH_SOFT_FLOAT
2223 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
2225 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (sig
->ret
, calli
, virtual, cfg
->generic_sharing_context
));
2228 call
->signature
= sig
;
2229 call
->rgctx_reg
= rgctx
;
2231 type_to_eval_stack_type ((cfg
), sig
->ret
, &call
->inst
);
2234 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2235 call
->vret_var
= cfg
->vret_addr
;
2236 //g_assert_not_reached ();
2238 } else if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2239 MonoInst
*temp
= mono_compile_create_var (cfg
, sig
->ret
, OP_LOCAL
);
2242 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
2245 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2246 * address of return value to increase optimization opportunities.
2247 * Before vtype decomposition, the dreg of the call ins itself represents the
2248 * fact the call modifies the return value. After decomposition, the call will
2249 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2250 * will be transformed into an LDADDR.
2252 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
2253 loada
->dreg
= alloc_preg (cfg
);
2254 loada
->inst_p0
= temp
;
2255 /* We reference the call too since call->dreg could change during optimization */
2256 loada
->inst_p1
= call
;
2257 MONO_ADD_INS (cfg
->cbb
, loada
);
2259 call
->inst
.dreg
= temp
->dreg
;
2261 call
->vret_var
= loada
;
2262 } else if (!MONO_TYPE_IS_VOID (sig
->ret
))
2263 call
->inst
.dreg
= alloc_dreg (cfg
, call
->inst
.type
);
2265 #ifdef MONO_ARCH_SOFT_FLOAT
2266 if (COMPILE_SOFT_FLOAT (cfg
)) {
2268 * If the call has a float argument, we would need to do an r8->r4 conversion using
2269 * an icall, but that cannot be done during the call sequence since it would clobber
2270 * the call registers + the stack. So we do it before emitting the call.
2272 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2274 MonoInst
*in
= call
->args
[i
];
2276 if (i
>= sig
->hasthis
)
2277 t
= sig
->params
[i
- sig
->hasthis
];
2279 t
= &mono_defaults
.int_class
->byval_arg
;
2280 t
= mono_type_get_underlying_type (t
);
2282 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
2283 MonoInst
*iargs
[1];
2287 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
2289 /* The result will be in an int vreg */
2290 call
->args
[i
] = conv
;
2297 if (COMPILE_LLVM (cfg
))
2298 mono_llvm_emit_call (cfg
, call
);
2300 mono_arch_emit_call (cfg
, call
);
2302 mono_arch_emit_call (cfg
, call
);
2305 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
2306 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
2312 set_rgctx_arg (MonoCompile
*cfg
, MonoCallInst
*call
, int rgctx_reg
, MonoInst
*rgctx_arg
)
2314 #ifdef MONO_ARCH_RGCTX_REG
2315 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2316 cfg
->uses_rgctx_reg
= TRUE
;
2317 call
->rgctx_reg
= TRUE
;
2319 call
->rgctx_arg_reg
= rgctx_reg
;
2326 inline static MonoInst
*
2327 mono_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*rgctx_arg
)
2333 rgctx_reg
= mono_alloc_preg (cfg
);
2334 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2337 call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, FALSE
, rgctx_arg
? TRUE
: FALSE
);
2339 call
->inst
.sreg1
= addr
->dreg
;
2341 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2344 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2346 return (MonoInst
*)call
;
2350 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
, MonoMethod
*cmethod
, int rgctx_type
);
2352 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
, MonoClass
*klass
, int rgctx_type
);
2355 mono_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2356 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
2358 gboolean might_be_remote
;
2359 gboolean
virtual = this != NULL
;
2360 gboolean enable_for_aot
= TRUE
;
2366 rgctx_reg
= mono_alloc_preg (cfg
);
2367 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2370 if (method
->string_ctor
) {
2371 /* Create the real signature */
2372 /* FIXME: Cache these */
2373 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
2374 ctor_sig
->ret
= &mono_defaults
.string_class
->byval_arg
;
2379 context_used
= mono_method_check_context_used (method
);
2381 might_be_remote
= this && sig
->hasthis
&&
2382 (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) &&
2383 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && (!MONO_CHECK_THIS (this) || context_used
);
2385 if (might_be_remote
&& context_used
) {
2388 g_assert (cfg
->generic_sharing_context
);
2390 addr
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
2392 return mono_emit_calli (cfg
, sig
, args
, addr
, NULL
);
2395 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, virtual, FALSE
, rgctx_arg
? TRUE
: FALSE
);
2397 if (might_be_remote
)
2398 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2400 call
->method
= method
;
2401 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
2402 call
->inst
.inst_left
= this;
2405 int vtable_reg
, slot_reg
, this_reg
;
2407 this_reg
= this->dreg
;
2409 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2410 if ((method
->klass
->parent
== mono_defaults
.multicastdelegate_class
) && (!strcmp (method
->name
, "Invoke"))) {
2411 MonoInst
*dummy_use
;
2413 MONO_EMIT_NULL_CHECK (cfg
, this_reg
);
2415 /* Make a call to delegate->invoke_impl */
2416 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2417 call
->inst
.inst_basereg
= this_reg
;
2418 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
2419 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2421 /* We must emit a dummy use here because the delegate trampoline will
2422 replace the 'this' argument with the delegate target making this activation
2423 no longer a root for the delegate.
2424 This is an issue for delegates that target collectible code such as dynamic
2425 methods of GC'able assemblies.
2427 For a test case look into #667921.
2429 FIXME: a dummy use is not the best way to do it as the local register allocator
2430 will put it on a caller save register and spil it around the call.
2431 Ideally, we would either put it on a callee save register or only do the store part.
2433 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[0]);
2435 return (MonoInst
*)call
;
2439 if ((!cfg
->compile_aot
|| enable_for_aot
) &&
2440 (!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
2441 (MONO_METHOD_IS_FINAL (method
) &&
2442 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)) &&
2443 !(method
->klass
->marshalbyref
&& context_used
)) {
2445 * the method is not virtual, we just need to ensure this is not null
2446 * and then we can call the method directly.
2448 if (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) {
2450 * The check above ensures method is not gshared, this is needed since
2451 * gshared methods can't have wrappers.
2453 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2456 if (!method
->string_ctor
)
2457 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2459 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2460 } else if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
2462 * the method is virtual, but we can statically dispatch since either
2463 * it's class or the method itself are sealed.
2464 * But first we need to ensure it's not a null reference.
2466 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2468 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2470 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2472 vtable_reg
= alloc_preg (cfg
);
2473 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, this_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2474 if (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2476 #ifdef MONO_ARCH_HAVE_IMT
2478 guint32 imt_slot
= mono_method_get_imt_slot (method
);
2479 emit_imt_argument (cfg
, call
, imt_arg
);
2480 slot_reg
= vtable_reg
;
2481 call
->inst
.inst_offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * SIZEOF_VOID_P
;
2484 if (slot_reg
== -1) {
2485 slot_reg
= alloc_preg (cfg
);
2486 mini_emit_load_intf_reg_vtable (cfg
, slot_reg
, vtable_reg
, method
->klass
);
2487 call
->inst
.inst_offset
= mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
;
2490 slot_reg
= vtable_reg
;
2491 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoVTable
, vtable
) +
2492 ((mono_method_get_vtable_index (method
)) * (SIZEOF_VOID_P
));
2493 #ifdef MONO_ARCH_HAVE_IMT
2495 g_assert (mono_method_signature (method
)->generic_param_count
);
2496 emit_imt_argument (cfg
, call
, imt_arg
);
2501 call
->inst
.sreg1
= slot_reg
;
2502 call
->virtual = TRUE
;
2506 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2509 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2511 return (MonoInst
*)call
;
2515 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this)
2517 return mono_emit_method_call_full (cfg
, method
, mono_method_signature (method
), args
, this, NULL
, NULL
);
2521 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
2528 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
, FALSE
);
2531 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2533 return (MonoInst
*)call
;
2537 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2539 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2543 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2547 * mono_emit_abs_call:
2549 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2551 inline static MonoInst
*
2552 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2553 MonoMethodSignature
*sig
, MonoInst
**args
)
2555 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2559 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2562 if (cfg
->abs_patches
== NULL
)
2563 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2564 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2565 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2566 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2571 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2573 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2574 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2578 * Native code might return non register sized integers
2579 * without initializing the upper bits.
2581 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2582 case OP_LOADI1_MEMBASE
:
2583 widen_op
= OP_ICONV_TO_I1
;
2585 case OP_LOADU1_MEMBASE
:
2586 widen_op
= OP_ICONV_TO_U1
;
2588 case OP_LOADI2_MEMBASE
:
2589 widen_op
= OP_ICONV_TO_I2
;
2591 case OP_LOADU2_MEMBASE
:
2592 widen_op
= OP_ICONV_TO_U2
;
2598 if (widen_op
!= -1) {
2599 int dreg
= alloc_preg (cfg
);
2602 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2603 widen
->type
= ins
->type
;
2613 get_memcpy_method (void)
2615 static MonoMethod
*memcpy_method
= NULL
;
2616 if (!memcpy_method
) {
2617 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2619 g_error ("Old corlib found. Install a new one");
2621 return memcpy_method
;
2625 create_write_barrier_bitmap (MonoCompile
*cfg
, MonoClass
*klass
, unsigned *wb_bitmap
, int offset
)
2627 MonoClassField
*field
;
2628 gpointer iter
= NULL
;
2630 while ((field
= mono_class_get_fields (klass
, &iter
))) {
2633 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
2635 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
2636 if (mini_type_is_reference (cfg
, mono_field_get_type (field
))) {
2637 g_assert ((foffset
% SIZEOF_VOID_P
) == 0);
2638 *wb_bitmap
|= 1 << ((offset
+ foffset
) / SIZEOF_VOID_P
);
2640 MonoClass
*field_class
= mono_class_from_mono_type (field
->type
);
2641 if (field_class
->has_references
)
2642 create_write_barrier_bitmap (cfg
, field_class
, wb_bitmap
, offset
+ foffset
);
2648 emit_write_barrier (MonoCompile
*cfg
, MonoInst
*ptr
, MonoInst
*value
, int value_reg
)
2650 int card_table_shift_bits
;
2651 gpointer card_table_mask
;
2653 MonoInst
*dummy_use
;
2654 int nursery_shift_bits
;
2655 size_t nursery_size
;
2656 gboolean has_card_table_wb
= FALSE
;
2658 if (!cfg
->gen_write_barriers
)
2661 card_table
= mono_gc_get_card_table (&card_table_shift_bits
, &card_table_mask
);
2663 mono_gc_get_nursery (&nursery_shift_bits
, &nursery_size
);
2665 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2666 has_card_table_wb
= TRUE
;
2669 if (has_card_table_wb
&& !cfg
->compile_aot
&& card_table
&& nursery_shift_bits
> 0) {
2672 MONO_INST_NEW (cfg
, wbarrier
, OP_CARD_TABLE_WBARRIER
);
2673 wbarrier
->sreg1
= ptr
->dreg
;
2675 wbarrier
->sreg2
= value
->dreg
;
2677 wbarrier
->sreg2
= value_reg
;
2678 MONO_ADD_INS (cfg
->cbb
, wbarrier
);
2679 } else if (card_table
) {
2680 int offset_reg
= alloc_preg (cfg
);
2681 int card_reg
= alloc_preg (cfg
);
2684 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, offset_reg
, ptr
->dreg
, card_table_shift_bits
);
2685 if (card_table_mask
)
2686 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PAND_IMM
, offset_reg
, offset_reg
, card_table_mask
);
2688 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2689 * IMM's larger than 32bits.
2691 if (cfg
->compile_aot
) {
2692 MONO_EMIT_NEW_AOTCONST (cfg
, card_reg
, NULL
, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
);
2694 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2695 ins
->inst_p0
= card_table
;
2696 ins
->dreg
= card_reg
;
2697 MONO_ADD_INS (cfg
->cbb
, ins
);
2700 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, offset_reg
, offset_reg
, card_reg
);
2701 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, offset_reg
, 0, 1);
2703 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
2704 mono_emit_method_call (cfg
, write_barrier
, &ptr
, NULL
);
2708 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, value
);
2710 MONO_INST_NEW (cfg
, dummy_use
, OP_DUMMY_USE
);
2711 dummy_use
->sreg1
= value_reg
;
2712 MONO_ADD_INS (cfg
->cbb
, dummy_use
);
2717 mono_emit_wb_aware_memcpy (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*iargs
[4], int size
, int align
)
2719 int dest_ptr_reg
, tmp_reg
, destreg
, srcreg
, offset
;
2720 unsigned need_wb
= 0;
2725 /*types with references can't have alignment smaller than sizeof(void*) */
2726 if (align
< SIZEOF_VOID_P
)
2729 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2730 if (size
> 32 * SIZEOF_VOID_P
)
2733 create_write_barrier_bitmap (cfg
, klass
, &need_wb
, 0);
2735 /* We don't unroll more than 5 stores to avoid code bloat. */
2736 if (size
> 5 * SIZEOF_VOID_P
) {
2737 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2738 size
+= (SIZEOF_VOID_P
- 1);
2739 size
&= ~(SIZEOF_VOID_P
- 1);
2741 EMIT_NEW_ICONST (cfg
, iargs
[2], size
);
2742 EMIT_NEW_ICONST (cfg
, iargs
[3], need_wb
);
2743 mono_emit_jit_icall (cfg
, mono_gc_wbarrier_value_copy_bitmap
, iargs
);
2747 destreg
= iargs
[0]->dreg
;
2748 srcreg
= iargs
[1]->dreg
;
2751 dest_ptr_reg
= alloc_preg (cfg
);
2752 tmp_reg
= alloc_preg (cfg
);
2755 EMIT_NEW_UNALU (cfg
, iargs
[0], OP_MOVE
, dest_ptr_reg
, destreg
);
2757 while (size
>= SIZEOF_VOID_P
) {
2758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, tmp_reg
, srcreg
, offset
);
2759 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, dest_ptr_reg
, 0, tmp_reg
);
2762 emit_write_barrier (cfg
, iargs
[0], NULL
, tmp_reg
);
2764 offset
+= SIZEOF_VOID_P
;
2765 size
-= SIZEOF_VOID_P
;
2768 /*tmp += sizeof (void*)*/
2769 if (size
>= SIZEOF_VOID_P
) {
2770 NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dest_ptr_reg
, dest_ptr_reg
, SIZEOF_VOID_P
);
2771 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
2775 /* Those cannot be references since size < sizeof (void*) */
2777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, tmp_reg
, srcreg
, offset
);
2778 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmp_reg
, srcreg
, offset
);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmp_reg
, srcreg
, offset
);
2792 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2801 * Emit code to copy a valuetype of type @klass whose address is stored in
2802 * @src->dreg to memory whose address is stored at @dest->dreg.
2805 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2807 MonoInst
*iargs
[4];
2810 MonoMethod
*memcpy_method
;
2814 * This check breaks with spilled vars... need to handle it during verification anyway.
2815 * g_assert (klass && klass == src->klass && klass == dest->klass);
2819 n
= mono_class_native_size (klass
, &align
);
2821 n
= mono_class_value_size (klass
, &align
);
2823 /* if native is true there should be no references in the struct */
2824 if (cfg
->gen_write_barriers
&& klass
->has_references
&& !native
) {
2825 /* Avoid barriers when storing to the stack */
2826 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2827 (dest
->opcode
== OP_LDADDR
))) {
2828 int context_used
= 0;
2833 if (cfg
->generic_sharing_context
)
2834 context_used
= mono_class_check_context_used (klass
);
2836 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2837 if ((cfg
->opt
& MONO_OPT_INTRINS
) && mono_emit_wb_aware_memcpy (cfg
, klass
, iargs
, n
, align
)) {
2839 } else if (context_used
) {
2840 iargs
[2] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
2842 if (cfg
->compile_aot
) {
2843 EMIT_NEW_CLASSCONST (cfg
, iargs
[2], klass
);
2845 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2846 mono_class_compute_gc_descriptor (klass
);
2850 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2855 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2856 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2857 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2861 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2863 memcpy_method
= get_memcpy_method ();
2864 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2869 get_memset_method (void)
2871 static MonoMethod
*memset_method
= NULL
;
2872 if (!memset_method
) {
2873 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2875 g_error ("Old corlib found. Install a new one");
2877 return memset_method
;
2881 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2883 MonoInst
*iargs
[3];
2886 MonoMethod
*memset_method
;
2888 /* FIXME: Optimize this for the case when dest is an LDADDR */
2890 mono_class_init (klass
);
2891 n
= mono_class_value_size (klass
, &align
);
2893 if (n
<= sizeof (gpointer
) * 5) {
2894 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2897 memset_method
= get_memset_method ();
2899 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2900 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2901 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2906 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2908 MonoInst
*this = NULL
;
2910 g_assert (cfg
->generic_sharing_context
);
2912 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2913 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2914 !method
->klass
->valuetype
)
2915 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2917 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2918 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2921 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2923 mrgctx_loc
= mono_get_vtable_var (cfg
);
2924 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2927 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2928 MonoInst
*vtable_loc
, *vtable_var
;
2932 vtable_loc
= mono_get_vtable_var (cfg
);
2933 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2935 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2936 MonoInst
*mrgctx_var
= vtable_var
;
2939 vtable_reg
= alloc_preg (cfg
);
2940 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2941 vtable_var
->type
= STACK_PTR
;
2949 vtable_reg
= alloc_preg (cfg
);
2950 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2955 static MonoJumpInfoRgctxEntry
*
2956 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2958 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2959 res
->method
= method
;
2960 res
->in_mrgctx
= in_mrgctx
;
2961 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2962 res
->data
->type
= patch_type
;
2963 res
->data
->data
.target
= patch_data
;
2964 res
->info_type
= info_type
;
2969 static inline MonoInst
*
2970 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2972 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2976 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2977 MonoClass
*klass
, int rgctx_type
)
2979 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2980 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2982 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2986 * emit_get_rgctx_method:
2988 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2989 * normal constants, else emit a load from the rgctx.
2992 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2993 MonoMethod
*cmethod
, int rgctx_type
)
2995 if (!context_used
) {
2998 switch (rgctx_type
) {
2999 case MONO_RGCTX_INFO_METHOD
:
3000 EMIT_NEW_METHODCONST (cfg
, ins
, cmethod
);
3002 case MONO_RGCTX_INFO_METHOD_RGCTX
:
3003 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, ins
, cmethod
);
3006 g_assert_not_reached ();
3009 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
3010 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3012 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3017 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
3018 MonoClassField
*field
, int rgctx_type
)
3020 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
3021 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3023 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3027 * On return the caller must check @klass for load errors.
3030 emit_generic_class_init (MonoCompile
*cfg
, MonoClass
*klass
)
3032 MonoInst
*vtable_arg
;
3034 int context_used
= 0;
3036 if (cfg
->generic_sharing_context
)
3037 context_used
= mono_class_check_context_used (klass
);
3040 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
3041 klass
, MONO_RGCTX_INFO_VTABLE
);
3043 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
3047 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
3050 if (COMPILE_LLVM (cfg
))
3051 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline_llvm
, &vtable_arg
);
3053 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable_arg
);
3054 #ifdef MONO_ARCH_VTABLE_REG
3055 mono_call_inst_add_outarg_reg (cfg
, call
, vtable_arg
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
3056 cfg
->uses_vtable_reg
= TRUE
;
3063 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
3065 if (mini_get_debug_options ()->better_cast_details
) {
3066 int to_klass_reg
= alloc_preg (cfg
);
3067 int vtable_reg
= alloc_preg (cfg
);
3068 int klass_reg
= alloc_preg (cfg
);
3069 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
3072 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
3076 MONO_ADD_INS (cfg
->cbb
, tls_get
);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
3081 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
3082 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
3087 reset_cast_details (MonoCompile
*cfg
)
3089 /* Reset the variables holding the cast details */
3090 if (mini_get_debug_options ()->better_cast_details
) {
3091 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
3093 MONO_ADD_INS (cfg
->cbb
, tls_get
);
3094 /* It is enough to reset the from field */
3095 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
3100 * On return the caller must check @array_class for load errors
3103 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
3105 int vtable_reg
= alloc_preg (cfg
);
3106 int context_used
= 0;
3108 if (cfg
->generic_sharing_context
)
3109 context_used
= mono_class_check_context_used (array_class
);
3111 save_cast_details (cfg
, array_class
, obj
->dreg
);
3113 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3115 if (cfg
->opt
& MONO_OPT_SHARED
) {
3116 int class_reg
= alloc_preg (cfg
);
3117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3118 if (cfg
->compile_aot
) {
3119 int klass_reg
= alloc_preg (cfg
);
3120 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
3121 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
3123 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
3125 } else if (context_used
) {
3126 MonoInst
*vtable_ins
;
3128 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
3129 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
3131 if (cfg
->compile_aot
) {
3135 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
3137 vt_reg
= alloc_preg (cfg
);
3138 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, vtable
);
3139 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
3142 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
3144 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vtable
);
3148 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
3150 reset_cast_details (cfg
);
3154 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3155 * generic code is generated.
3158 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
3160 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
3163 MonoInst
*rgctx
, *addr
;
3165 /* FIXME: What if the class is shared? We might not
3166 have to get the address of the method from the
3168 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3169 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3171 rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3173 return mono_emit_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3175 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3180 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
3184 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
3185 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3186 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3187 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
3189 obj_reg
= sp
[0]->dreg
;
3190 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3191 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3193 /* FIXME: generics */
3194 g_assert (klass
->rank
== 0);
3197 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
3198 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3200 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
3204 MonoInst
*element_class
;
3206 /* This assertion is from the unboxcast insn */
3207 g_assert (klass
->rank
== 0);
3209 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
3210 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
3212 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
3213 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3215 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
3216 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
3217 reset_cast_details (cfg
);
3220 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_MP
), obj_reg
, sizeof (MonoObject
));
3221 MONO_ADD_INS (cfg
->cbb
, add
);
3222 add
->type
= STACK_MP
;
3229 * Returns NULL and set the cfg exception on error.
3232 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
, int context_used
)
3234 MonoInst
*iargs
[2];
3240 MonoInst
*iargs
[2];
3243 FIXME: we cannot get managed_alloc here because we can't get
3244 the class's vtable (because it's not a closed class)
3246 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3247 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3250 if (cfg
->opt
& MONO_OPT_SHARED
)
3251 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
3253 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
3254 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
3256 if (cfg
->opt
& MONO_OPT_SHARED
) {
3257 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3259 alloc_ftn
= mono_object_new
;
3262 alloc_ftn
= mono_object_new_specific
;
3265 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3268 if (cfg
->opt
& MONO_OPT_SHARED
) {
3269 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3270 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
3272 alloc_ftn
= mono_object_new
;
3273 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
3274 /* This happens often in argument checking code, eg. throw new FooException... */
3275 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3276 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
3277 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
3279 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
3280 MonoMethod
*managed_alloc
= NULL
;
3284 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3285 cfg
->exception_ptr
= klass
;
3289 #ifndef MONO_CROSS_COMPILE
3290 managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
3293 if (managed_alloc
) {
3294 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3295 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
3297 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
3299 guint32 lw
= vtable
->klass
->instance_size
;
3300 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
3301 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
3302 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
3305 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3309 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3313 * Returns NULL and set the cfg exception on error.
3316 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
)
3318 MonoInst
*alloc
, *ins
;
3320 if (mono_class_is_nullable (klass
)) {
3321 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
3324 /* FIXME: What if the class is shared? We might not
3325 have to get the method address from the RGCTX. */
3326 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3327 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3328 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3330 return mono_emit_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3332 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3336 alloc
= handle_alloc (cfg
, klass
, TRUE
, context_used
);
3340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
3347 mini_class_has_reference_variant_generic_argument (MonoCompile
*cfg
, MonoClass
*klass
, int context_used
)
3350 MonoGenericContainer
*container
;
3351 MonoGenericInst
*ginst
;
3353 if (klass
->generic_class
) {
3354 container
= klass
->generic_class
->container_class
->generic_container
;
3355 ginst
= klass
->generic_class
->context
.class_inst
;
3356 } else if (klass
->generic_container
&& context_used
) {
3357 container
= klass
->generic_container
;
3358 ginst
= container
->context
.class_inst
;
3363 for (i
= 0; i
< container
->type_argc
; ++i
) {
3365 if (!(mono_generic_container_get_param_info (container
, i
)->flags
& (MONO_GEN_PARAM_VARIANT
|MONO_GEN_PARAM_COVARIANT
)))
3367 type
= ginst
->type_argv
[i
];
3368 if (mini_type_is_reference (cfg
, type
))
3374 // FIXME: This doesn't work yet (class libs tests fail?)
3375 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3378 * Returns NULL and set the cfg exception on error.
3381 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3383 MonoBasicBlock
*is_null_bb
;
3384 int obj_reg
= src
->dreg
;
3385 int vtable_reg
= alloc_preg (cfg
);
3386 MonoInst
*klass_inst
= NULL
;
3391 if(mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
3392 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
3393 MonoInst
*cache_ins
;
3395 cache_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_CAST_CACHE
);
3400 /* klass - it's the second element of the cache entry*/
3401 EMIT_NEW_LOAD_MEMBASE (cfg
, args
[1], OP_LOAD_MEMBASE
, alloc_preg (cfg
), cache_ins
->dreg
, sizeof (gpointer
));
3404 args
[2] = cache_ins
;
3406 return mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
3409 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
3411 if (is_complex_isinst (klass
)) {
3412 /* Complex case, handle by an icall */
3418 args
[1] = klass_inst
;
3420 return mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
3422 /* Simple case, handled by the code below */
3426 NEW_BBLOCK (cfg
, is_null_bb
);
3428 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3429 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
3431 save_cast_details (cfg
, klass
, obj_reg
);
3433 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3435 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
3437 int klass_reg
= alloc_preg (cfg
);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3441 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3442 /* the remoting code is broken, access the class for now */
3443 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3444 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3446 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3447 cfg
->exception_ptr
= klass
;
3450 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3453 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3455 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3458 mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, klass_inst
, is_null_bb
);
3462 MONO_START_BB (cfg
, is_null_bb
);
3464 reset_cast_details (cfg
);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL `isinst' test of SRC against KLASS: the
 * result register (res_reg) holds the object reference on success and 0
 * (NULL) on failure.  Three strategies are visible below:
 *   1) variant reference generic arguments -> call the cached isinst wrapper
 *      obtained from mono_marshal_get_isinst_with_cache ();
 *   2) "complex" classes (is_complex_isinst) -> the mono_object_isinst icall;
 *   3) everything else -> inline vtable / rank / cast_class checks.
 * NOTE(review): this chunk is a lossy extraction -- several original lines
 * (closing braces, returns, some declarations) are missing, so the comments
 * below describe only what the visible lines establish.
 */
3470 * Returns NULL and set the cfg exception on error.
3473 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3476 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
3477 int obj_reg
= src
->dreg
;
3478 int vtable_reg
= alloc_preg (cfg
);
3479 int res_reg
= alloc_ireg_ref (cfg
);
3480 MonoInst
*klass_inst
= NULL
;
/* Case 1: variant generic interface -- delegate to the cached isinst
 * wrapper (args[0] presumably holds the object; set up before this point). */
3485 if(mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
3486 MonoMethod
*mono_isinst
= mono_marshal_get_isinst_with_cache ();
3487 MonoInst
*cache_ins
;
3489 cache_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_CAST_CACHE
);
3494 /* klass - it's the second element of the cache entry*/
3495 EMIT_NEW_LOAD_MEMBASE (cfg
, args
[1], OP_LOAD_MEMBASE
, alloc_preg (cfg
), cache_ins
->dreg
, sizeof (gpointer
));
3498 args
[2] = cache_ins
;
3500 return mono_emit_method_call (cfg
, mono_isinst
, args
, NULL
);
/* Fetch the (possibly shared-generic) runtime class for the inline checks. */
3503 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
/* Case 2: classes too complex for inline checks -> JIT icall. */
3505 if (is_complex_isinst (klass
)) {
3506 /* Complex case, handle by an icall */
3512 args
[1] = klass_inst
;
3514 return mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
3516 /* Simple case, the code below can handle it */
/* Case 3: inline test.  res_reg := obj; NULL objects short-circuit to
 * is_null_bb (which keeps the copied reference, i.e. result stays NULL). */
3520 NEW_BBLOCK (cfg
, is_null_bb
);
3521 NEW_BBLOCK (cfg
, false_bb
);
3522 NEW_BBLOCK (cfg
, end_bb
);
3524 /* Do the assignment at the beginning, so the other assignment can be if converted */
3525 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
3526 ins
->type
= STACK_OBJ
;
3529 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3534 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3535 g_assert (!context_used
);
3536 /* the is_null_bb target simply copies the input register to the output */
3537 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
3539 int klass_reg
= alloc_preg (cfg
);
/* Array branch: compare vtable rank, then dispatch on klass->cast_class
 * (object / enum-parent / enum / interface / general element class). */
3542 int rank_reg
= alloc_preg (cfg
);
3543 int eclass_reg
= alloc_preg (cfg
);
3545 g_assert (!context_used
);
3546 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3547 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
3551 if (klass
->cast_class
== mono_defaults
.object_class
) {
3552 int parent_reg
= alloc_preg (cfg
);
3553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
3554 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
3555 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3557 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
3558 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
3559 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3561 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
3562 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3564 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3565 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3567 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
3568 /* Check that the object is a vector too */
3569 int bounds_reg
= alloc_preg (cfg
);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3571 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
3572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3575 /* the is_null_bb target simply copies the input register to the output */
3576 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3578 } else if (mono_class_is_nullable (klass
)) {
3579 g_assert (!context_used
);
3580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3581 /* the is_null_bb target simply copies the input register to the output */
3582 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
/* Sealed non-shared class: a single vtable/class pointer comparison
 * suffices (the exact-vtable fast path is disabled, see FIXME below). */
3584 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3585 g_assert (!context_used
);
3586 /* the remoting code is broken, access the class for now */
3587 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3588 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3590 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3591 cfg
->exception_ptr
= klass
;
3594 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3597 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3603 /* the is_null_bb target simply copies the input register to the output */
3604 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, klass_inst
, false_bb
, is_null_bb
);
/* Join: false path stores NULL into res_reg; is_null/success path keeps
 * the reference copied at the top. */
3609 MONO_START_BB (cfg
, false_bb
);
3611 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3612 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3614 MONO_START_BB (cfg
, is_null_bb
);
3616 MONO_START_BB (cfg
, end_bb
);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal "conditional isinst" opcode used by the
 * remoting-aware cast path.  The integer result in dreg encodes the
 * outcome (see the 0/1/2 contract in the comment below); transparent
 * proxies whose type cannot be decided at JIT time yield 2 so the caller
 * can fall back to a runtime check.
 * NOTE(review): lossy extraction -- some original lines (braces, returns,
 * the tmp_reg declaration) are missing; comments cover only visible code.
 */
3622 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3624 /* This opcode takes as input an object reference and a class, and returns:
3625 0) if the object is an instance of the class,
3626 1) if the object is not instance of the class,
3627 2) if the object is a proxy whose type cannot be determined */
3630 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3631 int obj_reg
= src
->dreg
;
3632 int dreg
= alloc_ireg (cfg
);
3634 int klass_reg
= alloc_preg (cfg
);
3636 NEW_BBLOCK (cfg
, true_bb
);
3637 NEW_BBLOCK (cfg
, false_bb
);
3638 NEW_BBLOCK (cfg
, false2_bb
);
3639 NEW_BBLOCK (cfg
, end_bb
);
3640 NEW_BBLOCK (cfg
, no_proxy_bb
);
/* NULL objects are "not an instance" (result 1). */
3642 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
/* Interface case: try the interface bitmap first; on failure check
 * whether the object is a transparent proxy with custom type info. */
3645 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3646 NEW_BBLOCK (cfg
, interface_fail_bb
);
3648 tmp_reg
= alloc_preg (cfg
);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3650 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3651 MONO_START_BB (cfg
, interface_fail_bb
);
3652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3654 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3656 tmp_reg
= alloc_preg (cfg
);
3657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3658 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
/* Non-interface case: if the object is a transparent proxy, test the
 * remote proxy_class instead of the vtable class. */
3661 tmp_reg
= alloc_preg (cfg
);
3662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3665 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3666 tmp_reg
= alloc_preg (cfg
);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3670 tmp_reg
= alloc_preg (cfg
);
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3672 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3675 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3678 MONO_START_BB (cfg
, no_proxy_bb
);
3680 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
/* Result materialization: false -> 1, false2 (undecidable proxy) -> 2,
 * true -> 0; all paths meet at end_bb. */
3683 MONO_START_BB (cfg
, false_bb
);
3685 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3688 MONO_START_BB (cfg
, false2_bb
);
3690 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3693 MONO_START_BB (cfg
, true_bb
);
3695 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3697 MONO_START_BB (cfg
, end_bb
);
/* Wrap the result register in an OP_ICONST-typed instruction of stack
 * type I4 (the returned MonoInst; return line lost in extraction). */
3700 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3702 ins
->type
= STACK_I4
;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal "conditional castclass" opcode used by the
 * remoting-aware cast path.  dreg encodes the outcome (0 = instance,
 * 1 = undecidable proxy, see contract below); any other case throws
 * InvalidCastException via the emitted checks.
 * NOTE(review): lossy extraction -- some original lines (braces, returns)
 * are missing; comments cover only the visible code.
 */
3708 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3710 /* This opcode takes as input an object reference and a class, and returns:
3711 0) if the object is an instance of the class,
3712 1) if the object is a proxy whose type cannot be determined
3713 an InvalidCastException exception is thrown otherwhise*/
3716 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3717 int obj_reg
= src
->dreg
;
3718 int dreg
= alloc_ireg (cfg
);
3719 int tmp_reg
= alloc_preg (cfg
);
3720 int klass_reg
= alloc_preg (cfg
);
3722 NEW_BBLOCK (cfg
, end_bb
);
3723 NEW_BBLOCK (cfg
, ok_result_bb
);
/* NULL casts succeed (result 0). */
3725 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
/* Record cast details so a failing cast can produce a good message. */
3728 save_cast_details (cfg
, klass
, obj_reg
);
/* Interface case: iface bitmap check first; on failure the object must
 * be a transparent proxy with custom type info, else throw. */
3730 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3731 NEW_BBLOCK (cfg
, interface_fail_bb
);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3734 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3735 MONO_START_BB (cfg
, interface_fail_bb
);
3736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3738 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3740 tmp_reg
= alloc_preg (cfg
);
3741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3742 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3743 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3745 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
/* Non-interface case: transparent proxies are checked against their
 * remote proxy_class; plain objects fall through to no_proxy_bb. */
3749 NEW_BBLOCK (cfg
, no_proxy_bb
);
3751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3753 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3755 tmp_reg
= alloc_preg (cfg
);
3756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3759 tmp_reg
= alloc_preg (cfg
);
3760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3761 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3764 NEW_BBLOCK (cfg
, fail_1_bb
);
3766 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3768 MONO_START_BB (cfg
, fail_1_bb
);
3770 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3773 MONO_START_BB (cfg
, no_proxy_bb
);
/* Plain object: a failed mini_emit_castclass throws; success joins
 * ok_result_bb with result 0. */
3775 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3778 MONO_START_BB (cfg
, ok_result_bb
);
3780 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3782 MONO_START_BB (cfg
, end_bb
);
/* Wrap the result in an OP_ICONST-typed I4 instruction (the returned
 * MonoInst; return line lost in extraction). */
3785 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3787 ins
->type
= STACK_I4
;
/*
 * handle_delegate_ctor:
 *
 *   Inline the body of mono_delegate_ctor: allocate the delegate object,
 * store its target, method and (when possible) a per-domain code slot,
 * then install the delegate invoke trampoline.  Write barriers are
 * emitted for the reference stores when cfg->gen_write_barriers is set.
 * NOTE(review): lossy extraction -- some original lines (declarations of
 * dreg/ptr/domain/code_slot, braces, the final return) are missing;
 * comments cover only the visible code.
 */
3793 * Returns NULL and set the cfg exception on error.
3795 static G_GNUC_UNUSED MonoInst
*
3796 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
, int context_used
)
3800 gpointer
*trampoline
;
3801 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3805 obj
= handle_alloc (cfg
, klass
, FALSE
, 0);
3809 /* Inline the contents of mono_delegate_ctor */
3811 /* Set target field */
3812 /* Optimize away setting of NULL target */
3813 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0)) {
3814 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3815 if (cfg
->gen_write_barriers
) {
3816 dreg
= alloc_preg (cfg
);
3817 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
));
3818 emit_write_barrier (cfg
, ptr
, target
, 0);
3822 /* Set method field */
3823 method_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD
);
3824 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3825 if (cfg
->gen_write_barriers
) {
3826 dreg
= alloc_preg (cfg
);
3827 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
));
3828 emit_write_barrier (cfg
, ptr
, method_ins
, 0);
3831 * To avoid looking up the compiled code belonging to the target method
3832 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3833 * store it, and we fill it after the method has been compiled.
/* Code-slot caching only applies to JIT (non-AOT) and non-dynamic
 * methods; the slot is interned in a per-domain hash under the domain
 * lock so concurrent compilations share it. */
3835 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3836 MonoInst
*code_slot_ins
;
3839 code_slot_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE
);
3841 domain
= mono_domain_get ();
3842 mono_domain_lock (domain
);
3843 if (!domain_jit_info (domain
)->method_code_hash
)
3844 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3845 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3847 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3848 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3850 mono_domain_unlock (domain
);
3852 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3854 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3857 /* Set invoke_impl field */
/* AOT images resolve the trampoline via a patch; the JIT embeds the
 * pointer returned by mono_create_delegate_trampoline directly. */
3858 if (cfg
->compile_aot
) {
3859 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3861 trampoline
= mono_create_delegate_trampoline (klass
);
3862 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3864 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3866 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall to
 * create a multi-dimensional array.  The icall uses a vararg calling
 * convention, so the method is flagged MONO_CFG_HAS_VARARGS and LLVM
 * compilation is disabled for it.
 */
3872 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3874 MonoJitICallInfo
*info
;
3876 /* Need to register the icall so it gets an icall wrapper */
3877 info
= mono_get_array_new_va_icall (rank
);
3879 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3881 /* mono_array_new_va () needs a vararg calling convention */
3882 cfg
->disable_llvm
= TRUE
;
3884 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3885 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize the GOT variable, and add a dummy use in the exit block so
 * the variable stays live for the whole method.  No-op when there is no
 * got_var or it was already allocated.
 */
3889 mono_emit_load_got_addr (MonoCompile
*cfg
)
3891 MonoInst
*getaddr
, *dummy_use
;
3893 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3896 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3897 getaddr
->dreg
= cfg
->got_var
->dreg
;
3899 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise a
 * plain append suffices. */
3900 if (cfg
->bb_entry
->code
) {
3901 getaddr
->next
= cfg
->bb_entry
->code
;
3902 cfg
->bb_entry
->code
= getaddr
;
3905 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3907 cfg
->got_var_allocated
= TRUE
;
3910 * Add a dummy use to keep the got_var alive, since real uses might
3911 * only be generated by the back ends.
3912 * Add it to end_bblock, so the variable's lifetime covers the whole
3914 * It would be better to make the usage of the got var explicit in all
3915 * cases when the backend needs it (i.e. calls, throw etc.), so this
3916 * wouldn't be needed.
3918 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3919 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
/* Lazily-initialized inline size threshold (see mono_method_check_inlining):
 * taken from the MONO_INLINELIMIT environment variable, falling back to
 * INLINE_LENGTH_LIMIT.  NOTE(review): init is not thread-safe -- benign
 * only if concurrent initializations compute the same value. */
3922 static int inline_limit
;
3923 static gboolean inline_limit_inited
;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Visible disqualifiers: generic sharing, inline depth > 10, noinlining /
 * synchronized / marshal-by-ref methods, IL size over the (env-tunable)
 * inline_limit, classes whose cctor would have to run inside the inlined
 * code, declarative security, and (under MONO_ARCH_SOFT_FLOAT) R4
 * parameters or return values.
 * NOTE(review): lossy extraction -- the return statements and some braces
 * are missing, so which condition yields TRUE vs FALSE cannot be read off
 * these lines; comments describe only the checks themselves.
 */
3926 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3928 MonoMethodHeaderSummary header
;
3930 #ifdef MONO_ARCH_SOFT_FLOAT
3931 MonoMethodSignature
*sig
= mono_method_signature (method
);
3935 if (cfg
->generic_sharing_context
)
3938 if (cfg
->inline_depth
> 10)
3941 #ifdef MONO_ARCH_HAVE_LMF_OPS
3942 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3943 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3944 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3949 if (!mono_method_get_header_summary (method
, &header
))
3952 /*runtime, icall and pinvoke are checked by summary call*/
3953 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3954 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3955 (method
->klass
->marshalbyref
) ||
3959 /* also consider num_locals? */
3960 /* Do the size check early to avoid creating vtables */
3961 if (!inline_limit_inited
) {
3962 if (getenv ("MONO_INLINELIMIT"))
3963 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3965 inline_limit
= INLINE_LENGTH_LIMIT
;
3966 inline_limit_inited
= TRUE
;
3968 if (header
.code_size
>= inline_limit
)
3972 * if we can initialize the class of the method right away, we do,
3973 * otherwise we don't allow inlining if the class needs initialization,
3974 * since it would mean inserting a call to mono_runtime_class_init()
3975 * inside the inlined code
3977 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3978 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3979 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3980 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3981 if (!method
->klass
->runtime_info
)
3982 /* No vtable created yet */
3984 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3987 /* This makes so that inline cannot trigger */
3988 /* .cctors: too many apps depend on them */
3989 /* running with a specific order... */
3990 if (! vtable
->initialized
)
3992 mono_runtime_class_init (vtable
);
3994 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3995 if (!method
->klass
->runtime_info
)
3996 /* No vtable created yet */
3998 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4001 if (!vtable
->initialized
)
4006 * If we're compiling for shared code
4007 * the cctor will need to be run at aot method load time, for example,
4008 * or at the end of the compilation of the inlining method.
4010 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
4015 * CAS - do not inline methods with declarative security
4016 * Note: this has to be before any possible return TRUE;
4018 if (mono_method_has_declsec (method
))
4021 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods with R4 returns/params. */
4023 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
4025 for (i
= 0; i
< sig
->param_count
; ++i
)
4026 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires emitting a
 * class-initialization (cctor) check for VTABLE's class.  Visible
 * fast-outs: vtable already initialized (JIT only), beforefieldinit
 * classes, classes whose cctor never needs to run, and instance methods
 * of the same class (the cctor already ran before the call).
 * NOTE(review): lossy extraction -- the return values themselves are on
 * lines missing from this chunk.
 */
4034 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
4036 if (vtable
->initialized
&& !cfg
->compile_aot
)
4039 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
4042 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
4045 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
4046 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, with an optional bounds check
 * (BCHECK).  On 64-bit targets the 32-bit index is widened first; on
 * x86/amd64 power-of-two element sizes use a single LEA, otherwise a
 * mul + add + offset sequence is emitted.
 * NOTE(review): lossy extraction -- the bcheck guard around the bounds
 * check and the return statements are on missing lines.
 */
4053 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
, gboolean bcheck
)
4057 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
4059 mono_class_init (klass
);
4060 size
= mono_class_array_element_size (klass
);
4062 mult_reg
= alloc_preg (cfg
);
4063 array_reg
= arr
->dreg
;
4064 index_reg
= index
->dreg
;
4066 #if SIZEOF_REGISTER == 8
4067 /* The array reg is 64 bits but the index reg is only 32 */
4068 if (COMPILE_LLVM (cfg
)) {
4070 index2_reg
= index_reg
;
4072 index2_reg
= alloc_preg (cfg
);
4073 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
4076 if (index
->type
== STACK_I8
) {
4077 index2_reg
= alloc_preg (cfg
);
4078 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
4080 index2_reg
= index_reg
;
4085 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
4087 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: one LEA with the matching shift. */
4088 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
4089 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4091 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
4092 ins
->klass
= mono_class_get_element_class (klass
);
4093 ins
->type
= STACK_MP
;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
4099 add_reg
= alloc_ireg_mp (cfg
);
4101 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
4102 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
4103 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
4104 ins
->klass
= mono_class_get_element_class (klass
);
4105 ins
->type
= STACK_MP
;
4106 MONO_ADD_INS (cfg
->cbb
, ins
);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS.  Both indices are
 * range-checked against the array's MonoArrayBounds (lower_bound/length
 * per dimension, throwing IndexOutOfRangeException), then the address is
 * ((real1 * length2) + real2) * size + offsetof(MonoArray, vector).
 * Compiled only when the target has native mul/div (guarded above).
 * NOTE(review): lossy extraction -- declarations of size/index1/index2/ins
 * and the return are on missing lines.
 */
4113 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
4115 int bounds_reg
= alloc_preg (cfg
);
4116 int add_reg
= alloc_ireg_mp (cfg
);
4117 int mult_reg
= alloc_preg (cfg
);
4118 int mult2_reg
= alloc_preg (cfg
);
4119 int low1_reg
= alloc_preg (cfg
);
4120 int low2_reg
= alloc_preg (cfg
);
4121 int high1_reg
= alloc_preg (cfg
);
4122 int high2_reg
= alloc_preg (cfg
);
4123 int realidx1_reg
= alloc_preg (cfg
);
4124 int realidx2_reg
= alloc_preg (cfg
);
4125 int sum_reg
= alloc_preg (cfg
);
4130 mono_class_init (klass
);
4131 size
= mono_class_array_element_size (klass
);
4133 index1
= index_ins1
->dreg
;
4134 index2
= index_ins2
->dreg
;
4136 /* range checking */
4137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
4138 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned-compare against
 * length so negative results also fail the check. */
4140 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
4141 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4142 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
4143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
4144 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4145 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
4146 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry. */
4148 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
4149 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4150 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
4151 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
4152 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4153 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
4154 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
/* Flattened offset: ((realidx1 * length2) + realidx2) * size + vector. */
4156 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
4157 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
4158 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
4159 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
4160 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
4162 ins
->type
= STACK_MP
;
4164 MONO_ADD_INS (cfg
->cbb
, ins
);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for a multi-dimensional array Address()
 * accessor CMETHOD.  Rank is derived from the signature (minus the value
 * parameter for setters); rank 1 and, when intrinsics are enabled, rank 2
 * use the inline helpers above, otherwise a marshalled array-address
 * wrapper method is called.
 * NOTE(review): lossy extraction -- declarations of rank/element_size/addr
 * and the final return are on missing lines.
 */
4171 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
4175 MonoMethod
*addr_method
;
4178 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
4181 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], TRUE
);
4183 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4184 /* emit_ldelema_2 depends on OP_LMUL */
4185 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
4186 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
/* General case: call the generated array-address wrapper. */
4190 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
4191 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
4192 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
/* Default break policy: honor every breakpoint request. */
4198 always_insert_breakpoint (MonoMethod
*method
)
4200 return MONO_BREAK_POLICY_ALWAYS
;
/* Current policy callback; replaced via mono_set_break_policy (). */
4203 static MonoBreakPolicyFunc break_policy_func
= always_insert_breakpoint
;
4206 * mono_set_break_policy:
4207 * policy_callback: the new callback function
4209 * Allow embedders to decide wherther to actually obey breakpoint instructions
4210 * (both break IL instructions and Debugger.Break () method calls), for example
4211 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4212 * untrusted or semi-trusted code.
4214 * @policy_callback will be called every time a break point instruction needs to
4215 * be inserted with the method argument being the method that calls Debugger.Break()
4216 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4217 * if it wants the breakpoint to not be effective in the given method.
4218 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Passing NULL restores the default always-break policy. */
4221 mono_set_break_policy (MonoBreakPolicyFunc policy_callback
)
4223 if (policy_callback
)
4224 break_policy_func
= policy_callback
;
4226 break_policy_func
= always_insert_breakpoint
;
/* Consult the installed break policy for METHOD; ON_DBG defers to whether
 * the mono debugger is attached.  Unknown values are warned about.
 * (Name typo "brekpoint" is the established identifier; callers use it.) */
4230 should_insert_brekpoint (MonoMethod
*method
) {
4231 switch (break_policy_func (method
)) {
4232 case MONO_BREAK_POLICY_ALWAYS
:
4234 case MONO_BREAK_POLICY_NEVER
:
4236 case MONO_BREAK_POLICY_ON_DBG
:
4237 return mono_debug_using_mono_debugger ();
4239 g_warning ("Incorrect value returned from break policy callback");
4244 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store: compute the element address (bounds check already done by
 * the caller, per the comment below), then copy between args[2] and the
 * element.  IS_SET selects store-into-array vs load-from-array.
 */
4246 emit_array_generic_access (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, int is_set
)
4248 MonoInst
*addr
, *store
, *load
;
4249 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[2]);
4251 /* the bounds check is already done by the callers */
4252 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1], FALSE
);
/* is_set: *addr = *args[2]; otherwise: *args[2] = *addr. */
4254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, args
[2]->dreg
, 0);
4255 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, addr
->dreg
, 0, load
->dreg
);
4257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
4258 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction.
 * Currently only SIMD ctor intrinsics are attempted (when MONO_OPT_SIMD
 * is enabled on SIMD-capable builds); otherwise ins stays NULL, meaning
 * "no intrinsic, emit a normal call".
 */
4264 mini_emit_inst_for_ctor (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4266 MonoInst
*ins
= NULL
;
4267 #ifdef MONO_ARCH_SIMD_INTRINSICS
4268 if (cfg
->opt
& MONO_OPT_SIMD
) {
4269 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (stored in
 * the instruction's backend.memory_barrier_kind) to the current bblock.
 */
4279 emit_memory_barrier (MonoCompile
*cfg
, int kind
)
4281 MonoInst
*ins
= NULL
;
4282 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
4283 MONO_ADD_INS (cfg
->cbb
, ins
);
4284 ins
->backend
.memory_barrier_kind
= kind
;
4290 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4292 MonoInst
*ins
= NULL
;
4294 static MonoClass
*runtime_helpers_class
= NULL
;
4295 if (! runtime_helpers_class
)
4296 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
4297 "System.Runtime.CompilerServices", "RuntimeHelpers");
4299 if (cmethod
->klass
== mono_defaults
.string_class
) {
4300 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
4301 int dreg
= alloc_ireg (cfg
);
4302 int index_reg
= alloc_preg (cfg
);
4303 int mult_reg
= alloc_preg (cfg
);
4304 int add_reg
= alloc_preg (cfg
);
4306 #if SIZEOF_REGISTER == 8
4307 /* The array reg is 64 bits but the index reg is only 32 */
4308 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
4310 index_reg
= args
[1]->dreg
;
4312 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
4314 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4315 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
4316 add_reg
= ins
->dreg
;
4317 /* Avoid a warning */
4319 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4322 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
4323 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4324 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4325 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
4327 type_from_op (ins
, NULL
, NULL
);
4329 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4330 int dreg
= alloc_ireg (cfg
);
4331 /* Decompose later to allow more optimizations */
4332 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
4333 ins
->type
= STACK_I4
;
4334 ins
->flags
|= MONO_INST_FAULT
;
4335 cfg
->cbb
->has_array_access
= TRUE
;
4336 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
4339 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
4340 int mult_reg
= alloc_preg (cfg
);
4341 int add_reg
= alloc_preg (cfg
);
4343 /* The corlib functions check for oob already. */
4344 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
4345 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4346 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
4347 return cfg
->cbb
->last_ins
;
4350 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
4352 if (strcmp (cmethod
->name
, "GetType") == 0) {
4353 int dreg
= alloc_ireg_ref (cfg
);
4354 int vt_reg
= alloc_preg (cfg
);
4355 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4356 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
4357 type_from_op (ins
, NULL
, NULL
);
4360 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4361 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4362 int dreg
= alloc_ireg (cfg
);
4363 int t1
= alloc_ireg (cfg
);
4365 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
4366 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
4367 ins
->type
= STACK_I4
;
4371 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
4372 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4373 MONO_ADD_INS (cfg
->cbb
, ins
);
4377 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
4378 if (strcmp (cmethod
->name
+ 1, "etGenericValueImpl") == 0)
4379 return emit_array_generic_access (cfg
, fsig
, args
, *cmethod
->name
== 'S');
4381 #ifndef MONO_BIG_ARRAYS
4383 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4386 if ((strcmp (cmethod
->name
, "GetLength") == 0 || strcmp (cmethod
->name
, "GetLowerBound") == 0) && args
[1]->opcode
== OP_ICONST
&& args
[1]->inst_c0
== 0) {
4387 int dreg
= alloc_ireg (cfg
);
4388 int bounds_reg
= alloc_ireg_mp (cfg
);
4389 MonoBasicBlock
*end_bb
, *szarray_bb
;
4390 gboolean get_length
= strcmp (cmethod
->name
, "GetLength") == 0;
4392 NEW_BBLOCK (cfg
, end_bb
);
4393 NEW_BBLOCK (cfg
, szarray_bb
);
4395 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, ins
, OP_LOAD_MEMBASE
, bounds_reg
,
4396 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
4397 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
4398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, szarray_bb
);
4399 /* Non-szarray case */
4401 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4402 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4404 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4405 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
4407 MONO_START_BB (cfg
, szarray_bb
);
4410 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4411 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
4413 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
4414 MONO_START_BB (cfg
, end_bb
);
4416 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, dreg
, dreg
);
4417 ins
->type
= STACK_I4
;
4423 if (cmethod
->name
[0] != 'g')
4426 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
4427 int dreg
= alloc_ireg (cfg
);
4428 int vtable_reg
= alloc_preg (cfg
);
4429 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
4430 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4431 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
4432 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
4433 type_from_op (ins
, NULL
, NULL
);
4436 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4437 int dreg
= alloc_ireg (cfg
);
4439 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4440 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
4441 type_from_op (ins
, NULL
, NULL
);
4446 } else if (cmethod
->klass
== runtime_helpers_class
) {
4448 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
4449 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
4453 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
4454 if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
4455 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
4456 MONO_ADD_INS (cfg
->cbb
, ins
);
4458 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
4459 return emit_memory_barrier (cfg
, FullBarrier
);
4461 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
4462 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4463 if (strcmp (cmethod
->name
, "Enter") == 0 && fsig
->param_count
== 1) {
4466 if (COMPILE_LLVM (cfg
)) {
4468 * Pass the argument normally, the LLVM backend will handle the
4469 * calling convention problems.
4471 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4473 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
4474 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4475 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4476 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4479 return (MonoInst
*)call
;
4480 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
4483 if (COMPILE_LLVM (cfg
)) {
4484 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4486 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
4487 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4488 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4489 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4492 return (MonoInst
*)call
;
4494 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4495 MonoMethod
*fast_method
= NULL
;
4497 /* Avoid infinite recursion */
4498 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
4499 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
4500 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
4503 if ((strcmp (cmethod
->name
, "Enter") == 0 && fsig
->param_count
== 1) ||
4504 strcmp (cmethod
->name
, "Exit") == 0)
4505 fast_method
= mono_monitor_get_fast_path (cmethod
);
4509 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
4511 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
4512 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
4513 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
4516 #if SIZEOF_REGISTER == 8
4517 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
4518 /* 64 bit reads are already atomic */
4519 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
4520 ins
->dreg
= mono_alloc_preg (cfg
);
4521 ins
->inst_basereg
= args
[0]->dreg
;
4522 ins
->inst_offset
= 0;
4523 MONO_ADD_INS (cfg
->cbb
, ins
);
4527 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4528 if (strcmp (cmethod
->name
, "Increment") == 0) {
4529 MonoInst
*ins_iconst
;
4532 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4533 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4534 #if SIZEOF_REGISTER == 8
4535 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4536 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4539 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4540 ins_iconst
->inst_c0
= 1;
4541 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4542 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4544 MONO_INST_NEW (cfg
, ins
, opcode
);
4545 ins
->dreg
= mono_alloc_ireg (cfg
);
4546 ins
->inst_basereg
= args
[0]->dreg
;
4547 ins
->inst_offset
= 0;
4548 ins
->sreg2
= ins_iconst
->dreg
;
4549 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4550 MONO_ADD_INS (cfg
->cbb
, ins
);
4552 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
4553 MonoInst
*ins_iconst
;
4556 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4557 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4558 #if SIZEOF_REGISTER == 8
4559 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4560 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4563 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4564 ins_iconst
->inst_c0
= -1;
4565 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4566 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4568 MONO_INST_NEW (cfg
, ins
, opcode
);
4569 ins
->dreg
= mono_alloc_ireg (cfg
);
4570 ins
->inst_basereg
= args
[0]->dreg
;
4571 ins
->inst_offset
= 0;
4572 ins
->sreg2
= ins_iconst
->dreg
;
4573 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4574 MONO_ADD_INS (cfg
->cbb
, ins
);
4576 } else if (strcmp (cmethod
->name
, "Add") == 0) {
4579 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4580 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4581 #if SIZEOF_REGISTER == 8
4582 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4583 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4587 MONO_INST_NEW (cfg
, ins
, opcode
);
4588 ins
->dreg
= mono_alloc_ireg (cfg
);
4589 ins
->inst_basereg
= args
[0]->dreg
;
4590 ins
->inst_offset
= 0;
4591 ins
->sreg2
= args
[1]->dreg
;
4592 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4593 MONO_ADD_INS (cfg
->cbb
, ins
);
4596 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4598 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4599 if (strcmp (cmethod
->name
, "Exchange") == 0) {
4601 gboolean is_ref
= fsig
->params
[0]->type
== MONO_TYPE_OBJECT
;
4603 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4604 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4605 #if SIZEOF_REGISTER == 8
4606 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
4607 (fsig
->params
[0]->type
== MONO_TYPE_I
))
4608 opcode
= OP_ATOMIC_EXCHANGE_I8
;
4610 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I
))
4611 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4616 MONO_INST_NEW (cfg
, ins
, opcode
);
4617 ins
->dreg
= is_ref
? mono_alloc_ireg_ref (cfg
) : mono_alloc_ireg (cfg
);
4618 ins
->inst_basereg
= args
[0]->dreg
;
4619 ins
->inst_offset
= 0;
4620 ins
->sreg2
= args
[1]->dreg
;
4621 MONO_ADD_INS (cfg
->cbb
, ins
);
4623 switch (fsig
->params
[0]->type
) {
4625 ins
->type
= STACK_I4
;
4629 ins
->type
= STACK_I8
;
4631 case MONO_TYPE_OBJECT
:
4632 ins
->type
= STACK_OBJ
;
4635 g_assert_not_reached ();
4638 if (cfg
->gen_write_barriers
&& is_ref
)
4639 emit_write_barrier (cfg
, args
[0], args
[1], -1);
4641 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4643 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4644 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
4646 gboolean is_ref
= mini_type_is_reference (cfg
, fsig
->params
[1]);
4647 if (fsig
->params
[1]->type
== MONO_TYPE_I4
)
4649 else if (is_ref
|| fsig
->params
[1]->type
== MONO_TYPE_I
)
4650 size
= sizeof (gpointer
);
4651 else if (sizeof (gpointer
) == 8 && fsig
->params
[1]->type
== MONO_TYPE_I8
)
4654 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I4
);
4655 ins
->dreg
= is_ref
? alloc_ireg_ref (cfg
) : alloc_ireg (cfg
);
4656 ins
->sreg1
= args
[0]->dreg
;
4657 ins
->sreg2
= args
[1]->dreg
;
4658 ins
->sreg3
= args
[2]->dreg
;
4659 ins
->type
= STACK_I4
;
4660 MONO_ADD_INS (cfg
->cbb
, ins
);
4661 } else if (size
== 8) {
4662 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I8
);
4663 ins
->dreg
= is_ref
? alloc_ireg_ref (cfg
) : alloc_ireg (cfg
);
4664 ins
->sreg1
= args
[0]->dreg
;
4665 ins
->sreg2
= args
[1]->dreg
;
4666 ins
->sreg3
= args
[2]->dreg
;
4667 ins
->type
= STACK_I8
;
4668 MONO_ADD_INS (cfg
->cbb
, ins
);
4670 /* g_assert_not_reached (); */
4672 if (cfg
->gen_write_barriers
&& is_ref
)
4673 emit_write_barrier (cfg
, args
[0], args
[1], -1);
4675 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4679 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
4680 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
4681 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
4682 if (should_insert_brekpoint (cfg
->method
))
4683 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
4685 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4686 MONO_ADD_INS (cfg
->cbb
, ins
);
4689 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
4690 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
4692 EMIT_NEW_ICONST (cfg
, ins
, 1);
4694 EMIT_NEW_ICONST (cfg
, ins
, 0);
4698 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
4700 * There is general branches code for Min/Max, but it does not work for
4702 * http://everything2.com/?node_id=1051618
4706 #ifdef MONO_ARCH_SIMD_INTRINSICS
4707 if (cfg
->opt
& MONO_OPT_SIMD
) {
4708 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4714 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
4718 * This entry point could be used later for arbitrary method
4721 inline static MonoInst
*
4722 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
4723 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
4725 if (method
->klass
== mono_defaults
.string_class
) {
4726 /* managed string allocation support */
4727 if (strcmp (method
->name
, "InternalAllocateStr") == 0 && !(mono_profiler_events
& MONO_PROFILE_ALLOCATIONS
) && !(cfg
->opt
& MONO_OPT_SHARED
)) {
4728 MonoInst
*iargs
[2];
4729 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4730 MonoMethod
*managed_alloc
= NULL
;
4732 g_assert (vtable
); /*Should not fail since it System.String*/
4733 #ifndef MONO_CROSS_COMPILE
4734 managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
4738 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4739 iargs
[1] = args
[0];
4740 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
4747 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
4749 MonoInst
*store
, *temp
;
4752 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4753 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
4756 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4757 * would be different than the MonoInst's used to represent arguments, and
4758 * the ldelema implementation can't deal with that.
4759 * Solution: When ldelema is used on an inline argument, create a var for
4760 * it, emit ldelema on that var, and emit the saving code below in
4761 * inline_method () if needed.
4763 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
4764 cfg
->args
[i
] = temp
;
4765 /* This uses cfg->args [i] which is set by the preceeding line */
4766 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
4767 store
->cil_code
= sp
[0]->cil_code
;
4772 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4773 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4775 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4777 check_inline_called_method_name_limit (MonoMethod
*called_method
)
4780 static char *limit
= NULL
;
4782 if (limit
== NULL
) {
4783 char *limit_string
= getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4785 if (limit_string
!= NULL
)
4786 limit
= limit_string
;
4788 limit
= (char *) "";
4791 if (limit
[0] != '\0') {
4792 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
4794 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
4795 g_free (called_method_name
);
4797 //return (strncmp_result <= 0);
4798 return (strncmp_result
== 0);
4805 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4807 check_inline_caller_method_name_limit (MonoMethod
*caller_method
)
4810 static char *limit
= NULL
;
4812 if (limit
== NULL
) {
4813 char *limit_string
= getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4814 if (limit_string
!= NULL
) {
4815 limit
= limit_string
;
4817 limit
= (char *) "";
4821 if (limit
[0] != '\0') {
4822 char *caller_method_name
= mono_method_full_name (caller_method
, TRUE
);
4824 strncmp_result
= strncmp (caller_method_name
, limit
, strlen (limit
));
4825 g_free (caller_method_name
);
4827 //return (strncmp_result <= 0);
4828 return (strncmp_result
== 0);
4836 emit_init_rvar (MonoCompile
*cfg
, MonoInst
*rvar
, MonoType
*rtype
)
4838 static double r8_0
= 0.0;
4841 switch (rvar
->type
) {
4843 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4846 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4851 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4854 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4855 ins
->type
= STACK_R8
;
4856 ins
->inst_p0
= (void*)&r8_0
;
4857 ins
->dreg
= rvar
->dreg
;
4858 MONO_ADD_INS (cfg
->cbb
, ins
);
4861 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (rtype
));
4864 g_assert_not_reached ();
4869 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4870 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_always
)
4872 MonoInst
*ins
, *rvar
= NULL
;
4873 MonoMethodHeader
*cheader
;
4874 MonoBasicBlock
*ebblock
, *sbblock
;
4876 MonoMethod
*prev_inlined_method
;
4877 MonoInst
**prev_locals
, **prev_args
;
4878 MonoType
**prev_arg_types
;
4879 guint prev_real_offset
;
4880 GHashTable
*prev_cbb_hash
;
4881 MonoBasicBlock
**prev_cil_offset_to_bb
;
4882 MonoBasicBlock
*prev_cbb
;
4883 unsigned char* prev_cil_start
;
4884 guint32 prev_cil_offset_to_bb_len
;
4885 MonoMethod
*prev_current_method
;
4886 MonoGenericContext
*prev_generic_context
;
4887 gboolean ret_var_set
, prev_ret_var_set
, virtual = FALSE
;
4889 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4891 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4892 if ((! inline_always
) && ! check_inline_called_method_name_limit (cmethod
))
4895 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4896 if ((! inline_always
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4900 if (cfg
->verbose_level
> 2)
4901 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4903 if (!cmethod
->inline_info
) {
4904 mono_jit_stats
.inlineable_methods
++;
4905 cmethod
->inline_info
= 1;
4908 /* allocate local variables */
4909 cheader
= mono_method_get_header (cmethod
);
4911 if (cheader
== NULL
|| mono_loader_get_last_error ()) {
4912 MonoLoaderError
*error
= mono_loader_get_last_error ();
4915 mono_metadata_free_mh (cheader
);
4916 if (inline_always
&& error
)
4917 mono_cfg_set_exception (cfg
, error
->exception_type
);
4919 mono_loader_clear_error ();
4923 /*Must verify before creating locals as it can cause the JIT to assert.*/
4924 if (mono_compile_is_broken (cfg
, cmethod
, FALSE
)) {
4925 mono_metadata_free_mh (cheader
);
4929 /* allocate space to store the return value */
4930 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4931 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4934 prev_locals
= cfg
->locals
;
4935 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4936 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4937 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4939 /* allocate start and end blocks */
4940 /* This is needed so if the inline is aborted, we can clean up */
4941 NEW_BBLOCK (cfg
, sbblock
);
4942 sbblock
->real_offset
= real_offset
;
4944 NEW_BBLOCK (cfg
, ebblock
);
4945 ebblock
->block_num
= cfg
->num_bblocks
++;
4946 ebblock
->real_offset
= real_offset
;
4948 prev_args
= cfg
->args
;
4949 prev_arg_types
= cfg
->arg_types
;
4950 prev_inlined_method
= cfg
->inlined_method
;
4951 cfg
->inlined_method
= cmethod
;
4952 cfg
->ret_var_set
= FALSE
;
4953 cfg
->inline_depth
++;
4954 prev_real_offset
= cfg
->real_offset
;
4955 prev_cbb_hash
= cfg
->cbb_hash
;
4956 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4957 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4958 prev_cil_start
= cfg
->cil_start
;
4959 prev_cbb
= cfg
->cbb
;
4960 prev_current_method
= cfg
->current_method
;
4961 prev_generic_context
= cfg
->generic_context
;
4962 prev_ret_var_set
= cfg
->ret_var_set
;
4964 if (*ip
== CEE_CALLVIRT
&& !(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
))
4967 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, virtual);
4969 ret_var_set
= cfg
->ret_var_set
;
4971 cfg
->inlined_method
= prev_inlined_method
;
4972 cfg
->real_offset
= prev_real_offset
;
4973 cfg
->cbb_hash
= prev_cbb_hash
;
4974 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4975 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4976 cfg
->cil_start
= prev_cil_start
;
4977 cfg
->locals
= prev_locals
;
4978 cfg
->args
= prev_args
;
4979 cfg
->arg_types
= prev_arg_types
;
4980 cfg
->current_method
= prev_current_method
;
4981 cfg
->generic_context
= prev_generic_context
;
4982 cfg
->ret_var_set
= prev_ret_var_set
;
4983 cfg
->inline_depth
--;
4985 if ((costs
>= 0 && costs
< 60) || inline_always
) {
4986 if (cfg
->verbose_level
> 2)
4987 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4989 mono_jit_stats
.inlined_methods
++;
4991 /* always add some code to avoid block split failures */
4992 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4993 MONO_ADD_INS (prev_cbb
, ins
);
4995 prev_cbb
->next_bb
= sbblock
;
4996 link_bblock (cfg
, prev_cbb
, sbblock
);
4999 * Get rid of the begin and end bblocks if possible to aid local
5002 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
5004 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
5005 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
5007 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
5008 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
5009 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
5011 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
5012 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
5013 cfg
->cbb
= prev_cbb
;
5017 * It's possible that the rvar is set in some prev bblock, but not in others.
5023 for (i
= 0; i
< ebblock
->in_count
; ++i
) {
5024 bb
= ebblock
->in_bb
[i
];
5026 if (bb
->last_ins
&& bb
->last_ins
->opcode
== OP_NOT_REACHED
) {
5029 emit_init_rvar (cfg
, rvar
, fsig
->ret
);
5039 * If the inlined method contains only a throw, then the ret var is not
5040 * set, so set it to a dummy value.
5043 emit_init_rvar (cfg
, rvar
, fsig
->ret
);
5045 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
5048 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
5051 if (cfg
->verbose_level
> 2)
5052 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
5053 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
5054 mono_loader_clear_error ();
5056 /* This gets rid of the newly added bblocks */
5057 cfg
->cbb
= prev_cbb
;
5059 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
5064 * Some of these comments may well be out-of-date.
5065 * Design decisions: we do a single pass over the IL code (and we do bblock
5066 * splitting/merging in the few cases when it's required: a back jump to an IL
5067 * address that was not already seen as bblock starting point).
5068 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5069 * Complex operations are decomposed in simpler ones right away. We need to let the
5070 * arch-specific code peek and poke inside this process somehow (except when the
5071 * optimizations can take advantage of the full semantic info of coarse opcodes).
5072 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5073 * MonoInst->opcode initially is the IL opcode or some simplification of that
5074 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5075 * opcode with value bigger than OP_LAST.
5076 * At this point the IR can be handed over to an interpreter, a dumb code generator
5077 * or to the optimizing code generator that will translate it to SSA form.
5079 * Profiling directed optimizations.
5080 * We may compile by default with few or no optimizations and instrument the code
5081 * or the user may indicate what methods to optimize the most either in a config file
5082 * or through repeated runs where the compiler applies offline the optimizations to
5083 * each method and then decides if it was worth it.
5086 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5087 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5088 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5089 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5090 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5091 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5092 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5093 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5095 /* offset from br.s -> br like opcodes */
5096 #define BIG_BRANCH_OFFSET 13
5099 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
5101 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
5103 return b
== NULL
|| b
== bb
;
5107 get_basic_blocks (MonoCompile
*cfg
, MonoMethodHeader
* header
, guint real_offset
, unsigned char *start
, unsigned char *end
, unsigned char **pos
)
5109 unsigned char *ip
= start
;
5110 unsigned char *target
;
5113 MonoBasicBlock
*bblock
;
5114 const MonoOpcode
*opcode
;
5117 cli_addr
= ip
- start
;
5118 i
= mono_opcode_value ((const guint8
**)&ip
, end
);
5121 opcode
= &mono_opcodes
[i
];
5122 switch (opcode
->argument
) {
5123 case MonoInlineNone
:
5126 case MonoInlineString
:
5127 case MonoInlineType
:
5128 case MonoInlineField
:
5129 case MonoInlineMethod
:
5132 case MonoShortInlineR
:
5139 case MonoShortInlineVar
:
5140 case MonoShortInlineI
:
5143 case MonoShortInlineBrTarget
:
5144 target
= start
+ cli_addr
+ 2 + (signed char)ip
[1];
5145 GET_BBLOCK (cfg
, bblock
, target
);
5148 GET_BBLOCK (cfg
, bblock
, ip
);
5150 case MonoInlineBrTarget
:
5151 target
= start
+ cli_addr
+ 5 + (gint32
)read32 (ip
+ 1);
5152 GET_BBLOCK (cfg
, bblock
, target
);
5155 GET_BBLOCK (cfg
, bblock
, ip
);
5157 case MonoInlineSwitch
: {
5158 guint32 n
= read32 (ip
+ 1);
5161 cli_addr
+= 5 + 4 * n
;
5162 target
= start
+ cli_addr
;
5163 GET_BBLOCK (cfg
, bblock
, target
);
5165 for (j
= 0; j
< n
; ++j
) {
5166 target
= start
+ cli_addr
+ (gint32
)read32 (ip
);
5167 GET_BBLOCK (cfg
, bblock
, target
);
5177 g_assert_not_reached ();
5180 if (i
== CEE_THROW
) {
5181 unsigned char *bb_start
= ip
- 1;
5183 /* Find the start of the bblock containing the throw */
5185 while ((bb_start
>= start
) && !bblock
) {
5186 bblock
= cfg
->cil_offset_to_bb
[(bb_start
) - start
];
5190 bblock
->out_of_line
= 1;
5199 static inline MonoMethod
*
5200 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
5204 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
)
5205 return mono_method_get_wrapper_data (m
, token
);
5207 method
= mono_get_method_full (m
->klass
->image
, token
, klass
, context
);
5212 static inline MonoMethod
*
5213 mini_get_method (MonoCompile
*cfg
, MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
5215 MonoMethod
*method
= mini_get_method_allow_open (m
, token
, klass
, context
);
5217 if (method
&& cfg
&& !cfg
->generic_sharing_context
&& mono_class_is_open_constructed_type (&method
->klass
->byval_arg
))
5223 static inline MonoClass
*
5224 mini_get_class (MonoMethod
*method
, guint32 token
, MonoGenericContext
*context
)
5228 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
5229 klass
= mono_method_get_wrapper_data (method
, token
);
5231 klass
= mono_class_get_full (method
->klass
->image
, token
, context
);
5233 mono_class_init (klass
);
5238 * Returns TRUE if the JIT should abort inlining because "callee"
5239 * is influenced by security attributes.
5242 gboolean
check_linkdemand (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
5246 if ((cfg
->method
!= caller
) && mono_method_has_declsec (callee
)) {
5250 result
= mono_declsec_linkdemand (cfg
->domain
, caller
, callee
);
5251 if (result
== MONO_JIT_SECURITY_OK
)
5254 if (result
== MONO_JIT_LINKDEMAND_ECMA
) {
5255 /* Generate code to throw a SecurityException before the actual call/link */
5256 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
5259 NEW_ICONST (cfg
, args
[0], 4);
5260 NEW_METHODCONST (cfg
, args
[1], caller
);
5261 mono_emit_method_call (cfg
, secman
->linkdemandsecurityexception
, args
, NULL
);
5262 } else if (cfg
->exception_type
== MONO_EXCEPTION_NONE
) {
5263 /* don't hide previous results */
5264 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_SECURITY_LINKDEMAND
);
5265 cfg
->exception_data
= result
;
5273 throw_exception (void)
5275 static MonoMethod
*method
= NULL
;
5278 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
5279 method
= mono_class_get_method_from_name (secman
->securitymanager
, "ThrowException", 1);
5286 emit_throw_exception (MonoCompile
*cfg
, MonoException
*ex
)
5288 MonoMethod
*thrower
= throw_exception ();
5291 EMIT_NEW_PCONST (cfg
, args
[0], ex
);
5292 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
5296 * Return the original method is a wrapper is specified. We can only access
5297 * the custom attributes from the original method.
5300 get_original_method (MonoMethod
*method
)
5302 if (method
->wrapper_type
== MONO_WRAPPER_NONE
)
5305 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5306 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
)
5309 /* in other cases we need to find the original method */
5310 return mono_marshal_method_from_wrapper (method
);
5314 ensure_method_is_allowed_to_access_field (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
,
5315 MonoBasicBlock
*bblock
, unsigned char *ip
)
5317 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5318 MonoException
*ex
= mono_security_core_clr_is_field_access_allowed (get_original_method (caller
), field
);
5320 emit_throw_exception (cfg
, ex
);
5324 ensure_method_is_allowed_to_call_method (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
,
5325 MonoBasicBlock
*bblock
, unsigned char *ip
)
5327 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5328 MonoException
*ex
= mono_security_core_clr_is_call_allowed (get_original_method (caller
), callee
);
5330 emit_throw_exception (cfg
, ex
);
5334 * Check that the IL instructions at ip are the array initialization
5335 * sequence and return the pointer to the data and the size.
5338 initialize_array_data (MonoMethod
*method
, gboolean aot
, unsigned char *ip
, MonoClass
*klass
, guint32 len
, int *out_size
, guint32
*out_field_token
)
5341 * newarr[System.Int32]
5343 * ldtoken field valuetype ...
5344 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5346 if (ip
[0] == CEE_DUP
&& ip
[1] == CEE_LDTOKEN
&& ip
[5] == 0x4 && ip
[6] == CEE_CALL
) {
5347 guint32 token
= read32 (ip
+ 7);
5348 guint32 field_token
= read32 (ip
+ 2);
5349 guint32 field_index
= field_token
& 0xffffff;
5351 const char *data_ptr
;
5353 MonoMethod
*cmethod
;
5354 MonoClass
*dummy_class
;
5355 MonoClassField
*field
= mono_field_from_token (method
->klass
->image
, field_token
, &dummy_class
, NULL
);
5361 *out_field_token
= field_token
;
5363 cmethod
= mini_get_method (NULL
, method
, token
, NULL
, NULL
);
5366 if (strcmp (cmethod
->name
, "InitializeArray") || strcmp (cmethod
->klass
->name
, "RuntimeHelpers") || cmethod
->klass
->image
!= mono_defaults
.corlib
)
5368 switch (mono_type_get_underlying_type (&klass
->byval_arg
)->type
) {
5369 case MONO_TYPE_BOOLEAN
:
5373 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5374 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5375 case MONO_TYPE_CHAR
:
5385 return NULL
; /* stupid ARM FP swapped format */
5395 if (size
> mono_type_size (field
->type
, &dummy_align
))
5398 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5399 if (!method
->klass
->image
->dynamic
) {
5400 field_index
= read32 (ip
+ 2) & 0xffffff;
5401 mono_metadata_field_info (method
->klass
->image
, field_index
- 1, NULL
, &rva
, NULL
);
5402 data_ptr
= mono_image_rva_map (method
->klass
->image
, rva
);
5403 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5404 /* for aot code we do the lookup on load */
5405 if (aot
&& data_ptr
)
5406 return GUINT_TO_POINTER (rva
);
5408 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5410 data_ptr
= mono_field_get_data (field
);
5418 set_exception_type_from_invalid_il (MonoCompile
*cfg
, MonoMethod
*method
, unsigned char *ip
)
5420 char *method_fname
= mono_method_full_name (method
, TRUE
);
5422 MonoMethodHeader
*header
= mono_method_get_header (method
);
5424 if (header
->code_size
== 0)
5425 method_code
= g_strdup ("method body is empty.");
5427 method_code
= mono_disasm_code_one (NULL
, method
, ip
, NULL
);
5428 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INVALID_PROGRAM
);
5429 cfg
->exception_message
= g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname
, method_code
);
5430 g_free (method_fname
);
5431 g_free (method_code
);
5432 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
5436 set_exception_object (MonoCompile
*cfg
, MonoException
*exception
)
5438 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_OBJECT_SUPPLIED
);
5439 MONO_GC_REGISTER_ROOT_SINGLE (cfg
->exception_ptr
);
5440 cfg
->exception_ptr
= exception
;
5444 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
5446 return mini_type_is_reference (cfg
, &klass
->byval_arg
);
5450 emit_stloc_ir (MonoCompile
*cfg
, MonoInst
**sp
, MonoMethodHeader
*header
, int n
)
5453 guint32 opcode
= mono_type_to_regmove (cfg
, header
->locals
[n
]);
5454 if ((opcode
== OP_MOVE
) && cfg
->cbb
->last_ins
== sp
[0] &&
5455 ((sp
[0]->opcode
== OP_ICONST
) || (sp
[0]->opcode
== OP_I8CONST
))) {
5456 /* Optimize reg-reg moves away */
5458 * Can't optimize other opcodes, since sp[0] might point to
5459 * the last ins of a decomposed opcode.
5461 sp
[0]->dreg
= (cfg
)->locals
[n
]->dreg
;
5463 EMIT_NEW_LOCSTORE (cfg
, ins
, n
, *sp
);
5468 * ldloca inhibits many optimizations so try to get rid of it in common
5471 static inline unsigned char *
5472 emit_optimized_ldloca_ir (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *end
, int size
)
5481 local
= read16 (ip
+ 2);
5485 if (ip
+ 6 < end
&& (ip
[0] == CEE_PREFIX1
) && (ip
[1] == CEE_INITOBJ
) && ip_in_bb (cfg
, cfg
->cbb
, ip
+ 1)) {
5486 gboolean skip
= FALSE
;
5488 /* From the INITOBJ case */
5489 token
= read32 (ip
+ 2);
5490 klass
= mini_get_class (cfg
->current_method
, token
, cfg
->generic_context
);
5491 CHECK_TYPELOAD (klass
);
5492 if (mini_type_is_reference (cfg
, &klass
->byval_arg
)) {
5493 MONO_EMIT_NEW_PCONST (cfg
, cfg
->locals
[local
]->dreg
, NULL
);
5494 } else if (MONO_TYPE_ISSTRUCT (&klass
->byval_arg
)) {
5495 MONO_EMIT_NEW_VZERO (cfg
, cfg
->locals
[local
]->dreg
, klass
);
5508 is_exception_class (MonoClass
*class)
5511 if (class == mono_defaults
.exception_class
)
5513 class = class->parent
;
5519 * is_jit_optimizer_disabled:
5521 * Determine whenever M's assembly has a DebuggableAttribute with the
5522 * IsJITOptimizerDisabled flag set.
5525 is_jit_optimizer_disabled (MonoMethod
*m
)
5527 MonoAssembly
*ass
= m
->klass
->image
->assembly
;
5528 MonoCustomAttrInfo
* attrs
;
5529 static MonoClass
*klass
;
5531 gboolean val
= FALSE
;
5534 if (ass
->jit_optimizer_disabled_inited
)
5535 return ass
->jit_optimizer_disabled
;
5538 klass
= mono_class_from_name (mono_defaults
.corlib
, "System.Diagnostics", "DebuggableAttribute");
5541 ass
->jit_optimizer_disabled
= FALSE
;
5542 mono_memory_barrier ();
5543 ass
->jit_optimizer_disabled_inited
= TRUE
;
5547 attrs
= mono_custom_attrs_from_assembly (ass
);
5549 for (i
= 0; i
< attrs
->num_attrs
; ++i
) {
5550 MonoCustomAttrEntry
*attr
= &attrs
->attrs
[i
];
5553 MonoMethodSignature
*sig
;
5555 if (!attr
->ctor
|| attr
->ctor
->klass
!= klass
)
5557 /* Decode the attribute. See reflection.c */
5558 len
= attr
->data_size
;
5559 p
= (const char*)attr
->data
;
5560 g_assert (read16 (p
) == 0x0001);
5563 // FIXME: Support named parameters
5564 sig
= mono_method_signature (attr
->ctor
);
5565 if (sig
->param_count
!= 2 || sig
->params
[0]->type
!= MONO_TYPE_BOOLEAN
|| sig
->params
[1]->type
!= MONO_TYPE_BOOLEAN
)
5567 /* Two boolean arguments */
5571 mono_custom_attrs_free (attrs
);
5574 ass
->jit_optimizer_disabled
= val
;
5575 mono_memory_barrier ();
5576 ass
->jit_optimizer_disabled_inited
= TRUE
;
5582 is_supported_tail_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
)
5584 gboolean supported_tail_call
;
5587 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5588 supported_tail_call
= MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method
), mono_method_signature (cmethod
));
5590 supported_tail_call
= mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->ret
);
5593 for (i
= 0; i
< fsig
->param_count
; ++i
) {
5594 if (fsig
->params
[i
]->byref
|| fsig
->params
[i
]->type
== MONO_TYPE_PTR
|| fsig
->params
[i
]->type
== MONO_TYPE_FNPTR
)
5595 /* These can point to the current method's stack */
5596 supported_tail_call
= FALSE
;
5598 if (fsig
->hasthis
&& cmethod
->klass
->valuetype
)
5599 /* this might point to the current method's stack */
5600 supported_tail_call
= FALSE
;
5601 if (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)
5602 supported_tail_call
= FALSE
;
5603 if (cfg
->method
->save_lmf
)
5604 supported_tail_call
= FALSE
;
5605 if (cmethod
->wrapper_type
&& cmethod
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
)
5606 supported_tail_call
= FALSE
;
5608 /* Debugging support */
5610 if (supported_tail_call
) {
5611 static int count
= 0;
5613 if (getenv ("COUNT")) {
5614 if (count
== atoi (getenv ("COUNT")))
5615 printf ("LAST: %s\n", mono_method_full_name (cmethod
, TRUE
));
5616 if (count
> atoi (getenv ("COUNT")))
5617 supported_tail_call
= FALSE
;
5622 return supported_tail_call
;
5626 * mono_method_to_ir:
5628 * Translate the .net IL into linear IR.
5631 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5632 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5633 guint inline_offset
, gboolean is_virtual_call
)
5636 MonoInst
*ins
, **sp
, **stack_start
;
5637 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5638 MonoSimpleBasicBlock
*bb
= NULL
, *original_bb
= NULL
;
5639 MonoMethod
*cmethod
, *method_definition
;
5640 MonoInst
**arg_array
;
5641 MonoMethodHeader
*header
;
5643 guint32 token
, ins_flag
;
5645 MonoClass
*constrained_call
= NULL
;
5646 unsigned char *ip
, *end
, *target
, *err_pos
;
5647 static double r8_0
= 0.0;
5648 MonoMethodSignature
*sig
;
5649 MonoGenericContext
*generic_context
= NULL
;
5650 MonoGenericContainer
*generic_container
= NULL
;
5651 MonoType
**param_types
;
5652 int i
, n
, start_new_bblock
, dreg
;
5653 int num_calls
= 0, inline_costs
= 0;
5654 int breakpoint_id
= 0;
5656 MonoBoolean security
, pinvoke
;
5657 MonoSecurityManager
* secman
= NULL
;
5658 MonoDeclSecurityActions actions
;
5659 GSList
*class_inits
= NULL
;
5660 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5662 gboolean init_locals
, seq_points
, skip_dead_blocks
;
5663 gboolean disable_inline
;
5665 disable_inline
= is_jit_optimizer_disabled (method
);
5667 /* serialization and xdomain stuff may need access to private fields and methods */
5668 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5669 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5670 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5671 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5672 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5673 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5675 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5677 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5678 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5679 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5680 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5681 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_STELEMREF
;
5683 image
= method
->klass
->image
;
5684 header
= mono_method_get_header (method
);
5686 MonoLoaderError
*error
;
5688 if ((error
= mono_loader_get_last_error ())) {
5689 mono_cfg_set_exception (cfg
, error
->exception_type
);
5691 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INVALID_PROGRAM
);
5692 cfg
->exception_message
= g_strdup_printf ("Missing or incorrect header for method %s", cfg
->method
->name
);
5694 goto exception_exit
;
5696 generic_container
= mono_method_get_generic_container (method
);
5697 sig
= mono_method_signature (method
);
5698 num_args
= sig
->hasthis
+ sig
->param_count
;
5699 ip
= (unsigned char*)header
->code
;
5700 cfg
->cil_start
= ip
;
5701 end
= ip
+ header
->code_size
;
5702 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5703 init_locals
= header
->init_locals
;
5705 seq_points
= cfg
->gen_seq_points
&& cfg
->method
== method
;
5708 * Methods without init_locals set could cause asserts in various passes
5713 method_definition
= method
;
5714 while (method_definition
->is_inflated
) {
5715 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5716 method_definition
= imethod
->declaring
;
5719 /* SkipVerification is not allowed if core-clr is enabled */
5720 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5722 dont_verify_stloc
= TRUE
;
5725 if (mono_debug_using_mono_debugger ())
5726 cfg
->keep_cil_nops
= TRUE
;
5728 if (sig
->is_inflated
)
5729 generic_context
= mono_method_get_context (method
);
5730 else if (generic_container
)
5731 generic_context
= &generic_container
->context
;
5732 cfg
->generic_context
= generic_context
;
5734 if (!cfg
->generic_sharing_context
)
5735 g_assert (!sig
->has_type_parameters
);
5737 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5738 g_assert (method
->is_inflated
);
5739 g_assert (mono_method_get_context (method
)->method_inst
);
5741 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5742 g_assert (sig
->generic_param_count
);
5744 if (cfg
->method
== method
) {
5745 cfg
->real_offset
= 0;
5747 cfg
->real_offset
= inline_offset
;
5750 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5751 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5753 cfg
->current_method
= method
;
5755 if (cfg
->verbose_level
> 2)
5756 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5758 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5760 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5761 for (n
= 0; n
< sig
->param_count
; ++n
)
5762 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5763 cfg
->arg_types
= param_types
;
5765 dont_inline
= g_list_prepend (dont_inline
, method
);
5766 if (cfg
->method
== method
) {
5768 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5769 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5772 NEW_BBLOCK (cfg
, start_bblock
);
5773 cfg
->bb_entry
= start_bblock
;
5774 start_bblock
->cil_code
= NULL
;
5775 start_bblock
->cil_length
= 0;
5776 #if defined(__native_client_codegen__)
5777 MONO_INST_NEW (cfg
, ins
, OP_NACL_GC_SAFE_POINT
);
5778 ins
->dreg
= alloc_dreg (cfg
, STACK_I4
);
5779 MONO_ADD_INS (start_bblock
, ins
);
5783 NEW_BBLOCK (cfg
, end_bblock
);
5784 cfg
->bb_exit
= end_bblock
;
5785 end_bblock
->cil_code
= NULL
;
5786 end_bblock
->cil_length
= 0;
5787 end_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
5788 g_assert (cfg
->num_bblocks
== 2);
5790 arg_array
= cfg
->args
;
5792 if (header
->num_clauses
) {
5793 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5794 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5796 /* handle exception clauses */
5797 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5798 MonoBasicBlock
*try_bb
;
5799 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5800 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5801 try_bb
->real_offset
= clause
->try_offset
;
5802 try_bb
->try_start
= TRUE
;
5803 try_bb
->region
= ((i
+ 1) << 8) | clause
->flags
;
5804 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5805 tblock
->real_offset
= clause
->handler_offset
;
5806 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5808 link_bblock (cfg
, try_bb
, tblock
);
5810 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5811 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5813 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5814 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5815 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5816 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5817 MONO_ADD_INS (tblock
, ins
);
5819 if (seq_points
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
) {
5820 /* finally clauses already have a seq point */
5821 NEW_SEQ_POINT (cfg
, ins
, clause
->handler_offset
, TRUE
);
5822 MONO_ADD_INS (tblock
, ins
);
5825 /* todo: is a fault block unsafe to optimize? */
5826 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5827 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5831 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5833 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5835 /* catch and filter blocks get the exception object on the stack */
5836 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5837 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5838 MonoInst
*dummy_use
;
5840 /* mostly like handle_stack_args (), but just sets the input args */
5841 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5842 tblock
->in_scount
= 1;
5843 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5844 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5847 * Add a dummy use for the exvar so its liveness info will be
5851 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5853 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5854 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5855 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5856 tblock
->real_offset
= clause
->data
.filter_offset
;
5857 tblock
->in_scount
= 1;
5858 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5859 /* The filter block shares the exvar with the handler block */
5860 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5861 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5862 MONO_ADD_INS (tblock
, ins
);
5866 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5867 clause
->data
.catch_class
&&
5868 cfg
->generic_sharing_context
&&
5869 mono_class_check_context_used (clause
->data
.catch_class
)) {
5871 * In shared generic code with catch
5872 * clauses containing type variables
5873 * the exception handling code has to
5874 * be able to get to the rgctx.
5875 * Therefore we have to make sure that
5876 * the vtable/mrgctx argument (for
5877 * static or generic methods) or the
5878 * "this" argument (for non-static
5879 * methods) are live.
5881 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5882 mini_method_get_context (method
)->method_inst
||
5883 method
->klass
->valuetype
) {
5884 mono_get_vtable_var (cfg
);
5886 MonoInst
*dummy_use
;
5888 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5893 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5894 cfg
->cbb
= start_bblock
;
5895 cfg
->args
= arg_array
;
5896 mono_save_args (cfg
, sig
, inline_args
);
5899 /* FIRST CODE BLOCK */
5900 NEW_BBLOCK (cfg
, bblock
);
5901 bblock
->cil_code
= ip
;
5905 ADD_BBLOCK (cfg
, bblock
);
5907 if (cfg
->method
== method
) {
5908 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5909 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5910 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5911 MONO_ADD_INS (bblock
, ins
);
5915 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5916 secman
= mono_security_manager_get_methods ();
5918 security
= (secman
&& mono_method_has_declsec (method
));
5919 /* at this point having security doesn't mean we have any code to generate */
5920 if (security
&& (cfg
->method
== method
)) {
5921 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5922 * And we do not want to enter the next section (with allocation) if we
5923 * have nothing to generate */
5924 security
= mono_declsec_get_demands (method
, &actions
);
5927 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5928 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5930 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5931 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5932 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5934 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5935 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5939 mono_custom_attrs_free (custom
);
5942 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5943 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5947 mono_custom_attrs_free (custom
);
5950 /* not a P/Invoke after all */
5955 if ((init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5956 /* we use a separate basic block for the initialization code */
5957 NEW_BBLOCK (cfg
, init_localsbb
);
5958 cfg
->bb_init
= init_localsbb
;
5959 init_localsbb
->real_offset
= cfg
->real_offset
;
5960 start_bblock
->next_bb
= init_localsbb
;
5961 init_localsbb
->next_bb
= bblock
;
5962 link_bblock (cfg
, start_bblock
, init_localsbb
);
5963 link_bblock (cfg
, init_localsbb
, bblock
);
5965 cfg
->cbb
= init_localsbb
;
5967 start_bblock
->next_bb
= bblock
;
5968 link_bblock (cfg
, start_bblock
, bblock
);
5971 /* at this point we know, if security is TRUE, that some code needs to be generated */
5972 if (security
&& (cfg
->method
== method
)) {
5975 mono_jit_stats
.cas_demand_generation
++;
5977 if (actions
.demand
.blob
) {
5978 /* Add code for SecurityAction.Demand */
5979 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5980 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5981 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5982 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5984 if (actions
.noncasdemand
.blob
) {
5985 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5986 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5987 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5988 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5989 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5990 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5992 if (actions
.demandchoice
.blob
) {
5993 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5994 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5995 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5996 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5997 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
6001 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6003 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
6006 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
6007 /* check if this is native code, e.g. an icall or a p/invoke */
6008 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
6009 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
6011 gboolean pinvk
= (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
);
6012 gboolean icall
= (wrapped
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
);
6014 /* if this ia a native call then it can only be JITted from platform code */
6015 if ((icall
|| pinvk
) && method
->klass
&& method
->klass
->image
) {
6016 if (!mono_security_core_clr_is_platform_image (method
->klass
->image
)) {
6017 MonoException
*ex
= icall
? mono_get_exception_security () :
6018 mono_get_exception_method_access ();
6019 emit_throw_exception (cfg
, ex
);
6026 if (header
->code_size
== 0)
6029 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
6034 if (cfg
->method
== method
)
6035 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
6037 for (n
= 0; n
< header
->num_locals
; ++n
) {
6038 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
6043 /* We force the vtable variable here for all shared methods
6044 for the possibility that they might show up in a stack
6045 trace where their exact instantiation is needed. */
6046 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
6047 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
6048 mini_method_get_context (method
)->method_inst
||
6049 method
->klass
->valuetype
) {
6050 mono_get_vtable_var (cfg
);
6052 /* FIXME: Is there a better way to do this?
6053 We need the variable live for the duration
6054 of the whole method. */
6055 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
6059 /* add a check for this != NULL to inlined methods */
6060 if (is_virtual_call
) {
6063 NEW_ARGLOAD (cfg
, arg_ins
, 0);
6064 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
6065 MONO_EMIT_NEW_CHECK_THIS (cfg
, arg_ins
->dreg
);
6068 skip_dead_blocks
= !dont_verify
;
6069 if (skip_dead_blocks
) {
6070 original_bb
= bb
= mono_basic_block_split (method
, &error
);
6071 if (!mono_error_ok (&error
)) {
6072 mono_error_cleanup (&error
);
6078 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6079 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
6082 start_new_bblock
= 0;
6085 if (cfg
->method
== method
)
6086 cfg
->real_offset
= ip
- header
->code
;
6088 cfg
->real_offset
= inline_offset
;
6093 if (start_new_bblock
) {
6094 bblock
->cil_length
= ip
- bblock
->cil_code
;
6095 if (start_new_bblock
== 2) {
6096 g_assert (ip
== tblock
->cil_code
);
6098 GET_BBLOCK (cfg
, tblock
, ip
);
6100 bblock
->next_bb
= tblock
;
6103 start_new_bblock
= 0;
6104 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
6105 if (cfg
->verbose_level
> 3)
6106 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
6107 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
6111 g_slist_free (class_inits
);
6114 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
6115 link_bblock (cfg
, bblock
, tblock
);
6116 if (sp
!= stack_start
) {
6117 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6119 CHECK_UNVERIFIABLE (cfg
);
6121 bblock
->next_bb
= tblock
;
6124 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
6125 if (cfg
->verbose_level
> 3)
6126 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
6127 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
6130 g_slist_free (class_inits
);
6135 if (skip_dead_blocks
) {
6136 int ip_offset
= ip
- header
->code
;
6138 if (ip_offset
== bb
->end
)
6142 int op_size
= mono_opcode_size (ip
, end
);
6143 g_assert (op_size
> 0); /*The BB formation pass must catch all bad ops*/
6145 if (cfg
->verbose_level
> 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset
);
6147 if (ip_offset
+ op_size
== bb
->end
) {
6148 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6149 MONO_ADD_INS (bblock
, ins
);
6150 start_new_bblock
= 1;
6158 * Sequence points are points where the debugger can place a breakpoint.
6159 * Currently, we generate these automatically at points where the IL
6162 if (seq_points
&& sp
== stack_start
) {
6163 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
6164 MONO_ADD_INS (cfg
->cbb
, ins
);
6167 bblock
->real_offset
= cfg
->real_offset
;
6169 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
6170 guint32 cil_offset
= ip
- header
->code
;
6171 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
6173 /* TODO: Use an increment here */
6174 #if defined(TARGET_X86)
6175 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
6176 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
6178 MONO_ADD_INS (cfg
->cbb
, ins
);
6180 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
6181 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
6185 if (cfg
->verbose_level
> 3)
6186 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
6190 if (cfg
->keep_cil_nops
)
6191 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
6193 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6195 MONO_ADD_INS (bblock
, ins
);
6198 if (should_insert_brekpoint (cfg
->method
))
6199 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
6201 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6203 MONO_ADD_INS (bblock
, ins
);
6209 CHECK_STACK_OVF (1);
6210 n
= (*ip
)-CEE_LDARG_0
;
6212 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
6220 CHECK_STACK_OVF (1);
6221 n
= (*ip
)-CEE_LDLOC_0
;
6223 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
6232 n
= (*ip
)-CEE_STLOC_0
;
6235 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
6237 emit_stloc_ir (cfg
, sp
, header
, n
);
6244 CHECK_STACK_OVF (1);
6247 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
6253 CHECK_STACK_OVF (1);
6256 NEW_ARGLOADA (cfg
, ins
, n
);
6257 MONO_ADD_INS (cfg
->cbb
, ins
);
6267 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
6269 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
6274 CHECK_STACK_OVF (1);
6277 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
6281 case CEE_LDLOCA_S
: {
6282 unsigned char *tmp_ip
;
6284 CHECK_STACK_OVF (1);
6285 CHECK_LOCAL (ip
[1]);
6287 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
6293 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
6302 CHECK_LOCAL (ip
[1]);
6303 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
6305 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
6310 CHECK_STACK_OVF (1);
6311 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
6312 ins
->type
= STACK_OBJ
;
6317 CHECK_STACK_OVF (1);
6318 EMIT_NEW_ICONST (cfg
, ins
, -1);
6331 CHECK_STACK_OVF (1);
6332 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
6338 CHECK_STACK_OVF (1);
6340 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
6346 CHECK_STACK_OVF (1);
6347 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
6353 CHECK_STACK_OVF (1);
6354 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6355 ins
->type
= STACK_I8
;
6356 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6358 ins
->inst_l
= (gint64
)read64 (ip
);
6359 MONO_ADD_INS (bblock
, ins
);
6365 gboolean use_aotconst
= FALSE
;
6367 #ifdef TARGET_POWERPC
6368 /* FIXME: Clean this up */
6369 if (cfg
->compile_aot
)
6370 use_aotconst
= TRUE
;
6373 /* FIXME: we should really allocate this only late in the compilation process */
6374 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
6376 CHECK_STACK_OVF (1);
6382 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
6384 dreg
= alloc_freg (cfg
);
6385 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
6386 ins
->type
= STACK_R8
;
6388 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
6389 ins
->type
= STACK_R8
;
6390 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6392 MONO_ADD_INS (bblock
, ins
);
6402 gboolean use_aotconst
= FALSE
;
6404 #ifdef TARGET_POWERPC
6405 /* FIXME: Clean this up */
6406 if (cfg
->compile_aot
)
6407 use_aotconst
= TRUE
;
6410 /* FIXME: we should really allocate this only late in the compilation process */
6411 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
6413 CHECK_STACK_OVF (1);
6419 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
6421 dreg
= alloc_freg (cfg
);
6422 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
6423 ins
->type
= STACK_R8
;
6425 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
6426 ins
->type
= STACK_R8
;
6427 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6429 MONO_ADD_INS (bblock
, ins
);
6438 MonoInst
*temp
, *store
;
6440 CHECK_STACK_OVF (1);
6444 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
6445 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
6447 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6450 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6463 if (sp
[0]->type
== STACK_R8
)
6464 /* we need to pop the value from the x86 FP stack */
6465 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
6474 if (stack_start
!= sp
)
6476 token
= read32 (ip
+ 1);
6477 /* FIXME: check the signature matches */
6478 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6480 if (!cmethod
|| mono_loader_get_last_error ())
6483 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
6484 GENERIC_SHARING_FAILURE (CEE_JMP
);
6486 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6487 CHECK_CFG_EXCEPTION
;
6489 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6491 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
6494 /* Handle tail calls similarly to calls */
6495 n
= fsig
->param_count
+ fsig
->hasthis
;
6497 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6498 call
->method
= cmethod
;
6499 call
->tail_call
= TRUE
;
6500 call
->signature
= mono_method_signature (cmethod
);
6501 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6502 call
->inst
.inst_p0
= cmethod
;
6503 for (i
= 0; i
< n
; ++i
)
6504 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6506 mono_arch_emit_call (cfg
, call
);
6507 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
6510 for (i
= 0; i
< num_args
; ++i
)
6511 /* Prevent arguments from being optimized away */
6512 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6514 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6515 ins
= (MonoInst
*)call
;
6516 ins
->inst_p0
= cmethod
;
6517 MONO_ADD_INS (bblock
, ins
);
6521 start_new_bblock
= 1;
6526 case CEE_CALLVIRT
: {
6527 MonoInst
*addr
= NULL
;
6528 MonoMethodSignature
*fsig
= NULL
;
6530 int virtual = *ip
== CEE_CALLVIRT
;
6531 int calli
= *ip
== CEE_CALLI
;
6532 gboolean pass_imt_from_rgctx
= FALSE
;
6533 MonoInst
*imt_arg
= NULL
;
6534 gboolean pass_vtable
= FALSE
;
6535 gboolean pass_mrgctx
= FALSE
;
6536 MonoInst
*vtable_arg
= NULL
;
6537 gboolean check_this
= FALSE
;
6538 gboolean supported_tail_call
= FALSE
;
6541 token
= read32 (ip
+ 1);
6548 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
6549 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
6551 fsig
= mono_metadata_parse_signature (image
, token
);
6553 n
= fsig
->param_count
+ fsig
->hasthis
;
6555 if (method
->dynamic
&& fsig
->pinvoke
) {
6559 * This is a call through a function pointer using a pinvoke
6560 * signature. Have to create a wrapper and call that instead.
6561 * FIXME: This is very slow, need to create a wrapper at JIT time
6562 * instead based on the signature.
6564 EMIT_NEW_IMAGECONST (cfg
, args
[0], method
->klass
->image
);
6565 EMIT_NEW_PCONST (cfg
, args
[1], fsig
);
6567 addr
= mono_emit_jit_icall (cfg
, mono_get_native_calli_wrapper
, args
);
6570 MonoMethod
*cil_method
;
6572 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
6573 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
6574 cil_method
= cmethod
;
6575 } else if (constrained_call
) {
6576 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
6578 * This is needed since get_method_constrained can't find
6579 * the method in klass representing a type var.
6580 * The type var is guaranteed to be a reference type in this
6583 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6584 cil_method
= cmethod
;
6585 g_assert (!cmethod
->klass
->valuetype
);
6587 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
6590 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6591 cil_method
= cmethod
;
6594 if (!cmethod
|| mono_loader_get_last_error ())
6596 if (!dont_verify
&& !cfg
->skip_visibility
) {
6597 MonoMethod
*target_method
= cil_method
;
6598 if (method
->is_inflated
) {
6599 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
6601 if (!mono_method_can_access_method (method_definition
, target_method
) &&
6602 !mono_method_can_access_method (method
, cil_method
))
6603 METHOD_ACCESS_FAILURE
;
6606 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
6607 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
6609 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
6610 /* MS.NET seems to silently convert this to a callvirt */
6615 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6616 * converts to a callvirt.
6618 * tests/bug-515884.il is an example of this behavior
6620 const int test_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
| METHOD_ATTRIBUTE_STATIC
;
6621 const int expected_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
;
6622 if (!virtual && cmethod
->klass
->marshalbyref
&& (cmethod
->flags
& test_flags
) == expected_flags
&& cfg
->method
->wrapper_type
== MONO_WRAPPER_NONE
)
6626 if (!cmethod
->klass
->inited
)
6627 if (!mono_class_init (cmethod
->klass
))
6630 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
6631 mini_class_is_system_array (cmethod
->klass
)) {
6632 array_rank
= cmethod
->klass
->rank
;
6633 fsig
= mono_method_signature (cmethod
);
6635 fsig
= mono_method_signature (cmethod
);
6640 if (fsig
->pinvoke
) {
6641 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
6642 check_for_pending_exc
, FALSE
);
6643 fsig
= mono_method_signature (wrapper
);
6644 } else if (constrained_call
) {
6645 fsig
= mono_method_signature (cmethod
);
6647 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
6651 mono_save_token_info (cfg
, image
, token
, cil_method
);
6653 n
= fsig
->param_count
+ fsig
->hasthis
;
6655 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
6656 if (check_linkdemand (cfg
, method
, cmethod
))
6658 CHECK_CFG_EXCEPTION
;
6661 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
6662 g_assert_not_reached ();
6665 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
6668 if (!cfg
->generic_sharing_context
&& cmethod
)
6669 g_assert (!mono_method_check_context_used (cmethod
));
6673 //g_assert (!virtual || fsig->hasthis);
6677 if (constrained_call
) {
6679 * We have the `constrained.' prefix opcode.
6681 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
6683 * The type parameter is instantiated as a valuetype,
6684 * but that type doesn't override the method we're
6685 * calling, so we need to box `this'.
6687 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &constrained_call
->byval_arg
, sp
[0]->dreg
, 0);
6688 ins
->klass
= constrained_call
;
6689 sp
[0] = handle_box (cfg
, ins
, constrained_call
, mono_class_check_context_used (constrained_call
));
6690 CHECK_CFG_EXCEPTION
;
6691 } else if (!constrained_call
->valuetype
) {
6692 int dreg
= alloc_ireg_ref (cfg
);
6695 * The type parameter is instantiated as a reference
6696 * type. We have a managed pointer on the stack, so
6697 * we need to dereference it here.
6699 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6700 ins
->type
= STACK_OBJ
;
6702 } else if (cmethod
->klass
->valuetype
)
6704 constrained_call
= NULL
;
6707 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6711 * If the callee is a shared method, then its static cctor
6712 * might not get called after the call was patched.
6714 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
6715 emit_generic_class_init (cfg
, cmethod
->klass
);
6716 CHECK_TYPELOAD (cmethod
->klass
);
6719 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6720 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6721 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6722 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6723 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6726 * Pass vtable iff target method might
6727 * be shared, which means that sharing
6728 * is enabled for its class and its
6729 * context is sharable (and it's not a
6732 if (sharing_enabled
&& context_sharable
&&
6733 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6737 if (cmethod
&& mini_method_get_context (cmethod
) &&
6738 mini_method_get_context (cmethod
)->method_inst
) {
6739 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6740 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6741 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6743 g_assert (!pass_vtable
);
6745 if (sharing_enabled
&& context_sharable
)
6749 if (cfg
->generic_sharing_context
&& cmethod
) {
6750 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6752 context_used
= mono_method_check_context_used (cmethod
);
6754 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6755 /* Generic method interface
6756 calls are resolved via a
6757 helper function and don't
6759 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6760 pass_imt_from_rgctx
= TRUE
;
6764 * If a shared method calls another
6765 * shared method then the caller must
6766 * have a generic sharing context
6767 * because the magic trampoline
6768 * requires it. FIXME: We shouldn't
6769 * have to force the vtable/mrgctx
6770 * variable here. Instead there
6771 * should be a flag in the cfg to
6772 * request a generic sharing context.
6775 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6776 mono_get_vtable_var (cfg
);
6781 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6783 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6785 CHECK_TYPELOAD (cmethod
->klass
);
6786 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6791 g_assert (!vtable_arg
);
6793 if (!cfg
->compile_aot
) {
6795 * emit_get_rgctx_method () calls mono_class_vtable () so check
6796 * for type load errors before.
6798 mono_class_setup_vtable (cmethod
->klass
);
6799 CHECK_TYPELOAD (cmethod
->klass
);
6802 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6804 /* !marshalbyref is needed to properly handle generic methods + remoting */
6805 if ((!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6806 MONO_METHOD_IS_FINAL (cmethod
)) &&
6807 !cmethod
->klass
->marshalbyref
) {
6814 if (pass_imt_from_rgctx
) {
6815 g_assert (!pass_vtable
);
6818 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6819 cmethod
, MONO_RGCTX_INFO_METHOD
);
6823 MONO_EMIT_NEW_CHECK_THIS (cfg
, sp
[0]->dreg
);
6825 /* Calling virtual generic methods */
6826 if (cmethod
&& virtual &&
6827 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6828 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6829 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6830 mono_method_signature (cmethod
)->generic_param_count
) {
6831 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6832 MonoInst
*iargs
[4];
6834 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6836 /* Prevent inlining of methods that contain indirect calls */
6839 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6840 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
&& mono_use_imt
) {
6841 g_assert (!imt_arg
);
6843 g_assert (cmethod
->is_inflated
);
6844 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6845 cmethod
, MONO_RGCTX_INFO_METHOD
);
6846 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
, NULL
);
6850 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6851 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6852 MONO_ADD_INS (bblock
, store
);
6854 /* FIXME: This should be a managed pointer */
6855 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6857 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6858 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6859 cmethod
, MONO_RGCTX_INFO_METHOD
);
6860 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6861 addr
= mono_emit_jit_icall (cfg
,
6862 mono_helper_compile_generic_method
, iargs
);
6864 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6866 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, NULL
);
6869 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6870 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6872 CHECK_CFG_EXCEPTION
;
6880 * Implement a workaround for the inherent races involved in locking:
6886 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6887 * try block, the Exit () won't be executed, see:
6888 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6889 * To work around this, we extend such try blocks to include the last x bytes
6890 * of the Monitor.Enter () call.
6892 if (cmethod
&& cmethod
->klass
== mono_defaults
.monitor_class
&& !strcmp (cmethod
->name
, "Enter") && mono_method_signature (cmethod
)->param_count
== 1) {
6893 MonoBasicBlock
*tbb
;
6895 GET_BBLOCK (cfg
, tbb
, ip
+ 5);
6897 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6898 * from Monitor.Enter like ArgumentNullException.
6900 if (tbb
->try_start
&& MONO_REGION_FLAGS(tbb
->region
) == MONO_EXCEPTION_CLAUSE_FINALLY
) {
6901 /* Mark this bblock as needing to be extended */
6902 tbb
->extend_try_block
= TRUE
;
6906 /* Conversion to a JIT intrinsic */
6907 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6909 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6910 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6915 CHECK_CFG_EXCEPTION
;
6923 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6924 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6925 !disable_inline
&& mono_method_check_inlining (cfg
, cmethod
) &&
6926 !g_list_find (dont_inline
, cmethod
)) {
6928 gboolean always
= FALSE
;
6930 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6931 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6932 /* Prevent inlining of methods that call wrappers */
6934 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6938 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, always
))) {
6940 cfg
->real_offset
+= 5;
6943 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6944 /* *sp is already set by inline_method */
6947 inline_costs
+= costs
;
6953 inline_costs
+= 10 * num_calls
++;
6955 /* Tail recursion elimination */
6956 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6957 gboolean has_vtargs
= FALSE
;
6960 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6963 /* keep it simple */
6964 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6965 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6970 for (i
= 0; i
< n
; ++i
)
6971 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6972 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6973 MONO_ADD_INS (bblock
, ins
);
6974 tblock
= start_bblock
->out_bb
[0];
6975 link_bblock (cfg
, bblock
, tblock
);
6976 ins
->inst_target_bb
= tblock
;
6977 start_new_bblock
= 1;
6979 /* skip the CEE_RET, too */
6980 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6990 /* Generic sharing */
6991 /* FIXME: only do this for generic methods if
6992 they are not shared! */
6993 if (context_used
&& !imt_arg
&& !array_rank
&&
6994 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6995 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6996 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6997 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
7000 g_assert (cfg
->generic_sharing_context
&& cmethod
);
7004 * We are compiling a call to a
7005 * generic method from shared code,
7006 * which means that we have to look up
7007 * the method in the rgctx and do an
7010 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7013 /* Indirect calls */
7015 g_assert (!imt_arg
);
7017 if (*ip
== CEE_CALL
)
7018 g_assert (context_used
);
7019 else if (*ip
== CEE_CALLI
)
7020 g_assert (!vtable_arg
);
7022 /* FIXME: what the hell is this??? */
7023 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
7024 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
7026 /* Prevent inlining of methods with indirect calls */
7032 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, vtable_arg
);
7033 call
= (MonoCallInst
*)ins
;
7035 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
7037 * Instead of emitting an indirect call, emit a direct call
7038 * with the contents of the aotconst as the patch info.
7040 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
7042 } else if (addr
->opcode
== OP_GOT_ENTRY
&& addr
->inst_right
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
7043 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_right
->inst_left
, fsig
, sp
);
7046 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, NULL
);
7049 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7050 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7052 CHECK_CFG_EXCEPTION
;
7063 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
7064 MonoInst
*val
= sp
[fsig
->param_count
];
7066 if (val
->type
== STACK_OBJ
) {
7067 MonoInst
*iargs
[2];
7072 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
7075 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
7076 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, val
->dreg
);
7077 if (cfg
->gen_write_barriers
&& val
->type
== STACK_OBJ
&& !(val
->opcode
== OP_PCONST
&& val
->inst_c0
== 0))
7078 emit_write_barrier (cfg
, addr
, val
, 0);
7079 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
7080 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7082 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
7085 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
7086 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
7087 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
7088 CHECK_TYPELOAD (cmethod
->klass
);
7091 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7094 g_assert_not_reached ();
7097 CHECK_CFG_EXCEPTION
;
7104 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
7106 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7107 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7109 CHECK_CFG_EXCEPTION
;
7116 /* Tail prefix / tail call optimization */
7118 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7119 /* FIXME: runtime generic context pointer for jumps? */
7120 /* FIXME: handle this for generic sharing eventually */
7121 supported_tail_call
= cmethod
&&
7122 ((((ins_flag
& MONO_INST_TAILCALL
) && (*ip
== CEE_CALL
))
7123 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7124 && !vtable_arg
&& !cfg
->generic_sharing_context
&& is_supported_tail_call (cfg
, method
, cmethod
, fsig
);
7126 if (supported_tail_call
) {
7129 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7132 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7134 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7135 /* Handle tail calls similarly to calls */
7136 call
= mono_emit_call_args (cfg
, mono_method_signature (cmethod
), sp
, FALSE
, FALSE
, TRUE
, FALSE
);
7138 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
7139 call
->tail_call
= TRUE
;
7140 call
->method
= cmethod
;
7141 call
->signature
= mono_method_signature (cmethod
);
7144 * We implement tail calls by storing the actual arguments into the
7145 * argument variables, then emitting a CEE_JMP.
7147 for (i
= 0; i
< n
; ++i
) {
7148 /* Prevent argument from being register allocated */
7149 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
7150 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
7154 ins
= (MonoInst
*)call
;
7155 ins
->inst_p0
= cmethod
;
7156 ins
->inst_p1
= arg_array
[0];
7157 MONO_ADD_INS (bblock
, ins
);
7158 link_bblock (cfg
, bblock
, end_bblock
);
7159 start_new_bblock
= 1;
7161 CHECK_CFG_EXCEPTION
;
7166 // FIXME: Eliminate unreachable epilogs
7169 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7170 * only reachable from this call.
7172 GET_BBLOCK (cfg
, tblock
, ip
);
7173 if (tblock
== bblock
|| tblock
->in_count
== 0)
7180 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
7181 imt_arg
, vtable_arg
);
7183 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7184 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7186 CHECK_CFG_EXCEPTION
;
7193 if (cfg
->method
!= method
) {
7194 /* return from inlined method */
7196 * If in_count == 0, that means the ret is unreachable due to
7197 * being preceeded by a throw. In that case, inline_method () will
7198 * handle setting the return value
7199 * (test case: test_0_inline_throw ()).
7201 if (return_var
&& cfg
->cbb
->in_count
) {
7205 //g_assert (returnvar != -1);
7206 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
7207 cfg
->ret_var_set
= TRUE
;
7211 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
7215 * Place a seq point here too even through the IL stack is not
7216 * empty, so a step over on
7219 * will work correctly.
7221 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
7222 MONO_ADD_INS (cfg
->cbb
, ins
);
7225 g_assert (!return_var
);
7229 if ((method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
|| method
->wrapper_type
== MONO_WRAPPER_NONE
) && target_type_is_incompatible (cfg
, ret_type
, *sp
))
7232 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
7235 if (!cfg
->vret_addr
) {
7238 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
7240 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
7242 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
7243 ins
->klass
= mono_class_from_mono_type (ret_type
);
7246 #ifdef MONO_ARCH_SOFT_FLOAT
7247 if (COMPILE_SOFT_FLOAT (cfg
) && !ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
7248 MonoInst
*iargs
[1];
7252 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
7253 mono_arch_emit_setret (cfg
, method
, conv
);
7255 mono_arch_emit_setret (cfg
, method
, *sp
);
7258 mono_arch_emit_setret (cfg
, method
, *sp
);
7263 if (sp
!= stack_start
)
7265 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7267 ins
->inst_target_bb
= end_bblock
;
7268 MONO_ADD_INS (bblock
, ins
);
7269 link_bblock (cfg
, bblock
, end_bblock
);
7270 start_new_bblock
= 1;
7274 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7276 target
= ip
+ 1 + (signed char)(*ip
);
7278 GET_BBLOCK (cfg
, tblock
, target
);
7279 link_bblock (cfg
, bblock
, tblock
);
7280 ins
->inst_target_bb
= tblock
;
7281 if (sp
!= stack_start
) {
7282 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7284 CHECK_UNVERIFIABLE (cfg
);
7286 MONO_ADD_INS (bblock
, ins
);
7287 start_new_bblock
= 1;
7288 inline_costs
+= BRANCH_COST
;
7302 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
7304 target
= ip
+ 1 + *(signed char*)ip
;
7310 inline_costs
+= BRANCH_COST
;
7314 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7317 target
= ip
+ 4 + (gint32
)read32(ip
);
7319 GET_BBLOCK (cfg
, tblock
, target
);
7320 link_bblock (cfg
, bblock
, tblock
);
7321 ins
->inst_target_bb
= tblock
;
7322 if (sp
!= stack_start
) {
7323 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7325 CHECK_UNVERIFIABLE (cfg
);
7328 MONO_ADD_INS (bblock
, ins
);
7330 start_new_bblock
= 1;
7331 inline_costs
+= BRANCH_COST
;
7338 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
7339 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
7340 guint32 opsize
= is_short
? 1 : 4;
7342 CHECK_OPSIZE (opsize
);
7344 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
7347 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
7352 GET_BBLOCK (cfg
, tblock
, target
);
7353 link_bblock (cfg
, bblock
, tblock
);
7354 GET_BBLOCK (cfg
, tblock
, ip
);
7355 link_bblock (cfg
, bblock
, tblock
);
7357 if (sp
!= stack_start
) {
7358 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7359 CHECK_UNVERIFIABLE (cfg
);
7362 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
7363 cmp
->sreg1
= sp
[0]->dreg
;
7364 type_from_op (cmp
, sp
[0], NULL
);
7367 #if SIZEOF_REGISTER == 4
7368 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
7369 /* Convert it to OP_LCOMPARE */
7370 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
7371 ins
->type
= STACK_I8
;
7372 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
7374 MONO_ADD_INS (bblock
, ins
);
7375 cmp
->opcode
= OP_LCOMPARE
;
7376 cmp
->sreg2
= ins
->dreg
;
7379 MONO_ADD_INS (bblock
, cmp
);
7381 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
7382 type_from_op (ins
, sp
[0], NULL
);
7383 MONO_ADD_INS (bblock
, ins
);
7384 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
7385 GET_BBLOCK (cfg
, tblock
, target
);
7386 ins
->inst_true_bb
= tblock
;
7387 GET_BBLOCK (cfg
, tblock
, ip
);
7388 ins
->inst_false_bb
= tblock
;
7389 start_new_bblock
= 2;
7392 inline_costs
+= BRANCH_COST
;
7407 MONO_INST_NEW (cfg
, ins
, *ip
);
7409 target
= ip
+ 4 + (gint32
)read32(ip
);
7415 inline_costs
+= BRANCH_COST
;
7419 MonoBasicBlock
**targets
;
7420 MonoBasicBlock
*default_bblock
;
7421 MonoJumpInfoBBTable
*table
;
7422 int offset_reg
= alloc_preg (cfg
);
7423 int target_reg
= alloc_preg (cfg
);
7424 int table_reg
= alloc_preg (cfg
);
7425 int sum_reg
= alloc_preg (cfg
);
7426 gboolean use_op_switch
;
7430 n
= read32 (ip
+ 1);
7433 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
7437 CHECK_OPSIZE (n
* sizeof (guint32
));
7438 target
= ip
+ n
* sizeof (guint32
);
7440 GET_BBLOCK (cfg
, default_bblock
, target
);
7441 default_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
7443 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
7444 for (i
= 0; i
< n
; ++i
) {
7445 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
7446 targets
[i
] = tblock
;
7447 targets
[i
]->flags
|= BB_INDIRECT_JUMP_TARGET
;
7451 if (sp
!= stack_start
) {
7453 * Link the current bb with the targets as well, so handle_stack_args
7454 * will set their in_stack correctly.
7456 link_bblock (cfg
, bblock
, default_bblock
);
7457 for (i
= 0; i
< n
; ++i
)
7458 link_bblock (cfg
, bblock
, targets
[i
]);
7460 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7462 CHECK_UNVERIFIABLE (cfg
);
7465 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
7466 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
7469 for (i
= 0; i
< n
; ++i
)
7470 link_bblock (cfg
, bblock
, targets
[i
]);
7472 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
7473 table
->table
= targets
;
7474 table
->table_size
= n
;
7476 use_op_switch
= FALSE
;
7478 /* ARM implements SWITCH statements differently */
7479 /* FIXME: Make it use the generic implementation */
7480 if (!cfg
->compile_aot
)
7481 use_op_switch
= TRUE
;
7484 if (COMPILE_LLVM (cfg
))
7485 use_op_switch
= TRUE
;
7487 cfg
->cbb
->has_jump_table
= 1;
7489 if (use_op_switch
) {
7490 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
7491 ins
->sreg1
= src1
->dreg
;
7492 ins
->inst_p0
= table
;
7493 ins
->inst_many_bb
= targets
;
7494 ins
->klass
= GUINT_TO_POINTER (n
);
7495 MONO_ADD_INS (cfg
->cbb
, ins
);
7497 if (sizeof (gpointer
) == 8)
7498 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
7500 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
7502 #if SIZEOF_REGISTER == 8
7503 /* The upper word might not be zero, and we add it to a 64 bit address later */
7504 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
7507 if (cfg
->compile_aot
) {
7508 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
7510 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
7511 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
7512 ins
->inst_p0
= table
;
7513 ins
->dreg
= table_reg
;
7514 MONO_ADD_INS (cfg
->cbb
, ins
);
7517 /* FIXME: Use load_memindex */
7518 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
7519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
7520 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
7522 start_new_bblock
= 1;
7523 inline_costs
+= (BRANCH_COST
* 2);
7543 dreg
= alloc_freg (cfg
);
7546 dreg
= alloc_lreg (cfg
);
7549 dreg
= alloc_ireg_ref (cfg
);
7552 dreg
= alloc_preg (cfg
);
7555 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
7556 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
7557 ins
->flags
|= ins_flag
;
7559 MONO_ADD_INS (bblock
, ins
);
7561 if (ins
->flags
& MONO_INST_VOLATILE
) {
7562 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7563 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7564 emit_memory_barrier (cfg
, FullBarrier
);
7579 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
7580 ins
->flags
|= ins_flag
;
7583 if (ins
->flags
& MONO_INST_VOLATILE
) {
7584 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7585 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7586 emit_memory_barrier (cfg
, FullBarrier
);
7589 MONO_ADD_INS (bblock
, ins
);
7591 if (cfg
->gen_write_barriers
&& *ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0)))
7592 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
7601 MONO_INST_NEW (cfg
, ins
, (*ip
));
7603 ins
->sreg1
= sp
[0]->dreg
;
7604 ins
->sreg2
= sp
[1]->dreg
;
7605 type_from_op (ins
, sp
[0], sp
[1]);
7607 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7609 /* Use the immediate opcodes if possible */
7610 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
7611 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
7612 if (imm_opcode
!= -1) {
7613 ins
->opcode
= imm_opcode
;
7614 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7617 sp
[1]->opcode
= OP_NOP
;
7621 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7623 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7640 MONO_INST_NEW (cfg
, ins
, (*ip
));
7642 ins
->sreg1
= sp
[0]->dreg
;
7643 ins
->sreg2
= sp
[1]->dreg
;
7644 type_from_op (ins
, sp
[0], sp
[1]);
7646 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
7647 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7649 /* FIXME: Pass opcode to is_inst_imm */
7651 /* Use the immediate opcodes if possible */
7652 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
7655 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
7656 if (imm_opcode
!= -1) {
7657 ins
->opcode
= imm_opcode
;
7658 if (sp
[1]->opcode
== OP_I8CONST
) {
7659 #if SIZEOF_REGISTER == 8
7660 ins
->inst_imm
= sp
[1]->inst_l
;
7662 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
7663 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
7667 ins
->inst_imm
= (gssize
)(sp
[1]->inst_c0
);
7670 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7671 if (sp
[1]->next
== NULL
)
7672 sp
[1]->opcode
= OP_NOP
;
7675 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7677 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7690 case CEE_CONV_OVF_I8
:
7691 case CEE_CONV_OVF_U8
:
7695 /* Special case this earlier so we have long constants in the IR */
7696 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
7697 int data
= sp
[-1]->inst_c0
;
7698 sp
[-1]->opcode
= OP_I8CONST
;
7699 sp
[-1]->type
= STACK_I8
;
7700 #if SIZEOF_REGISTER == 8
7701 if ((*ip
) == CEE_CONV_U8
)
7702 sp
[-1]->inst_c0
= (guint32
)data
;
7704 sp
[-1]->inst_c0
= data
;
7706 sp
[-1]->inst_ls_word
= data
;
7707 if ((*ip
) == CEE_CONV_U8
)
7708 sp
[-1]->inst_ms_word
= 0;
7710 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7712 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7719 case CEE_CONV_OVF_I4
:
7720 case CEE_CONV_OVF_I1
:
7721 case CEE_CONV_OVF_I2
:
7722 case CEE_CONV_OVF_I
:
7723 case CEE_CONV_OVF_U
:
7726 if (sp
[-1]->type
== STACK_R8
) {
7727 ADD_UNOP (CEE_CONV_OVF_I8
);
7734 case CEE_CONV_OVF_U1
:
7735 case CEE_CONV_OVF_U2
:
7736 case CEE_CONV_OVF_U4
:
7739 if (sp
[-1]->type
== STACK_R8
) {
7740 ADD_UNOP (CEE_CONV_OVF_U8
);
7747 case CEE_CONV_OVF_I1_UN
:
7748 case CEE_CONV_OVF_I2_UN
:
7749 case CEE_CONV_OVF_I4_UN
:
7750 case CEE_CONV_OVF_I8_UN
:
7751 case CEE_CONV_OVF_U1_UN
:
7752 case CEE_CONV_OVF_U2_UN
:
7753 case CEE_CONV_OVF_U4_UN
:
7754 case CEE_CONV_OVF_U8_UN
:
7755 case CEE_CONV_OVF_I_UN
:
7756 case CEE_CONV_OVF_U_UN
:
7763 CHECK_CFG_EXCEPTION
;
7767 case CEE_ADD_OVF_UN
:
7769 case CEE_MUL_OVF_UN
:
7771 case CEE_SUB_OVF_UN
:
7779 token
= read32 (ip
+ 1);
7780 klass
= mini_get_class (method
, token
, generic_context
);
7781 CHECK_TYPELOAD (klass
);
7783 if (generic_class_is_reference_type (cfg
, klass
)) {
7784 MonoInst
*store
, *load
;
7785 int dreg
= alloc_ireg_ref (cfg
);
7787 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7788 load
->flags
|= ins_flag
;
7789 MONO_ADD_INS (cfg
->cbb
, load
);
7791 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7792 store
->flags
|= ins_flag
;
7793 MONO_ADD_INS (cfg
->cbb
, store
);
7795 if (cfg
->gen_write_barriers
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
)
7796 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
7798 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7810 token
= read32 (ip
+ 1);
7811 klass
= mini_get_class (method
, token
, generic_context
);
7812 CHECK_TYPELOAD (klass
);
7814 /* Optimize the common ldobj+stloc combination */
7824 loc_index
= ip
[5] - CEE_STLOC_0
;
7831 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7832 CHECK_LOCAL (loc_index
);
7834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7835 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7841 /* Optimize the ldobj+stobj combination */
7842 /* The reference case ends up being a load+store anyway */
7843 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7848 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7855 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7864 CHECK_STACK_OVF (1);
7866 n
= read32 (ip
+ 1);
7868 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7869 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7870 ins
->type
= STACK_OBJ
;
7873 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7874 MonoInst
*iargs
[1];
7876 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7877 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7879 if (cfg
->opt
& MONO_OPT_SHARED
) {
7880 MonoInst
*iargs
[3];
7882 if (cfg
->compile_aot
) {
7883 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7885 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7886 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7887 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7888 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7889 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7891 if (bblock
->out_of_line
) {
7892 MonoInst
*iargs
[2];
7894 if (image
== mono_defaults
.corlib
) {
7896 * Avoid relocations in AOT and save some space by using a
7897 * version of helper_ldstr specialized to mscorlib.
7899 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7900 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7902 /* Avoid creating the string object */
7903 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7904 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7905 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7909 if (cfg
->compile_aot
) {
7910 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7912 MONO_ADD_INS (bblock
, ins
);
7915 NEW_PCONST (cfg
, ins
, NULL
);
7916 ins
->type
= STACK_OBJ
;
7917 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7919 OUT_OF_MEMORY_FAILURE
;
7922 MONO_ADD_INS (bblock
, ins
);
7931 MonoInst
*iargs
[2];
7932 MonoMethodSignature
*fsig
;
7935 MonoInst
*vtable_arg
= NULL
;
7938 token
= read32 (ip
+ 1);
7939 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7940 if (!cmethod
|| mono_loader_get_last_error ())
7942 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7946 mono_save_token_info (cfg
, image
, token
, cmethod
);
7948 if (!mono_class_init (cmethod
->klass
))
7951 if (cfg
->generic_sharing_context
)
7952 context_used
= mono_method_check_context_used (cmethod
);
7954 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7955 if (check_linkdemand (cfg
, method
, cmethod
))
7957 CHECK_CFG_EXCEPTION
;
7958 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7959 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7962 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
7963 emit_generic_class_init (cfg
, cmethod
->klass
);
7964 CHECK_TYPELOAD (cmethod
->klass
);
7967 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7968 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7969 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7970 mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7971 CHECK_TYPELOAD (cmethod
->klass
);
7973 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7974 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7977 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7978 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7980 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7982 CHECK_TYPELOAD (cmethod
->klass
);
7983 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7988 n
= fsig
->param_count
;
7992 * Generate smaller code for the common newobj <exception> instruction in
7993 * argument checking code.
7995 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7996 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7997 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7998 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7999 MonoInst
*iargs
[3];
8001 g_assert (!vtable_arg
);
8005 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
8008 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
8012 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
8017 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
8020 g_assert_not_reached ();
8028 /* move the args to allow room for 'this' in the first position */
8034 /* check_call_signature () requires sp[0] to be set */
8035 this_ins
.type
= STACK_OBJ
;
8037 if (check_call_signature (cfg
, fsig
, sp
))
8042 if (mini_class_is_system_array (cmethod
->klass
)) {
8043 g_assert (!vtable_arg
);
8045 *sp
= emit_get_rgctx_method (cfg
, context_used
,
8046 cmethod
, MONO_RGCTX_INFO_METHOD
);
8048 /* Avoid varargs in the common case */
8049 if (fsig
->param_count
== 1)
8050 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
8051 else if (fsig
->param_count
== 2)
8052 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
8053 else if (fsig
->param_count
== 3)
8054 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_3
, sp
);
8056 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
8057 } else if (cmethod
->string_ctor
) {
8058 g_assert (!context_used
);
8059 g_assert (!vtable_arg
);
8060 /* we simply pass a null pointer */
8061 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
8062 /* now call the string ctor */
8063 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
, NULL
);
8065 MonoInst
* callvirt_this_arg
= NULL
;
8067 if (cmethod
->klass
->valuetype
) {
8068 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
8069 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
8070 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
8075 * The code generated by mini_emit_virtual_call () expects
8076 * iargs [0] to be a boxed instance, but luckily the vcall
8077 * will be transformed into a normal call there.
8079 } else if (context_used
) {
8080 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, context_used
);
8083 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
8085 CHECK_TYPELOAD (cmethod
->klass
);
8088 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8089 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8090 * As a workaround, we call class cctors before allocating objects.
8092 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8093 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8094 if (cfg
->verbose_level
> 2)
8095 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
8096 class_inits
= g_slist_prepend (class_inits
, vtable
);
8099 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, 0);
8102 CHECK_CFG_EXCEPTION
; /*for handle_alloc*/
8105 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
8107 /* Now call the actual ctor */
8108 /* Avoid virtual calls to ctors if possible */
8109 if (cmethod
->klass
->marshalbyref
)
8110 callvirt_this_arg
= sp
[0];
8113 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_ctor (cfg
, cmethod
, fsig
, sp
))) {
8114 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
8115 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
8120 CHECK_CFG_EXCEPTION
;
8121 } else if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
8122 !disable_inline
&& mono_method_check_inlining (cfg
, cmethod
) &&
8123 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
8124 !g_list_find (dont_inline
, cmethod
)) {
8127 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
8128 cfg
->real_offset
+= 5;
8131 inline_costs
+= costs
- 5;
8134 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
, NULL
);
8136 } else if (context_used
&&
8137 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
8138 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
8139 MonoInst
*cmethod_addr
;
8141 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
8142 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
8144 mono_emit_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
8147 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
,
8148 callvirt_this_arg
, NULL
, vtable_arg
);
8152 if (alloc
== NULL
) {
8154 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
8155 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
8169 token
= read32 (ip
+ 1);
8170 klass
= mini_get_class (method
, token
, generic_context
);
8171 CHECK_TYPELOAD (klass
);
8172 if (sp
[0]->type
!= STACK_OBJ
)
8175 if (cfg
->generic_sharing_context
)
8176 context_used
= mono_class_check_context_used (klass
);
8178 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8179 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
8186 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8189 if (cfg
->compile_aot
)
8190 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8192 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8194 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8195 *sp
++ = mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
8198 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8199 MonoMethod
*mono_castclass
;
8200 MonoInst
*iargs
[1];
8203 mono_castclass
= mono_marshal_get_castclass (klass
);
8206 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
8207 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8208 CHECK_CFG_EXCEPTION
;
8209 g_assert (costs
> 0);
8212 cfg
->real_offset
+= 5;
8217 inline_costs
+= costs
;
8220 ins
= handle_castclass (cfg
, klass
, *sp
, context_used
);
8221 CHECK_CFG_EXCEPTION
;
8231 token
= read32 (ip
+ 1);
8232 klass
= mini_get_class (method
, token
, generic_context
);
8233 CHECK_TYPELOAD (klass
);
8234 if (sp
[0]->type
!= STACK_OBJ
)
8237 if (cfg
->generic_sharing_context
)
8238 context_used
= mono_class_check_context_used (klass
);
8240 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8241 MonoMethod
*mono_isinst
= mono_marshal_get_isinst_with_cache ();
8248 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8251 if (cfg
->compile_aot
)
8252 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8254 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8256 *sp
++ = mono_emit_method_call (cfg
, mono_isinst
, args
, NULL
);
8259 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8260 MonoMethod
*mono_isinst
;
8261 MonoInst
*iargs
[1];
8264 mono_isinst
= mono_marshal_get_isinst (klass
);
8267 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
8268 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8269 CHECK_CFG_EXCEPTION
;
8270 g_assert (costs
> 0);
8273 cfg
->real_offset
+= 5;
8278 inline_costs
+= costs
;
8281 ins
= handle_isinst (cfg
, klass
, *sp
, context_used
);
8282 CHECK_CFG_EXCEPTION
;
8289 case CEE_UNBOX_ANY
: {
8293 token
= read32 (ip
+ 1);
8294 klass
= mini_get_class (method
, token
, generic_context
);
8295 CHECK_TYPELOAD (klass
);
8297 mono_save_token_info (cfg
, image
, token
, klass
);
8299 if (cfg
->generic_sharing_context
)
8300 context_used
= mono_class_check_context_used (klass
);
8302 if (generic_class_is_reference_type (cfg
, klass
)) {
8303 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8304 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8305 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
8312 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8315 /*FIXME AOT support*/
8316 if (cfg
->compile_aot
)
8317 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8319 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8321 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8322 *sp
++ = mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
8325 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8326 MonoMethod
*mono_castclass
;
8327 MonoInst
*iargs
[1];
8330 mono_castclass
= mono_marshal_get_castclass (klass
);
8333 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
8334 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8335 CHECK_CFG_EXCEPTION
;
8336 g_assert (costs
> 0);
8339 cfg
->real_offset
+= 5;
8343 inline_costs
+= costs
;
8345 ins
= handle_castclass (cfg
, klass
, *sp
, context_used
);
8346 CHECK_CFG_EXCEPTION
;
8354 if (mono_class_is_nullable (klass
)) {
8355 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
8362 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
8368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
8381 token
= read32 (ip
+ 1);
8382 klass
= mini_get_class (method
, token
, generic_context
);
8383 CHECK_TYPELOAD (klass
);
8385 mono_save_token_info (cfg
, image
, token
, klass
);
8387 if (cfg
->generic_sharing_context
)
8388 context_used
= mono_class_check_context_used (klass
);
8390 if (generic_class_is_reference_type (cfg
, klass
)) {
8396 if (klass
== mono_defaults
.void_class
)
8398 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
8400 /* frequent check in generic code: box (struct), brtrue */
8402 // FIXME: LLVM can't handle the inconsistent bb linking
8403 if (!mono_class_is_nullable (klass
) &&
8404 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8405 (ip
[5] == CEE_BRTRUE
||
8406 ip
[5] == CEE_BRTRUE_S
||
8407 ip
[5] == CEE_BRFALSE
||
8408 ip
[5] == CEE_BRFALSE_S
)) {
8409 gboolean is_true
= ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
;
8411 MonoBasicBlock
*true_bb
, *false_bb
;
8415 if (cfg
->verbose_level
> 3) {
8416 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
8417 printf ("<box+brtrue opt>\n");
8425 target
= ip
+ 1 + (signed char)(*ip
);
8432 target
= ip
+ 4 + (gint
)(read32 (ip
));
8436 g_assert_not_reached ();
8440 * We need to link both bblocks, since it is needed for handling stack
8441 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8442 * Branching to only one of them would lead to inconsistencies, so
8443 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8445 GET_BBLOCK (cfg
, true_bb
, target
);
8446 GET_BBLOCK (cfg
, false_bb
, ip
);
8448 mono_link_bblock (cfg
, cfg
->cbb
, true_bb
);
8449 mono_link_bblock (cfg
, cfg
->cbb
, false_bb
);
8451 if (sp
!= stack_start
) {
8452 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8454 CHECK_UNVERIFIABLE (cfg
);
8457 if (COMPILE_LLVM (cfg
)) {
8458 dreg
= alloc_ireg (cfg
);
8459 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
8460 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, dreg
, is_true
? 0 : 1);
8462 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg
, OP_IBEQ
, true_bb
, false_bb
);
8464 /* The JIT can't eliminate the iconst+compare */
8465 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8466 ins
->inst_target_bb
= is_true
? true_bb
: false_bb
;
8467 MONO_ADD_INS (cfg
->cbb
, ins
);
8470 start_new_bblock
= 1;
8474 *sp
++ = handle_box (cfg
, val
, klass
, context_used
);
8476 CHECK_CFG_EXCEPTION
;
8485 token
= read32 (ip
+ 1);
8486 klass
= mini_get_class (method
, token
, generic_context
);
8487 CHECK_TYPELOAD (klass
);
8489 mono_save_token_info (cfg
, image
, token
, klass
);
8491 if (cfg
->generic_sharing_context
)
8492 context_used
= mono_class_check_context_used (klass
);
8494 if (mono_class_is_nullable (klass
)) {
8497 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
8498 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
8502 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
8512 MonoClassField
*field
;
8516 if (*ip
== CEE_STFLD
) {
8523 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
8525 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
8528 token
= read32 (ip
+ 1);
8529 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8530 field
= mono_method_get_wrapper_data (method
, token
);
8531 klass
= field
->parent
;
8534 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8538 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8539 FIELD_ACCESS_FAILURE
;
8540 mono_class_init (klass
);
8542 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8543 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8545 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8548 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
8549 if (*ip
== CEE_STFLD
) {
8550 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
8552 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8553 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
8554 MonoInst
*iargs
[5];
8557 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8558 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8559 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
8563 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8564 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
8565 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8566 CHECK_CFG_EXCEPTION
;
8567 g_assert (costs
> 0);
8569 cfg
->real_offset
+= 5;
8572 inline_costs
+= costs
;
8574 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
8579 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
8581 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
8582 if (sp
[0]->opcode
!= OP_LDADDR
)
8583 store
->flags
|= MONO_INST_FAULT
;
8585 if (cfg
->gen_write_barriers
&& mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
8586 /* insert call to write barrier */
8590 dreg
= alloc_ireg_mp (cfg
);
8591 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8592 emit_write_barrier (cfg
, ptr
, sp
[1], -1);
8595 store
->flags
|= ins_flag
;
8602 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8603 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
8604 MonoInst
*iargs
[4];
8607 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8608 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8609 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
8610 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8611 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
8612 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8613 CHECK_CFG_EXCEPTION
;
8615 g_assert (costs
> 0);
8617 cfg
->real_offset
+= 5;
8621 inline_costs
+= costs
;
8623 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
8627 if (sp
[0]->type
== STACK_VTYPE
) {
8630 /* Have to compute the address of the variable */
8632 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8634 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8636 g_assert (var
->klass
== klass
);
8638 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
8642 if (*ip
== CEE_LDFLDA
) {
8643 if (sp
[0]->type
== STACK_OBJ
) {
8644 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, sp
[0]->dreg
, 0);
8645 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "NullReferenceException");
8648 dreg
= alloc_ireg_mp (cfg
);
8650 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8651 ins
->klass
= mono_class_from_mono_type (field
->type
);
8652 ins
->type
= STACK_MP
;
8657 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
8659 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
8660 load
->flags
|= ins_flag
;
8661 if (sp
[0]->opcode
!= OP_LDADDR
)
8662 load
->flags
|= MONO_INST_FAULT
;
8673 MonoClassField
*field
;
8674 gpointer addr
= NULL
;
8675 gboolean is_special_static
;
8679 token
= read32 (ip
+ 1);
8681 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8682 field
= mono_method_get_wrapper_data (method
, token
);
8683 klass
= field
->parent
;
8686 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8689 mono_class_init (klass
);
8690 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8691 FIELD_ACCESS_FAILURE
;
8693 /* if the class is Critical then transparent code cannot access it's fields */
8694 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
8695 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
8698 * We can only support shared generic static
8699 * field access on architectures where the
8700 * trampoline code has been extended to handle
8701 * the generic class init.
8703 #ifndef MONO_ARCH_VTABLE_REG
8704 GENERIC_SHARING_FAILURE (*ip
);
8707 if (cfg
->generic_sharing_context
)
8708 context_used
= mono_class_check_context_used (klass
);
8710 ftype
= mono_field_get_type (field
);
8712 g_assert (!(ftype
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
8714 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8715 * to be called here.
8717 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
8718 mono_class_vtable (cfg
->domain
, klass
);
8719 CHECK_TYPELOAD (klass
);
8721 mono_domain_lock (cfg
->domain
);
8722 if (cfg
->domain
->special_static_fields
)
8723 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
8724 mono_domain_unlock (cfg
->domain
);
8726 is_special_static
= mono_class_field_is_special_static (field
);
8728 /* Generate IR to compute the field address */
8729 if (is_special_static
&& ((gsize
)addr
& 0x80000000) == 0 && mono_get_thread_intrinsic (cfg
) && !(cfg
->opt
& MONO_OPT_SHARED
) && !context_used
) {
8731 * Fast access to TLS data
8732 * Inline version of get_thread_static_data () in
8736 int idx
, static_data_reg
, array_reg
, dreg
;
8737 MonoInst
*thread_ins
;
8739 // offset &= 0x7fffffff;
8740 // idx = (offset >> 24) - 1;
8741 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8743 thread_ins
= mono_get_thread_intrinsic (cfg
);
8744 MONO_ADD_INS (cfg
->cbb
, thread_ins
);
8745 static_data_reg
= alloc_ireg (cfg
);
8746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, static_data_reg
, thread_ins
->dreg
, G_STRUCT_OFFSET (MonoInternalThread
, static_data
));
8748 if (cfg
->compile_aot
) {
8749 int offset_reg
, offset2_reg
, idx_reg
;
8751 /* For TLS variables, this will return the TLS offset */
8752 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8753 offset_reg
= ins
->dreg
;
8754 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset_reg
, offset_reg
, 0x7fffffff);
8755 idx_reg
= alloc_ireg (cfg
);
8756 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_IMM
, idx_reg
, offset_reg
, 24);
8757 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISUB_IMM
, idx_reg
, idx_reg
, 1);
8758 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHL_IMM
, idx_reg
, idx_reg
, sizeof (gpointer
) == 8 ? 3 : 2);
8759 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, static_data_reg
, static_data_reg
, idx_reg
);
8760 array_reg
= alloc_ireg (cfg
);
8761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, 0);
8762 offset2_reg
= alloc_ireg (cfg
);
8763 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset2_reg
, offset_reg
, 0xffffff);
8764 dreg
= alloc_ireg (cfg
);
8765 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, array_reg
, offset2_reg
);
8767 offset
= (gsize
)addr
& 0x7fffffff;
8768 idx
= (offset
>> 24) - 1;
8770 array_reg
= alloc_ireg (cfg
);
8771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, idx
* sizeof (gpointer
));
8772 dreg
= alloc_ireg (cfg
);
8773 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_ADD_IMM
, dreg
, array_reg
, (offset
& 0xffffff));
8775 } else if ((cfg
->opt
& MONO_OPT_SHARED
) ||
8776 (cfg
->compile_aot
&& is_special_static
) ||
8777 (context_used
&& is_special_static
)) {
8778 MonoInst
*iargs
[2];
8780 g_assert (field
->parent
);
8781 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8783 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
8784 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
8786 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8788 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8789 } else if (context_used
) {
8790 MonoInst
*static_data
;
8793 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8794 method->klass->name_space, method->klass->name, method->name,
8795 depth, field->offset);
8798 if (mono_class_needs_cctor_run (klass
, method
))
8799 emit_generic_class_init (cfg
, klass
);
8802 * The pointer we're computing here is
8804 * super_info.static_data + field->offset
8806 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
8807 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
8809 if (field
->offset
== 0) {
8812 int addr_reg
= mono_alloc_preg (cfg
);
8813 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
8815 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
8816 MonoInst
*iargs
[2];
8818 g_assert (field
->parent
);
8819 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8820 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8821 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8823 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
8825 CHECK_TYPELOAD (klass
);
8827 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
)) {
8828 if (!(g_slist_find (class_inits
, vtable
))) {
8829 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8830 if (cfg
->verbose_level
> 2)
8831 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
8832 class_inits
= g_slist_prepend (class_inits
, vtable
);
8835 if (cfg
->run_cctors
) {
8837 /* This makes so that inline cannot trigger */
8838 /* .cctors: too many apps depend on them */
8839 /* running with a specific order... */
8840 if (! vtable
->initialized
)
8842 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
8844 set_exception_object (cfg
, ex
);
8845 goto exception_exit
;
8849 addr
= (char*)vtable
->data
+ field
->offset
;
8851 if (cfg
->compile_aot
)
8852 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8854 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8856 MonoInst
*iargs
[1];
8857 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8858 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8862 /* Generate IR to do the actual load/store operation */
8864 if (*ip
== CEE_LDSFLDA
) {
8865 ins
->klass
= mono_class_from_mono_type (ftype
);
8866 ins
->type
= STACK_PTR
;
8868 } else if (*ip
== CEE_STSFLD
) {
8873 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, ftype
, ins
->dreg
, 0, sp
[0]->dreg
);
8874 store
->flags
|= ins_flag
;
8876 gboolean is_const
= FALSE
;
8877 MonoVTable
*vtable
= NULL
;
8879 if (!context_used
) {
8880 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8881 CHECK_TYPELOAD (klass
);
8883 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8884 vtable
->initialized
&& (ftype
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8885 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8886 int ro_type
= ftype
->type
;
8887 if (ro_type
== MONO_TYPE_VALUETYPE
&& ftype
->data
.klass
->enumtype
) {
8888 ro_type
= mono_class_enum_basetype (ftype
->data
.klass
)->type
;
8890 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8893 case MONO_TYPE_BOOLEAN
:
8895 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8899 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8902 case MONO_TYPE_CHAR
:
8904 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8908 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8913 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8917 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8923 case MONO_TYPE_FNPTR
:
8924 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8925 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8928 case MONO_TYPE_STRING
:
8929 case MONO_TYPE_OBJECT
:
8930 case MONO_TYPE_CLASS
:
8931 case MONO_TYPE_SZARRAY
:
8932 case MONO_TYPE_ARRAY
:
8933 if (!mono_gc_is_moving ()) {
8934 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8935 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8943 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8948 case MONO_TYPE_VALUETYPE
:
8958 CHECK_STACK_OVF (1);
8960 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8961 load
->flags
|= ins_flag
;
8974 token
= read32 (ip
+ 1);
8975 klass
= mini_get_class (method
, token
, generic_context
);
8976 CHECK_TYPELOAD (klass
);
8977 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8978 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8979 if (cfg
->gen_write_barriers
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&&
8980 generic_class_is_reference_type (cfg
, klass
)) {
8981 /* insert call to write barrier */
8982 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
8994 const char *data_ptr
;
8996 guint32 field_token
;
9002 token
= read32 (ip
+ 1);
9004 klass
= mini_get_class (method
, token
, generic_context
);
9005 CHECK_TYPELOAD (klass
);
9007 if (cfg
->generic_sharing_context
)
9008 context_used
= mono_class_check_context_used (klass
);
9010 if (sp
[0]->type
== STACK_I8
|| (SIZEOF_VOID_P
== 8 && sp
[0]->type
== STACK_PTR
)) {
9011 MONO_INST_NEW (cfg
, ins
, OP_LCONV_TO_I4
);
9012 ins
->sreg1
= sp
[0]->dreg
;
9013 ins
->type
= STACK_I4
;
9014 ins
->dreg
= alloc_ireg (cfg
);
9015 MONO_ADD_INS (cfg
->cbb
, ins
);
9016 *sp
= mono_decompose_opcode (cfg
, ins
);
9021 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
9022 /* FIXME: we cannot get a managed
9023 allocator because we can't get the
9024 open generic class's vtable. We
9025 have the same problem in
9026 handle_alloc(). This
9027 needs to be solved so that we can
9028 have managed allocs of shared
9031 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9032 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9034 MonoMethod
*managed_alloc
= NULL
;
9036 /* FIXME: Decompose later to help abcrem */
9039 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
9040 array_class
, MONO_RGCTX_INFO_VTABLE
);
9045 ins
= mono_emit_method_call (cfg
, managed_alloc
, args
, NULL
);
9047 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
9049 if (cfg
->opt
& MONO_OPT_SHARED
) {
9050 /* Decompose now to avoid problems with references to the domainvar */
9051 MonoInst
*iargs
[3];
9053 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9054 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9057 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
9059 /* Decompose later since it is needed by abcrem */
9060 MonoClass
*array_type
= mono_array_class_get (klass
, 1);
9061 mono_class_vtable (cfg
->domain
, array_type
);
9062 CHECK_TYPELOAD (array_type
);
9064 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
9065 ins
->dreg
= alloc_ireg_ref (cfg
);
9066 ins
->sreg1
= sp
[0]->dreg
;
9067 ins
->inst_newa_class
= klass
;
9068 ins
->type
= STACK_OBJ
;
9070 MONO_ADD_INS (cfg
->cbb
, ins
);
9071 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
9072 cfg
->cbb
->has_array_access
= TRUE
;
9074 /* Needed so mono_emit_load_get_addr () gets called */
9075 mono_get_got_var (cfg
);
9085 * we inline/optimize the initialization sequence if possible.
9086 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9087 * for small sizes open code the memcpy
9088 * ensure the rva field is big enough
9090 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
9091 MonoMethod
*memcpy_method
= get_memcpy_method ();
9092 MonoInst
*iargs
[3];
9093 int add_reg
= alloc_ireg_mp (cfg
);
9095 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
9096 if (cfg
->compile_aot
) {
9097 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
9099 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
9101 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
9102 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9111 if (sp
[0]->type
!= STACK_OBJ
)
9114 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
9115 ins
->dreg
= alloc_preg (cfg
);
9116 ins
->sreg1
= sp
[0]->dreg
;
9117 ins
->type
= STACK_I4
;
9118 /* This flag will be inherited by the decomposition */
9119 ins
->flags
|= MONO_INST_FAULT
;
9120 MONO_ADD_INS (cfg
->cbb
, ins
);
9121 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
9122 cfg
->cbb
->has_array_access
= TRUE
;
9130 if (sp
[0]->type
!= STACK_OBJ
)
9133 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9135 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
9136 CHECK_TYPELOAD (klass
);
9137 /* we need to make sure that this array is exactly the type it needs
9138 * to be for correctness. the wrappers are lax with their usage
9139 * so we need to ignore them here
9141 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
) {
9142 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
9143 mini_emit_check_array_type (cfg
, sp
[0], array_class
);
9144 CHECK_TYPELOAD (array_class
);
9148 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9163 case CEE_LDELEM_REF
: {
9169 if (*ip
== CEE_LDELEM
) {
9171 token
= read32 (ip
+ 1);
9172 klass
= mini_get_class (method
, token
, generic_context
);
9173 CHECK_TYPELOAD (klass
);
9174 mono_class_init (klass
);
9177 klass
= array_access_to_klass (*ip
);
9179 if (sp
[0]->type
!= STACK_OBJ
)
9182 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9184 if (sp
[1]->opcode
== OP_ICONST
) {
9185 int array_reg
= sp
[0]->dreg
;
9186 int index_reg
= sp
[1]->dreg
;
9187 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
9189 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
9190 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
9192 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
9196 if (*ip
== CEE_LDELEM
)
9209 case CEE_STELEM_REF
:
9216 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9218 if (*ip
== CEE_STELEM
) {
9220 token
= read32 (ip
+ 1);
9221 klass
= mini_get_class (method
, token
, generic_context
);
9222 CHECK_TYPELOAD (klass
);
9223 mono_class_init (klass
);
9226 klass
= array_access_to_klass (*ip
);
9228 if (sp
[0]->type
!= STACK_OBJ
)
9231 /* storing a NULL doesn't need any of the complex checks in stelemref */
9232 if (generic_class_is_reference_type (cfg
, klass
) &&
9233 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
9234 MonoClass
*obj_array
= mono_array_class_get_cached (mono_defaults
.object_class
, 1);
9235 MonoMethod
*helper
= mono_marshal_get_virtual_stelemref (obj_array
);
9236 MonoInst
*iargs
[3];
9239 mono_class_setup_vtable (obj_array
);
9240 g_assert (helper
->slot
);
9242 if (sp
[0]->type
!= STACK_OBJ
)
9244 if (sp
[2]->type
!= STACK_OBJ
)
9251 mono_emit_method_call (cfg
, helper
, iargs
, sp
[0]);
9253 if (sp
[1]->opcode
== OP_ICONST
) {
9254 int array_reg
= sp
[0]->dreg
;
9255 int index_reg
= sp
[1]->dreg
;
9256 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
9258 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
9259 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
9261 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
9266 if (*ip
== CEE_STELEM
)
9273 case CEE_CKFINITE
: {
9277 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
9278 ins
->sreg1
= sp
[0]->dreg
;
9279 ins
->dreg
= alloc_freg (cfg
);
9280 ins
->type
= STACK_R8
;
9281 MONO_ADD_INS (bblock
, ins
);
9283 *sp
++ = mono_decompose_opcode (cfg
, ins
);
9288 case CEE_REFANYVAL
: {
9289 MonoInst
*src_var
, *src
;
9291 int klass_reg
= alloc_preg (cfg
);
9292 int dreg
= alloc_preg (cfg
);
9295 MONO_INST_NEW (cfg
, ins
, *ip
);
9298 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
9299 CHECK_TYPELOAD (klass
);
9300 mono_class_init (klass
);
9302 if (cfg
->generic_sharing_context
)
9303 context_used
= mono_class_check_context_used (klass
);
9306 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9308 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9309 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9310 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
9313 MonoInst
*klass_ins
;
9315 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
9316 klass
, MONO_RGCTX_INFO_KLASS
);
9319 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
9320 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
9322 mini_emit_class_check (cfg
, klass_reg
, klass
);
9324 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
9325 ins
->type
= STACK_MP
;
9330 case CEE_MKREFANY
: {
9331 MonoInst
*loc
, *addr
;
9334 MONO_INST_NEW (cfg
, ins
, *ip
);
9337 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
9338 CHECK_TYPELOAD (klass
);
9339 mono_class_init (klass
);
9341 if (cfg
->generic_sharing_context
)
9342 context_used
= mono_class_check_context_used (klass
);
9344 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
9345 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
9348 MonoInst
*const_ins
;
9349 int type_reg
= alloc_preg (cfg
);
9351 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
9352 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
9353 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
9354 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
9355 } else if (cfg
->compile_aot
) {
9356 int const_reg
= alloc_preg (cfg
);
9357 int type_reg
= alloc_preg (cfg
);
9359 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
9360 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
9361 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
9362 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
9364 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
9365 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
9367 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
9369 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
9370 ins
->type
= STACK_VTYPE
;
9371 ins
->klass
= mono_defaults
.typed_reference_class
;
9378 MonoClass
*handle_class
;
9380 CHECK_STACK_OVF (1);
9383 n
= read32 (ip
+ 1);
9385 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
9386 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
9387 handle
= mono_method_get_wrapper_data (method
, n
);
9388 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
9389 if (handle_class
== mono_defaults
.typehandle_class
)
9390 handle
= &((MonoClass
*)handle
)->byval_arg
;
9393 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
9397 mono_class_init (handle_class
);
9398 if (cfg
->generic_sharing_context
) {
9399 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
9400 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
9401 /* This case handles ldtoken
9402 of an open type, like for
9405 } else if (handle_class
== mono_defaults
.typehandle_class
) {
9406 /* If we get a MONO_TYPE_CLASS
9407 then we need to provide the
9409 instantiation of it. */
9410 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
9413 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
9414 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
9415 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
9416 else if (handle_class
== mono_defaults
.methodhandle_class
)
9417 context_used
= mono_method_check_context_used (handle
);
9419 g_assert_not_reached ();
9422 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
9423 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
9424 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
9425 MonoInst
*addr
, *vtvar
, *iargs
[3];
9426 int method_context_used
;
9428 if (cfg
->generic_sharing_context
)
9429 method_context_used
= mono_method_check_context_used (method
);
9431 method_context_used
= 0;
9433 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
9435 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
9436 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
9437 if (method_context_used
) {
9438 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
9439 method
, MONO_RGCTX_INFO_METHOD
);
9440 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
9442 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
9443 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
9445 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9447 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
9449 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9451 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
9452 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
9453 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
9454 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
9455 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
9456 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
9458 mono_class_init (tclass
);
9460 ins
= emit_get_rgctx_klass (cfg
, context_used
,
9461 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
9462 } else if (cfg
->compile_aot
) {
9463 if (method
->wrapper_type
) {
9464 if (mono_class_get (tclass
->image
, tclass
->type_token
) == tclass
&& !generic_context
) {
9465 /* Special case for static synchronized wrappers */
9466 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, tclass
->image
, tclass
->type_token
, generic_context
);
9468 /* FIXME: n is not a normal token */
9469 cfg
->disable_aot
= TRUE
;
9470 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
9473 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
9476 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
9478 ins
->type
= STACK_OBJ
;
9479 ins
->klass
= cmethod
->klass
;
9482 MonoInst
*addr
, *vtvar
;
9484 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
9487 if (handle_class
== mono_defaults
.typehandle_class
) {
9488 ins
= emit_get_rgctx_klass (cfg
, context_used
,
9489 mono_class_from_mono_type (handle
),
9490 MONO_RGCTX_INFO_TYPE
);
9491 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
9492 ins
= emit_get_rgctx_method (cfg
, context_used
,
9493 handle
, MONO_RGCTX_INFO_METHOD
);
9494 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
9495 ins
= emit_get_rgctx_field (cfg
, context_used
,
9496 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
9498 g_assert_not_reached ();
9500 } else if (cfg
->compile_aot
) {
9501 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
9503 EMIT_NEW_PCONST (cfg
, ins
, handle
);
9505 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9506 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
9507 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9517 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
9519 ins
->sreg1
= sp
[0]->dreg
;
9521 bblock
->out_of_line
= TRUE
;
9522 MONO_ADD_INS (bblock
, ins
);
9523 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
9524 MONO_ADD_INS (bblock
, ins
);
9527 link_bblock (cfg
, bblock
, end_bblock
);
9528 start_new_bblock
= 1;
9530 case CEE_ENDFINALLY
:
9531 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
9532 MONO_ADD_INS (bblock
, ins
);
9534 start_new_bblock
= 1;
9537 * Control will leave the method so empty the stack, otherwise
9538 * the next basic block will start with a nonempty stack.
9540 while (sp
!= stack_start
) {
9548 if (*ip
== CEE_LEAVE
) {
9550 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
9553 target
= ip
+ 2 + (signed char)(ip
[1]);
9556 /* empty the stack */
9557 while (sp
!= stack_start
) {
9562 * If this leave statement is in a catch block, check for a
9563 * pending exception, and rethrow it if necessary.
9564 * We avoid doing this in runtime invoke wrappers, since those are called
9565 * by native code which excepts the wrapper to catch all exceptions.
9567 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9568 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9571 * Use <= in the final comparison to handle clauses with multiple
9572 * leave statements, like in bug #78024.
9573 * The ordering of the exception clauses guarantees that we find the
9576 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
) && method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
) {
9578 MonoBasicBlock
*dont_throw
;
9583 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9586 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
9588 NEW_BBLOCK (cfg
, dont_throw
);
9591 * Currently, we always rethrow the abort exception, despite the
9592 * fact that this is not correct. See thread6.cs for an example.
9593 * But propagating the abort exception is more important than
9594 * getting the sematics right.
9596 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
9597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
9598 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
9600 MONO_START_BB (cfg
, dont_throw
);
9605 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
9607 MonoExceptionClause
*clause
;
9609 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
9611 tblock
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
9613 link_bblock (cfg
, bblock
, tblock
);
9614 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
9615 ins
->inst_target_bb
= tblock
;
9616 ins
->inst_eh_block
= clause
;
9617 MONO_ADD_INS (bblock
, ins
);
9618 bblock
->has_call_handler
= 1;
9619 if (COMPILE_LLVM (cfg
)) {
9620 MonoBasicBlock
*target_bb
;
9623 * Link the finally bblock with the target, since it will
9624 * conceptually branch there.
9625 * FIXME: Have to link the bblock containing the endfinally.
9627 GET_BBLOCK (cfg
, target_bb
, target
);
9628 link_bblock (cfg
, tblock
, target_bb
);
9631 g_list_free (handlers
);
9634 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9635 MONO_ADD_INS (bblock
, ins
);
9636 GET_BBLOCK (cfg
, tblock
, target
);
9637 link_bblock (cfg
, bblock
, tblock
);
9638 ins
->inst_target_bb
= tblock
;
9639 start_new_bblock
= 1;
9641 if (*ip
== CEE_LEAVE
)
9650 * Mono specific opcodes
9652 case MONO_CUSTOM_PREFIX
: {
9654 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
9658 case CEE_MONO_ICALL
: {
9660 MonoJitICallInfo
*info
;
9662 token
= read32 (ip
+ 2);
9663 func
= mono_method_get_wrapper_data (method
, token
);
9664 info
= mono_find_jit_icall_by_addr (func
);
9667 CHECK_STACK (info
->sig
->param_count
);
9668 sp
-= info
->sig
->param_count
;
9670 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
9671 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
9675 inline_costs
+= 10 * num_calls
++;
9679 case CEE_MONO_LDPTR
: {
9682 CHECK_STACK_OVF (1);
9684 token
= read32 (ip
+ 2);
9686 ptr
= mono_method_get_wrapper_data (method
, token
);
9687 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
9688 MonoJitICallInfo
*callinfo
;
9689 const char *icall_name
;
9691 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
9692 g_assert (icall_name
);
9693 callinfo
= mono_find_jit_icall_by_name (icall_name
);
9694 g_assert (callinfo
);
9696 if (ptr
== callinfo
->func
) {
9697 /* Will be transformed into an AOTCONST later */
9698 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9704 /* FIXME: Generalize this */
9705 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
9706 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
9711 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9714 inline_costs
+= 10 * num_calls
++;
9715 /* Can't embed random pointers into AOT code */
9716 cfg
->disable_aot
= 1;
9719 case CEE_MONO_ICALL_ADDR
: {
9720 MonoMethod
*cmethod
;
9723 CHECK_STACK_OVF (1);
9725 token
= read32 (ip
+ 2);
9727 cmethod
= mono_method_get_wrapper_data (method
, token
);
9729 if (cfg
->compile_aot
) {
9730 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
9732 ptr
= mono_lookup_internal_call (cmethod
);
9734 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9740 case CEE_MONO_VTADDR
: {
9741 MonoInst
*src_var
, *src
;
9747 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9748 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
9753 case CEE_MONO_NEWOBJ
: {
9754 MonoInst
*iargs
[2];
9756 CHECK_STACK_OVF (1);
9758 token
= read32 (ip
+ 2);
9759 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9760 mono_class_init (klass
);
9761 NEW_DOMAINCONST (cfg
, iargs
[0]);
9762 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
9763 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9764 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
9765 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
9767 inline_costs
+= 10 * num_calls
++;
9770 case CEE_MONO_OBJADDR
:
9773 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
9774 ins
->dreg
= alloc_ireg_mp (cfg
);
9775 ins
->sreg1
= sp
[0]->dreg
;
9776 ins
->type
= STACK_MP
;
9777 MONO_ADD_INS (cfg
->cbb
, ins
);
9781 case CEE_MONO_LDNATIVEOBJ
:
9783 * Similar to LDOBJ, but instead load the unmanaged
9784 * representation of the vtype to the stack.
9789 token
= read32 (ip
+ 2);
9790 klass
= mono_method_get_wrapper_data (method
, token
);
9791 g_assert (klass
->valuetype
);
9792 mono_class_init (klass
);
9795 MonoInst
*src
, *dest
, *temp
;
9798 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
9799 temp
->backend
.is_pinvoke
= 1;
9800 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
9801 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
9803 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
9804 dest
->type
= STACK_VTYPE
;
9805 dest
->klass
= klass
;
9811 case CEE_MONO_RETOBJ
: {
9813 * Same as RET, but return the native representation of a vtype
9816 g_assert (cfg
->ret
);
9817 g_assert (mono_method_signature (method
)->pinvoke
);
9822 token
= read32 (ip
+ 2);
9823 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9825 if (!cfg
->vret_addr
) {
9826 g_assert (cfg
->ret_var_is_local
);
9828 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
9830 EMIT_NEW_RETLOADA (cfg
, ins
);
9832 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
9834 if (sp
!= stack_start
)
9837 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9838 ins
->inst_target_bb
= end_bblock
;
9839 MONO_ADD_INS (bblock
, ins
);
9840 link_bblock (cfg
, bblock
, end_bblock
);
9841 start_new_bblock
= 1;
9845 case CEE_MONO_CISINST
:
9846 case CEE_MONO_CCASTCLASS
: {
9851 token
= read32 (ip
+ 2);
9852 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9853 if (ip
[1] == CEE_MONO_CISINST
)
9854 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
9856 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
9862 case CEE_MONO_SAVE_LMF
:
9863 case CEE_MONO_RESTORE_LMF
:
9864 #ifdef MONO_ARCH_HAVE_LMF_OPS
9865 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
9866 MONO_ADD_INS (bblock
, ins
);
9867 cfg
->need_lmf_area
= TRUE
;
9871 case CEE_MONO_CLASSCONST
:
9872 CHECK_STACK_OVF (1);
9874 token
= read32 (ip
+ 2);
9875 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
9878 inline_costs
+= 10 * num_calls
++;
9880 case CEE_MONO_NOT_TAKEN
:
9881 bblock
->out_of_line
= TRUE
;
9885 CHECK_STACK_OVF (1);
9887 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
9888 ins
->dreg
= alloc_preg (cfg
);
9889 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
9890 ins
->type
= STACK_PTR
;
9891 MONO_ADD_INS (bblock
, ins
);
9895 case CEE_MONO_DYN_CALL
: {
9898 /* It would be easier to call a trampoline, but that would put an
9899 * extra frame on the stack, confusing exception handling. So
9900 * implement it inline using an opcode for now.
9903 if (!cfg
->dyn_call_var
) {
9904 cfg
->dyn_call_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
9905 /* prevent it from being register allocated */
9906 cfg
->dyn_call_var
->flags
|= MONO_INST_INDIRECT
;
9909 /* Has to use a call inst since it local regalloc expects it */
9910 MONO_INST_NEW_CALL (cfg
, call
, OP_DYN_CALL
);
9911 ins
= (MonoInst
*)call
;
9913 ins
->sreg1
= sp
[0]->dreg
;
9914 ins
->sreg2
= sp
[1]->dreg
;
9915 MONO_ADD_INS (bblock
, ins
);
9917 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9918 cfg
->param_area
= MAX (cfg
->param_area
, MONO_ARCH_DYN_CALL_PARAM_AREA
);
9922 inline_costs
+= 10 * num_calls
++;
9927 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
9937 /* somewhat similar to LDTOKEN */
9938 MonoInst
*addr
, *vtvar
;
9939 CHECK_STACK_OVF (1);
9940 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
9942 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9943 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
9945 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9946 ins
->type
= STACK_VTYPE
;
9947 ins
->klass
= mono_defaults
.argumenthandle_class
;
9960 * The following transforms:
9961 * CEE_CEQ into OP_CEQ
9962 * CEE_CGT into OP_CGT
9963 * CEE_CGT_UN into OP_CGT_UN
9964 * CEE_CLT into OP_CLT
9965 * CEE_CLT_UN into OP_CLT_UN
9967 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9969 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9971 cmp
->sreg1
= sp
[0]->dreg
;
9972 cmp
->sreg2
= sp
[1]->dreg
;
9973 type_from_op (cmp
, sp
[0], sp
[1]);
9975 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9976 cmp
->opcode
= OP_LCOMPARE
;
9977 else if (sp
[0]->type
== STACK_R8
)
9978 cmp
->opcode
= OP_FCOMPARE
;
9980 cmp
->opcode
= OP_ICOMPARE
;
9981 MONO_ADD_INS (bblock
, cmp
);
9982 ins
->type
= STACK_I4
;
9983 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9984 type_from_op (ins
, sp
[0], sp
[1]);
9986 if (cmp
->opcode
== OP_FCOMPARE
) {
9988 * The backends expect the fceq opcodes to do the
9991 cmp
->opcode
= OP_NOP
;
9992 ins
->sreg1
= cmp
->sreg1
;
9993 ins
->sreg2
= cmp
->sreg2
;
9995 MONO_ADD_INS (bblock
, ins
);
10001 MonoInst
*argconst
;
10002 MonoMethod
*cil_method
;
10003 gboolean needs_static_rgctx_invoke
;
10005 CHECK_STACK_OVF (1);
10007 n
= read32 (ip
+ 2);
10008 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10009 if (!cmethod
|| mono_loader_get_last_error ())
10011 mono_class_init (cmethod
->klass
);
10013 mono_save_token_info (cfg
, image
, n
, cmethod
);
10015 if (cfg
->generic_sharing_context
)
10016 context_used
= mono_method_check_context_used (cmethod
);
10018 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
10020 cil_method
= cmethod
;
10021 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
10022 METHOD_ACCESS_FAILURE
;
10024 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
10025 if (check_linkdemand (cfg
, method
, cmethod
))
10027 CHECK_CFG_EXCEPTION
;
10028 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
10029 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
10033 * Optimize the common case of ldftn+delegate creation
10035 if ((sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
10036 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
10037 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
10038 MonoInst
*target_ins
;
10039 MonoMethod
*invoke
;
10040 int invoke_context_used
= 0;
10042 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
10043 if (!invoke
|| !mono_method_signature (invoke
))
10046 if (cfg
->generic_sharing_context
)
10047 invoke_context_used
= mono_method_check_context_used (invoke
);
10049 target_ins
= sp
[-1];
10051 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
)) {
10052 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10053 if (mono_method_signature (invoke
)->param_count
== mono_method_signature (cmethod
)->param_count
) {
10054 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, target_ins
->dreg
, 0);
10055 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "ArgumentException");
10059 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10060 /* FIXME: SGEN support */
10061 if (invoke_context_used
== 0) {
10063 if (cfg
->verbose_level
> 3)
10064 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
10066 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
, context_used
);
10067 CHECK_CFG_EXCEPTION
;
10076 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
10077 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
10081 inline_costs
+= 10 * num_calls
++;
10084 case CEE_LDVIRTFTN
: {
10085 MonoInst
*args
[2];
10089 n
= read32 (ip
+ 2);
10090 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10091 if (!cmethod
|| mono_loader_get_last_error ())
10093 mono_class_init (cmethod
->klass
);
10095 if (cfg
->generic_sharing_context
)
10096 context_used
= mono_method_check_context_used (cmethod
);
10098 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
10099 if (check_linkdemand (cfg
, method
, cmethod
))
10101 CHECK_CFG_EXCEPTION
;
10102 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
10103 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
10109 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
10110 cmethod
, MONO_RGCTX_INFO_METHOD
);
10113 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
10115 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
10118 inline_costs
+= 10 * num_calls
++;
10122 CHECK_STACK_OVF (1);
10124 n
= read16 (ip
+ 2);
10126 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
10131 CHECK_STACK_OVF (1);
10133 n
= read16 (ip
+ 2);
10135 NEW_ARGLOADA (cfg
, ins
, n
);
10136 MONO_ADD_INS (cfg
->cbb
, ins
);
10144 n
= read16 (ip
+ 2);
10146 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
10148 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
10152 CHECK_STACK_OVF (1);
10154 n
= read16 (ip
+ 2);
10156 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
10161 unsigned char *tmp_ip
;
10162 CHECK_STACK_OVF (1);
10164 n
= read16 (ip
+ 2);
10167 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
10173 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
10182 n
= read16 (ip
+ 2);
10184 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
10186 emit_stloc_ir (cfg
, sp
, header
, n
);
10193 if (sp
!= stack_start
)
10195 if (cfg
->method
!= method
)
10197 * Inlining this into a loop in a parent could lead to
10198 * stack overflows which is different behavior than the
10199 * non-inlined case, thus disable inlining in this case.
10201 goto inline_failure
;
10203 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
10204 ins
->dreg
= alloc_preg (cfg
);
10205 ins
->sreg1
= sp
[0]->dreg
;
10206 ins
->type
= STACK_PTR
;
10207 MONO_ADD_INS (cfg
->cbb
, ins
);
10209 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10211 ins
->flags
|= MONO_INST_INIT
;
10216 case CEE_ENDFILTER
: {
10217 MonoExceptionClause
*clause
, *nearest
;
10218 int cc
, nearest_num
;
10222 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
10224 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
10225 ins
->sreg1
= (*sp
)->dreg
;
10226 MONO_ADD_INS (bblock
, ins
);
10227 start_new_bblock
= 1;
10232 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
10233 clause
= &header
->clauses
[cc
];
10234 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
10235 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
10236 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
10241 g_assert (nearest
);
10242 if ((ip
- header
->code
) != nearest
->handler_offset
)
10247 case CEE_UNALIGNED_
:
10248 ins_flag
|= MONO_INST_UNALIGNED
;
10249 /* FIXME: record alignment? we can assume 1 for now */
10253 case CEE_VOLATILE_
:
10254 ins_flag
|= MONO_INST_VOLATILE
;
10258 ins_flag
|= MONO_INST_TAILCALL
;
10259 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
10260 /* Can't inline tail calls at this time */
10261 inline_costs
+= 100000;
10268 token
= read32 (ip
+ 2);
10269 klass
= mini_get_class (method
, token
, generic_context
);
10270 CHECK_TYPELOAD (klass
);
10271 if (generic_class_is_reference_type (cfg
, klass
))
10272 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
10274 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
10278 case CEE_CONSTRAINED_
:
10280 token
= read32 (ip
+ 2);
10281 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
10282 constrained_call
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10284 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
10285 CHECK_TYPELOAD (constrained_call
);
10289 case CEE_INITBLK
: {
10290 MonoInst
*iargs
[3];
10294 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
10295 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
10296 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
10297 /* emit_memset only works when val == 0 */
10298 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
10300 iargs
[0] = sp
[0];
10301 iargs
[1] = sp
[1];
10302 iargs
[2] = sp
[2];
10303 if (ip
[1] == CEE_CPBLK
) {
10304 MonoMethod
*memcpy_method
= get_memcpy_method ();
10305 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
10307 MonoMethod
*memset_method
= get_memset_method ();
10308 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
10318 ins_flag
|= MONO_INST_NOTYPECHECK
;
10320 ins_flag
|= MONO_INST_NORANGECHECK
;
10321 /* we ignore the no-nullcheck for now since we
10322 * really do it explicitly only when doing callvirt->call
10326 case CEE_RETHROW
: {
10328 int handler_offset
= -1;
10330 for (i
= 0; i
< header
->num_clauses
; ++i
) {
10331 MonoExceptionClause
*clause
= &header
->clauses
[i
];
10332 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
10333 handler_offset
= clause
->handler_offset
;
10338 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
10340 g_assert (handler_offset
!= -1);
10342 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
10343 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
10344 ins
->sreg1
= load
->dreg
;
10345 MONO_ADD_INS (bblock
, ins
);
10347 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
10348 MONO_ADD_INS (bblock
, ins
);
10351 link_bblock (cfg
, bblock
, end_bblock
);
10352 start_new_bblock
= 1;
10360 CHECK_STACK_OVF (1);
10362 token
= read32 (ip
+ 2);
10363 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
&& !method
->klass
->image
->dynamic
&& !generic_context
) {
10364 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
10365 token
= mono_type_size (type
, &ialign
);
10367 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
10368 CHECK_TYPELOAD (klass
);
10369 mono_class_init (klass
);
10370 token
= mono_class_value_size (klass
, &align
);
10372 EMIT_NEW_ICONST (cfg
, ins
, token
);
10377 case CEE_REFANYTYPE
: {
10378 MonoInst
*src_var
, *src
;
10384 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
10386 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
10387 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
10388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
10393 case CEE_READONLY_
:
10406 g_warning ("opcode 0xfe 0x%02x not handled", ip
[1]);
10416 g_warning ("opcode 0x%02x not handled", *ip
);
10420 if (start_new_bblock
!= 1)
10423 bblock
->cil_length
= ip
- bblock
->cil_code
;
10424 if (bblock
->next_bb
) {
10425 /* This could already be set because of inlining, #693905 */
10426 MonoBasicBlock
*bb
= bblock
;
10428 while (bb
->next_bb
)
10430 bb
->next_bb
= end_bblock
;
10432 bblock
->next_bb
= end_bblock
;
10435 if (cfg
->method
== method
&& cfg
->domainvar
) {
10437 MonoInst
*get_domain
;
10439 cfg
->cbb
= init_localsbb
;
10441 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
10442 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
10445 get_domain
->dreg
= alloc_preg (cfg
);
10446 MONO_ADD_INS (cfg
->cbb
, get_domain
);
10448 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
10449 MONO_ADD_INS (cfg
->cbb
, store
);
10452 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10453 if (cfg
->compile_aot
)
10454 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10455 mono_get_got_var (cfg
);
10458 if (cfg
->method
== method
&& cfg
->got_var
)
10459 mono_emit_load_got_addr (cfg
);
10464 cfg
->cbb
= init_localsbb
;
10466 for (i
= 0; i
< header
->num_locals
; ++i
) {
10467 MonoType
*ptype
= header
->locals
[i
];
10468 int t
= ptype
->type
;
10469 dreg
= cfg
->locals
[i
]->dreg
;
10471 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
10472 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
10473 if (ptype
->byref
) {
10474 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
10475 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
10476 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
10477 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
10478 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
10479 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
10480 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
10481 ins
->type
= STACK_R8
;
10482 ins
->inst_p0
= (void*)&r8_0
;
10483 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
10484 MONO_ADD_INS (init_localsbb
, ins
);
10485 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
10486 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
10487 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
10488 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
10490 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
10495 if (cfg
->init_ref_vars
&& cfg
->method
== method
) {
10496 /* Emit initialization for ref vars */
10497 // FIXME: Avoid duplication initialization for IL locals.
10498 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10499 MonoInst
*ins
= cfg
->varinfo
[i
];
10501 if (ins
->opcode
== OP_LOCAL
&& ins
->type
== STACK_OBJ
)
10502 MONO_EMIT_NEW_PCONST (cfg
, ins
->dreg
, NULL
);
10506 /* Add a sequence point for method entry/exit events */
10508 NEW_SEQ_POINT (cfg
, ins
, METHOD_ENTRY_IL_OFFSET
, FALSE
);
10509 MONO_ADD_INS (init_localsbb
, ins
);
10510 NEW_SEQ_POINT (cfg
, ins
, METHOD_EXIT_IL_OFFSET
, FALSE
);
10511 MONO_ADD_INS (cfg
->bb_exit
, ins
);
10516 if (cfg
->method
== method
) {
10517 MonoBasicBlock
*bb
;
10518 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10519 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
10521 mono_create_spvar_for_region (cfg
, bb
->region
);
10522 if (cfg
->verbose_level
> 2)
10523 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
10527 g_slist_free (class_inits
);
10528 dont_inline
= g_list_remove (dont_inline
, method
);
10530 if (inline_costs
< 0) {
10533 /* Method is too large */
10534 mname
= mono_method_full_name (method
, TRUE
);
10535 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INVALID_PROGRAM
);
10536 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
10538 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
10539 mono_basic_block_free (original_bb
);
10543 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
10544 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
10546 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
10547 mono_basic_block_free (original_bb
);
10548 return inline_costs
;
10551 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
10558 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
10562 set_exception_type_from_invalid_il (cfg
, method
, ip
);
10566 g_slist_free (class_inits
);
10567 mono_basic_block_free (original_bb
);
10568 dont_inline
= g_list_remove (dont_inline
, method
);
10569 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
10574 store_membase_reg_to_store_membase_imm (int opcode
)
10577 case OP_STORE_MEMBASE_REG
:
10578 return OP_STORE_MEMBASE_IMM
;
10579 case OP_STOREI1_MEMBASE_REG
:
10580 return OP_STOREI1_MEMBASE_IMM
;
10581 case OP_STOREI2_MEMBASE_REG
:
10582 return OP_STOREI2_MEMBASE_IMM
;
10583 case OP_STOREI4_MEMBASE_REG
:
10584 return OP_STOREI4_MEMBASE_IMM
;
10585 case OP_STOREI8_MEMBASE_REG
:
10586 return OP_STOREI8_MEMBASE_IMM
;
10588 g_assert_not_reached ();
10594 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *   Map an opcode whose second operand is a register to the variant that
 * takes an immediate instead (the *_IMM opcodes returned below).
 * NOTE(review): extraction dropped most lines of this function -- every
 * `case` label, the switch scaffolding, and apparently some whole cases
 * (e.g. the OR mappings between the AND and XOR returns).  The surviving
 * text is kept byte-identical; recover the full body from upstream
 * before editing.
 */
10597 mono_op_to_op_imm (int opcode
)
/* 32-bit integer ALU and shift ops.  */
10601 return OP_IADD_IMM
;
10603 return OP_ISUB_IMM
;
10605 return OP_IDIV_IMM
;
10607 return OP_IDIV_UN_IMM
;
10609 return OP_IREM_IMM
;
10611 return OP_IREM_UN_IMM
;
10613 return OP_IMUL_IMM
;
10615 return OP_IAND_IMM
;
10619 return OP_IXOR_IMM
;
10621 return OP_ISHL_IMM
;
10623 return OP_ISHR_IMM
;
10625 return OP_ISHR_UN_IMM
;
/* 64-bit integer ALU and shift ops.  */
10628 return OP_LADD_IMM
;
10630 return OP_LSUB_IMM
;
10632 return OP_LAND_IMM
;
10636 return OP_LXOR_IMM
;
10638 return OP_LSHL_IMM
;
10640 return OP_LSHR_IMM
;
10642 return OP_LSHR_UN_IMM
;
/* Compares.  */
10645 return OP_COMPARE_IMM
;
10647 return OP_ICOMPARE_IMM
;
10649 return OP_LCOMPARE_IMM
;
/* Stores: register source -> immediate source.  */
10651 case OP_STORE_MEMBASE_REG
:
10652 return OP_STORE_MEMBASE_IMM
;
10653 case OP_STOREI1_MEMBASE_REG
:
10654 return OP_STOREI1_MEMBASE_IMM
;
10655 case OP_STOREI2_MEMBASE_REG
:
10656 return OP_STOREI2_MEMBASE_IMM
;
10657 case OP_STOREI4_MEMBASE_REG
:
10658 return OP_STOREI4_MEMBASE_IMM
;
/* Architecture-specific opcodes.  */
10660 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10662 return OP_X86_PUSH_IMM
;
10663 case OP_X86_COMPARE_MEMBASE_REG
:
10664 return OP_X86_COMPARE_MEMBASE_IMM
;
10666 #if defined(TARGET_AMD64)
10667 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
10668 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* Indirect calls with a known target, and localloc.  The lines between
 * the two returns below (presumably the other *CALL_REG variants) are
 * missing from this chunk -- TODO confirm against upstream.  */
10670 case OP_VOIDCALL_REG
:
10671 return OP_VOIDCALL
;
10679 return OP_LOCALLOC_IMM
;
10686 ldind_to_load_membase (int opcode
)
10690 return OP_LOADI1_MEMBASE
;
10692 return OP_LOADU1_MEMBASE
;
10694 return OP_LOADI2_MEMBASE
;
10696 return OP_LOADU2_MEMBASE
;
10698 return OP_LOADI4_MEMBASE
;
10700 return OP_LOADU4_MEMBASE
;
10702 return OP_LOAD_MEMBASE
;
10703 case CEE_LDIND_REF
:
10704 return OP_LOAD_MEMBASE
;
10706 return OP_LOADI8_MEMBASE
;
10708 return OP_LOADR4_MEMBASE
;
10710 return OP_LOADR8_MEMBASE
;
10712 g_assert_not_reached ();
10719 stind_to_store_membase (int opcode
)
10723 return OP_STOREI1_MEMBASE_REG
;
10725 return OP_STOREI2_MEMBASE_REG
;
10727 return OP_STOREI4_MEMBASE_REG
;
10729 case CEE_STIND_REF
:
10730 return OP_STORE_MEMBASE_REG
;
10732 return OP_STOREI8_MEMBASE_REG
;
10734 return OP_STORER4_MEMBASE_REG
;
10736 return OP_STORER8_MEMBASE_REG
;
10738 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-membase opcode to the corresponding absolute-address
 * OP_*_MEM load, on targets that have such instructions (x86/amd64
 * only; 8-byte loads only when registers are 8 bytes wide).  Returns
 * -1 when no _MEM form is available.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif
	return -1;
}
/*
 * op_to_op_dest_membase:
 *   When an instruction's destination vreg lives on the stack, try to
 * rewrite the op into an x86/amd64 read-modify-write *_MEMBASE form so
 * no separate load+store is needed.  STORE_OPCODE is the store that
 * would otherwise spill the dreg; only full-word / 4-byte (and, on
 * amd64, 8-byte) stores qualify.
 * NOTE(review): every `case` label and the switch/return scaffolding
 * was lost from this chunk, so which ALU opcode maps to which return
 * cannot be recovered here.  Text kept byte-identical; consult
 * upstream before editing.
 */
10771 op_to_op_dest_membase (int store_opcode
, int opcode
)
10773 #if defined(TARGET_X86)
/* Reject store sizes with no RMW form (the early-return line is missing).  */
10774 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
/* Register-operand ALU ops -> op-to-memory forms.  */
10779 return OP_X86_ADD_MEMBASE_REG
;
10781 return OP_X86_SUB_MEMBASE_REG
;
10783 return OP_X86_AND_MEMBASE_REG
;
10785 return OP_X86_OR_MEMBASE_REG
;
10787 return OP_X86_XOR_MEMBASE_REG
;
/* Immediate-operand ALU ops.  */
10790 return OP_X86_ADD_MEMBASE_IMM
;
10793 return OP_X86_SUB_MEMBASE_IMM
;
10796 return OP_X86_AND_MEMBASE_IMM
;
10799 return OP_X86_OR_MEMBASE_IMM
;
10802 return OP_X86_XOR_MEMBASE_IMM
;
10808 #if defined(TARGET_AMD64)
10809 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
/* 32-bit forms (shared with x86).  */
10814 return OP_X86_ADD_MEMBASE_REG
;
10816 return OP_X86_SUB_MEMBASE_REG
;
10818 return OP_X86_AND_MEMBASE_REG
;
10820 return OP_X86_OR_MEMBASE_REG
;
10822 return OP_X86_XOR_MEMBASE_REG
;
10824 return OP_X86_ADD_MEMBASE_IMM
;
10826 return OP_X86_SUB_MEMBASE_IMM
;
10828 return OP_X86_AND_MEMBASE_IMM
;
10830 return OP_X86_OR_MEMBASE_IMM
;
10832 return OP_X86_XOR_MEMBASE_IMM
;
/* 64-bit (amd64-specific) forms.  */
10834 return OP_AMD64_ADD_MEMBASE_REG
;
10836 return OP_AMD64_SUB_MEMBASE_REG
;
10838 return OP_AMD64_AND_MEMBASE_REG
;
10840 return OP_AMD64_OR_MEMBASE_REG
;
10842 return OP_AMD64_XOR_MEMBASE_REG
;
10845 return OP_AMD64_ADD_MEMBASE_IMM
;
10848 return OP_AMD64_SUB_MEMBASE_IMM
;
10851 return OP_AMD64_AND_MEMBASE_IMM
;
10854 return OP_AMD64_OR_MEMBASE_IMM
;
10857 return OP_AMD64_XOR_MEMBASE_IMM
;
/*
 * op_to_op_store_membase:
 *   Fuse a compare-result opcode followed by a one-byte store
 * (OP_STOREI1_MEMBASE_REG) into a single x86/amd64 SET*_MEMBASE
 * instruction.
 * NOTE(review): the `case` labels selecting which comparison maps to
 * SETEQ vs SETNE, plus the switch and fallback return, were lost from
 * this chunk -- confirm against upstream.  Text kept byte-identical.
 */
10867 op_to_op_store_membase (int store_opcode
, int opcode
)
10869 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* (equality case -- label missing) */
10872 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10873 return OP_X86_SETEQ_MEMBASE
;
/* (inequality case -- label missing) */
10875 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10876 return OP_X86_SETNE_MEMBASE
;
/*
 * op_to_op_src1_membase:
 *   Try to fold a load from a stack-allocated variable into the
 * consuming instruction's FIRST source operand, returning the matching
 * *_MEMBASE opcode (or, presumably, -1 -- the fallback return was lost).
 * NOTE(review): many `case` labels and the switch scaffolding are
 * missing from this chunk.  Text kept byte-identical; consult upstream
 * before editing.
 */
10884 op_to_op_src1_membase (int load_opcode
, int opcode
)
10887 /* FIXME: This has sign extension issues */
10889 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10890 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only full-word / 4-byte loads can be folded.  */
10893 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10898 return OP_X86_PUSH_MEMBASE
;
10899 case OP_COMPARE_IMM
:
10900 case OP_ICOMPARE_IMM
:
10901 return OP_X86_COMPARE_MEMBASE_IMM
;
10904 return OP_X86_COMPARE_MEMBASE_REG
;
10908 #ifdef TARGET_AMD64
10909 /* FIXME: This has sign extension issues */
10911 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10912 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32, 8-byte loads are treated as pointer-sized here.  */
10917 #ifdef __mono_ilp32__
10918 if (load_opcode
== OP_LOADI8_MEMBASE
)
10920 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10922 return OP_X86_PUSH_MEMBASE
;
10924 /* FIXME: This only works for 32 bit immediates
10925 case OP_COMPARE_IMM:
10926 case OP_LCOMPARE_IMM:
10927 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10928 return OP_AMD64_COMPARE_MEMBASE_IMM;
10930 case OP_ICOMPARE_IMM
:
10931 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10932 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
10936 #ifdef __mono_ilp32__
10937 if (load_opcode
== OP_LOAD_MEMBASE
)
10938 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
10939 if (load_opcode
== OP_LOADI8_MEMBASE
)
10941 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10943 return OP_AMD64_COMPARE_MEMBASE_REG
;
10946 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10947 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
/*
 * op_to_op_src2_membase:
 *   Try to fold a load from a stack-allocated variable into the
 * consuming instruction's SECOND source operand, returning the matching
 * *_REG_MEMBASE opcode (fallback return lost from this chunk).
 * NOTE(review): every `case` label is missing; text kept byte-identical.
 * Consult upstream before editing.
 */
10956 op_to_op_src2_membase (int load_opcode
, int opcode
)
/* x86: only full-word / 4-byte loads can be folded.  */
10959 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10965 return OP_X86_COMPARE_REG_MEMBASE
;
10967 return OP_X86_ADD_REG_MEMBASE
;
10969 return OP_X86_SUB_REG_MEMBASE
;
10971 return OP_X86_AND_REG_MEMBASE
;
10973 return OP_X86_OR_REG_MEMBASE
;
10975 return OP_X86_XOR_REG_MEMBASE
;
10979 #ifdef TARGET_AMD64
/* 4-byte operands (plus pointer-sized under ILP32) -> 32-bit forms.  */
10980 #ifdef __mono_ilp32__
10981 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
) ) {
10983 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)) {
10987 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
10989 return OP_X86_ADD_REG_MEMBASE
;
10991 return OP_X86_SUB_REG_MEMBASE
;
10993 return OP_X86_AND_REG_MEMBASE
;
10995 return OP_X86_OR_REG_MEMBASE
;
10997 return OP_X86_XOR_REG_MEMBASE
;
/* 8-byte operands (plus pointer-sized on LP64) -> 64-bit forms.  */
10999 #ifdef __mono_ilp32__
11000 } else if (load_opcode
== OP_LOADI8_MEMBASE
) {
11002 } else if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
)) {
11007 return OP_AMD64_COMPARE_REG_MEMBASE
;
11009 return OP_AMD64_ADD_REG_MEMBASE
;
11011 return OP_AMD64_SUB_REG_MEMBASE
;
11013 return OP_AMD64_AND_REG_MEMBASE
;
11015 return OP_AMD64_OR_REG_MEMBASE
;
11017 return OP_AMD64_XOR_REG_MEMBASE
;
/*
 * mono_op_to_op_imm_noemul:
 *   Like mono_op_to_op_imm (tail-called as the fallback below), but
 * presumably refuses the conversion for opcodes emulated in software on
 * this target (long shifts on 32-bit registers, mul/div) -- TODO
 * confirm: most of the body was lost in extraction; text below is kept
 * byte-identical.
 */
11026 mono_op_to_op_imm_noemul (int opcode
)
/* Long shifts may be emulated on 32-bit targets.  */
11029 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* Likewise mul/div on some architectures.  */
11035 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Default: use the plain register->immediate mapping.  */
11043 return mono_op_to_op_imm (opcode
);
11047 #ifndef DISABLE_JIT
11050 * mono_handle_global_vregs:
11052 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *   First pass: walk every bblock and every instruction's dest + three
 * source registers to find vregs referenced from more than one bblock,
 * promoting each such vreg to a MonoInst variable of the matching type
 * ("global" vreg).  Second pass: mark variables used in no second
 * bblock as dead and compact cfg->varinfo / cfg->vars.
 * NOTE(review): this chunk is missing many interleaved lines (the
 * declarations/assignments of `vreg`, `prev_bb` and `pos`, several
 * braces, and the regtype switch labels).  The surviving text is kept
 * byte-identical below; consult upstream before editing.
 */
11056 mono_handle_global_vregs (MonoCompile
*cfg
)
11058 gint32
*vreg_to_bb
;
11059 MonoBasicBlock
*bb
;
/* vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it,
 * or -1 once it has been seen in more than one (see below).  */
11062 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
11064 #ifdef MONO_ARCH_SIMD_INTRINSICS
11065 if (cfg
->uses_simd_intrinsics
)
11066 mono_simd_simplify_indirection (cfg
);
11069 /* Find local vregs used in more than one bb */
11070 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
11071 MonoInst
*ins
= bb
->code
;
11072 int block_num
= bb
->block_num
;
11074 if (cfg
->verbose_level
> 2)
11075 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
11078 for (; ins
; ins
= ins
->next
) {
11079 const char *spec
= INS_INFO (ins
->opcode
);
11080 int regtype
= 0, regindex
;
11083 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11084 mono_print_ins (ins
);
11086 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
/* Check the dest (regindex 0) and up to three source registers.  */
11088 for (regindex
= 0; regindex
< 4; regindex
++) {
11091 if (regindex
== 0) {
11092 regtype
= spec
[MONO_INST_DEST
];
11093 if (regtype
== ' ')
11096 } else if (regindex
== 1) {
11097 regtype
= spec
[MONO_INST_SRC1
];
11098 if (regtype
== ' ')
11101 } else if (regindex
== 2) {
11102 regtype
= spec
[MONO_INST_SRC2
];
11103 if (regtype
== ' ')
11106 } else if (regindex
== 3) {
11107 regtype
= spec
[MONO_INST_SRC3
];
11108 if (regtype
== ' ')
11113 #if SIZEOF_REGISTER == 4
11114 /* In the LLVM case, the long opcodes are not decomposed */
11115 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
11117 * Since some instructions reference the original long vreg,
11118 * and some reference the two component vregs, it is quite hard
11119 * to determine when it needs to be global. So be conservative.
11121 if (!get_vreg_to_inst (cfg
, vreg
)) {
11122 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
11124 if (cfg
->verbose_level
> 2)
11125 printf ("LONG VREG R%d made global.\n", vreg
);
11129 * Make the component vregs volatile since the optimizations can
11130 * get confused otherwise.
11132 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
11133 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
/* NOTE(review): the lines assigning `vreg` for this regindex are
 * missing from this chunk.  */
11137 g_assert (vreg
!= -1);
11139 prev_bb
= vreg_to_bb
[vreg
];
11140 if (prev_bb
== 0) {
11141 /* 0 is a valid block num */
11142 vreg_to_bb
[vreg
] = block_num
+ 1;
11143 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
/* presumably skips physical registers (vreg below MONO_MAX_*REGS)
 * -- confirm against upstream.  */
11144 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
/* Seen in a second bblock: promote to a var of the matching type.  */
11147 if (!get_vreg_to_inst (cfg
, vreg
)) {
11148 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11149 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
11153 if (vreg_is_ref (cfg
, vreg
))
11154 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
, vreg
);
11156 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
11159 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
11162 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
11165 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
11168 g_assert_not_reached ();
11172 /* Flag as having been used in more than one bb */
11173 vreg_to_bb
[vreg
] = -1;
11179 /* If a variable is used in only one bblock, convert it into a local vreg */
11180 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11181 MonoInst
*var
= cfg
->varinfo
[i
];
11182 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
11184 switch (var
->type
) {
11190 #if SIZEOF_REGISTER == 8
11193 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11194 /* Enabling this screws up the fp stack on x86 */
11197 /* Arguments are implicitly global */
11198 /* Putting R4 vars into registers doesn't work currently */
11199 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
11201 * Make that the variable's liveness interval doesn't contain a call, since
11202 * that would cause the lvreg to be spilled, making the whole optimization
11205 /* This is too slow for JIT compilation */
11207 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
11209 int def_index
, call_index
, ins_index
;
11210 gboolean spilled
= FALSE
;
/* Scan the single bblock using this var for a call between the def
 * and a later use.  */
11215 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
11216 const char *spec
= INS_INFO (ins
->opcode
);
11218 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
11219 def_index
= ins_index
;
11221 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
11222 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
11223 if (call_index
> def_index
) {
11229 if (MONO_IS_CALL (ins
))
11230 call_index
= ins_index
;
11240 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11241 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
11242 var
->flags
|= MONO_INST_IS_DEAD
;
11243 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
11250 * Compress the varinfo and vars tables so the liveness computation is faster and
11251 * takes up less space.
11254 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11255 MonoInst
*var
= cfg
->varinfo
[i
];
11256 if (pos
< i
&& cfg
->locals_start
== i
)
11257 cfg
->locals_start
= pos
;
11258 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
11260 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
11261 cfg
->varinfo
[pos
]->inst_c0
= pos
;
11262 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
11263 cfg
->vars
[pos
].idx
= pos
;
11264 #if SIZEOF_REGISTER == 4
11265 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
11266 /* Modify the two component vars too */
11269 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
11270 var1
->inst_c0
= pos
;
11271 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
11272 var1
->inst_c0
= pos
;
/* Shrink the tables to the surviving entries.  */
11279 cfg
->num_varinfo
= pos
;
11280 if (cfg
->locals_start
> cfg
->num_varinfo
)
11281 cfg
->locals_start
= cfg
->num_varinfo
;
11285 * mono_spill_global_vars:
11287 * Generate spill code for variables which are not allocated to registers,
11288 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11289 * code is generated which could be optimized by the local optimization passes.
11292 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
11294 MonoBasicBlock
*bb
;
11296 int orig_next_vreg
;
11297 guint32
*vreg_to_lvreg
;
11299 guint32 i
, lvregs_len
;
11300 gboolean dest_has_lvreg
= FALSE
;
11301 guint32 stacktypes
[128];
11302 MonoInst
**live_range_start
, **live_range_end
;
11303 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
11305 *need_local_opts
= FALSE
;
11307 memset (spec2
, 0, sizeof (spec2
));
11309 /* FIXME: Move this function to mini.c */
11310 stacktypes
['i'] = STACK_PTR
;
11311 stacktypes
['l'] = STACK_I8
;
11312 stacktypes
['f'] = STACK_R8
;
11313 #ifdef MONO_ARCH_SIMD_INTRINSICS
11314 stacktypes
['x'] = STACK_VTYPE
;
11317 #if SIZEOF_REGISTER == 4
11318 /* Create MonoInsts for longs */
11319 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11320 MonoInst
*ins
= cfg
->varinfo
[i
];
11322 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
11323 switch (ins
->type
) {
11328 if (ins
->type
== STACK_R8
&& !COMPILE_SOFT_FLOAT (cfg
))
11331 g_assert (ins
->opcode
== OP_REGOFFSET
);
11333 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
11335 tree
->opcode
= OP_REGOFFSET
;
11336 tree
->inst_basereg
= ins
->inst_basereg
;
11337 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
11339 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
11341 tree
->opcode
= OP_REGOFFSET
;
11342 tree
->inst_basereg
= ins
->inst_basereg
;
11343 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
11353 if (cfg
->compute_gc_maps
) {
11354 /* registers need liveness info even for !non refs */
11355 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11356 MonoInst
*ins
= cfg
->varinfo
[i
];
11358 if (ins
->opcode
== OP_REGVAR
)
11359 ins
->flags
|= MONO_INST_GC_TRACK
;
11363 /* FIXME: widening and truncation */
11366 * As an optimization, when a variable allocated to the stack is first loaded into
11367 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11368 * the variable again.
11370 orig_next_vreg
= cfg
->next_vreg
;
11371 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
11372 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
11376 * These arrays contain the first and last instructions accessing a given
11378 * Since we emit bblocks in the same order we process them here, and we
11379 * don't split live ranges, these will precisely describe the live range of
11380 * the variable, i.e. the instruction range where a valid value can be found
11381 * in the variables location.
11382 * The live range is computed using the liveness info computed by the liveness pass.
11383 * We can't use vmv->range, since that is an abstract live range, and we need
11384 * one which is instruction precise.
11385 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11387 /* FIXME: Only do this if debugging info is requested */
11388 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
11389 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
11390 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
11391 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
11393 /* Add spill loads/stores */
11394 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
11397 if (cfg
->verbose_level
> 2)
11398 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
11400 /* Clear vreg_to_lvreg array */
11401 for (i
= 0; i
< lvregs_len
; i
++)
11402 vreg_to_lvreg
[lvregs
[i
]] = 0;
11406 MONO_BB_FOR_EACH_INS (bb
, ins
) {
11407 const char *spec
= INS_INFO (ins
->opcode
);
11408 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
11409 gboolean store
, no_lvreg
;
11410 int sregs
[MONO_MAX_SRC_REGS
];
11412 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11413 mono_print_ins (ins
);
11415 if (ins
->opcode
== OP_NOP
)
11419 * We handle LDADDR here as well, since it can only be decomposed
11420 * when variable addresses are known.
11422 if (ins
->opcode
== OP_LDADDR
) {
11423 MonoInst
*var
= ins
->inst_p0
;
11425 if (var
->opcode
== OP_VTARG_ADDR
) {
11426 /* Happens on SPARC/S390 where vtypes are passed by reference */
11427 MonoInst
*vtaddr
= var
->inst_left
;
11428 if (vtaddr
->opcode
== OP_REGVAR
) {
11429 ins
->opcode
= OP_MOVE
;
11430 ins
->sreg1
= vtaddr
->dreg
;
11432 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
11433 ins
->opcode
= OP_LOAD_MEMBASE
;
11434 ins
->inst_basereg
= vtaddr
->inst_basereg
;
11435 ins
->inst_offset
= vtaddr
->inst_offset
;
11439 g_assert (var
->opcode
== OP_REGOFFSET
);
11441 ins
->opcode
= OP_ADD_IMM
;
11442 ins
->sreg1
= var
->inst_basereg
;
11443 ins
->inst_imm
= var
->inst_offset
;
11446 *need_local_opts
= TRUE
;
11447 spec
= INS_INFO (ins
->opcode
);
11450 if (ins
->opcode
< MONO_CEE_LAST
) {
11451 mono_print_ins (ins
);
11452 g_assert_not_reached ();
11456 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11460 if (MONO_IS_STORE_MEMBASE (ins
)) {
11461 tmp_reg
= ins
->dreg
;
11462 ins
->dreg
= ins
->sreg2
;
11463 ins
->sreg2
= tmp_reg
;
11466 spec2
[MONO_INST_DEST
] = ' ';
11467 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
11468 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
11469 spec2
[MONO_INST_SRC3
] = ' ';
11471 } else if (MONO_IS_STORE_MEMINDEX (ins
))
11472 g_assert_not_reached ();
11477 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
11478 printf ("\t %.3s %d", spec
, ins
->dreg
);
11479 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
11480 for (srcindex
= 0; srcindex
< 3; ++srcindex
)
11481 printf (" %d", sregs
[srcindex
]);
11488 regtype
= spec
[MONO_INST_DEST
];
11489 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
11492 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
11493 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
11494 MonoInst
*store_ins
;
11496 MonoInst
*def_ins
= ins
;
11497 int dreg
= ins
->dreg
; /* The original vreg */
11499 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
11501 if (var
->opcode
== OP_REGVAR
) {
11502 ins
->dreg
= var
->dreg
;
11503 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
11505 * Instead of emitting a load+store, use a _membase opcode.
11507 g_assert (var
->opcode
== OP_REGOFFSET
);
11508 if (ins
->opcode
== OP_MOVE
) {
11512 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
11513 ins
->inst_basereg
= var
->inst_basereg
;
11514 ins
->inst_offset
= var
->inst_offset
;
11517 spec
= INS_INFO (ins
->opcode
);
11521 g_assert (var
->opcode
== OP_REGOFFSET
);
11523 prev_dreg
= ins
->dreg
;
11525 /* Invalidate any previous lvreg for this vreg */
11526 vreg_to_lvreg
[ins
->dreg
] = 0;
11530 if (COMPILE_SOFT_FLOAT (cfg
) && store_opcode
== OP_STORER8_MEMBASE_REG
) {
11532 store_opcode
= OP_STOREI8_MEMBASE_REG
;
11535 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
11537 if (regtype
== 'l') {
11538 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
11539 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
11540 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
11541 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
11542 def_ins
= store_ins
;
11545 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
11547 /* Try to fuse the store into the instruction itself */
11548 /* FIXME: Add more instructions */
11549 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
11550 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
11551 ins
->inst_imm
= ins
->inst_c0
;
11552 ins
->inst_destbasereg
= var
->inst_basereg
;
11553 ins
->inst_offset
= var
->inst_offset
;
11554 spec
= INS_INFO (ins
->opcode
);
11555 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
11556 ins
->opcode
= store_opcode
;
11557 ins
->inst_destbasereg
= var
->inst_basereg
;
11558 ins
->inst_offset
= var
->inst_offset
;
11562 tmp_reg
= ins
->dreg
;
11563 ins
->dreg
= ins
->sreg2
;
11564 ins
->sreg2
= tmp_reg
;
11567 spec2
[MONO_INST_DEST
] = ' ';
11568 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
11569 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
11570 spec2
[MONO_INST_SRC3
] = ' ';
11572 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
11573 // FIXME: The backends expect the base reg to be in inst_basereg
11574 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
11576 ins
->inst_basereg
= var
->inst_basereg
;
11577 ins
->inst_offset
= var
->inst_offset
;
11578 spec
= INS_INFO (ins
->opcode
);
11580 /* printf ("INS: "); mono_print_ins (ins); */
11581 /* Create a store instruction */
11582 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
11584 /* Insert it after the instruction */
11585 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
11587 def_ins
= store_ins
;
11590 * We can't assign ins->dreg to var->dreg here, since the
11591 * sregs could use it. So set a flag, and do it after
11594 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
11595 dest_has_lvreg
= TRUE
;
11600 if (def_ins
&& !live_range_start
[dreg
]) {
11601 live_range_start
[dreg
] = def_ins
;
11602 live_range_start_bb
[dreg
] = bb
;
11605 if (cfg
->compute_gc_maps
&& def_ins
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
11608 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_DEF
);
11609 tmp
->inst_c1
= dreg
;
11610 mono_bblock_insert_after_ins (bb
, def_ins
, tmp
);
11617 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
11618 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
11619 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
11620 sreg
= sregs
[srcindex
];
11622 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
11623 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
11624 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
11625 MonoInst
*use_ins
= ins
;
11626 MonoInst
*load_ins
;
11627 guint32 load_opcode
;
11629 if (var
->opcode
== OP_REGVAR
) {
11630 sregs
[srcindex
] = var
->dreg
;
11631 //mono_inst_set_src_registers (ins, sregs);
11632 live_range_end
[sreg
] = use_ins
;
11633 live_range_end_bb
[sreg
] = bb
;
11635 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
11638 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
11639 /* var->dreg is a hreg */
11640 tmp
->inst_c1
= sreg
;
11641 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
11647 g_assert (var
->opcode
== OP_REGOFFSET
);
11649 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
11651 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
11653 if (vreg_to_lvreg
[sreg
]) {
11654 g_assert (vreg_to_lvreg
[sreg
] != -1);
11656 /* The variable is already loaded to an lvreg */
11657 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11658 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
11659 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
11660 //mono_inst_set_src_registers (ins, sregs);
11664 /* Try to fuse the load into the instruction */
11665 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
11666 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
11667 sregs
[0] = var
->inst_basereg
;
11668 //mono_inst_set_src_registers (ins, sregs);
11669 ins
->inst_offset
= var
->inst_offset
;
11670 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
11671 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
11672 sregs
[1] = var
->inst_basereg
;
11673 //mono_inst_set_src_registers (ins, sregs);
11674 ins
->inst_offset
= var
->inst_offset
;
11676 if (MONO_IS_REAL_MOVE (ins
)) {
11677 ins
->opcode
= OP_NOP
;
11680 //printf ("%d ", srcindex); mono_print_ins (ins);
11682 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
11684 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
11685 if (var
->dreg
== prev_dreg
) {
11687 * sreg refers to the value loaded by the load
11688 * emitted below, but we need to use ins->dreg
11689 * since it refers to the store emitted earlier.
11693 g_assert (sreg
!= -1);
11694 vreg_to_lvreg
[var
->dreg
] = sreg
;
11695 g_assert (lvregs_len
< 1024);
11696 lvregs
[lvregs_len
++] = var
->dreg
;
11700 sregs
[srcindex
] = sreg
;
11701 //mono_inst_set_src_registers (ins, sregs);
11703 if (regtype
== 'l') {
11704 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
11705 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11706 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
11707 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11708 use_ins
= load_ins
;
11711 #if SIZEOF_REGISTER == 4
11712 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
11714 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
11715 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11716 use_ins
= load_ins
;
11720 if (var
->dreg
< orig_next_vreg
) {
11721 live_range_end
[var
->dreg
] = use_ins
;
11722 live_range_end_bb
[var
->dreg
] = bb
;
11725 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
11728 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
11729 tmp
->inst_c1
= var
->dreg
;
11730 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
11734 mono_inst_set_src_registers (ins
, sregs
);
11736 if (dest_has_lvreg
) {
11737 g_assert (ins
->dreg
!= -1);
11738 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
11739 g_assert (lvregs_len
< 1024);
11740 lvregs
[lvregs_len
++] = prev_dreg
;
11741 dest_has_lvreg
= FALSE
;
11745 tmp_reg
= ins
->dreg
;
11746 ins
->dreg
= ins
->sreg2
;
11747 ins
->sreg2
= tmp_reg
;
11750 if (MONO_IS_CALL (ins
)) {
11751 /* Clear vreg_to_lvreg array */
11752 for (i
= 0; i
< lvregs_len
; i
++)
11753 vreg_to_lvreg
[lvregs
[i
]] = 0;
11755 } else if (ins
->opcode
== OP_NOP
) {
11757 MONO_INST_NULLIFY_SREGS (ins
);
11760 if (cfg
->verbose_level
> 2)
11761 mono_print_ins_index (1, ins
);
11764 /* Extend the live range based on the liveness info */
11765 if (cfg
->compute_precise_live_ranges
&& bb
->live_out_set
&& bb
->code
) {
11766 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11767 MonoMethodVar
*vi
= MONO_VARINFO (cfg
, i
);
11769 if (vreg_is_volatile (cfg
, vi
->vreg
))
11770 /* The liveness info is incomplete */
11773 if (mono_bitset_test_fast (bb
->live_in_set
, i
) && !live_range_start
[vi
->vreg
]) {
11774 /* Live from at least the first ins of this bb */
11775 live_range_start
[vi
->vreg
] = bb
->code
;
11776 live_range_start_bb
[vi
->vreg
] = bb
;
11779 if (mono_bitset_test_fast (bb
->live_out_set
, i
)) {
11780 /* Live at least until the last ins of this bb */
11781 live_range_end
[vi
->vreg
] = bb
->last_ins
;
11782 live_range_end_bb
[vi
->vreg
] = bb
;
11788 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11790 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11791 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11793 if (cfg
->compute_precise_live_ranges
&& cfg
->comp_done
& MONO_COMP_LIVENESS
) {
11794 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11795 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
11798 if (live_range_start
[vreg
]) {
11799 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
11801 ins
->inst_c1
= vreg
;
11802 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
11804 if (live_range_end
[vreg
]) {
11805 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
11807 ins
->inst_c1
= vreg
;
11808 if (live_range_end
[vreg
] == live_range_end_bb
[vreg
]->last_ins
)
11809 mono_add_ins_to_end (live_range_end_bb
[vreg
], ins
);
11811 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
11817 g_free (live_range_start
);
11818 g_free (live_range_end
);
11819 g_free (live_range_start_bb
);
11820 g_free (live_range_end_bb
);
11825 * - use 'iadd' instead of 'int_add'
11826 * - handling ovf opcodes: decompose in method_to_ir.
11827 * - unify iregs/fregs
11828 * -> partly done, the missing parts are:
11829 * - a more complete unification would involve unifying the hregs as well, so
11830 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11831 * would no longer map to the machine hregs, so the code generators would need to
11832 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11833 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11834 * fp/non-fp branches speeds it up by about 15%.
11835 * - use sext/zext opcodes instead of shifts
11837 * - get rid of TEMPLOADs if possible and use vregs instead
11838 * - clean up usage of OP_P/OP_ opcodes
11839 * - cleanup usage of DUMMY_USE
11840 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11842 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11843 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11844 * - make sure handle_stack_args () is called before the branch is emitted
11845 * - when the new IR is done, get rid of all unused stuff
11846 * - COMPARE/BEQ as separate instructions or unify them ?
11847 * - keeping them separate allows specialized compare instructions like
11848 * compare_imm, compare_membase
11849 * - most back ends unify fp compare+branch, fp compare+ceq
11850 * - integrate mono_save_args into inline_method
11851 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11852 * - handle long shift opts on 32 bit platforms somehow: they require
11853 * 3 sregs (2 for arg1 and 1 for arg2)
11854 * - make byref a 'normal' type.
11855 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11856 * variable if needed.
11857 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11858 * like inline_method.
11859 * - remove inlining restrictions
11860 * - fix LNEG and enable cfold of INEG
11861 * - generalize x86 optimizations like ldelema as a peephole optimization
11862 * - add store_mem_imm for amd64
11863 * - optimize the loading of the interruption flag in the managed->native wrappers
11864 * - avoid special handling of OP_NOP in passes
11865 * - move code inserting instructions into one function/macro.
11866 * - try a coalescing phase after liveness analysis
11867 * - add float -> vreg conversion + local optimizations on !x86
11868 * - figure out how to handle decomposed branches during optimizations, ie.
11869 * compare+branch, op_jump_table+op_br etc.
11870 * - promote RuntimeXHandles to vregs
11871 * - vtype cleanups:
11872 * - add a NEW_VARLOADA_VREG macro
11873 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11874 * accessing vtype fields.
11875 * - get rid of I8CONST on 64 bit platforms
11876 * - dealing with the increase in code size due to branches created during opcode
11878 * - use extended basic blocks
11879 * - all parts of the JIT
11880 * - handle_global_vregs () && local regalloc
11881 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11882 * - sources of increase in code size:
11885 * - isinst and castclass
11886 * - lvregs not allocated to global registers even if used multiple times
11887 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11889 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11890 * - add all micro optimizations from the old JIT
11891 * - put tree optimizations into the deadce pass
11892 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11893 * specific function.
11894 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11895 * fcompare + branchCC.
11896 * - create a helper function for allocating a stack slot, taking into account
11897 * MONO_CFG_HAS_SPILLUP.
11899 * - merge the ia64 switch changes.
11900 * - optimize mono_regstate2_alloc_int/float.
11901 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11902 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11903 * parts of the tree could be separated by other instructions, killing the tree
11904 * arguments, or stores killing loads etc. Also, should we fold loads into other
11905 * instructions if the result of the load is used multiple times ?
11906 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11907 * - LAST MERGE: 108395.
11908 * - when returning vtypes in registers, generate IR and append it to the end of the
11909 * last bb instead of doing it in the epilog.
11910 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11918 - When to decompose opcodes:
11919 - earlier: this makes some optimizations hard to implement, since the low level IR
11920 no longer contains the necessary information. But it is easier to do.
11921 - later: harder to implement, enables more optimizations.
11922 - Branches inside bblocks:
11923 - created when decomposing complex opcodes.
11924 - branches to another bblock: harmless, but not tracked by the branch
11925 optimizations, so need to branch to a label at the start of the bblock.
11926 - branches to inside the same bblock: very problematic, trips up the local
11927 reg allocator. Can be fixed by splitting the current bblock, but that is a
11928 complex operation, since some local vregs can become global vregs etc.
11929 - Local/global vregs:
11930 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11931 local register allocator.
11932 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11933 structure, created by mono_create_var (). Assigned to hregs or the stack by
11934 the global register allocator.
11935 - When to do optimizations like alu->alu_imm:
11936 - earlier -> saves work later on since the IR will be smaller/simpler
11937 - later -> can work on more instructions
11938 - Handling of valuetypes:
11939 - When a vtype is pushed on the stack, a new temporary is created, an
11940 instruction computing its address (LDADDR) is emitted and pushed on
11941 the stack. Need to optimize cases when the vtype is used immediately as in
11942 argument passing, stloc etc.
11943 - Instead of the to_end stuff in the old JIT, simply call the function handling
11944 the values on the stack before emitting the last instruction of the bb.
11947 #endif /* DISABLE_JIT */