2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
65 #include "jit-icalls.h"
67 #include "debugger-agent.h"
/* Cost heuristics used when deciding whether to inline a callee. */
69 #define BRANCH_COST 10
70 #define INLINE_LENGTH_LIMIT 20
/* Give up inlining the current callee: optionally log MSG, then jump to the
 * enclosing function's inline_failure label. */
71 #define INLINE_FAILURE(msg) do { \
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
73 if (cfg->verbose_level >= 2) \
74 printf ("inline failed: %s\n", msg); \
75 goto inline_failure; \
/* Check whether an exception has already been recorded on the cfg. */ \
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record MONO_EXCEPTION_METHOD_ACCESS with a descriptive message on the
 * cfg, then jump to exception_exit. */ \
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/* Record MONO_EXCEPTION_FIELD_ACCESS with a descriptive message on the
 * cfg, then jump to exception_exit. */ \
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/* Abort generic sharing for OPCODE: flag the cfg with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit. */ \
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 2) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
105 goto exception_exit; \
/* Same idea, but for gsharedvt compilation: store the failure message on
 * the cfg before bailing out. */ \
108 #define GSHAREDVT_FAILURE(opcode) do { \
109 if (cfg->gsharedvt) { \
110 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
111 if (cfg->verbose_level >= 2) \
112 printf ("%s\n", cfg->exception_message); \
113 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
114 goto exception_exit; \
/* Record MONO_EXCEPTION_OUT_OF_MEMORY on the cfg and jump to
 * exception_exit. */ \
117 #define OUT_OF_MEMORY_FAILURE do { \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
119 goto exception_exit; \
121 /* Determine whenever 'ins' represents a load of the 'this' argument */
122 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping and call-emission helpers. */
124 static int ldind_to_load_membase (int opcode
);
125 static int stind_to_store_membase (int opcode
);
127 int mono_op_to_op_imm (int opcode
);
128 int mono_op_to_op_imm_noemul (int opcode
);
130 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
132 /* helper methods signatures */
/* All of these start out NULL and are filled in by
 * mono_create_helper_signatures () below. */
133 static MonoMethodSignature
*helper_sig_class_init_trampoline
= NULL
;
134 static MonoMethodSignature
*helper_sig_domain_get
= NULL
;
135 static MonoMethodSignature
*helper_sig_generic_class_init_trampoline
= NULL
;
136 static MonoMethodSignature
*helper_sig_generic_class_init_trampoline_llvm
= NULL
;
137 static MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
= NULL
;
138 static MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
= NULL
;
139 static MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline_llvm
= NULL
;
142 * Instruction metadata
150 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
151 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
157 #if SIZEOF_REGISTER == 8
162 /* keep in sync with the enum in mini.h */
165 #include "mini-ops.h"
170 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
171 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
173 * This should contain the index of the last sreg + 1. This is not the same
174 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
176 const gint8 ins_sreg_counts
[] = {
177 #include "mini-ops.h"
182 #define MONO_INIT_VARINFO(vi,id) do { \
183 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *
 *   Copy the three source vregs from REGS into INS (sreg1..sreg3).
 * REGS must hold at least three entries; all three are copied
 * unconditionally.
 */
189 mono_inst_set_src_registers (MonoInst
*ins
, int *regs
)
191 ins
->sreg1
= regs
[0];
192 ins
->sreg2
= regs
[1];
193 ins
->sreg3
= regs
[2];
/* Allocate a new 'i' vreg for CFG.  Thin exported wrapper around
 * alloc_ireg () (defined elsewhere in the JIT). */
197 mono_alloc_ireg (MonoCompile
*cfg
)
199 return alloc_ireg (cfg
);
/* Allocate a new 'f' vreg for CFG; wrapper around alloc_freg (). */
203 mono_alloc_freg (MonoCompile
*cfg
)
205 return alloc_freg (cfg
);
/* Allocate a new 'p' (pointer-sized) vreg; wrapper around alloc_preg (). */
209 mono_alloc_preg (MonoCompile
*cfg
)
211 return alloc_preg (cfg
);
/* Allocate a destination vreg matching the eval-stack type STACK_TYPE;
 * wrapper around alloc_dreg (). */
215 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
217 return alloc_dreg (cfg
, stack_type
);
221 * mono_alloc_ireg_ref:
223 * Allocate an IREG, and mark it as holding a GC ref.
226 mono_alloc_ireg_ref (MonoCompile
*cfg
)
228 return alloc_ireg_ref (cfg
);
232 * mono_alloc_ireg_mp:
234 * Allocate an IREG, and mark it as holding a managed pointer.
237 mono_alloc_ireg_mp (MonoCompile
*cfg
)
239 return alloc_ireg_mp (cfg
);
243 * mono_alloc_ireg_copy:
245 * Allocate an IREG with the same GC type as VREG.
/* Dispatches on the GC classification of VREG: ref, managed pointer, or
 * plain integer. */
248 mono_alloc_ireg_copy (MonoCompile
*cfg
, guint32 vreg
)
250 if (vreg_is_ref (cfg
, vreg
))
251 return alloc_ireg_ref (cfg
);
252 else if (vreg_is_mp (cfg
, vreg
))
253 return alloc_ireg_mp (cfg
);
/* neither a GC ref nor a managed pointer: plain ireg */
255 return alloc_ireg (cfg
);
259 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
265 switch (type
->type
) {
268 case MONO_TYPE_BOOLEAN
:
280 case MONO_TYPE_FNPTR
:
282 case MONO_TYPE_CLASS
:
283 case MONO_TYPE_STRING
:
284 case MONO_TYPE_OBJECT
:
285 case MONO_TYPE_SZARRAY
:
286 case MONO_TYPE_ARRAY
:
290 #if SIZEOF_REGISTER == 8
299 case MONO_TYPE_VALUETYPE
:
300 if (type
->data
.klass
->enumtype
) {
301 type
= mono_class_enum_basetype (type
->data
.klass
);
304 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
307 case MONO_TYPE_TYPEDBYREF
:
309 case MONO_TYPE_GENERICINST
:
310 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
314 g_assert (cfg
->generic_sharing_context
);
315 if (mini_type_var_is_vt (cfg
, type
))
320 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
326 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
331 printf ("\n%s %d: [IN: ", msg
, bb
->block_num
);
332 for (i
= 0; i
< bb
->in_count
; ++i
)
333 printf (" BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
335 for (i
= 0; i
< bb
->out_count
; ++i
)
336 printf (" BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
338 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
339 mono_print_ins_index (-1, tree
);
/*
 * mono_create_helper_signatures:
 *
 *   Initialize the file-scope helper_sig_* globals used when emitting calls
 * to runtime helpers/trampolines.  Each string is passed to
 * mono_create_icall_signature (); it appears to encode the return type
 * followed by the argument types (e.g. "ptr ptr") -- confirm against that
 * function's definition.
 */
343 mono_create_helper_signatures (void)
345 helper_sig_domain_get
= mono_create_icall_signature ("ptr");
346 helper_sig_class_init_trampoline
= mono_create_icall_signature ("void");
347 helper_sig_generic_class_init_trampoline
= mono_create_icall_signature ("void");
348 helper_sig_generic_class_init_trampoline_llvm
= mono_create_icall_signature ("void ptr");
349 helper_sig_rgctx_lazy_fetch_trampoline
= mono_create_icall_signature ("ptr ptr");
350 helper_sig_monitor_enter_exit_trampoline
= mono_create_icall_signature ("void");
351 helper_sig_monitor_enter_exit_trampoline_llvm
= mono_create_icall_signature ("void object");
355 * Can't put this at the beginning, since other files reference stuff from this
361 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
362 * foo<T> (int i) { ldarg.0; box T; }
364 #define UNVERIFIED do { \
365 if (cfg->gsharedvt) { \
366 if (cfg->verbose_level > 2) \
367 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
368 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
369 goto exception_exit; \
371 if (mini_get_debug_options ()->break_on_unverified) \
377 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
379 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
381 #define GET_BBLOCK(cfg,tblock,ip) do { \
382 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
384 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
385 NEW_BBLOCK (cfg, (tblock)); \
386 (tblock)->cil_code = (ip); \
387 ADD_BBLOCK (cfg, (tblock)); \
391 #if defined(TARGET_X86) || defined(TARGET_AMD64)
392 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
393 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
394 (dest)->dreg = alloc_ireg_mp ((cfg)); \
395 (dest)->sreg1 = (sr1); \
396 (dest)->sreg2 = (sr2); \
397 (dest)->inst_imm = (imm); \
398 (dest)->backend.shift_amount = (shift); \
399 MONO_ADD_INS ((cfg)->cbb, (dest)); \
403 #if SIZEOF_REGISTER == 8
404 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
405 /* FIXME: Need to add many more cases */ \
406 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
408 int dr = alloc_preg (cfg); \
409 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
410 (ins)->sreg2 = widen->dreg; \
414 #define ADD_WIDEN_OP(ins, arg1, arg2)
417 #define ADD_BINOP(op) do { \
418 MONO_INST_NEW (cfg, ins, (op)); \
420 ins->sreg1 = sp [0]->dreg; \
421 ins->sreg2 = sp [1]->dreg; \
422 type_from_op (ins, sp [0], sp [1]); \
424 /* Have to insert a widening op */ \
425 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
426 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
427 MONO_ADD_INS ((cfg)->cbb, (ins)); \
428 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
431 #define ADD_UNOP(op) do { \
432 MONO_INST_NEW (cfg, ins, (op)); \
434 ins->sreg1 = sp [0]->dreg; \
435 type_from_op (ins, sp [0], NULL); \
437 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
438 MONO_ADD_INS ((cfg)->cbb, (ins)); \
439 *sp++ = mono_decompose_opcode (cfg, ins); \
442 #define ADD_BINCOND(next_block) do { \
445 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
446 cmp->sreg1 = sp [0]->dreg; \
447 cmp->sreg2 = sp [1]->dreg; \
448 type_from_op (cmp, sp [0], sp [1]); \
450 type_from_op (ins, sp [0], sp [1]); \
451 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
452 GET_BBLOCK (cfg, tblock, target); \
453 link_bblock (cfg, bblock, tblock); \
454 ins->inst_true_bb = tblock; \
455 if ((next_block)) { \
456 link_bblock (cfg, bblock, (next_block)); \
457 ins->inst_false_bb = (next_block); \
458 start_new_bblock = 1; \
460 GET_BBLOCK (cfg, tblock, ip); \
461 link_bblock (cfg, bblock, tblock); \
462 ins->inst_false_bb = tblock; \
463 start_new_bblock = 2; \
465 if (sp != stack_start) { \
466 handle_stack_args (cfg, stack_start, sp - stack_start); \
467 CHECK_UNVERIFIABLE (cfg); \
469 MONO_ADD_INS (bblock, cmp); \
470 MONO_ADD_INS (bblock, ins); \
474 * link_bblock: Links two basic blocks
476 * links two basic blocks in the control flow graph, the 'from'
477 * argument is the starting block and the 'to' argument is the block
478 * the control flow ends to after 'from'.
481 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
483 MonoBasicBlock
**newa
;
487 if (from
->cil_code
) {
489 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
491 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
494 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
496 printf ("edge from entry to exit\n");
501 for (i
= 0; i
< from
->out_count
; ++i
) {
502 if (to
== from
->out_bb
[i
]) {
508 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
509 for (i
= 0; i
< from
->out_count
; ++i
) {
510 newa
[i
] = from
->out_bb
[i
];
518 for (i
= 0; i
< to
->in_count
; ++i
) {
519 if (from
== to
->in_bb
[i
]) {
525 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
526 for (i
= 0; i
< to
->in_count
; ++i
) {
527 newa
[i
] = to
->in_bb
[i
];
/*
 * mono_link_bblock:
 *
 *   Exported entry point that forwards to link_bblock (), adding a
 * control-flow edge from FROM to TO in CFG's flow graph.
 */
536 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
538 link_bblock (cfg
, from
, to
);
542 * mono_find_block_region:
544 * We mark each basic block with a region ID. We use that to avoid BB
545 * optimizations when blocks are in different regions.
548 * A region token that encodes where this region is, and information
549 * about the clause owner for this block.
551 * The region encodes the try/catch/filter clause that owns this block
552 * as well as the type. -1 is a special value that represents a block
553 * that is in none of try/catch/filter.
556 mono_find_block_region (MonoCompile
*cfg
, int offset
)
558 MonoMethodHeader
*header
= cfg
->header
;
559 MonoExceptionClause
*clause
;
562 for (i
= 0; i
< header
->num_clauses
; ++i
) {
563 clause
= &header
->clauses
[i
];
564 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
565 (offset
< (clause
->handler_offset
)))
566 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
568 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
569 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
570 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
571 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
572 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
574 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
577 if (MONO_OFFSET_IN_CLAUSE (clause
, offset
))
578 return ((i
+ 1) << 8) | clause
->flags
;
585 mono_find_final_block (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *target
, int type
)
587 MonoMethodHeader
*header
= cfg
->header
;
588 MonoExceptionClause
*clause
;
592 for (i
= 0; i
< header
->num_clauses
; ++i
) {
593 clause
= &header
->clauses
[i
];
594 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
595 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
596 if (clause
->flags
== type
)
597 res
= g_list_append (res
, clause
);
604 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
608 var
= g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
612 var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
613 /* prevent it from being register allocated */
614 var
->flags
|= MONO_INST_INDIRECT
;
616 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
/*
 * mono_find_exvar_for_offset:
 *
 *   Return the exception variable previously registered for IL offset
 * OFFSET in cfg->exvars, or NULL if none was registered
 * (g_hash_table_lookup returns NULL for a missing key).
 */
620 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
622 return g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
626 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
630 var
= g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
634 var
= mono_compile_create_var (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
);
635 /* prevent it from being register allocated */
636 var
->flags
|= MONO_INST_INDIRECT
;
638 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
644 * Returns the type used in the eval stack when @type is loaded.
645 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
648 type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
652 inst
->klass
= klass
= mono_class_from_mono_type (type
);
654 inst
->type
= STACK_MP
;
659 switch (type
->type
) {
661 inst
->type
= STACK_INV
;
665 case MONO_TYPE_BOOLEAN
:
671 inst
->type
= STACK_I4
;
676 case MONO_TYPE_FNPTR
:
677 inst
->type
= STACK_PTR
;
679 case MONO_TYPE_CLASS
:
680 case MONO_TYPE_STRING
:
681 case MONO_TYPE_OBJECT
:
682 case MONO_TYPE_SZARRAY
:
683 case MONO_TYPE_ARRAY
:
684 inst
->type
= STACK_OBJ
;
688 inst
->type
= STACK_I8
;
692 inst
->type
= STACK_R8
;
694 case MONO_TYPE_VALUETYPE
:
695 if (type
->data
.klass
->enumtype
) {
696 type
= mono_class_enum_basetype (type
->data
.klass
);
700 inst
->type
= STACK_VTYPE
;
703 case MONO_TYPE_TYPEDBYREF
:
704 inst
->klass
= mono_defaults
.typed_reference_class
;
705 inst
->type
= STACK_VTYPE
;
707 case MONO_TYPE_GENERICINST
:
708 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
712 g_assert (cfg
->generic_sharing_context
);
713 if (mini_is_gsharedvt_type (cfg
, type
)) {
714 g_assert (cfg
->gsharedvt
);
715 inst
->type
= STACK_VTYPE
;
717 inst
->type
= STACK_OBJ
;
721 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
726 * The following tables are used to quickly validate the IL code in type_from_op ().
729 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
730 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
731 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
732 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
733 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
734 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
735 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
736 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
737 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
742 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
745 /* reduce the size of this table */
747 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
748 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
749 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
750 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
751 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
752 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
753 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
754 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
755 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
759 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
760 /* Inv i L p F & O vt */
762 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
763 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
764 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
765 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
766 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
767 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
768 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
771 /* reduce the size of this table */
773 shift_table
[STACK_MAX
] [STACK_MAX
] = {
774 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
775 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
776 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
777 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
778 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
779 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
780 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
781 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
785 * Tables to map from the non-specific opcode to the matching
786 * type-specific opcode.
788 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
790 binops_op_map
[STACK_MAX
] = {
791 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
794 /* handles from CEE_NEG to CEE_CONV_U8 */
796 unops_op_map
[STACK_MAX
] = {
797 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
800 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
802 ovfops_op_map
[STACK_MAX
] = {
803 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
806 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
808 ovf2ops_op_map
[STACK_MAX
] = {
809 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
812 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
814 ovf3ops_op_map
[STACK_MAX
] = {
815 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
818 /* handles from CEE_BEQ to CEE_BLT_UN */
820 beqops_op_map
[STACK_MAX
] = {
821 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
824 /* handles from CEE_CEQ to CEE_CLT_UN */
826 ceqops_op_map
[STACK_MAX
] = {
827 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
831 * Sets ins->type (the type on the eval stack) according to the
832 * type of the opcode and the arguments to it.
833 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
835 * FIXME: this function sets ins->type unconditionally in some cases, but
836 * it should set it to invalid for some types (a conv.x on an object)
839 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
841 switch (ins
->opcode
) {
848 /* FIXME: check unverifiable args for STACK_MP */
849 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
850 ins
->opcode
+= binops_op_map
[ins
->type
];
857 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
858 ins
->opcode
+= binops_op_map
[ins
->type
];
863 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
864 ins
->opcode
+= binops_op_map
[ins
->type
];
869 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
870 if ((src1
->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
871 ins
->opcode
= OP_LCOMPARE
;
872 else if (src1
->type
== STACK_R8
)
873 ins
->opcode
= OP_FCOMPARE
;
875 ins
->opcode
= OP_ICOMPARE
;
877 case OP_ICOMPARE_IMM
:
878 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
879 if ((src1
->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
880 ins
->opcode
= OP_LCOMPARE_IMM
;
892 ins
->opcode
+= beqops_op_map
[src1
->type
];
895 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
896 ins
->opcode
+= ceqops_op_map
[src1
->type
];
902 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
903 ins
->opcode
+= ceqops_op_map
[src1
->type
];
907 ins
->type
= neg_table
[src1
->type
];
908 ins
->opcode
+= unops_op_map
[ins
->type
];
911 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
912 ins
->type
= src1
->type
;
914 ins
->type
= STACK_INV
;
915 ins
->opcode
+= unops_op_map
[ins
->type
];
921 ins
->type
= STACK_I4
;
922 ins
->opcode
+= unops_op_map
[src1
->type
];
925 ins
->type
= STACK_R8
;
926 switch (src1
->type
) {
929 ins
->opcode
= OP_ICONV_TO_R_UN
;
932 ins
->opcode
= OP_LCONV_TO_R_UN
;
936 case CEE_CONV_OVF_I1
:
937 case CEE_CONV_OVF_U1
:
938 case CEE_CONV_OVF_I2
:
939 case CEE_CONV_OVF_U2
:
940 case CEE_CONV_OVF_I4
:
941 case CEE_CONV_OVF_U4
:
942 ins
->type
= STACK_I4
;
943 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
945 case CEE_CONV_OVF_I_UN
:
946 case CEE_CONV_OVF_U_UN
:
947 ins
->type
= STACK_PTR
;
948 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
950 case CEE_CONV_OVF_I1_UN
:
951 case CEE_CONV_OVF_I2_UN
:
952 case CEE_CONV_OVF_I4_UN
:
953 case CEE_CONV_OVF_U1_UN
:
954 case CEE_CONV_OVF_U2_UN
:
955 case CEE_CONV_OVF_U4_UN
:
956 ins
->type
= STACK_I4
;
957 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
960 ins
->type
= STACK_PTR
;
961 switch (src1
->type
) {
963 ins
->opcode
= OP_ICONV_TO_U
;
967 #if SIZEOF_VOID_P == 8
968 ins
->opcode
= OP_LCONV_TO_U
;
970 ins
->opcode
= OP_MOVE
;
974 ins
->opcode
= OP_LCONV_TO_U
;
977 ins
->opcode
= OP_FCONV_TO_U
;
983 ins
->type
= STACK_I8
;
984 ins
->opcode
+= unops_op_map
[src1
->type
];
986 case CEE_CONV_OVF_I8
:
987 case CEE_CONV_OVF_U8
:
988 ins
->type
= STACK_I8
;
989 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
991 case CEE_CONV_OVF_U8_UN
:
992 case CEE_CONV_OVF_I8_UN
:
993 ins
->type
= STACK_I8
;
994 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
998 ins
->type
= STACK_R8
;
999 ins
->opcode
+= unops_op_map
[src1
->type
];
1002 ins
->type
= STACK_R8
;
1006 ins
->type
= STACK_I4
;
1007 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1010 case CEE_CONV_OVF_I
:
1011 case CEE_CONV_OVF_U
:
1012 ins
->type
= STACK_PTR
;
1013 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1016 case CEE_ADD_OVF_UN
:
1018 case CEE_MUL_OVF_UN
:
1020 case CEE_SUB_OVF_UN
:
1021 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
1022 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1023 if (ins
->type
== STACK_R8
)
1024 ins
->type
= STACK_INV
;
1026 case OP_LOAD_MEMBASE
:
1027 ins
->type
= STACK_PTR
;
1029 case OP_LOADI1_MEMBASE
:
1030 case OP_LOADU1_MEMBASE
:
1031 case OP_LOADI2_MEMBASE
:
1032 case OP_LOADU2_MEMBASE
:
1033 case OP_LOADI4_MEMBASE
:
1034 case OP_LOADU4_MEMBASE
:
1035 ins
->type
= STACK_PTR
;
1037 case OP_LOADI8_MEMBASE
:
1038 ins
->type
= STACK_I8
;
1040 case OP_LOADR4_MEMBASE
:
1041 case OP_LOADR8_MEMBASE
:
1042 ins
->type
= STACK_R8
;
1045 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
1049 if (ins
->type
== STACK_MP
)
1050 ins
->klass
= mono_defaults
.object_class
;
1055 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
1061 param_table
[STACK_MAX
] [STACK_MAX
] = {
1066 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
1070 switch (args
->type
) {
1080 for (i
= 0; i
< sig
->param_count
; ++i
) {
1081 switch (args
[i
].type
) {
1085 if (!sig
->params
[i
]->byref
)
1089 if (sig
->params
[i
]->byref
)
1091 switch (sig
->params
[i
]->type
) {
1092 case MONO_TYPE_CLASS
:
1093 case MONO_TYPE_STRING
:
1094 case MONO_TYPE_OBJECT
:
1095 case MONO_TYPE_SZARRAY
:
1096 case MONO_TYPE_ARRAY
:
1103 if (sig
->params
[i
]->byref
)
1105 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1114 /*if (!param_table [args [i].type] [sig->params [i]->type])
1122 * When we need a pointer to the current domain many times in a method, we
1123 * call mono_domain_get() once and we store the result in a local variable.
1124 * This function returns the variable that represents the MonoDomain*.
/*
 * mono_get_domainvar:
 *
 *   Return the local variable that caches the MonoDomain* for this method,
 * creating it on first use.  The var is created with pointer-sized
 * (int_class) type as an OP_LOCAL.
 */
1126 inline static MonoInst
*
1127 mono_get_domainvar (MonoCompile
*cfg
)
1129 if (!cfg
->domainvar
)
1130 cfg
->domainvar
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1131 return cfg
->domainvar
;
1135 * The got_var contains the address of the Global Offset Table when AOT
1139 mono_get_got_var (MonoCompile
*cfg
)
1141 #ifdef MONO_ARCH_NEED_GOT_VAR
1142 if (!cfg
->compile_aot
)
1144 if (!cfg
->got_var
) {
1145 cfg
->got_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1147 return cfg
->got_var
;
/*
 * mono_get_vtable_var:
 *
 *   Return cfg->rgctx_var (the variable holding the runtime generic
 * context / vtable), creating it lazily.  Only valid under generic
 * sharing, which is asserted.  The var is marked MONO_INST_INDIRECT so
 * it is stack allocated rather than register allocated.
 */
1154 mono_get_vtable_var (MonoCompile
*cfg
)
1156 g_assert (cfg
->generic_sharing_context
);
1158 if (!cfg
->rgctx_var
) {
1159 cfg
->rgctx_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1160 /* force the var to be stack allocated */
1161 cfg
->rgctx_var
->flags
|= MONO_INST_INDIRECT
;
1164 return cfg
->rgctx_var
;
1168 type_from_stack_type (MonoInst
*ins
) {
1169 switch (ins
->type
) {
1170 case STACK_I4
: return &mono_defaults
.int32_class
->byval_arg
;
1171 case STACK_I8
: return &mono_defaults
.int64_class
->byval_arg
;
1172 case STACK_PTR
: return &mono_defaults
.int_class
->byval_arg
;
1173 case STACK_R8
: return &mono_defaults
.double_class
->byval_arg
;
1175 return &ins
->klass
->this_arg
;
1176 case STACK_OBJ
: return &mono_defaults
.object_class
->byval_arg
;
1177 case STACK_VTYPE
: return &ins
->klass
->byval_arg
;
1179 g_error ("stack type %d to monotype not handled\n", ins
->type
);
1184 static G_GNUC_UNUSED
int
1185 type_to_stack_type (MonoType
*t
)
1187 t
= mono_type_get_underlying_type (t
);
1191 case MONO_TYPE_BOOLEAN
:
1194 case MONO_TYPE_CHAR
:
1201 case MONO_TYPE_FNPTR
:
1203 case MONO_TYPE_CLASS
:
1204 case MONO_TYPE_STRING
:
1205 case MONO_TYPE_OBJECT
:
1206 case MONO_TYPE_SZARRAY
:
1207 case MONO_TYPE_ARRAY
:
1215 case MONO_TYPE_VALUETYPE
:
1216 case MONO_TYPE_TYPEDBYREF
:
1218 case MONO_TYPE_GENERICINST
:
1219 if (mono_type_generic_inst_is_valuetype (t
))
1225 g_assert_not_reached ();
1232 array_access_to_klass (int opcode
)
1236 return mono_defaults
.byte_class
;
1238 return mono_defaults
.uint16_class
;
1241 return mono_defaults
.int_class
;
1244 return mono_defaults
.sbyte_class
;
1247 return mono_defaults
.int16_class
;
1250 return mono_defaults
.int32_class
;
1252 return mono_defaults
.uint32_class
;
1255 return mono_defaults
.int64_class
;
1258 return mono_defaults
.single_class
;
1261 return mono_defaults
.double_class
;
1262 case CEE_LDELEM_REF
:
1263 case CEE_STELEM_REF
:
1264 return mono_defaults
.object_class
;
1266 g_assert_not_reached ();
1272 * We try to share variables when possible
1275 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1280 /* inlining can result in deeper stacks */
1281 if (slot
>= cfg
->header
->max_stack
)
1282 return mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1284 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1286 switch (ins
->type
) {
1293 if ((vnum
= cfg
->intvars
[pos
]))
1294 return cfg
->varinfo
[vnum
];
1295 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1296 cfg
->intvars
[pos
] = res
->inst_c0
;
1299 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
/*
 * mono_save_token_info:
 *
 *   When compiling AOT, record the (IMAGE, TOKEN) pair under KEY in
 * cfg->token_info_hash so the metadata token can be recovered later from
 * KEY alone.  Skipped when a generic context is set or when the token's
 * table is 0 (see the inline comment below for why).
 */
1305 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1308 * Don't use this if a generic_context is set, since that means AOT can't
1309 * look up the method using just the image+token.
1310 * table == 0 means this is a reference made from a wrapper.
1312 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
1313 MonoJumpInfoToken
*jump_info_token
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1314 jump_info_token
->image
= image
;
1315 jump_info_token
->token
= token
;
1316 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1321 * This function is called to handle items that are left on the evaluation stack
1322 * at basic block boundaries. What happens is that we save the values to local variables
1323 * and we reload them later when first entering the target basic block (with the
1324 * handle_loaded_temps () function).
1325 * A single joint point will use the same variables (stored in the array bb->out_stack or
1326 * bb->in_stack, if the basic block is before or after the joint point).
1328 * This function needs to be called _before_ emitting the last instruction of
1329 * the bb (i.e. before emitting a branch).
1330 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args: spill the 'count' evaluation-stack items in 'sp' to local
 * variables at the end of the current bblock (cfg->cbb), so successor blocks
 * can reload them (see the comment block above this function in the file).
 * Must be called before the bblock's final branch is emitted. Sets
 * cfg->unverifiable when successor blocks disagree on stack depth.
 * NOTE(review): several interior lines (braces, 'continue'/'break's, some
 * declarations such as 'i' and 'bindex') are missing from this extraction;
 * code below is kept byte-identical.
 */
1333 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1336 MonoBasicBlock
*bb
= cfg
->cbb
;
1337 MonoBasicBlock
*outb
;
1338 MonoInst
*inst
, **locals
;
1343 if (cfg
->verbose_level
> 3)
1344 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
/* first time we leave this bblock with values on the stack: pick out_stack */
1345 if (!bb
->out_scount
) {
1346 bb
->out_scount
= count
;
1347 //printf ("bblock %d has out:", bb->block_num);
/* prefer sharing an existing successor's in_stack as our out_stack */
1349 for (i
= 0; i
< bb
->out_count
; ++i
) {
1350 outb
= bb
->out_bb
[i
];
1351 /* exception handlers are linked, but they should not be considered for stack args */
1352 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1354 //printf (" %d", outb->block_num);
1355 if (outb
->in_stack
) {
1357 bb
->out_stack
= outb
->in_stack
;
/* no successor had an in_stack: allocate fresh spill variables */
1363 bb
->out_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1364 for (i
= 0; i
< count
; ++i
) {
1366 * try to reuse temps already allocated for this purpouse, if they occupy the same
1367 * stack slot and if they are of the same type.
1368 * This won't cause conflicts since if 'local' is used to
1369 * store one of the values in the in_stack of a bblock, then
1370 * the same variable will be used for the same outgoing stack
1372 * This doesn't work when inlining methods, since the bblocks
1373 * in the inlined methods do not inherit their in_stack from
1374 * the bblock they are inlined to. See bug #58863 for an
1377 if (cfg
->inlined_method
)
1378 bb
->out_stack
[i
] = mono_compile_create_var (cfg
, type_from_stack_type (sp
[i
]), OP_LOCAL
);
1380 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
/* propagate our out_stack to successors that have no in_stack yet,
 * and flag a stack-depth mismatch as unverifiable IL */
1385 for (i
= 0; i
< bb
->out_count
; ++i
) {
1386 outb
= bb
->out_bb
[i
];
1387 /* exception handlers are linked, but they should not be considered for stack args */
1388 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1390 if (outb
->in_scount
) {
1391 if (outb
->in_scount
!= bb
->out_scount
) {
1392 cfg
->unverifiable
= TRUE
;
1395 continue; /* check they are the same locals */
1397 outb
->in_scount
= count
;
1398 outb
->in_stack
= bb
->out_stack
;
1401 locals
= bb
->out_stack
;
/* emit the stores; sp[] entries are replaced by the spill locals */
1403 for (i
= 0; i
< count
; ++i
) {
1404 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1405 inst
->cil_code
= sp
[i
]->cil_code
;
1406 sp
[i
] = locals
[i
];
1407 if (cfg
->verbose_level
> 3)
1408 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1412 * It is possible that the out bblocks already have in_stack assigned, and
1413 * the in_stacks differ. In this case, we will store to all the different
1420 /* Find a bblock which has a different in_stack */
1422 while (bindex
< bb
->out_count
) {
1423 outb
= bb
->out_bb
[bindex
];
1424 /* exception handlers are linked, but they should not be considered for stack args */
1425 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1429 if (outb
->in_stack
!= locals
) {
/* duplicate the stores into this successor's distinct in_stack vars */
1430 for (i
= 0; i
< count
; ++i
) {
1431 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1432 inst
->cil_code
= sp
[i
]->cil_code
;
1433 sp
[i
] = locals
[i
];
1434 if (cfg
->verbose_level
> 3)
1435 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1437 locals
= outb
->in_stack
;
1446 /* Emit code which loads interface_offsets [klass->interface_id]
1447 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable: emit IR that loads
 * interface_offsets [klass->interface_id] into intf_reg. The offsets array is
 * laid out immediately before the vtable in memory (see comment above this
 * function in the file), hence the negative offset in the non-AOT path. Under
 * AOT the interface id is not known at compile time, so a patched
 * ADJUSTED_IID constant is added to the vtable pointer instead.
 */
1450 mini_emit_load_intf_reg_vtable (MonoCompile
*cfg
, int intf_reg
, int vtable_reg
, MonoClass
*klass
)
1452 if (cfg
->compile_aot
) {
1453 int ioffset_reg
= alloc_preg (cfg
);
1454 int iid_reg
= alloc_preg (cfg
);
/* ADJUSTED_IID: patched at AOT load time to the byte offset before the vtable */
1456 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_ADJUSTED_IID
);
1457 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ioffset_reg
, iid_reg
, vtable_reg
);
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, ioffset_reg
, 0);
/* JIT path: interface_id is a compile-time constant */
1461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, vtable_reg
, -((klass
->interface_id
+ 1) * SIZEOF_VOID_P
));
/*
 * mini_emit_interface_bitmap_check: emit IR that sets intf_bit_reg to a
 * nonzero value iff the interface bitmap found at [base_reg + offset]
 * has the bit for klass's interface id set.
 * Two compiled-in strategies:
 *   - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match icall.
 *   - plain bitmap: load the byte at (iid >> 3) and test bit (iid & 7);
 *     under AOT the iid is a patched constant, so the shift/mask is computed
 *     in emitted code rather than at JIT time.
 * NOTE(review): the #else/#endif lines and the args[] declaration are missing
 * from this extraction.
 */
1466 mini_emit_interface_bitmap_check (MonoCompile
*cfg
, int intf_bit_reg
, int base_reg
, int offset
, MonoClass
*klass
)
1468 int ibitmap_reg
= alloc_preg (cfg
);
1469 #ifdef COMPRESSED_INTERFACE_BITMAP
1471 MonoInst
*res
, *ins
;
1472 NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, ibitmap_reg
, base_reg
, offset
);
1473 MONO_ADD_INS (cfg
->cbb
, ins
);
1475 if (cfg
->compile_aot
)
1476 EMIT_NEW_AOTCONST (cfg
, args
[1], MONO_PATCH_INFO_IID
, klass
);
1478 EMIT_NEW_ICONST (cfg
, args
[1], klass
->interface_id
);
/* runtime helper decodes the compressed bitmap */
1479 res
= mono_emit_jit_icall (cfg
, mono_class_interface_match
, args
);
1480 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, intf_bit_reg
, res
->dreg
);
1482 int ibitmap_byte_reg
= alloc_preg (cfg
);
1484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, base_reg
, offset
);
1486 if (cfg
->compile_aot
) {
/* iid unknown until load time: compute byte index and bit mask in IR */
1487 int iid_reg
= alloc_preg (cfg
);
1488 int shifted_iid_reg
= alloc_preg (cfg
);
1489 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1490 int masked_iid_reg
= alloc_preg (cfg
);
1491 int iid_one_bit_reg
= alloc_preg (cfg
);
1492 int iid_bit_reg
= alloc_preg (cfg
);
1493 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
/* byte index = iid >> 3 */
1494 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1495 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
/* bit mask = 1 << (iid & 7) */
1497 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, masked_iid_reg
, iid_reg
, 7);
1498 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1499 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1500 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
/* JIT path: byte index and bit mask folded at compile time */
1502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1503 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1509 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1510 * stored in "klass_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_class: wrapper over the bitmap check using the
 * interface_bitmap stored on a MonoClass (klass_reg points at a MonoClass).
 */
1513 mini_emit_load_intf_bit_reg_class (MonoCompile
*cfg
, int intf_bit_reg
, int klass_reg
, MonoClass
*klass
)
1515 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, interface_bitmap
), klass
);
1519 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1520 * stored in "vtable_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_vtable: wrapper over the bitmap check using the
 * interface_bitmap stored on a MonoVTable (vtable_reg points at a MonoVTable).
 */
1523 mini_emit_load_intf_bit_reg_vtable (MonoCompile
*cfg
, int intf_bit_reg
, int vtable_reg
, MonoClass
*klass
)
1525 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, interface_bitmap
), klass
);
1529 * Emit code which checks whenever the interface id of @klass is smaller than
1530 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check: emit a range check — compare max_iid_reg against
 * klass's interface id (an AOT patch constant when compiling AOT). On
 * failure either branch to false_target (if given) or throw
 * InvalidCastException.
 * NOTE(review): the branch selecting between the branch and the throw
 * (presumably `if (false_target)`) is among the lines dropped by this
 * extraction.
 */
1533 mini_emit_max_iid_check (MonoCompile
*cfg
, int max_iid_reg
, MonoClass
*klass
,
1534 MonoBasicBlock
*false_target
)
1536 if (cfg
->compile_aot
) {
1537 int iid_reg
= alloc_preg (cfg
);
1538 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1539 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, max_iid_reg
, iid_reg
);
1542 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, max_iid_reg
, klass
->interface_id
);
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1546 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1549 /* Same as above, but obtains max_iid from a vtable */
/*
 * mini_emit_max_iid_check_vtable: load max_interface_id from the MonoVTable
 * in vtable_reg, then perform the max-iid range check against klass.
 */
1551 mini_emit_max_iid_check_vtable (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
,
1552 MonoBasicBlock
*false_target
)
1554 int max_iid_reg
= alloc_preg (cfg
);
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, max_interface_id
));
1557 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1560 /* Same as above, but obtains max_iid from a klass */
/*
 * mini_emit_max_iid_check_class: load max_interface_id from the MonoClass in
 * klass_reg, then perform the max-iid range check against klass.
 */
1562 mini_emit_max_iid_check_class (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
,
1563 MonoBasicBlock
*false_target
)
1565 int max_iid_reg
= alloc_preg (cfg
);
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, max_interface_id
));
1568 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
/*
 * mini_emit_isninst_cast_inst: emit the "isinst" subtype test for a
 * non-interface class: walk klass->supertypes at depth (idepth - 1) in the
 * runtime class stored in klass_reg, compare the found supertype against
 * klass (as a runtime MonoInst, an AOT class constant, or an immediate), and
 * branch to true_target on match / false_target on a too-shallow idepth.
 * NOTE(review): the conditional that chooses the klass_ins comparison
 * (presumably `if (klass_ins)`) and the deep-supertable branch are among the
 * lines dropped by this extraction.
 */
1572 mini_emit_isninst_cast_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_ins
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1574 int idepth_reg
= alloc_preg (cfg
);
1575 int stypes_reg
= alloc_preg (cfg
);
1576 int stype
= alloc_preg (cfg
);
/* make sure klass->supertypes/idepth are initialized before we read them */
1578 mono_class_setup_supertypes (klass
);
1580 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
/* deep hierarchy: must verify the candidate's idepth covers klass's depth */
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1588 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, klass_ins
->dreg
);
1589 } else if (cfg
->compile_aot
) {
1590 int const_reg
= alloc_preg (cfg
);
1591 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1592 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, const_reg
);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, stype
, klass
);
1596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, true_target
);
/*
 * mini_emit_isninst_cast: convenience wrapper — isinst test without a
 * runtime klass instruction (klass_ins == NULL).
 */
1600 mini_emit_isninst_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1602 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, NULL
, false_target
, true_target
);
/*
 * mini_emit_iface_cast: emit an interface cast test against the MonoVTable in
 * vtable_reg: range-check the interface id, test the interface bitmap bit,
 * then either branch to true_target/false_target or throw
 * InvalidCastException when no targets are given.
 */
1606 mini_emit_iface_cast (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1608 int intf_reg
= alloc_preg (cfg
);
1610 mini_emit_max_iid_check_vtable (cfg
, vtable_reg
, klass
, false_target
);
1611 mini_emit_load_intf_bit_reg_vtable (cfg
, intf_reg
, vtable_reg
, klass
);
/* nonzero bitmap bit => the interface is implemented */
1612 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_reg
, 0);
1614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1616 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1620 * Variant of the above that takes a register to the class, not the vtable.
/*
 * mini_emit_iface_class_cast: same interface cast test as above, but the
 * register holds a MonoClass rather than a MonoVTable (see file comment).
 */
1623 mini_emit_iface_class_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1625 int intf_bit_reg
= alloc_preg (cfg
);
1627 mini_emit_max_iid_check_class (cfg
, klass_reg
, klass
, false_target
);
1628 mini_emit_load_intf_bit_reg_class (cfg
, intf_bit_reg
, klass_reg
, klass
);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_bit_reg
, 0);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1633 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
/*
 * mini_emit_class_check_inst: emit an exact class-identity check — compare
 * the MonoClass in klass_reg against klass (as a runtime MonoInst, an AOT
 * class constant, or an immediate) and throw InvalidCastException on
 * mismatch.
 * NOTE(review): the opening `if (klass_inst)` line appears to have been
 * dropped by this extraction (the `} else if` below implies it).
 */
1637 mini_emit_class_check_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
)
1640 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_inst
->dreg
);
1641 } else if (cfg
->compile_aot
) {
1642 int const_reg
= alloc_preg (cfg
);
1643 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1644 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1646 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1648 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
/*
 * mini_emit_class_check: convenience wrapper — exact class check with no
 * runtime klass instruction (klass_inst == NULL).
 */
1652 mini_emit_class_check (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
)
1654 mini_emit_class_check_inst (cfg
, klass_reg
, klass
, NULL
);
/*
 * mini_emit_class_check_branch: compare the MonoClass in klass_reg against
 * klass (AOT class constant or immediate) and branch to 'target' using the
 * caller-supplied branch opcode (e.g. OP_PBEQ / OP_PBNE_UN).
 */
1658 mini_emit_class_check_branch (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, int branch_op
, MonoBasicBlock
*target
)
1660 if (cfg
->compile_aot
) {
1661 int const_reg
= alloc_preg (cfg
);
1662 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1663 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1665 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1667 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, branch_op
, target
);
1671 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
);
/*
 * mini_emit_castclass_inst: emit the "castclass" check for the object in
 * obj_reg whose MonoClass is in klass_reg; throws InvalidCastException on
 * failure, branches to object_is_null for the null/early-accept paths.
 * Visible here: the array branch (rank + element-class checks, including the
 * special enum/object element cases and the SZARRAY "is really a vector"
 * bounds check) and the plain-class supertype-walk branch.
 * NOTE(review): the conditional selecting array vs. non-array (presumably on
 * klass->rank) and several closing braces are among the lines dropped by
 * this extraction.
 */
1674 mini_emit_castclass_inst (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
, MonoBasicBlock
*object_is_null
)
1677 int rank_reg
= alloc_preg (cfg
);
1678 int eclass_reg
= alloc_preg (cfg
);
1680 g_assert (!klass_inst
);
/* array path: ranks must match exactly */
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, rank
));
1682 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
1683 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1684 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
1686 if (klass
->cast_class
== mono_defaults
.object_class
) {
/* object[] accepts any reference element; only reject enums-as-values */
1687 int parent_reg
= alloc_preg (cfg
);
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
1689 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, object_is_null
);
1690 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1691 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
1692 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, object_is_null
);
1693 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1694 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
1695 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1696 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
1697 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, NULL
, NULL
);
1699 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1700 mini_emit_castclass (cfg
, -1, eclass_reg
, klass
->cast_class
, object_is_null
);
1703 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
) && (obj_reg
!= -1)) {
1704 /* Check that the object is a vector too */
1705 int bounds_reg
= alloc_preg (cfg
);
1706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
1707 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
1708 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
/* non-array path: check klass appears in the candidate's supertypes table */
1711 int idepth_reg
= alloc_preg (cfg
);
1712 int stypes_reg
= alloc_preg (cfg
);
1713 int stype
= alloc_preg (cfg
);
1715 mono_class_setup_supertypes (klass
);
1717 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1719 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1720 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1724 mini_emit_class_check_inst (cfg
, stype
, klass
, klass_inst
);
/*
 * mini_emit_castclass: convenience wrapper — castclass check with no runtime
 * klass instruction (klass_inst == NULL).
 */
1729 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
)
1731 mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, NULL
, object_is_null
);
/*
 * mini_emit_memset: emit inline IR that zeroes 'size' bytes at
 * [destreg + offset] (val is asserted to be 0). Small aligned sizes use a
 * single immediate store; larger sizes materialize val in a register and emit
 * a sequence of widest-possible stores, falling back to byte stores when
 * unaligned access is not allowed.
 * NOTE(review): the switch headers, loop constructs, and offset/size updates
 * between the stores are among the lines dropped by this extraction.
 */
1735 mini_emit_memset (MonoCompile
*cfg
, int destreg
, int offset
, int size
, int val
, int align
)
1739 g_assert (val
== 0);
/* fast path: one immediate store when the size fits the alignment */
1744 if ((size
<= 4) && (size
<= align
)) {
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, destreg
, offset
, val
);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI2_MEMBASE_IMM
, destreg
, offset
, val
);
1753 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI4_MEMBASE_IMM
, destreg
, offset
, val
);
1755 #if SIZEOF_REGISTER == 8
1757 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI8_MEMBASE_IMM
, destreg
, offset
, val
);
/* general path: put val in a register sized to the machine word */
1763 val_reg
= alloc_preg (cfg
);
1765 if (SIZEOF_REGISTER
== 8)
1766 MONO_EMIT_NEW_I8CONST (cfg
, val_reg
, val
);
1768 MONO_EMIT_NEW_ICONST (cfg
, val_reg
, val
);
1771 /* This could be optimized further if neccesary */
1773 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1780 #if !NO_UNALIGNED_ACCESS
1781 if (SIZEOF_REGISTER
== 8) {
1783 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, offset
, val_reg
);
1796 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, val_reg
);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
/*
 * mini_emit_memcpy: emit inline IR copying 'size' bytes from
 * [srcreg + soffset] to [destreg + doffset]. Emits load/store pairs at the
 * widest width allowed by alignment (8/4/2 bytes where unaligned access is
 * permitted), finishing with byte copies. Size is capped by assertion to
 * bound code expansion.
 * NOTE(review): the loop constructs and offset/size updates between the
 * load/store pairs are among the lines dropped by this extraction.
 */
1813 mini_emit_memcpy (MonoCompile
*cfg
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
1820 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1821 g_assert (size
< 10000);
1824 /* This could be optimized further if neccesary */
/* unaligned/byte path: copy one byte at a time */
1826 cur_reg
= alloc_preg (cfg
);
1827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1835 #if !NO_UNALIGNED_ACCESS
1836 if (SIZEOF_REGISTER
== 8) {
/* 8-byte chunks on 64-bit targets */
1838 cur_reg
= alloc_preg (cfg
);
1839 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI8_MEMBASE
, cur_reg
, srcreg
, soffset
);
1840 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
/* 4-byte chunks */
1849 cur_reg
= alloc_preg (cfg
);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, cur_reg
, srcreg
, soffset
);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
/* 2-byte chunks */
1857 cur_reg
= alloc_preg (cfg
);
1858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, cur_reg
, srcreg
, soffset
);
1859 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
/* trailing bytes */
1865 cur_reg
= alloc_preg (cfg
);
1866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1867 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
/*
 * ret_type_to_call_opcode: select the JIT call opcode variant for a call
 * returning 'type', crossed with indirect (calli -> *_REG) and virtual
 * (virt -> *CALLVIRT) flavors: plain integer/pointer/object -> OP_CALL*,
 * 64-bit -> OP_LCALL*, float -> OP_FCALL*, valuetype/typedbyref -> OP_VCALL*,
 * void -> OP_VOIDCALL*. Generic parameters are first resolved through gsctx;
 * enums recurse on their basetype; GENERICINST recurses on the container
 * class. Unknown types abort via g_error.
 * NOTE(review): several case labels (e.g. the byref/I8/R4/R8 groups implied
 * by the LCALL/FCALL returns) are missing from this extraction.
 */
1875 ret_type_to_call_opcode (MonoType
*type
, int calli
, int virt
, MonoGenericSharingContext
*gsctx
)
1878 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1881 type
= mini_get_basic_type_from_generic (gsctx
, type
);
1882 switch (type
->type
) {
1883 case MONO_TYPE_VOID
:
1884 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALLVIRT
: OP_VOIDCALL
;
1887 case MONO_TYPE_BOOLEAN
:
1890 case MONO_TYPE_CHAR
:
1893 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1897 case MONO_TYPE_FNPTR
:
1898 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1899 case MONO_TYPE_CLASS
:
1900 case MONO_TYPE_STRING
:
1901 case MONO_TYPE_OBJECT
:
1902 case MONO_TYPE_SZARRAY
:
1903 case MONO_TYPE_ARRAY
:
1904 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1907 return calli
? OP_LCALL_REG
: virt
? OP_LCALLVIRT
: OP_LCALL
;
1910 return calli
? OP_FCALL_REG
: virt
? OP_FCALLVIRT
: OP_FCALL
;
1911 case MONO_TYPE_VALUETYPE
:
/* enums are treated as their underlying integral type */
1912 if (type
->data
.klass
->enumtype
) {
1913 type
= mono_class_enum_basetype (type
->data
.klass
);
1916 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1917 case MONO_TYPE_TYPEDBYREF
:
1918 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1919 case MONO_TYPE_GENERICINST
:
1920 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
1923 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
1929 * target_type_is_incompatible:
1930 * @cfg: MonoCompile context
1932 * Check that the item @arg on the evaluation stack can be stored
1933 * in the target type (can be a local, or field, etc).
1934 * The cfg arg can be used to check if we need verification or just
1937 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible: verify the evaluation-stack item 'arg' can be
 * stored into a slot of type 'target' (see the comment block above this
 * function in the file). Returns nonzero when incompatible. Checks only the
 * stack-type category (STACK_I4/I8/R8/PTR/MP/OBJ/VTYPE) plus exact klass
 * identity for valuetypes; several reference checks are left as FIXMEs.
 * NOTE(review): many case labels, 'return 0/1' lines, and the klass
 * declaration are missing from this extraction.
 */
1940 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1942 MonoType
*simple_type
;
1945 if (target
->byref
) {
1946 /* FIXME: check that the pointed to types match */
1947 if (arg
->type
== STACK_MP
)
1948 return arg
->klass
!= mono_class_from_mono_type (target
);
1949 if (arg
->type
== STACK_PTR
)
1954 simple_type
= mono_type_get_underlying_type (target
);
1955 switch (simple_type
->type
) {
1956 case MONO_TYPE_VOID
:
1960 case MONO_TYPE_BOOLEAN
:
1963 case MONO_TYPE_CHAR
:
1966 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1970 /* STACK_MP is needed when setting pinned locals */
1971 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1976 case MONO_TYPE_FNPTR
:
1978 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1979 * in native int. (#688008).
1981 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1984 case MONO_TYPE_CLASS
:
1985 case MONO_TYPE_STRING
:
1986 case MONO_TYPE_OBJECT
:
1987 case MONO_TYPE_SZARRAY
:
1988 case MONO_TYPE_ARRAY
:
1989 if (arg
->type
!= STACK_OBJ
)
1991 /* FIXME: check type compatibility */
1995 if (arg
->type
!= STACK_I8
)
2000 if (arg
->type
!= STACK_R8
)
2003 case MONO_TYPE_VALUETYPE
:
/* valuetypes additionally require exact klass identity */
2004 if (arg
->type
!= STACK_VTYPE
)
2006 klass
= mono_class_from_mono_type (simple_type
);
2007 if (klass
!= arg
->klass
)
2010 case MONO_TYPE_TYPEDBYREF
:
2011 if (arg
->type
!= STACK_VTYPE
)
2013 klass
= mono_class_from_mono_type (simple_type
);
2014 if (klass
!= arg
->klass
)
2017 case MONO_TYPE_GENERICINST
:
2018 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
2019 if (arg
->type
!= STACK_VTYPE
)
2021 klass
= mono_class_from_mono_type (simple_type
);
2022 if (klass
!= arg
->klass
)
2026 if (arg
->type
!= STACK_OBJ
)
2028 /* FIXME: check type compatibility */
2032 case MONO_TYPE_MVAR
:
/* open generic parameters only appear under generic sharing */
2033 g_assert (cfg
->generic_sharing_context
);
2034 if (mini_type_var_is_vt (cfg
, simple_type
)) {
2035 if (arg
->type
!= STACK_VTYPE
)
2038 if (arg
->type
!= STACK_OBJ
)
2043 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
2049 * Prepare arguments for passing to a function call.
2050 * Return a non-zero value if the arguments can't be passed to the given
2052 * The type checks are not yet complete and some conversions may need
2053 * casts on 32 or 64 bit architectures.
2055 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature: verify the stack-type categories of 'args' match the
 * parameter types of 'sig' (see the comment block above this function in the
 * file); returns nonzero when the arguments cannot be passed. args[0] is the
 * 'this' pointer when present (checked against OBJ/MP/PTR). Generic types
 * are resolved through the generic sharing context; enums use their
 * basetype; GENERICINST recurses on the container class.
 * NOTE(review): many case labels and 'return 1' lines are missing from this
 * extraction.
 */
2058 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
2060 MonoType
*simple_type
;
2064 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
2068 for (i
= 0; i
< sig
->param_count
; ++i
) {
2069 if (sig
->params
[i
]->byref
) {
2070 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
2074 simple_type
= sig
->params
[i
];
2075 simple_type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, simple_type
);
2077 switch (simple_type
->type
) {
2078 case MONO_TYPE_VOID
:
2083 case MONO_TYPE_BOOLEAN
:
2086 case MONO_TYPE_CHAR
:
2089 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
2095 case MONO_TYPE_FNPTR
:
2096 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
2099 case MONO_TYPE_CLASS
:
2100 case MONO_TYPE_STRING
:
2101 case MONO_TYPE_OBJECT
:
2102 case MONO_TYPE_SZARRAY
:
2103 case MONO_TYPE_ARRAY
:
2104 if (args
[i
]->type
!= STACK_OBJ
)
2109 if (args
[i
]->type
!= STACK_I8
)
2114 if (args
[i
]->type
!= STACK_R8
)
2117 case MONO_TYPE_VALUETYPE
:
2118 if (simple_type
->data
.klass
->enumtype
) {
2119 simple_type
= mono_class_enum_basetype (simple_type
->data
.klass
);
2122 if (args
[i
]->type
!= STACK_VTYPE
)
2125 case MONO_TYPE_TYPEDBYREF
:
2126 if (args
[i
]->type
!= STACK_VTYPE
)
2129 case MONO_TYPE_GENERICINST
:
2130 simple_type
= &simple_type
->data
.generic_class
->container_class
->byval_arg
;
2134 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call: map a *CALLVIRT opcode to its direct *CALL counterpart;
 * unhandled opcodes assert.
 * NOTE(review): the switch body is almost entirely missing from this
 * extraction — only the OP_VOIDCALLVIRT label survives.
 */
2142 callvirt_to_call (int opcode
)
2147 case OP_VOIDCALLVIRT
:
2156 g_assert_not_reached ();
/*
 * callvirt_to_call_membase: map a *CALLVIRT opcode to the corresponding
 * *CALL_MEMBASE opcode (call through a vtable-slot load); unhandled opcodes
 * assert. NOTE(review): the case labels other than OP_VOIDCALLVIRT are
 * missing from this extraction.
 */
2163 callvirt_to_call_membase (int opcode
)
2167 return OP_CALL_MEMBASE
;
2168 case OP_VOIDCALLVIRT
:
2169 return OP_VOIDCALL_MEMBASE
;
2171 return OP_FCALL_MEMBASE
;
2173 return OP_LCALL_MEMBASE
;
2175 return OP_VCALL_MEMBASE
;
2177 g_assert_not_reached ();
2183 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument: materialize the IMT argument (the interface method, or
 * imt_arg when supplied) into a register and attach it to 'call'. Two paths:
 * under LLVM the register is recorded in call->imt_arg_reg (and pinned to
 * MONO_ARCH_IMT_REG when the arch defines one, else kept alive via reg 0);
 * otherwise the register is bound to MONO_ARCH_IMT_REG directly, or the
 * arch-specific mono_arch_emit_imt_argument hook is used. The method pointer
 * comes from imt_arg->dreg, an AOT METHODCONST, or an OP_PCONST.
 * NOTE(review): the enclosing if/else and #else/#endif lines are among the
 * lines dropped by this extraction.
 */
2185 emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoMethod
*method
, MonoInst
*imt_arg
)
2189 if (COMPILE_LLVM (cfg
)) {
2190 method_reg
= alloc_preg (cfg
);
2193 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
2194 } else if (cfg
->compile_aot
) {
2195 MONO_EMIT_NEW_AOTCONST (cfg
, method_reg
, method
, MONO_PATCH_INFO_METHODCONST
);
2198 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2199 ins
->inst_p0
= method
;
2200 ins
->dreg
= method_reg
;
2201 MONO_ADD_INS (cfg
->cbb
, ins
);
2205 call
->imt_arg_reg
= method_reg
;
2207 #ifdef MONO_ARCH_IMT_REG
2208 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
2210 /* Need this to keep the IMT arg alive */
2211 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, 0, FALSE
);
/* non-LLVM path below */
2216 #ifdef MONO_ARCH_IMT_REG
2217 method_reg
= alloc_preg (cfg
);
2220 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
2221 } else if (cfg
->compile_aot
) {
2222 MONO_EMIT_NEW_AOTCONST (cfg
, method_reg
, method
, MONO_PATCH_INFO_METHODCONST
);
2225 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2226 ins
->inst_p0
= method
;
2227 ins
->dreg
= method_reg
;
2228 MONO_ADD_INS (cfg
->cbb
, ins
);
2231 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
/* arch without a dedicated IMT register: delegate to the backend */
2233 mono_arch_emit_imt_argument (cfg
, call
, imt_arg
);
/*
 * mono_patch_info_new: allocate a MonoJumpInfo patch record from mempool
 * 'mp' with the given target; the ip/type fields are presumably also set on
 * the lines dropped by this extraction — TODO confirm against full source.
 */
2238 static MonoJumpInfo
*
2239 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2241 MonoJumpInfo
*ji
= mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2245 ji
->data
.target
= target
;
/*
 * mono_emit_call_args: build a MonoCallInst for a call with signature 'sig'
 * and arguments 'args'. Selects the call opcode (OP_TAILCALL for tail calls,
 * otherwise via ret_type_to_call_opcode), wires up the return value
 * (vret_addr for vtypes returned via cfg->vret_addr, a temp + the
 * OP_OUTARG_VTRETADDR scheme for other vtype returns — see the comment block
 * inside — or a fresh dreg for scalar returns), converts R4 arguments via an
 * icall on soft-float targets, and lets the backend (LLVM or native arch)
 * lower the argument passing. Updates cfg->param_area and sets
 * MONO_CFG_HAS_CALLS.
 * NOTE(review): the conditions distinguishing the two mini_type_is_vtype
 * branches at original lines 2271/2275 (presumably LLVM vs. native, or a
 * vret_addr availability check) are among the lines dropped by this
 * extraction.
 */
2250 inline static MonoCallInst
*
2251 mono_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
2252 MonoInst
**args
, int calli
, int virtual, int tail
, int rgctx
, int unbox_trampoline
)
2255 #ifdef MONO_ARCH_SOFT_FLOAT
2260 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
2262 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (sig
->ret
, calli
, virtual, cfg
->generic_sharing_context
));
2265 call
->signature
= sig
;
2266 call
->rgctx_reg
= rgctx
;
2268 type_to_eval_stack_type ((cfg
), sig
->ret
, &call
->inst
);
2271 if (mini_type_is_vtype (cfg
, sig
->ret
)) {
2272 call
->vret_var
= cfg
->vret_addr
;
2273 //g_assert_not_reached ();
2275 } else if (mini_type_is_vtype (cfg
, sig
->ret
)) {
2276 MonoInst
*temp
= mono_compile_create_var (cfg
, sig
->ret
, OP_LOCAL
);
2279 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
2282 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2283 * address of return value to increase optimization opportunities.
2284 * Before vtype decomposition, the dreg of the call ins itself represents the
2285 * fact the call modifies the return value. After decomposition, the call will
2286 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2287 * will be transformed into an LDADDR.
2289 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
2290 loada
->dreg
= alloc_preg (cfg
);
2291 loada
->inst_p0
= temp
;
2292 /* We reference the call too since call->dreg could change during optimization */
2293 loada
->inst_p1
= call
;
2294 MONO_ADD_INS (cfg
->cbb
, loada
);
2296 call
->inst
.dreg
= temp
->dreg
;
2298 call
->vret_var
= loada
;
2299 } else if (!MONO_TYPE_IS_VOID (sig
->ret
))
2300 call
->inst
.dreg
= alloc_dreg (cfg
, call
->inst
.type
);
2302 #ifdef MONO_ARCH_SOFT_FLOAT
2303 if (COMPILE_SOFT_FLOAT (cfg
)) {
2305 * If the call has a float argument, we would need to do an r8->r4 conversion using
2306 * an icall, but that cannot be done during the call sequence since it would clobber
2307 * the call registers + the stack. So we do it before emitting the call.
2309 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2311 MonoInst
*in
= call
->args
[i
];
/* the implicit 'this' argument has no entry in sig->params */
2313 if (i
>= sig
->hasthis
)
2314 t
= sig
->params
[i
- sig
->hasthis
];
2316 t
= &mono_defaults
.int_class
->byval_arg
;
2317 t
= mono_type_get_underlying_type (t
);
2319 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
2320 MonoInst
*iargs
[1];
2324 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
2326 /* The result will be in an int vreg */
2327 call
->args
[i
] = conv
;
2333 call
->need_unbox_trampoline
= unbox_trampoline
;
2336 if (COMPILE_LLVM (cfg
))
2337 mono_llvm_emit_call (cfg
, call
);
2339 mono_arch_emit_call (cfg
, call
);
2341 mono_arch_emit_call (cfg
, call
);
2344 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
2345 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
/*
 * set_rgctx_arg: attach the rgctx (runtime generic context) argument in
 * rgctx_reg to 'call': bound to the dedicated MONO_ARCH_RGCTX_REG when the
 * architecture defines one, otherwise recorded in call->rgctx_arg_reg.
 * NOTE(review): the #else/#endif lines are missing from this extraction.
 */
2351 set_rgctx_arg (MonoCompile
*cfg
, MonoCallInst
*call
, int rgctx_reg
, MonoInst
*rgctx_arg
)
2353 #ifdef MONO_ARCH_RGCTX_REG
2354 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2355 cfg
->uses_rgctx_reg
= TRUE
;
2356 call
->rgctx_reg
= TRUE
;
2358 call
->rgctx_arg_reg
= rgctx_reg
;
/*
 * mono_emit_calli: emit an indirect call through 'addr' with signature 'sig'
 * and arguments 'args'. The rgctx argument, when present, is copied to a
 * fresh register before the call sequence and attached via set_rgctx_arg
 * after the call instruction is added.
 */
2365 inline static MonoInst
*
2366 mono_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*rgctx_arg
)
2372 rgctx_reg
= mono_alloc_preg (cfg
);
2373 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
/* calli=TRUE, virtual=FALSE, tail=FALSE */
2376 call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, FALSE
, rgctx_arg
? TRUE
: FALSE
, FALSE
);
/* sreg1 carries the target address for *_REG call opcodes */
2378 call
->inst
.sreg1
= addr
->dreg
;
2380 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2383 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2385 return (MonoInst
*)call
;
2388 /* This is like calli, but we pass rgctx/imt arguments as well */
/*
 * emit_gsharedvt_call: like mono_emit_calli (indirect call through 'addr'),
 * but additionally passes the IMT argument for 'method' via
 * emit_imt_argument along with the optional rgctx argument (see the file
 * comment above this function).
 */
2390 emit_gsharedvt_call (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoMethod
*method
, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
2396 rgctx_reg
= mono_alloc_preg (cfg
);
2397 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2400 call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, FALSE
, rgctx_arg
? TRUE
: FALSE
, FALSE
);
2402 call
->inst
.sreg1
= addr
->dreg
;
2405 emit_imt_argument (cfg
, call
, method
, imt_arg
);
2407 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2410 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2412 return (MonoInst
*)call
;
2416 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
, MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
);
2418 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
, MonoClass
*klass
, MonoRgctxInfoType rgctx_type
);
2421 mono_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2422 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
, MonoInst
*rgctx_arg
)
2424 gboolean might_be_remote
;
2425 gboolean
virtual = this != NULL
;
2426 gboolean enable_for_aot
= TRUE
;
2430 gboolean need_unbox_trampoline
;
2433 rgctx_reg
= mono_alloc_preg (cfg
);
2434 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2437 if (method
->string_ctor
) {
2438 /* Create the real signature */
2439 /* FIXME: Cache these */
2440 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
2441 ctor_sig
->ret
= &mono_defaults
.string_class
->byval_arg
;
2446 context_used
= mono_method_check_context_used (method
);
2448 might_be_remote
= this && sig
->hasthis
&&
2449 (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) &&
2450 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && (!MONO_CHECK_THIS (this) || context_used
);
2452 if (might_be_remote
&& context_used
) {
2455 g_assert (cfg
->generic_sharing_context
);
2457 addr
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
2459 return mono_emit_calli (cfg
, sig
, args
, addr
, NULL
);
2462 need_unbox_trampoline
= method
->klass
== mono_defaults
.object_class
|| (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
);
2464 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, virtual, FALSE
, rgctx_arg
? TRUE
: FALSE
, need_unbox_trampoline
);
2466 if (might_be_remote
)
2467 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2469 call
->method
= method
;
2470 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
2471 call
->inst
.inst_left
= this;
2474 int vtable_reg
, slot_reg
, this_reg
;
2476 this_reg
= this->dreg
;
2478 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2479 if ((method
->klass
->parent
== mono_defaults
.multicastdelegate_class
) && (!strcmp (method
->name
, "Invoke"))) {
2480 MonoInst
*dummy_use
;
2482 MONO_EMIT_NULL_CHECK (cfg
, this_reg
);
2484 /* Make a call to delegate->invoke_impl */
2485 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2486 call
->inst
.inst_basereg
= this_reg
;
2487 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
2488 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2490 /* We must emit a dummy use here because the delegate trampoline will
2491 replace the 'this' argument with the delegate target making this activation
2492 no longer a root for the delegate.
2493 This is an issue for delegates that target collectible code such as dynamic
2494 methods of GC'able assemblies.
2496 For a test case look into #667921.
2498 FIXME: a dummy use is not the best way to do it as the local register allocator
2499 will put it on a caller save register and spil it around the call.
2500 Ideally, we would either put it on a callee save register or only do the store part.
2502 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[0]);
2504 return (MonoInst
*)call
;
2508 if ((!cfg
->compile_aot
|| enable_for_aot
) &&
2509 (!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
2510 (MONO_METHOD_IS_FINAL (method
) &&
2511 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)) &&
2512 !(method
->klass
->marshalbyref
&& context_used
)) {
2514 * the method is not virtual, we just need to ensure this is not null
2515 * and then we can call the method directly.
2517 if (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) {
2519 * The check above ensures method is not gshared, this is needed since
2520 * gshared methods can't have wrappers.
2522 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2525 if (!method
->string_ctor
)
2526 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2528 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2529 } else if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
2531 * the method is virtual, but we can statically dispatch since either
2532 * it's class or the method itself are sealed.
2533 * But first we need to ensure it's not a null reference.
2535 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2537 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2539 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2541 vtable_reg
= alloc_preg (cfg
);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, this_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2543 if (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2545 #ifdef MONO_ARCH_HAVE_IMT
2547 guint32 imt_slot
= mono_method_get_imt_slot (method
);
2548 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
2549 slot_reg
= vtable_reg
;
2550 call
->inst
.inst_offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * SIZEOF_VOID_P
;
2553 if (slot_reg
== -1) {
2554 slot_reg
= alloc_preg (cfg
);
2555 mini_emit_load_intf_reg_vtable (cfg
, slot_reg
, vtable_reg
, method
->klass
);
2556 call
->inst
.inst_offset
= mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
;
2559 slot_reg
= vtable_reg
;
2560 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoVTable
, vtable
) +
2561 ((mono_method_get_vtable_index (method
)) * (SIZEOF_VOID_P
));
2562 #ifdef MONO_ARCH_HAVE_IMT
2564 g_assert (mono_method_signature (method
)->generic_param_count
);
2565 emit_imt_argument (cfg
, call
, call
->method
, imt_arg
);
2570 call
->inst
.sreg1
= slot_reg
;
2571 call
->virtual = TRUE
;
2575 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2578 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2580 return (MonoInst
*)call
;
2584 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this)
2586 return mono_emit_method_call_full (cfg
, method
, mono_method_signature (method
), args
, this, NULL
, NULL
);
2590 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
2597 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
, FALSE
, FALSE
);
2600 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2602 return (MonoInst
*)call
;
2606 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2608 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2612 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2616 * mono_emit_abs_call:
2618 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2620 inline static MonoInst
*
2621 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2622 MonoMethodSignature
*sig
, MonoInst
**args
)
2624 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2628 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2631 if (cfg
->abs_patches
== NULL
)
2632 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2633 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2634 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2635 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2640 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2642 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2643 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2647 * Native code might return non register sized integers
2648 * without initializing the upper bits.
2650 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2651 case OP_LOADI1_MEMBASE
:
2652 widen_op
= OP_ICONV_TO_I1
;
2654 case OP_LOADU1_MEMBASE
:
2655 widen_op
= OP_ICONV_TO_U1
;
2657 case OP_LOADI2_MEMBASE
:
2658 widen_op
= OP_ICONV_TO_I2
;
2660 case OP_LOADU2_MEMBASE
:
2661 widen_op
= OP_ICONV_TO_U2
;
2667 if (widen_op
!= -1) {
2668 int dreg
= alloc_preg (cfg
);
2671 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2672 widen
->type
= ins
->type
;
2682 get_memcpy_method (void)
2684 static MonoMethod
*memcpy_method
= NULL
;
2685 if (!memcpy_method
) {
2686 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2688 g_error ("Old corlib found. Install a new one");
2690 return memcpy_method
;
2694 create_write_barrier_bitmap (MonoCompile
*cfg
, MonoClass
*klass
, unsigned *wb_bitmap
, int offset
)
2696 MonoClassField
*field
;
2697 gpointer iter
= NULL
;
2699 while ((field
= mono_class_get_fields (klass
, &iter
))) {
2702 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
2704 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
2705 if (mini_type_is_reference (cfg
, mono_field_get_type (field
))) {
2706 g_assert ((foffset
% SIZEOF_VOID_P
) == 0);
2707 *wb_bitmap
|= 1 << ((offset
+ foffset
) / SIZEOF_VOID_P
);
2709 MonoClass
*field_class
= mono_class_from_mono_type (field
->type
);
2710 if (field_class
->has_references
)
2711 create_write_barrier_bitmap (cfg
, field_class
, wb_bitmap
, offset
+ foffset
);
2717 emit_write_barrier (MonoCompile
*cfg
, MonoInst
*ptr
, MonoInst
*value
, int value_reg
)
2719 int card_table_shift_bits
;
2720 gpointer card_table_mask
;
2722 MonoInst
*dummy_use
;
2723 int nursery_shift_bits
;
2724 size_t nursery_size
;
2725 gboolean has_card_table_wb
= FALSE
;
2727 if (!cfg
->gen_write_barriers
)
2730 card_table
= mono_gc_get_card_table (&card_table_shift_bits
, &card_table_mask
);
2732 mono_gc_get_nursery (&nursery_shift_bits
, &nursery_size
);
2734 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2735 has_card_table_wb
= TRUE
;
2738 if (has_card_table_wb
&& !cfg
->compile_aot
&& card_table
&& nursery_shift_bits
> 0) {
2741 MONO_INST_NEW (cfg
, wbarrier
, OP_CARD_TABLE_WBARRIER
);
2742 wbarrier
->sreg1
= ptr
->dreg
;
2744 wbarrier
->sreg2
= value
->dreg
;
2746 wbarrier
->sreg2
= value_reg
;
2747 MONO_ADD_INS (cfg
->cbb
, wbarrier
);
2748 } else if (card_table
) {
2749 int offset_reg
= alloc_preg (cfg
);
2750 int card_reg
= alloc_preg (cfg
);
2753 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, offset_reg
, ptr
->dreg
, card_table_shift_bits
);
2754 if (card_table_mask
)
2755 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PAND_IMM
, offset_reg
, offset_reg
, card_table_mask
);
2757 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2758 * IMM's larger than 32bits.
2760 if (cfg
->compile_aot
) {
2761 MONO_EMIT_NEW_AOTCONST (cfg
, card_reg
, NULL
, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
);
2763 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2764 ins
->inst_p0
= card_table
;
2765 ins
->dreg
= card_reg
;
2766 MONO_ADD_INS (cfg
->cbb
, ins
);
2769 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, offset_reg
, offset_reg
, card_reg
);
2770 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, offset_reg
, 0, 1);
2772 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
2773 mono_emit_method_call (cfg
, write_barrier
, &ptr
, NULL
);
2777 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, value
);
2779 MONO_INST_NEW (cfg
, dummy_use
, OP_DUMMY_USE
);
2780 dummy_use
->sreg1
= value_reg
;
2781 MONO_ADD_INS (cfg
->cbb
, dummy_use
);
2786 mono_emit_wb_aware_memcpy (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*iargs
[4], int size
, int align
)
2788 int dest_ptr_reg
, tmp_reg
, destreg
, srcreg
, offset
;
2789 unsigned need_wb
= 0;
2794 /*types with references can't have alignment smaller than sizeof(void*) */
2795 if (align
< SIZEOF_VOID_P
)
2798 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2799 if (size
> 32 * SIZEOF_VOID_P
)
2802 create_write_barrier_bitmap (cfg
, klass
, &need_wb
, 0);
2804 /* We don't unroll more than 5 stores to avoid code bloat. */
2805 if (size
> 5 * SIZEOF_VOID_P
) {
2806 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2807 size
+= (SIZEOF_VOID_P
- 1);
2808 size
&= ~(SIZEOF_VOID_P
- 1);
2810 EMIT_NEW_ICONST (cfg
, iargs
[2], size
);
2811 EMIT_NEW_ICONST (cfg
, iargs
[3], need_wb
);
2812 mono_emit_jit_icall (cfg
, mono_gc_wbarrier_value_copy_bitmap
, iargs
);
2816 destreg
= iargs
[0]->dreg
;
2817 srcreg
= iargs
[1]->dreg
;
2820 dest_ptr_reg
= alloc_preg (cfg
);
2821 tmp_reg
= alloc_preg (cfg
);
2824 EMIT_NEW_UNALU (cfg
, iargs
[0], OP_MOVE
, dest_ptr_reg
, destreg
);
2826 while (size
>= SIZEOF_VOID_P
) {
2827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, tmp_reg
, srcreg
, offset
);
2828 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, dest_ptr_reg
, 0, tmp_reg
);
2831 emit_write_barrier (cfg
, iargs
[0], NULL
, tmp_reg
);
2833 offset
+= SIZEOF_VOID_P
;
2834 size
-= SIZEOF_VOID_P
;
2837 /*tmp += sizeof (void*)*/
2838 if (size
>= SIZEOF_VOID_P
) {
2839 NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dest_ptr_reg
, dest_ptr_reg
, SIZEOF_VOID_P
);
2840 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
2844 /* Those cannot be references since size < sizeof (void*) */
2846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, tmp_reg
, srcreg
, offset
);
2847 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2853 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmp_reg
, srcreg
, offset
);
2854 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2860 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmp_reg
, srcreg
, offset
);
2861 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, tmp_reg
);
2870 * Emit code to copy a valuetype of type @klass whose address is stored in
2871 * @src->dreg to memory whose address is stored at @dest->dreg.
2874 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2876 MonoInst
*iargs
[4];
2877 int context_used
, n
;
2879 MonoMethod
*memcpy_method
;
2880 MonoInst
*size_ins
= NULL
;
2884 * This check breaks with spilled vars... need to handle it during verification anyway.
2885 * g_assert (klass && klass == src->klass && klass == dest->klass);
2888 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
2890 context_used
= mono_class_check_context_used (klass
);
2891 size_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_VALUE_SIZE
);
2895 n
= mono_class_native_size (klass
, &align
);
2897 n
= mono_class_value_size (klass
, &align
);
2899 /* if native is true there should be no references in the struct */
2900 if (cfg
->gen_write_barriers
&& klass
->has_references
&& !native
) {
2901 /* Avoid barriers when storing to the stack */
2902 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2903 (dest
->opcode
== OP_LDADDR
))) {
2904 int context_used
= 0;
2909 if (cfg
->generic_sharing_context
)
2910 context_used
= mono_class_check_context_used (klass
);
2912 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2913 if (!size_ins
&& (cfg
->opt
& MONO_OPT_INTRINS
) && mono_emit_wb_aware_memcpy (cfg
, klass
, iargs
, n
, align
)) {
2915 } else if (context_used
) {
2916 iargs
[2] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
2918 if (cfg
->compile_aot
) {
2919 EMIT_NEW_CLASSCONST (cfg
, iargs
[2], klass
);
2921 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2922 mono_class_compute_gc_descriptor (klass
);
2926 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2931 if (!size_ins
&& (cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2932 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2933 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2938 iargs
[2] = size_ins
;
2940 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2942 memcpy_method
= get_memcpy_method ();
2943 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2948 get_memset_method (void)
2950 static MonoMethod
*memset_method
= NULL
;
2951 if (!memset_method
) {
2952 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2954 g_error ("Old corlib found. Install a new one");
2956 return memset_method
;
2960 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2962 MonoInst
*iargs
[3];
2963 int n
, context_used
;
2965 MonoMethod
*memset_method
;
2966 MonoInst
*size_ins
= NULL
;
2968 /* FIXME: Optimize this for the case when dest is an LDADDR */
2970 mono_class_init (klass
);
2971 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
2972 context_used
= mono_class_check_context_used (klass
);
2973 size_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_VALUE_SIZE
);
2976 n
= mono_class_value_size (klass
, &align
);
2979 if (!size_ins
&& n
<= sizeof (gpointer
) * 5) {
2980 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2983 memset_method
= get_memset_method ();
2985 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2987 iargs
[2] = size_ins
;
2989 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2990 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2995 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2997 MonoInst
*this = NULL
;
2999 g_assert (cfg
->generic_sharing_context
);
3001 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
3002 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
3003 !method
->klass
->valuetype
)
3004 EMIT_NEW_ARGLOAD (cfg
, this, 0);
3006 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
3007 MonoInst
*mrgctx_loc
, *mrgctx_var
;
3010 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
3012 mrgctx_loc
= mono_get_vtable_var (cfg
);
3013 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
3016 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
3017 MonoInst
*vtable_loc
, *vtable_var
;
3021 vtable_loc
= mono_get_vtable_var (cfg
);
3022 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
3024 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
3025 MonoInst
*mrgctx_var
= vtable_var
;
3028 vtable_reg
= alloc_preg (cfg
);
3029 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
3030 vtable_var
->type
= STACK_PTR
;
3038 vtable_reg
= alloc_preg (cfg
);
3039 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3044 static MonoJumpInfoRgctxEntry
*
3045 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, MonoRgctxInfoType info_type
)
3047 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
3048 res
->method
= method
;
3049 res
->in_mrgctx
= in_mrgctx
;
3050 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
3051 res
->data
->type
= patch_type
;
3052 res
->data
->data
.target
= patch_data
;
3053 res
->info_type
= info_type
;
3058 static inline MonoInst
*
3059 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
3061 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
3065 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
3066 MonoClass
*klass
, MonoRgctxInfoType rgctx_type
)
3068 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
3069 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3071 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3075 * emit_get_rgctx_method:
3077 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3078 * normal constants, else emit a load from the rgctx.
3081 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
3082 MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
)
3084 if (!context_used
) {
3087 switch (rgctx_type
) {
3088 case MONO_RGCTX_INFO_METHOD
:
3089 EMIT_NEW_METHODCONST (cfg
, ins
, cmethod
);
3091 case MONO_RGCTX_INFO_METHOD_RGCTX
:
3092 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, ins
, cmethod
);
3095 g_assert_not_reached ();
3098 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
3099 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3101 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3106 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
3107 MonoClassField
*field
, MonoRgctxInfoType rgctx_type
)
3109 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
3110 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3112 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3116 * On return the caller must check @klass for load errors.
3119 emit_generic_class_init (MonoCompile
*cfg
, MonoClass
*klass
)
3121 MonoInst
*vtable_arg
;
3123 int context_used
= 0;
3125 if (cfg
->generic_sharing_context
)
3126 context_used
= mono_class_check_context_used (klass
);
3129 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
3130 klass
, MONO_RGCTX_INFO_VTABLE
);
3132 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
3136 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
3139 if (COMPILE_LLVM (cfg
))
3140 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline_llvm
, &vtable_arg
);
3142 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable_arg
);
3143 #ifdef MONO_ARCH_VTABLE_REG
3144 mono_call_inst_add_outarg_reg (cfg
, call
, vtable_arg
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
3145 cfg
->uses_vtable_reg
= TRUE
;
3152 emit_seq_point (MonoCompile
*cfg
, MonoMethod
*method
, guint8
* ip
, gboolean intr_loc
)
3156 if (cfg
->gen_seq_points
&& cfg
->method
== method
) {
3157 NEW_SEQ_POINT (cfg
, ins
, ip
- cfg
->header
->code
, intr_loc
);
3158 MONO_ADD_INS (cfg
->cbb
, ins
);
3163 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
3165 if (mini_get_debug_options ()->better_cast_details
) {
3166 int to_klass_reg
= alloc_preg (cfg
);
3167 int vtable_reg
= alloc_preg (cfg
);
3168 int klass_reg
= alloc_preg (cfg
);
3169 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
3172 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
3176 MONO_ADD_INS (cfg
->cbb
, tls_get
);
3177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3180 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
3181 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
3182 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
3187 reset_cast_details (MonoCompile
*cfg
)
3189 /* Reset the variables holding the cast details */
3190 if (mini_get_debug_options ()->better_cast_details
) {
3191 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
3193 MONO_ADD_INS (cfg
->cbb
, tls_get
);
3194 /* It is enough to reset the from field */
3195 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
3200 * On return the caller must check @array_class for load errors
3203 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
3205 int vtable_reg
= alloc_preg (cfg
);
3206 int context_used
= 0;
3208 if (cfg
->generic_sharing_context
)
3209 context_used
= mono_class_check_context_used (array_class
);
3211 save_cast_details (cfg
, array_class
, obj
->dreg
);
3213 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3215 if (cfg
->opt
& MONO_OPT_SHARED
) {
3216 int class_reg
= alloc_preg (cfg
);
3217 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3218 if (cfg
->compile_aot
) {
3219 int klass_reg
= alloc_preg (cfg
);
3220 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
3221 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
3223 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
3225 } else if (context_used
) {
3226 MonoInst
*vtable_ins
;
3228 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
3229 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
3231 if (cfg
->compile_aot
) {
3235 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
3237 vt_reg
= alloc_preg (cfg
);
3238 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, vtable
);
3239 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
3242 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
3244 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vtable
);
3248 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
3250 reset_cast_details (cfg
);
3254 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3255 * generic code is generated.
3258 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
3260 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
3263 MonoInst
*rgctx
, *addr
;
3265 /* FIXME: What if the class is shared? We might not
3266 have to get the address of the method from the
3268 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3269 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3271 rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3273 return mono_emit_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3275 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3280 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
3284 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
3285 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3286 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3287 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
3289 obj_reg
= sp
[0]->dreg
;
3290 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3291 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3293 /* FIXME: generics */
3294 g_assert (klass
->rank
== 0);
3297 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
3298 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3300 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
3304 MonoInst
*element_class
;
3306 /* This assertion is from the unboxcast insn */
3307 g_assert (klass
->rank
== 0);
3309 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
3310 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
3312 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
3313 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3315 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
3316 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
3317 reset_cast_details (cfg
);
3320 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_MP
), obj_reg
, sizeof (MonoObject
));
3321 MONO_ADD_INS (cfg
->cbb
, add
);
3322 add
->type
= STACK_MP
;
3329 * Returns NULL and set the cfg exception on error.
3332 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
, int context_used
)
3334 MonoInst
*iargs
[2];
3340 MonoInst
*iargs
[2];
3343 FIXME: we cannot get managed_alloc here because we can't get
3344 the class's vtable (because it's not a closed class)
3346 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3347 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3350 if (cfg
->opt
& MONO_OPT_SHARED
)
3351 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
3353 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
3354 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
3356 if (cfg
->opt
& MONO_OPT_SHARED
) {
3357 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3359 alloc_ftn
= mono_object_new
;
3362 alloc_ftn
= mono_object_new_specific
;
3365 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3368 if (cfg
->opt
& MONO_OPT_SHARED
) {
3369 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3370 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
3372 alloc_ftn
= mono_object_new
;
3373 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
3374 /* This happens often in argument checking code, eg. throw new FooException... */
3375 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3376 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
3377 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
3379 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
3380 MonoMethod
*managed_alloc
= NULL
;
3384 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3385 cfg
->exception_ptr
= klass
;
3389 #ifndef MONO_CROSS_COMPILE
3390 managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
3393 if (managed_alloc
) {
3394 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3395 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
3397 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
3399 guint32 lw
= vtable
->klass
->instance_size
;
3400 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
3401 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
3402 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
3405 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3409 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3413 * Returns NULL and set the cfg exception on error.
3416 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
)
3418 MonoInst
*alloc
, *ins
;
3420 if (mono_class_is_nullable (klass
)) {
3421 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
3424 /* FIXME: What if the class is shared? We might not
3425 have to get the method address from the RGCTX. */
3426 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3427 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3428 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3430 return mono_emit_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3432 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3436 alloc
= handle_alloc (cfg
, klass
, TRUE
, context_used
);
3440 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
3441 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
3442 ins
->opcode
= OP_STOREV_MEMBASE
;
3444 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
/*
 * NOTE(review): extraction-garbled fragment — some original lines (returns,
 * closing braces, declarations of `i` and `type`) are missing from this view.
 *
 * mini_class_has_reference_variant_generic_argument:
 * Presumably returns TRUE when KLASS is a generic instance (or, with
 * context_used, an open generic) that has at least one variant/covariant
 * type parameter instantiated with a reference type — the case where
 * casts need the variance-aware cache helpers. TODO confirm against upstream.
 */
3452 mini_class_has_reference_variant_generic_argument (MonoCompile
*cfg
, MonoClass
*klass
, int context_used
)
3455 MonoGenericContainer
*container
;
3456 MonoGenericInst
*ginst
;
/* Inflated generic instance: the container comes from the generic
   type definition, the instantiation from the class's context. */
3458 if (klass
->generic_class
) {
3459 container
= klass
->generic_class
->container_class
->generic_container
;
3460 ginst
= klass
->generic_class
->context
.class_inst
;
/* Open generic definition inside shared code. */
3461 } else if (klass
->generic_container
&& context_used
) {
3462 container
= klass
->generic_container
;
3463 ginst
= container
->context
.class_inst
;
/* Scan the type arguments; only variant/covariant parameters matter. */
3468 for (i
= 0; i
< container
->type_argc
; ++i
) {
3470 if (!(mono_generic_container_get_param_info (container
, i
)->flags
& (MONO_GEN_PARAM_VARIANT
|MONO_GEN_PARAM_COVARIANT
)))
3472 type
= ginst
->type_argv
[i
];
3473 if (mini_type_is_reference (cfg
, type
))
3479 // FIXME: This doesn't work yet (class libs tests fail?)
3480 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3483 * Returns NULL and set the cfg exception on error.
3486 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3488 MonoBasicBlock
*is_null_bb
;
3489 int obj_reg
= src
->dreg
;
3490 int vtable_reg
= alloc_preg (cfg
);
3491 MonoInst
*klass_inst
= NULL
;
3496 if(mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
3497 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
3498 MonoInst
*cache_ins
;
3500 cache_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_CAST_CACHE
);
3505 /* klass - it's the second element of the cache entry*/
3506 EMIT_NEW_LOAD_MEMBASE (cfg
, args
[1], OP_LOAD_MEMBASE
, alloc_preg (cfg
), cache_ins
->dreg
, sizeof (gpointer
));
3509 args
[2] = cache_ins
;
3511 return mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
3514 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
3516 if (is_complex_isinst (klass
)) {
3517 /* Complex case, handle by an icall */
3523 args
[1] = klass_inst
;
3525 return mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
3527 /* Simple case, handled by the code below */
3531 NEW_BBLOCK (cfg
, is_null_bb
);
3533 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3534 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
3536 save_cast_details (cfg
, klass
, obj_reg
);
3538 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3540 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
3542 int klass_reg
= alloc_preg (cfg
);
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3546 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3547 /* the remoting code is broken, access the class for now */
3548 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3549 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3551 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3552 cfg
->exception_ptr
= klass
;
3555 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3558 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3560 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3563 mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, klass_inst
, is_null_bb
);
3567 MONO_START_BB (cfg
, is_null_bb
);
3569 reset_cast_details (cfg
);
3575 * Returns NULL and set the cfg exception on error.
3578 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3581 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
3582 int obj_reg
= src
->dreg
;
3583 int vtable_reg
= alloc_preg (cfg
);
3584 int res_reg
= alloc_ireg_ref (cfg
);
3585 MonoInst
*klass_inst
= NULL
;
3590 if(mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
3591 MonoMethod
*mono_isinst
= mono_marshal_get_isinst_with_cache ();
3592 MonoInst
*cache_ins
;
3594 cache_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_CAST_CACHE
);
3599 /* klass - it's the second element of the cache entry*/
3600 EMIT_NEW_LOAD_MEMBASE (cfg
, args
[1], OP_LOAD_MEMBASE
, alloc_preg (cfg
), cache_ins
->dreg
, sizeof (gpointer
));
3603 args
[2] = cache_ins
;
3605 return mono_emit_method_call (cfg
, mono_isinst
, args
, NULL
);
3608 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
3610 if (is_complex_isinst (klass
)) {
3611 /* Complex case, handle by an icall */
3617 args
[1] = klass_inst
;
3619 return mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
3621 /* Simple case, the code below can handle it */
3625 NEW_BBLOCK (cfg
, is_null_bb
);
3626 NEW_BBLOCK (cfg
, false_bb
);
3627 NEW_BBLOCK (cfg
, end_bb
);
3629 /* Do the assignment at the beginning, so the other assignment can be if converted */
3630 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
3631 ins
->type
= STACK_OBJ
;
3634 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3635 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
3637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3639 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3640 g_assert (!context_used
);
3641 /* the is_null_bb target simply copies the input register to the output */
3642 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
3644 int klass_reg
= alloc_preg (cfg
);
3647 int rank_reg
= alloc_preg (cfg
);
3648 int eclass_reg
= alloc_preg (cfg
);
3650 g_assert (!context_used
);
3651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3652 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
3653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
3656 if (klass
->cast_class
== mono_defaults
.object_class
) {
3657 int parent_reg
= alloc_preg (cfg
);
3658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
3659 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
3660 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3662 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
3663 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
3664 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3666 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
3667 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3669 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3670 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3672 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
3673 /* Check that the object is a vector too */
3674 int bounds_reg
= alloc_preg (cfg
);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3676 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3680 /* the is_null_bb target simply copies the input register to the output */
3681 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3683 } else if (mono_class_is_nullable (klass
)) {
3684 g_assert (!context_used
);
3685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3686 /* the is_null_bb target simply copies the input register to the output */
3687 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3689 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3690 g_assert (!context_used
);
3691 /* the remoting code is broken, access the class for now */
3692 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3693 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3695 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
3696 cfg
->exception_ptr
= klass
;
3699 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3702 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3704 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3705 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3708 /* the is_null_bb target simply copies the input register to the output */
3709 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, klass_inst
, false_bb
, is_null_bb
);
3714 MONO_START_BB (cfg
, false_bb
);
3716 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3719 MONO_START_BB (cfg
, is_null_bb
);
3721 MONO_START_BB (cfg
, end_bb
);
3727 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3729 /* This opcode takes as input an object reference and a class, and returns:
3730 0) if the object is an instance of the class,
3731 1) if the object is not instance of the class,
3732 2) if the object is a proxy whose type cannot be determined */
3735 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3736 int obj_reg
= src
->dreg
;
3737 int dreg
= alloc_ireg (cfg
);
3739 int klass_reg
= alloc_preg (cfg
);
3741 NEW_BBLOCK (cfg
, true_bb
);
3742 NEW_BBLOCK (cfg
, false_bb
);
3743 NEW_BBLOCK (cfg
, false2_bb
);
3744 NEW_BBLOCK (cfg
, end_bb
);
3745 NEW_BBLOCK (cfg
, no_proxy_bb
);
3747 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3748 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3750 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3751 NEW_BBLOCK (cfg
, interface_fail_bb
);
3753 tmp_reg
= alloc_preg (cfg
);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3755 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3756 MONO_START_BB (cfg
, interface_fail_bb
);
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3759 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3761 tmp_reg
= alloc_preg (cfg
);
3762 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3763 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3766 tmp_reg
= alloc_preg (cfg
);
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3770 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3771 tmp_reg
= alloc_preg (cfg
);
3772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3775 tmp_reg
= alloc_preg (cfg
);
3776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3777 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3780 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3781 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3783 MONO_START_BB (cfg
, no_proxy_bb
);
3785 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3788 MONO_START_BB (cfg
, false_bb
);
3790 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3793 MONO_START_BB (cfg
, false2_bb
);
3795 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3798 MONO_START_BB (cfg
, true_bb
);
3800 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3802 MONO_START_BB (cfg
, end_bb
);
3805 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3807 ins
->type
= STACK_I4
;
3813 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3815 /* This opcode takes as input an object reference and a class, and returns:
3816 0) if the object is an instance of the class,
3817 1) if the object is a proxy whose type cannot be determined
3818 an InvalidCastException exception is thrown otherwhise*/
3821 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3822 int obj_reg
= src
->dreg
;
3823 int dreg
= alloc_ireg (cfg
);
3824 int tmp_reg
= alloc_preg (cfg
);
3825 int klass_reg
= alloc_preg (cfg
);
3827 NEW_BBLOCK (cfg
, end_bb
);
3828 NEW_BBLOCK (cfg
, ok_result_bb
);
3830 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3833 save_cast_details (cfg
, klass
, obj_reg
);
3835 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3836 NEW_BBLOCK (cfg
, interface_fail_bb
);
3838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3839 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3840 MONO_START_BB (cfg
, interface_fail_bb
);
3841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3843 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3845 tmp_reg
= alloc_preg (cfg
);
3846 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3847 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3848 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3850 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3851 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3854 NEW_BBLOCK (cfg
, no_proxy_bb
);
3856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3858 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3860 tmp_reg
= alloc_preg (cfg
);
3861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3864 tmp_reg
= alloc_preg (cfg
);
3865 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3866 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3869 NEW_BBLOCK (cfg
, fail_1_bb
);
3871 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3873 MONO_START_BB (cfg
, fail_1_bb
);
3875 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3878 MONO_START_BB (cfg
, no_proxy_bb
);
3880 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3883 MONO_START_BB (cfg
, ok_result_bb
);
3885 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3887 MONO_START_BB (cfg
, end_bb
);
3890 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3892 ins
->type
= STACK_I4
;
3898 * Returns NULL and set the cfg exception on error.
3900 static G_GNUC_UNUSED MonoInst
*
3901 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
, int context_used
)
3905 gpointer
*trampoline
;
3906 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3910 obj
= handle_alloc (cfg
, klass
, FALSE
, 0);
3914 /* Inline the contents of mono_delegate_ctor */
3916 /* Set target field */
3917 /* Optimize away setting of NULL target */
3918 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0)) {
3919 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3920 if (cfg
->gen_write_barriers
) {
3921 dreg
= alloc_preg (cfg
);
3922 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
));
3923 emit_write_barrier (cfg
, ptr
, target
, 0);
3927 /* Set method field */
3928 method_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD
);
3929 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3930 if (cfg
->gen_write_barriers
) {
3931 dreg
= alloc_preg (cfg
);
3932 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
));
3933 emit_write_barrier (cfg
, ptr
, method_ins
, 0);
3936 * To avoid looking up the compiled code belonging to the target method
3937 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3938 * store it, and we fill it after the method has been compiled.
3940 if (!cfg
->compile_aot
&& !method
->dynamic
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
3941 MonoInst
*code_slot_ins
;
3944 code_slot_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE
);
3946 domain
= mono_domain_get ();
3947 mono_domain_lock (domain
);
3948 if (!domain_jit_info (domain
)->method_code_hash
)
3949 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3950 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3952 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3953 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3955 mono_domain_unlock (domain
);
3957 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3959 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3962 /* Set invoke_impl field */
3963 if (cfg
->compile_aot
) {
3964 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3966 trampoline
= mono_create_delegate_trampoline (cfg
->domain
, klass
);
3967 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3969 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3971 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * NOTE(review): extraction-garbled fragment; comments describe intent only.
 *
 * handle_array_new:
 * Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array; SP holds the dimension arguments from the
 * evaluation stack.
 */
3977 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3979 MonoJitICallInfo
*info
;
3981 /* Need to register the icall so it gets an icall wrapper */
3982 info
= mono_get_array_new_va_icall (rank
);
/* The icall is vararg, so the whole method must be flagged as using varargs. */
3984 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3986 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls here, so force the non-LLVM backend. */
3987 cfg
->disable_llvm
= TRUE
;
3989 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3990 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
/*
 * NOTE(review): extraction-garbled fragment; some original lines are missing.
 *
 * mono_emit_load_got_addr:
 * Emit an OP_LOAD_GOTADDR at the very start of the method so the GOT
 * variable is initialized before any instruction that needs it, and add a
 * dummy use in the exit block so liveness keeps it alive for the whole method.
 */
3994 mono_emit_load_got_addr (MonoCompile
*cfg
)
3996 MonoInst
*getaddr
, *dummy_use
;
/* Nothing to do if there is no GOT var or it was already set up. */
3998 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
4001 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
4002 getaddr
->cil_code
= cfg
->header
->code
;
4003 getaddr
->dreg
= cfg
->got_var
->dreg
;
4005 /* Add it to the start of the first bblock */
/* Prepend by hand if the entry block already has code; otherwise append. */
4006 if (cfg
->bb_entry
->code
) {
4007 getaddr
->next
= cfg
->bb_entry
->code
;
4008 cfg
->bb_entry
->code
= getaddr
;
4011 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
4013 cfg
->got_var_allocated
= TRUE
;
4016 * Add a dummy use to keep the got_var alive, since real uses might
4017 * only be generated by the back ends.
4018 * Add it to end_bblock, so the variable's lifetime covers the whole
4020 * It would be better to make the usage of the got var explicit in all
4021 * cases when the backend needs it (i.e. calls, throw etc.), so this
4022 * wouldn't be needed.
4024 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
)
;
4025 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
/* Lazily-initialized inline size cap (code_size threshold), overridable via
   the MONO_INLINELIMIT environment variable; see mono_method_check_inlining. */
4028 static int inline_limit
;
4029 static gboolean inline_limit_inited
;
4032 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
4034 MonoMethodHeaderSummary header
;
4036 #ifdef MONO_ARCH_SOFT_FLOAT
4037 MonoMethodSignature
*sig
= mono_method_signature (method
);
4041 if (cfg
->generic_sharing_context
)
4044 if (cfg
->inline_depth
> 10)
4047 #ifdef MONO_ARCH_HAVE_LMF_OPS
4048 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
4049 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
4050 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
4055 if (!mono_method_get_header_summary (method
, &header
))
4058 /*runtime, icall and pinvoke are checked by summary call*/
4059 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
4060 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
4061 (method
->klass
->marshalbyref
) ||
4065 /* also consider num_locals? */
4066 /* Do the size check early to avoid creating vtables */
4067 if (!inline_limit_inited
) {
4068 if (getenv ("MONO_INLINELIMIT"))
4069 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
4071 inline_limit
= INLINE_LENGTH_LIMIT
;
4072 inline_limit_inited
= TRUE
;
4074 if (header
.code_size
>= inline_limit
&& !(method
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
))
4078 * if we can initialize the class of the method right away, we do,
4079 * otherwise we don't allow inlining if the class needs initialization,
4080 * since it would mean inserting a call to mono_runtime_class_init()
4081 * inside the inlined code
4083 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
4084 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
4085 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
4086 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4087 if (!method
->klass
->runtime_info
)
4088 /* No vtable created yet */
4090 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4093 /* This makes so that inline cannot trigger */
4094 /* .cctors: too many apps depend on them */
4095 /* running with a specific order... */
4096 if (! vtable
->initialized
)
4098 mono_runtime_class_init (vtable
);
4100 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
4101 if (!method
->klass
->runtime_info
)
4102 /* No vtable created yet */
4104 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4107 if (!vtable
->initialized
)
4112 * If we're compiling for shared code
4113 * the cctor will need to be run at aot method load time, for example,
4114 * or at the end of the compilation of the inlining method.
4116 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
4121 * CAS - do not inline methods with declarative security
4122 * Note: this has to be before any possible return TRUE;
4124 if (mono_method_has_declsec (method
))
4127 #ifdef MONO_ARCH_SOFT_FLOAT
4129 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
4131 for (i
= 0; i
< sig
->param_count
; ++i
)
4132 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
/*
 * NOTE(review): extraction-garbled fragment — the return statements for the
 * early-out branches are missing from this view; comments describe intent.
 *
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access in METHOD needs an explicit
 * class-initialization (.cctor) trigger for VTABLE's class.
 */
4140 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
/* Already initialized (and not AOT, where init state can't be baked in). */
4142 if (vtable
->initialized
&& !cfg
->compile_aot
)
/* beforefieldinit classes allow lazy initialization at any point. */
4145 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
4148 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
/* Instance method of the same class: the object couldn't exist unless the
   class was already initialized. */
4151 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
4152 /* The initialization is already done before the method is called */
4159 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
, gboolean bcheck
)
4163 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
4166 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
4169 mono_class_init (klass
);
4170 size
= mono_class_array_element_size (klass
);
4173 mult_reg
= alloc_preg (cfg
);
4174 array_reg
= arr
->dreg
;
4175 index_reg
= index
->dreg
;
4177 #if SIZEOF_REGISTER == 8
4178 /* The array reg is 64 bits but the index reg is only 32 */
4179 if (COMPILE_LLVM (cfg
)) {
4181 index2_reg
= index_reg
;
4183 index2_reg
= alloc_preg (cfg
);
4184 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
4187 if (index
->type
== STACK_I8
) {
4188 index2_reg
= alloc_preg (cfg
);
4189 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
4191 index2_reg
= index_reg
;
4196 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
4198 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4199 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
4200 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4202 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
4203 ins
->klass
= mono_class_get_element_class (klass
);
4204 ins
->type
= STACK_MP
;
4210 add_reg
= alloc_ireg_mp (cfg
);
4213 MonoInst
*rgctx_ins
;
4216 g_assert (cfg
->generic_sharing_context
);
4217 context_used
= mono_class_check_context_used (klass
);
4218 g_assert (context_used
);
4219 rgctx_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE
);
4220 MONO_EMIT_NEW_BIALU (cfg
, OP_IMUL
, mult_reg
, index2_reg
, rgctx_ins
->dreg
);
4222 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
4224 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
4225 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
4226 ins
->klass
= mono_class_get_element_class (klass
);
4227 ins
->type
= STACK_MP
;
4228 MONO_ADD_INS (cfg
->cbb
, ins
);
4233 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4235 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
4237 int bounds_reg
= alloc_preg (cfg
);
4238 int add_reg
= alloc_ireg_mp (cfg
);
4239 int mult_reg
= alloc_preg (cfg
);
4240 int mult2_reg
= alloc_preg (cfg
);
4241 int low1_reg
= alloc_preg (cfg
);
4242 int low2_reg
= alloc_preg (cfg
);
4243 int high1_reg
= alloc_preg (cfg
);
4244 int high2_reg
= alloc_preg (cfg
);
4245 int realidx1_reg
= alloc_preg (cfg
);
4246 int realidx2_reg
= alloc_preg (cfg
);
4247 int sum_reg
= alloc_preg (cfg
);
4248 int index1
, index2
, tmpreg
;
4252 mono_class_init (klass
);
4253 size
= mono_class_array_element_size (klass
);
4255 index1
= index_ins1
->dreg
;
4256 index2
= index_ins2
->dreg
;
4258 #if SIZEOF_REGISTER == 8
4259 /* The array reg is 64 bits but the index reg is only 32 */
4260 if (COMPILE_LLVM (cfg
)) {
4263 tmpreg
= alloc_preg (cfg
);
4264 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, tmpreg
, index1
);
4266 tmpreg
= alloc_preg (cfg
);
4267 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, tmpreg
, index2
);
4271 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4275 /* range checking */
4276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
4277 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
4279 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
4280 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4281 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
4282 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
4283 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4284 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
4285 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
4287 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
4288 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4289 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
4290 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
4291 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4292 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
4293 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
4295 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
4296 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
4297 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
4298 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
4299 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
4301 ins
->type
= STACK_MP
;
4303 MONO_ADD_INS (cfg
->cbb
, ins
);
/*
 * NOTE(review): extraction-garbled fragment — declarations of `rank`,
 * `element_size`, `addr` and the rank==1 guard/return are missing from view.
 *
 * mini_emit_ldelema_ins:
 * Emit the address computation for a multi-dimensional array Get/Set/Address
 * accessor CMETHOD. Rank 1 and (optionally) rank 2 are inlined; higher ranks
 * fall back to a marshalled Address helper call.
 */
4310 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
4314 MonoMethod
*addr_method
;
/* A Set accessor carries the value as its last parameter; exclude it
   so rank = number of index parameters. */
4317 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
4320 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], TRUE
);
4322 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4323 /* emit_ldelema_2 depends on OP_LMUL */
4324 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
4325 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
/* Generic fallback: call the marshal-generated Address wrapper. */
4329 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
4330 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
4331 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
/* Default break policy: honor every breakpoint/Debugger.Break () request. */
4336 static MonoBreakPolicy
4337 always_insert_breakpoint (MonoMethod
*method
)
4339 return MONO_BREAK_POLICY_ALWAYS
;
/* Embedder-overridable policy hook (see mono_set_break_policy below). */
4342 static MonoBreakPolicyFunc break_policy_func
= always_insert_breakpoint
;
4345 * mono_set_break_policy:
4346 * policy_callback: the new callback function
4348 * Allow embedders to decide wherther to actually obey breakpoint instructions
4349 * (both break IL instructions and Debugger.Break () method calls), for example
4350 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4351 * untrusted or semi-trusted code.
4353 * @policy_callback will be called every time a break point instruction needs to
4354 * be inserted with the method argument being the method that calls Debugger.Break()
4355 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4356 * if it wants the breakpoint to not be effective in the given method.
4357 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
   always-insert behavior. See the documentation comment above. */
4360 mono_set_break_policy (MonoBreakPolicyFunc policy_callback
)
4362 if (policy_callback
)
4363 break_policy_func
= policy_callback
;
4365 break_policy_func
= always_insert_breakpoint
;
/*
 * NOTE(review): extraction-garbled fragment — the per-case return statements
 * and the default-case return are missing from this view.
 *
 * should_insert_brekpoint (sic — the typo is in the upstream symbol name;
 * renaming would break callers): consult the installed break policy to decide
 * whether a breakpoint in METHOD should actually be emitted.
 */
4369 should_insert_brekpoint (MonoMethod
*method
) {
4370 switch (break_policy_func (method
)) {
4371 case MONO_BREAK_POLICY_ALWAYS
:
4373 case MONO_BREAK_POLICY_NEVER
:
4375 case MONO_BREAK_POLICY_ON_DBG
:
4376 return mono_debug_using_mono_debugger ();
/* Unexpected policy value from the embedder callback. */
4378 g_warning ("Incorrect value returned from break policy callback");
4383 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * NOTE(review): extraction-garbled fragment — the if/else braces and returns
 * are missing from this view; comments describe intent.
 *
 * emit_array_generic_access:
 * Inline Array.Get/SetGenericValueImpl: compute the element address
 * (bounds check already done by the caller) and copy the value between
 * the array element and the by-ref argument args[2].
 */
4385 emit_array_generic_access (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, int is_set
)
4387 MonoInst
*addr
, *store
, *load
;
4388 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[2]);
4390 /* the bounds check is already done by the callers */
4391 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1], FALSE
);
/* Set path: load the value from args[2] and store it into the element;
   a write barrier is needed when the element is a reference. */
4393 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, args
[2]->dreg
, 0);
4394 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, addr
->dreg
, 0, load
->dreg
);
4395 if (mini_type_is_reference (cfg
, fsig
->params
[2]))
4396 emit_write_barrier (cfg
, addr
, load
, -1);
/* Get path: load the element and store it through the by-ref argument. */
4398 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
4399 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
4406 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
4408 return mini_type_is_reference (cfg
, &klass
->byval_arg
);
4412 emit_array_store (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, gboolean safety_checks
)
4414 if (safety_checks
&& generic_class_is_reference_type (cfg
, klass
) &&
4415 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
4416 MonoClass
*obj_array
= mono_array_class_get_cached (mono_defaults
.object_class
, 1);
4417 MonoMethod
*helper
= mono_marshal_get_virtual_stelemref (obj_array
);
4418 MonoInst
*iargs
[3];
4421 mono_class_setup_vtable (obj_array
);
4422 g_assert (helper
->slot
);
4424 if (sp
[0]->type
!= STACK_OBJ
)
4426 if (sp
[2]->type
!= STACK_OBJ
)
4433 return mono_emit_method_call (cfg
, helper
, iargs
, sp
[0]);
4437 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
4440 // FIXME-VT: OP_ICONST optimization
4441 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
4442 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
4443 ins
->opcode
= OP_STOREV_MEMBASE
;
4444 } else if (sp
[1]->opcode
== OP_ICONST
) {
4445 int array_reg
= sp
[0]->dreg
;
4446 int index_reg
= sp
[1]->dreg
;
4447 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
4450 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
4451 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
4453 MonoInst
*addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], safety_checks
);
4454 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
4455 if (generic_class_is_reference_type (cfg
, klass
))
4456 emit_write_barrier (cfg
, addr
, sp
[2], -1);
4463 emit_array_unsafe_access (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, int is_set
)
4468 eklass
= mono_class_from_mono_type (fsig
->params
[2]);
4470 eklass
= mono_class_from_mono_type (fsig
->ret
);
4474 return emit_array_store (cfg
, eklass
, args
, FALSE
);
4476 MonoInst
*ins
, *addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1], FALSE
);
4477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &eklass
->byval_arg
, addr
->dreg
, 0);
4483 mini_emit_inst_for_ctor (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4485 MonoInst
*ins
= NULL
;
4486 #ifdef MONO_ARCH_SIMD_INTRINSICS
4487 if (cfg
->opt
& MONO_OPT_SIMD
) {
4488 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4498 emit_memory_barrier (MonoCompile
*cfg
, int kind
)
4500 MonoInst
*ins
= NULL
;
4501 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
4502 MONO_ADD_INS (cfg
->cbb
, ins
);
4503 ins
->backend
.memory_barrier_kind
= kind
;
4509 llvm_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4511 MonoInst
*ins
= NULL
;
4514 /* The LLVM backend supports these intrinsics */
4515 if (cmethod
->klass
== mono_defaults
.math_class
) {
4516 if (strcmp (cmethod
->name
, "Sin") == 0) {
4518 } else if (strcmp (cmethod
->name
, "Cos") == 0) {
4520 } else if (strcmp (cmethod
->name
, "Sqrt") == 0) {
4522 } else if (strcmp (cmethod
->name
, "Abs") == 0 && fsig
->params
[0]->type
== MONO_TYPE_R8
) {
4527 MONO_INST_NEW (cfg
, ins
, opcode
);
4528 ins
->type
= STACK_R8
;
4529 ins
->dreg
= mono_alloc_freg (cfg
);
4530 ins
->sreg1
= args
[0]->dreg
;
4531 MONO_ADD_INS (cfg
->cbb
, ins
);
4535 if (cfg
->opt
& MONO_OPT_CMOV
) {
4536 if (strcmp (cmethod
->name
, "Min") == 0) {
4537 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4539 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
4540 opcode
= OP_IMIN_UN
;
4541 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4543 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
4544 opcode
= OP_LMIN_UN
;
4545 } else if (strcmp (cmethod
->name
, "Max") == 0) {
4546 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4548 if (fsig
->params
[0]->type
== MONO_TYPE_U4
)
4549 opcode
= OP_IMAX_UN
;
4550 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4552 else if (fsig
->params
[0]->type
== MONO_TYPE_U8
)
4553 opcode
= OP_LMAX_UN
;
4558 MONO_INST_NEW (cfg
, ins
, opcode
);
4559 ins
->type
= fsig
->params
[0]->type
== MONO_TYPE_I4
? STACK_I4
: STACK_I8
;
4560 ins
->dreg
= mono_alloc_ireg (cfg
);
4561 ins
->sreg1
= args
[0]->dreg
;
4562 ins
->sreg2
= args
[1]->dreg
;
4563 MONO_ADD_INS (cfg
->cbb
, ins
);
4571 mini_emit_inst_for_sharable_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4573 if (cmethod
->klass
== mono_defaults
.array_class
) {
4574 if (strcmp (cmethod
->name
, "UnsafeStore") == 0)
4575 return emit_array_unsafe_access (cfg
, fsig
, args
, TRUE
);
4576 if (strcmp (cmethod
->name
, "UnsafeLoad") == 0)
4577 return emit_array_unsafe_access (cfg
, fsig
, args
, FALSE
);
4584 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4586 MonoInst
*ins
= NULL
;
4588 static MonoClass
*runtime_helpers_class
= NULL
;
4589 if (! runtime_helpers_class
)
4590 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
4591 "System.Runtime.CompilerServices", "RuntimeHelpers");
4593 if (cmethod
->klass
== mono_defaults
.string_class
) {
4594 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
4595 int dreg
= alloc_ireg (cfg
);
4596 int index_reg
= alloc_preg (cfg
);
4597 int mult_reg
= alloc_preg (cfg
);
4598 int add_reg
= alloc_preg (cfg
);
4600 #if SIZEOF_REGISTER == 8
4601 /* The array reg is 64 bits but the index reg is only 32 */
4602 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
4604 index_reg
= args
[1]->dreg
;
4606 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
4608 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4609 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
4610 add_reg
= ins
->dreg
;
4611 /* Avoid a warning */
4613 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4616 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
4617 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4618 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4619 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
4621 type_from_op (ins
, NULL
, NULL
);
4623 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4624 int dreg
= alloc_ireg (cfg
);
4625 /* Decompose later to allow more optimizations */
4626 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
4627 ins
->type
= STACK_I4
;
4628 ins
->flags
|= MONO_INST_FAULT
;
4629 cfg
->cbb
->has_array_access
= TRUE
;
4630 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
4633 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
4634 int mult_reg
= alloc_preg (cfg
);
4635 int add_reg
= alloc_preg (cfg
);
4637 /* The corlib functions check for oob already. */
4638 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
4639 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4640 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
4641 return cfg
->cbb
->last_ins
;
4644 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
4646 if (strcmp (cmethod
->name
, "GetType") == 0) {
4647 int dreg
= alloc_ireg_ref (cfg
);
4648 int vt_reg
= alloc_preg (cfg
);
4649 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4650 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
4651 type_from_op (ins
, NULL
, NULL
);
4654 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4655 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4656 int dreg
= alloc_ireg (cfg
);
4657 int t1
= alloc_ireg (cfg
);
4659 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
4660 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
4661 ins
->type
= STACK_I4
;
4665 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
4666 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4667 MONO_ADD_INS (cfg
->cbb
, ins
);
4671 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
4672 if (strcmp (cmethod
->name
+ 1, "etGenericValueImpl") == 0)
4673 return emit_array_generic_access (cfg
, fsig
, args
, *cmethod
->name
== 'S');
4675 #ifndef MONO_BIG_ARRAYS
4677 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4680 if ((strcmp (cmethod
->name
, "GetLength") == 0 || strcmp (cmethod
->name
, "GetLowerBound") == 0) && args
[1]->opcode
== OP_ICONST
&& args
[1]->inst_c0
== 0) {
4681 int dreg
= alloc_ireg (cfg
);
4682 int bounds_reg
= alloc_ireg_mp (cfg
);
4683 MonoBasicBlock
*end_bb
, *szarray_bb
;
4684 gboolean get_length
= strcmp (cmethod
->name
, "GetLength") == 0;
4686 NEW_BBLOCK (cfg
, end_bb
);
4687 NEW_BBLOCK (cfg
, szarray_bb
);
4689 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, ins
, OP_LOAD_MEMBASE
, bounds_reg
,
4690 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
4691 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, szarray_bb
);
4693 /* Non-szarray case */
4695 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4696 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
4698 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4699 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
4701 MONO_START_BB (cfg
, szarray_bb
);
4704 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4705 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
4707 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
4708 MONO_START_BB (cfg
, end_bb
);
4710 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, dreg
, dreg
);
4711 ins
->type
= STACK_I4
;
4717 if (cmethod
->name
[0] != 'g')
4720 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
4721 int dreg
= alloc_ireg (cfg
);
4722 int vtable_reg
= alloc_preg (cfg
);
4723 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
4724 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4725 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
4726 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
4727 type_from_op (ins
, NULL
, NULL
);
4730 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4731 int dreg
= alloc_ireg (cfg
);
4733 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4734 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
4735 type_from_op (ins
, NULL
, NULL
);
4740 } else if (cmethod
->klass
== runtime_helpers_class
) {
4742 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
4743 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
4747 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
4748 if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
4749 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
4750 MONO_ADD_INS (cfg
->cbb
, ins
);
4752 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
4753 return emit_memory_barrier (cfg
, FullBarrier
);
4755 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
4757 /* FIXME this should be integrated to the check below once we support the trampoline version */
4758 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4759 if (strcmp (cmethod
->name
, "Enter") == 0 && fsig
->param_count
== 2) {
4760 MonoMethod
*fast_method
= NULL
;
4762 /* Avoid infinite recursion */
4763 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&& !strcmp (cfg
->method
->name
, "FastMonitorEnterV4"))
4766 fast_method
= mono_monitor_get_fast_path (cmethod
);
4770 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
4774 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4775 if (strcmp (cmethod
->name
, "Enter") == 0 && fsig
->param_count
== 1) {
4778 if (COMPILE_LLVM (cfg
)) {
4780 * Pass the argument normally, the LLVM backend will handle the
4781 * calling convention problems.
4783 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4785 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
4786 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4787 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4788 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4791 return (MonoInst
*)call
;
4792 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
4795 if (COMPILE_LLVM (cfg
)) {
4796 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4798 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
4799 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4800 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4801 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4804 return (MonoInst
*)call
;
4806 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4808 MonoMethod
*fast_method
= NULL
;
4810 /* Avoid infinite recursion */
4811 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
4812 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
4813 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
4816 if ((strcmp (cmethod
->name
, "Enter") == 0 && fsig
->param_count
== 2) ||
4817 strcmp (cmethod
->name
, "Exit") == 0)
4818 fast_method
= mono_monitor_get_fast_path (cmethod
);
4822 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
4825 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
4826 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
4827 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
4830 #if SIZEOF_REGISTER == 8
4831 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
4832 /* 64 bit reads are already atomic */
4833 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
4834 ins
->dreg
= mono_alloc_preg (cfg
);
4835 ins
->inst_basereg
= args
[0]->dreg
;
4836 ins
->inst_offset
= 0;
4837 MONO_ADD_INS (cfg
->cbb
, ins
);
4841 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4842 if (strcmp (cmethod
->name
, "Increment") == 0) {
4843 MonoInst
*ins_iconst
;
4846 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4847 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4848 #if SIZEOF_REGISTER == 8
4849 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4850 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4853 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4854 ins_iconst
->inst_c0
= 1;
4855 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4856 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4858 MONO_INST_NEW (cfg
, ins
, opcode
);
4859 ins
->dreg
= mono_alloc_ireg (cfg
);
4860 ins
->inst_basereg
= args
[0]->dreg
;
4861 ins
->inst_offset
= 0;
4862 ins
->sreg2
= ins_iconst
->dreg
;
4863 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4864 MONO_ADD_INS (cfg
->cbb
, ins
);
4866 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
4867 MonoInst
*ins_iconst
;
4870 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4871 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4872 #if SIZEOF_REGISTER == 8
4873 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4874 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4877 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4878 ins_iconst
->inst_c0
= -1;
4879 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4880 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4882 MONO_INST_NEW (cfg
, ins
, opcode
);
4883 ins
->dreg
= mono_alloc_ireg (cfg
);
4884 ins
->inst_basereg
= args
[0]->dreg
;
4885 ins
->inst_offset
= 0;
4886 ins
->sreg2
= ins_iconst
->dreg
;
4887 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4888 MONO_ADD_INS (cfg
->cbb
, ins
);
4890 } else if (strcmp (cmethod
->name
, "Add") == 0) {
4893 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4894 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4895 #if SIZEOF_REGISTER == 8
4896 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4897 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4901 MONO_INST_NEW (cfg
, ins
, opcode
);
4902 ins
->dreg
= mono_alloc_ireg (cfg
);
4903 ins
->inst_basereg
= args
[0]->dreg
;
4904 ins
->inst_offset
= 0;
4905 ins
->sreg2
= args
[1]->dreg
;
4906 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4907 MONO_ADD_INS (cfg
->cbb
, ins
);
4910 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4912 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4913 if (strcmp (cmethod
->name
, "Exchange") == 0) {
4915 gboolean is_ref
= fsig
->params
[0]->type
== MONO_TYPE_OBJECT
;
4917 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4918 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4919 #if SIZEOF_REGISTER == 8
4920 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
4921 (fsig
->params
[0]->type
== MONO_TYPE_I
))
4922 opcode
= OP_ATOMIC_EXCHANGE_I8
;
4924 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I
))
4925 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4930 MONO_INST_NEW (cfg
, ins
, opcode
);
4931 ins
->dreg
= is_ref
? mono_alloc_ireg_ref (cfg
) : mono_alloc_ireg (cfg
);
4932 ins
->inst_basereg
= args
[0]->dreg
;
4933 ins
->inst_offset
= 0;
4934 ins
->sreg2
= args
[1]->dreg
;
4935 MONO_ADD_INS (cfg
->cbb
, ins
);
4937 switch (fsig
->params
[0]->type
) {
4939 ins
->type
= STACK_I4
;
4943 ins
->type
= STACK_I8
;
4945 case MONO_TYPE_OBJECT
:
4946 ins
->type
= STACK_OBJ
;
4949 g_assert_not_reached ();
4952 if (cfg
->gen_write_barriers
&& is_ref
)
4953 emit_write_barrier (cfg
, args
[0], args
[1], -1);
4955 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4957 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4958 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
4960 gboolean is_ref
= mini_type_is_reference (cfg
, fsig
->params
[1]);
4961 if (fsig
->params
[1]->type
== MONO_TYPE_I4
)
4963 else if (is_ref
|| fsig
->params
[1]->type
== MONO_TYPE_I
)
4964 size
= sizeof (gpointer
);
4965 else if (sizeof (gpointer
) == 8 && fsig
->params
[1]->type
== MONO_TYPE_I8
)
4968 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I4
);
4969 ins
->dreg
= is_ref
? alloc_ireg_ref (cfg
) : alloc_ireg (cfg
);
4970 ins
->sreg1
= args
[0]->dreg
;
4971 ins
->sreg2
= args
[1]->dreg
;
4972 ins
->sreg3
= args
[2]->dreg
;
4973 ins
->type
= STACK_I4
;
4974 MONO_ADD_INS (cfg
->cbb
, ins
);
4975 } else if (size
== 8) {
4976 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I8
);
4977 ins
->dreg
= is_ref
? alloc_ireg_ref (cfg
) : alloc_ireg (cfg
);
4978 ins
->sreg1
= args
[0]->dreg
;
4979 ins
->sreg2
= args
[1]->dreg
;
4980 ins
->sreg3
= args
[2]->dreg
;
4981 ins
->type
= STACK_I8
;
4982 MONO_ADD_INS (cfg
->cbb
, ins
);
4984 /* g_assert_not_reached (); */
4986 if (cfg
->gen_write_barriers
&& is_ref
)
4987 emit_write_barrier (cfg
, args
[0], args
[1], -1);
4989 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4993 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
4994 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
4995 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
4996 if (should_insert_brekpoint (cfg
->method
)) {
4997 ins
= mono_emit_jit_icall (cfg
, mono_debugger_agent_user_break
, NULL
);
4999 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5000 MONO_ADD_INS (cfg
->cbb
, ins
);
5004 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
5005 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
5007 EMIT_NEW_ICONST (cfg
, ins
, 1);
5009 EMIT_NEW_ICONST (cfg
, ins
, 0);
5013 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
5015 * There is general branches code for Min/Max, but it does not work for
5017 * http://everything2.com/?node_id=1051618
5021 #ifdef MONO_ARCH_SIMD_INTRINSICS
5022 if (cfg
->opt
& MONO_OPT_SIMD
) {
5023 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
5029 if (COMPILE_LLVM (cfg
)) {
5030 ins
= llvm_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
5035 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
5039 * This entry point could be used later for arbitrary method
5042 inline static MonoInst
*
5043 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
5044 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
5046 if (method
->klass
== mono_defaults
.string_class
) {
5047 /* managed string allocation support */
5048 if (strcmp (method
->name
, "InternalAllocateStr") == 0 && !(mono_profiler_events
& MONO_PROFILE_ALLOCATIONS
) && !(cfg
->opt
& MONO_OPT_SHARED
)) {
5049 MonoInst
*iargs
[2];
5050 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
5051 MonoMethod
*managed_alloc
= NULL
;
5053 g_assert (vtable
); /*Should not fail since it System.String*/
5054 #ifndef MONO_CROSS_COMPILE
5055 managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
5059 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
5060 iargs
[1] = args
[0];
5061 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
5068 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
5070 MonoInst
*store
, *temp
;
5073 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
5074 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
5077 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5078 * would be different than the MonoInst's used to represent arguments, and
5079 * the ldelema implementation can't deal with that.
5080 * Solution: When ldelema is used on an inline argument, create a var for
5081 * it, emit ldelema on that var, and emit the saving code below in
5082 * inline_method () if needed.
5084 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
5085 cfg
->args
[i
] = temp
;
5086 /* This uses cfg->args [i] which is set by the preceeding line */
5087 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
5088 store
->cil_code
= sp
[0]->cil_code
;
5093 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5094 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5096 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5098 check_inline_called_method_name_limit (MonoMethod
*called_method
)
5101 static char *limit
= NULL
;
5103 if (limit
== NULL
) {
5104 char *limit_string
= getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5106 if (limit_string
!= NULL
)
5107 limit
= limit_string
;
5109 limit
= (char *) "";
5112 if (limit
[0] != '\0') {
5113 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
5115 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
5116 g_free (called_method_name
);
5118 //return (strncmp_result <= 0);
5119 return (strncmp_result
== 0);
5126 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5128 check_inline_caller_method_name_limit (MonoMethod
*caller_method
)
5131 static char *limit
= NULL
;
5133 if (limit
== NULL
) {
5134 char *limit_string
= getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5135 if (limit_string
!= NULL
) {
5136 limit
= limit_string
;
5138 limit
= (char *) "";
5142 if (limit
[0] != '\0') {
5143 char *caller_method_name
= mono_method_full_name (caller_method
, TRUE
);
5145 strncmp_result
= strncmp (caller_method_name
, limit
, strlen (limit
));
5146 g_free (caller_method_name
);
5148 //return (strncmp_result <= 0);
5149 return (strncmp_result
== 0);
5157 emit_init_rvar (MonoCompile
*cfg
, MonoInst
*rvar
, MonoType
*rtype
)
5159 static double r8_0
= 0.0;
5162 switch (rvar
->type
) {
5164 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
5167 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
5172 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
5175 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
5176 ins
->type
= STACK_R8
;
5177 ins
->inst_p0
= (void*)&r8_0
;
5178 ins
->dreg
= rvar
->dreg
;
5179 MONO_ADD_INS (cfg
->cbb
, ins
);
5182 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (rtype
));
5185 g_assert_not_reached ();
5190 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
5191 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_always
)
5193 MonoInst
*ins
, *rvar
= NULL
;
5194 MonoMethodHeader
*cheader
;
5195 MonoBasicBlock
*ebblock
, *sbblock
;
5197 MonoMethod
*prev_inlined_method
;
5198 MonoInst
**prev_locals
, **prev_args
;
5199 MonoType
**prev_arg_types
;
5200 guint prev_real_offset
;
5201 GHashTable
*prev_cbb_hash
;
5202 MonoBasicBlock
**prev_cil_offset_to_bb
;
5203 MonoBasicBlock
*prev_cbb
;
5204 unsigned char* prev_cil_start
;
5205 guint32 prev_cil_offset_to_bb_len
;
5206 MonoMethod
*prev_current_method
;
5207 MonoGenericContext
*prev_generic_context
;
5208 gboolean ret_var_set
, prev_ret_var_set
, virtual = FALSE
;
5210 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
5212 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5213 if ((! inline_always
) && ! check_inline_called_method_name_limit (cmethod
))
5216 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5217 if ((! inline_always
) && ! check_inline_caller_method_name_limit (cfg
->method
))
5221 if (cfg
->verbose_level
> 2)
5222 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
5224 if (!cmethod
->inline_info
) {
5225 cfg
->stat_inlineable_methods
++;
5226 cmethod
->inline_info
= 1;
5229 /* allocate local variables */
5230 cheader
= mono_method_get_header (cmethod
);
5232 if (cheader
== NULL
|| mono_loader_get_last_error ()) {
5233 MonoLoaderError
*error
= mono_loader_get_last_error ();
5236 mono_metadata_free_mh (cheader
);
5237 if (inline_always
&& error
)
5238 mono_cfg_set_exception (cfg
, error
->exception_type
);
5240 mono_loader_clear_error ();
5244 /*Must verify before creating locals as it can cause the JIT to assert.*/
5245 if (mono_compile_is_broken (cfg
, cmethod
, FALSE
)) {
5246 mono_metadata_free_mh (cheader
);
5250 /* allocate space to store the return value */
5251 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
5252 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
5255 prev_locals
= cfg
->locals
;
5256 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
5257 for (i
= 0; i
< cheader
->num_locals
; ++i
)
5258 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
5260 /* allocate start and end blocks */
5261 /* This is needed so if the inline is aborted, we can clean up */
5262 NEW_BBLOCK (cfg
, sbblock
);
5263 sbblock
->real_offset
= real_offset
;
5265 NEW_BBLOCK (cfg
, ebblock
);
5266 ebblock
->block_num
= cfg
->num_bblocks
++;
5267 ebblock
->real_offset
= real_offset
;
5269 prev_args
= cfg
->args
;
5270 prev_arg_types
= cfg
->arg_types
;
5271 prev_inlined_method
= cfg
->inlined_method
;
5272 cfg
->inlined_method
= cmethod
;
5273 cfg
->ret_var_set
= FALSE
;
5274 cfg
->inline_depth
++;
5275 prev_real_offset
= cfg
->real_offset
;
5276 prev_cbb_hash
= cfg
->cbb_hash
;
5277 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
5278 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
5279 prev_cil_start
= cfg
->cil_start
;
5280 prev_cbb
= cfg
->cbb
;
5281 prev_current_method
= cfg
->current_method
;
5282 prev_generic_context
= cfg
->generic_context
;
5283 prev_ret_var_set
= cfg
->ret_var_set
;
5285 if (*ip
== CEE_CALLVIRT
&& !(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
))
5288 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, virtual);
5290 ret_var_set
= cfg
->ret_var_set
;
5292 cfg
->inlined_method
= prev_inlined_method
;
5293 cfg
->real_offset
= prev_real_offset
;
5294 cfg
->cbb_hash
= prev_cbb_hash
;
5295 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
5296 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
5297 cfg
->cil_start
= prev_cil_start
;
5298 cfg
->locals
= prev_locals
;
5299 cfg
->args
= prev_args
;
5300 cfg
->arg_types
= prev_arg_types
;
5301 cfg
->current_method
= prev_current_method
;
5302 cfg
->generic_context
= prev_generic_context
;
5303 cfg
->ret_var_set
= prev_ret_var_set
;
5304 cfg
->inline_depth
--;
5306 if ((costs
>= 0 && costs
< 60) || inline_always
) {
5307 if (cfg
->verbose_level
> 2)
5308 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
5310 cfg
->stat_inlined_methods
++;
5312 /* always add some code to avoid block split failures */
5313 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5314 MONO_ADD_INS (prev_cbb
, ins
);
5316 prev_cbb
->next_bb
= sbblock
;
5317 link_bblock (cfg
, prev_cbb
, sbblock
);
5320 * Get rid of the begin and end bblocks if possible to aid local
5323 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
5325 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
5326 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
5328 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
5329 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
5330 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
5332 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
5333 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
5334 cfg
->cbb
= prev_cbb
;
5338 * It's possible that the rvar is set in some prev bblock, but not in others.
5344 for (i
= 0; i
< ebblock
->in_count
; ++i
) {
5345 bb
= ebblock
->in_bb
[i
];
5347 if (bb
->last_ins
&& bb
->last_ins
->opcode
== OP_NOT_REACHED
) {
5350 emit_init_rvar (cfg
, rvar
, fsig
->ret
);
5360 * If the inlined method contains only a throw, then the ret var is not
5361 * set, so set it to a dummy value.
5364 emit_init_rvar (cfg
, rvar
, fsig
->ret
);
5366 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
5369 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
5372 if (cfg
->verbose_level
> 2)
5373 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod
, TRUE
), costs
);
5374 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
5375 mono_loader_clear_error ();
5377 /* This gets rid of the newly added bblocks */
5378 cfg
->cbb
= prev_cbb
;
5380 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
5385 * Some of these comments may well be out-of-date.
5386 * Design decisions: we do a single pass over the IL code (and we do bblock
5387 * splitting/merging in the few cases when it's required: a back jump to an IL
5388 * address that was not already seen as bblock starting point).
5389 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5390 * Complex operations are decomposed in simpler ones right away. We need to let the
5391 * arch-specific code peek and poke inside this process somehow (except when the
5392 * optimizations can take advantage of the full semantic info of coarse opcodes).
5393 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5394 * MonoInst->opcode initially is the IL opcode or some simplification of that
5395 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5396 * opcode with value bigger than OP_LAST.
5397 * At this point the IR can be handed over to an interpreter, a dumb code generator
5398 * or to the optimizing code generator that will translate it to SSA form.
5400 * Profiling directed optimizations.
5401 * We may compile by default with few or no optimizations and instrument the code
5402 * or the user may indicate what methods to optimize the most either in a config file
5403 * or through repeated runs where the compiler applies offline the optimizations to
5404 * each method and then decides if it was worth it.
/*
 * IL-decoding sanity checks. Each macro bails out through the UNVERIFIED /
 * LOAD_ERROR machinery (defined elsewhere in this file) when its condition
 * fails, so they may only be used inside functions that provide those labels.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
5420 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
5422 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
5424 return b
== NULL
|| b
== bb
;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) once, creating a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch. Basic blocks whose last instruction is CEE_THROW are flagged
 * out_of_line so later passes can move them off the hot path.
 *
 *   Returns 0 on success; on malformed IL returns non-zero with *POS set to
 * the offending instruction. REAL_OFFSET is unused in this body.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip by the operand size; branches also register targets. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* targets are relative to the end of the whole switch instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M. For wrapper methods the token
 * indexes the wrapper's own data table; otherwise it is looked up in the
 * image of M's class. Unlike mini_get_method () this does not reject methods
 * with open (unbound generic) types.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method;

	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);

	method = mono_get_method_full (m->klass->image, token, klass, context);

	return method;
}
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (NULL is returned)
 * since they cannot be compiled directly.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
		method = NULL;

	return method;
}
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 * look it up in their wrapper data, normal methods in their image. The class
 * is initialized before being returned; NULL on resolution failure.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
{
	MonoClass *klass;

	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
	else
		klass = mono_class_get_full (method->klass->image, token, context);
	if (klass)
		mono_class_init (klass);
	return klass;
}
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
static gboolean
check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* Inlining across a declarative-security boundary is not allowed. */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException(Exception) method,
 * resolving and caching it on first use. The cached value is a process-wide
 * static; lookup is assumed to happen under the JIT's usual locking.
 */
static MonoMethod*
throw_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_exception:
 *
 *   Emit IR into CFG that raises the pre-constructed exception object EX at
 * runtime, by calling SecurityManager.ThrowException with EX as a pointer
 * constant.
 */
static void
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
{
	MonoMethod *thrower = throw_exception ();
	MonoInst *args [1];

	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
static MonoMethod*
get_original_method (MonoMethod *method)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return method;

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		return NULL;

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
}
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) may
 * not access FIELD, emit code that throws the corresponding security
 * exception at the current point in CFG. BBLOCK/IP are unused in this body.
 */
static void
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	if (ex)
		emit_throw_exception (cfg, ex);
}
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) may
 * not call CALLEE, emit code that throws the corresponding security
 * exception at the current point in CFG. BBLOCK/IP are unused in this body.
 */
static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	if (ex)
		emit_throw_exception (cfg, ex);
}
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 *
 *   On a match, *OUT_SIZE receives the byte size (element size * LEN) and
 * *OUT_FIELD_TOKEN the token of the RVA-backed field; for AOT compilation
 * the RVA itself is returned (boxed in the pointer) so the lookup can be
 * redone at load time. Returns NULL when the pattern does not match or the
 * element type cannot be copied verbatim on this byte order.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* The call must really be RuntimeHelpers.InitializeArray from corlib. */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The field's blob must be large enough to fill the whole array. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP. The method header is
 * queued on cfg->headers_to_free so it is released with the compile.
 */
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *method_code;
	MonoMethodHeader *header = mono_method_get_header (method);

	if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	else
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
/*
 * set_exception_object:
 *
 *   Abort the compilation with a caller-supplied managed exception object.
 * The object is stored in cfg->exception_ptr, which is registered as a GC
 * root so the object stays alive until the failure is reported.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the stack value SP[0] into local variable N. When the
 * store would be a plain register move and SP[0] is the constant instruction
 * just emitted at the end of the current bblock, the move is elided by
 * retargeting the constant's destination register.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 *
 *   Handles the "ldloca N; initobj T" pattern by initializing local N
 * directly (NULL for reference types, vzero for value types), avoiding the
 * address-taken local. SIZE selects the short (1-byte index) or long
 * (2-byte index) ldloca encoding. Returns the IL address just past the
 * consumed initobj on success, NULL when the pattern is not optimizable.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
/*
 * is_exception_class:
 *
 *   Return TRUE if CLASS is System.Exception or derives from it, walking
 * the parent chain up to the root.
 */
static gboolean
is_exception_class (MonoClass *class)
{
	while (class) {
		if (class == mono_defaults.exception_class)
			return TRUE;
		class = class->parent;
	}
	return FALSE;
}
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is cached on the assembly
 * (jit_optimizer_disabled / jit_optimizer_disabled_inited), with memory
 * barriers ordering the value store before the inited flag.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;

			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD with signature FSIG can
 * be compiled as a real tail call. The arch may veto via
 * MONO_ARCH_USE_OP_TAIL_CALL; otherwise the signatures must match and the
 * return must not be a struct. Any argument that can alias the caller's
 * stack (byref, pointer, fnptr, valuetype "this"), pinvoke targets, lmf-
 * saving callers and most wrappers also disqualify the call.
 */
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		if (!mono_debug_count ())
			supported_tail_call = FALSE;
	}
#endif

	return supported_tail_call;
}
5935 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5936 * it to the thread local value based on the tls_offset field. Every other kind of access to
5937 * the field causes an assert.
5940 is_magic_tls_access (MonoClassField
*field
)
5942 if (strcmp (field
->name
, "tlsdata"))
5944 if (strcmp (field
->parent
->name
, "ThreadLocal`1"))
5946 return field
->parent
->image
== mono_defaults
.corlib
;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
static MonoInst*
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
{
	MonoInst *addr;
	int static_data_reg, array_reg, dreg;
	int offset2_reg, idx_reg;
	// inlined access to the tls data
	// idx = (offset >> 24) - 1;
	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
	static_data_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
	idx_reg = alloc_ireg (cfg);
	/* idx = (offset >> 24) - 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	/* scale idx by pointer size to index the static_data array */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
	array_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	offset2_reg = alloc_ireg (cfg);
	/* low 24 bits of the offset are the byte offset inside the chunk */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
	dreg = alloc_ireg (cfg);
	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
	return addr;
}
/*
 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
 * this address is cached per-method in cached_tls_addr.
 */
static MonoInst*
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
{
	MonoInst *load, *addr, *temp, *store, *thread_ins;
	MonoClassField *offset_field;

	/* reuse the per-method cached address when it was already computed */
	if (*cached_tls_addr) {
		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
		return addr;
	}
	thread_ins = mono_get_thread_intrinsic (cfg);
	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");

	/* load the tls offset stored in the ThreadLocal`1 object */
	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
	if (thread_ins) {
		MONO_ADD_INS (cfg->cbb, thread_ins);
	} else {
		/* no arch intrinsic for the current thread: fall back to a managed call */
		MonoMethod *thread_method;
		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
	}
	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
	addr->klass = mono_class_from_mono_type (tls_field->type);
	addr->type = STACK_MP;
	/* cache the computed address in a local for subsequent accesses */
	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);

	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
	return addr;
}
6015 * mono_method_to_ir:
6017 * Translate the .net IL into linear IR.
6020 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
6021 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
6022 guint inline_offset
, gboolean is_virtual_call
)
6025 MonoInst
*ins
, **sp
, **stack_start
;
6026 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
6027 MonoSimpleBasicBlock
*bb
= NULL
, *original_bb
= NULL
;
6028 MonoMethod
*cmethod
, *method_definition
;
6029 MonoInst
**arg_array
;
6030 MonoMethodHeader
*header
;
6032 guint32 token
, ins_flag
;
6034 MonoClass
*constrained_call
= NULL
;
6035 unsigned char *ip
, *end
, *target
, *err_pos
;
6036 static double r8_0
= 0.0;
6037 MonoMethodSignature
*sig
;
6038 MonoGenericContext
*generic_context
= NULL
;
6039 MonoGenericContainer
*generic_container
= NULL
;
6040 MonoType
**param_types
;
6041 int i
, n
, start_new_bblock
, dreg
;
6042 int num_calls
= 0, inline_costs
= 0;
6043 int breakpoint_id
= 0;
6045 MonoBoolean security
, pinvoke
;
6046 MonoSecurityManager
* secman
= NULL
;
6047 MonoDeclSecurityActions actions
;
6048 GSList
*class_inits
= NULL
;
6049 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
6051 gboolean init_locals
, seq_points
, skip_dead_blocks
;
6052 gboolean disable_inline
, sym_seq_points
= FALSE
;
6053 MonoInst
*cached_tls_addr
= NULL
;
6054 MonoDebugMethodInfo
*minfo
;
6055 MonoBitSet
*seq_point_locs
= NULL
;
6057 disable_inline
= is_jit_optimizer_disabled (method
);
6059 /* serialization and xdomain stuff may need access to private fields and methods */
6060 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
6061 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
6062 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
6063 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
6064 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
6065 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
6067 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
6069 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6070 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
6071 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
6072 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
6073 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_STELEMREF
;
6075 image
= method
->klass
->image
;
6076 header
= mono_method_get_header (method
);
6078 MonoLoaderError
*error
;
6080 if ((error
= mono_loader_get_last_error ())) {
6081 mono_cfg_set_exception (cfg
, error
->exception_type
);
6083 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INVALID_PROGRAM
);
6084 cfg
->exception_message
= g_strdup_printf ("Missing or incorrect header for method %s", cfg
->method
->name
);
6086 goto exception_exit
;
6088 generic_container
= mono_method_get_generic_container (method
);
6089 sig
= mono_method_signature (method
);
6090 num_args
= sig
->hasthis
+ sig
->param_count
;
6091 ip
= (unsigned char*)header
->code
;
6092 cfg
->cil_start
= ip
;
6093 end
= ip
+ header
->code_size
;
6094 cfg
->stat_cil_code_size
+= header
->code_size
;
6095 init_locals
= header
->init_locals
;
6097 seq_points
= cfg
->gen_seq_points
&& cfg
->method
== method
;
6098 #ifdef PLATFORM_ANDROID
6099 seq_points
&= cfg
->method
->wrapper_type
== MONO_WRAPPER_NONE
;
6102 if (cfg
->gen_seq_points
&& cfg
->method
== method
) {
6103 minfo
= mono_debug_lookup_method (method
);
6105 int i
, n_il_offsets
;
6109 mono_debug_symfile_get_line_numbers_full (minfo
, NULL
, NULL
, &n_il_offsets
, &il_offsets
, &line_numbers
, NULL
, NULL
);
6110 seq_point_locs
= mono_bitset_mem_new (mono_mempool_alloc0 (cfg
->mempool
, mono_bitset_alloc_size (header
->code_size
, 0)), header
->code_size
, 0);
6111 sym_seq_points
= TRUE
;
6112 for (i
= 0; i
< n_il_offsets
; ++i
) {
6113 if (il_offsets
[i
] < header
->code_size
)
6114 mono_bitset_set_fast (seq_point_locs
, il_offsets
[i
]);
6120 * Methods without init_locals set could cause asserts in various passes
6125 method_definition
= method
;
6126 while (method_definition
->is_inflated
) {
6127 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
6128 method_definition
= imethod
->declaring
;
6131 /* SkipVerification is not allowed if core-clr is enabled */
6132 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
6134 dont_verify_stloc
= TRUE
;
6137 if (mono_debug_using_mono_debugger ())
6138 cfg
->keep_cil_nops
= TRUE
;
6140 if (sig
->is_inflated
)
6141 generic_context
= mono_method_get_context (method
);
6142 else if (generic_container
)
6143 generic_context
= &generic_container
->context
;
6144 cfg
->generic_context
= generic_context
;
6146 if (!cfg
->generic_sharing_context
)
6147 g_assert (!sig
->has_type_parameters
);
6149 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
6150 g_assert (method
->is_inflated
);
6151 g_assert (mono_method_get_context (method
)->method_inst
);
6153 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
6154 g_assert (sig
->generic_param_count
);
6156 if (cfg
->method
== method
) {
6157 cfg
->real_offset
= 0;
6159 cfg
->real_offset
= inline_offset
;
6162 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
6163 cfg
->cil_offset_to_bb_len
= header
->code_size
;
6165 cfg
->current_method
= method
;
6167 if (cfg
->verbose_level
> 2)
6168 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
6170 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
6172 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
6173 for (n
= 0; n
< sig
->param_count
; ++n
)
6174 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
6175 cfg
->arg_types
= param_types
;
6177 dont_inline
= g_list_prepend (dont_inline
, method
);
6178 if (cfg
->method
== method
) {
6180 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
6181 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
6184 NEW_BBLOCK (cfg
, start_bblock
);
6185 cfg
->bb_entry
= start_bblock
;
6186 start_bblock
->cil_code
= NULL
;
6187 start_bblock
->cil_length
= 0;
6188 #if defined(__native_client_codegen__)
6189 MONO_INST_NEW (cfg
, ins
, OP_NACL_GC_SAFE_POINT
);
6190 ins
->dreg
= alloc_dreg (cfg
, STACK_I4
);
6191 MONO_ADD_INS (start_bblock
, ins
);
6195 NEW_BBLOCK (cfg
, end_bblock
);
6196 cfg
->bb_exit
= end_bblock
;
6197 end_bblock
->cil_code
= NULL
;
6198 end_bblock
->cil_length
= 0;
6199 end_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
6200 g_assert (cfg
->num_bblocks
== 2);
6202 arg_array
= cfg
->args
;
6204 if (header
->num_clauses
) {
6205 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
6206 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
6208 /* handle exception clauses */
6209 for (i
= 0; i
< header
->num_clauses
; ++i
) {
6210 MonoBasicBlock
*try_bb
;
6211 MonoExceptionClause
*clause
= &header
->clauses
[i
];
6212 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
6213 try_bb
->real_offset
= clause
->try_offset
;
6214 try_bb
->try_start
= TRUE
;
6215 try_bb
->region
= ((i
+ 1) << 8) | clause
->flags
;
6216 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
6217 tblock
->real_offset
= clause
->handler_offset
;
6218 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
6220 link_bblock (cfg
, try_bb
, tblock
);
6222 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
6223 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
6225 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
6226 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
6227 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
6228 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
6229 MONO_ADD_INS (tblock
, ins
);
6231 if (seq_points
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
) {
6232 /* finally clauses already have a seq point */
6233 NEW_SEQ_POINT (cfg
, ins
, clause
->handler_offset
, TRUE
);
6234 MONO_ADD_INS (tblock
, ins
);
6237 /* todo: is a fault block unsafe to optimize? */
6238 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
6239 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
6243 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6245 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6247 /* catch and filter blocks get the exception object on the stack */
6248 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
6249 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
6250 MonoInst
*dummy_use
;
6252 /* mostly like handle_stack_args (), but just sets the input args */
6253 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6254 tblock
->in_scount
= 1;
6255 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
6256 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
6259 * Add a dummy use for the exvar so its liveness info will be
6263 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
6265 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
6266 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
6267 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
6268 tblock
->real_offset
= clause
->data
.filter_offset
;
6269 tblock
->in_scount
= 1;
6270 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
6271 /* The filter block shares the exvar with the handler block */
6272 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
6273 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
6274 MONO_ADD_INS (tblock
, ins
);
6278 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
6279 clause
->data
.catch_class
&&
6280 cfg
->generic_sharing_context
&&
6281 mono_class_check_context_used (clause
->data
.catch_class
)) {
6283 * In shared generic code with catch
6284 * clauses containing type variables
6285 * the exception handling code has to
6286 * be able to get to the rgctx.
6287 * Therefore we have to make sure that
6288 * the vtable/mrgctx argument (for
6289 * static or generic methods) or the
6290 * "this" argument (for non-static
6291 * methods) are live.
6293 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
6294 mini_method_get_context (method
)->method_inst
||
6295 method
->klass
->valuetype
) {
6296 mono_get_vtable_var (cfg
);
6298 MonoInst
*dummy_use
;
6300 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
6305 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
6306 cfg
->cbb
= start_bblock
;
6307 cfg
->args
= arg_array
;
6308 mono_save_args (cfg
, sig
, inline_args
);
6311 /* FIRST CODE BLOCK */
6312 NEW_BBLOCK (cfg
, bblock
);
6313 bblock
->cil_code
= ip
;
6317 ADD_BBLOCK (cfg
, bblock
);
6319 if (cfg
->method
== method
) {
6320 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
6321 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
6322 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
6323 MONO_ADD_INS (bblock
, ins
);
6327 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6328 secman
= mono_security_manager_get_methods ();
6330 security
= (secman
&& mono_method_has_declsec (method
));
6331 /* at this point having security doesn't mean we have any code to generate */
6332 if (security
&& (cfg
->method
== method
)) {
6333 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6334 * And we do not want to enter the next section (with allocation) if we
6335 * have nothing to generate */
6336 security
= mono_declsec_get_demands (method
, &actions
);
6339 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6340 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
6342 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
6343 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6344 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
6346 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6347 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
6351 mono_custom_attrs_free (custom
);
6354 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
6355 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
6359 mono_custom_attrs_free (custom
);
6362 /* not a P/Invoke after all */
6367 if ((init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
6368 /* we use a separate basic block for the initialization code */
6369 NEW_BBLOCK (cfg
, init_localsbb
);
6370 cfg
->bb_init
= init_localsbb
;
6371 init_localsbb
->real_offset
= cfg
->real_offset
;
6372 start_bblock
->next_bb
= init_localsbb
;
6373 init_localsbb
->next_bb
= bblock
;
6374 link_bblock (cfg
, start_bblock
, init_localsbb
);
6375 link_bblock (cfg
, init_localsbb
, bblock
);
6377 cfg
->cbb
= init_localsbb
;
6379 start_bblock
->next_bb
= bblock
;
6380 link_bblock (cfg
, start_bblock
, bblock
);
6383 /* at this point we know, if security is TRUE, that some code needs to be generated */
6384 if (security
&& (cfg
->method
== method
)) {
6387 cfg
->stat_cas_demand_generation
++;
6389 if (actions
.demand
.blob
) {
6390 /* Add code for SecurityAction.Demand */
6391 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
6392 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
6393 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6394 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
6396 if (actions
.noncasdemand
.blob
) {
6397 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6398 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6399 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
6400 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
6401 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6402 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
6404 if (actions
.demandchoice
.blob
) {
6405 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6406 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
6407 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
6408 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6409 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
6413 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6415 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
6418 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
6419 /* check if this is native code, e.g. an icall or a p/invoke */
6420 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
6421 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
6423 gboolean pinvk
= (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
);
6424 gboolean icall
= (wrapped
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
);
6426 /* if this ia a native call then it can only be JITted from platform code */
6427 if ((icall
|| pinvk
) && method
->klass
&& method
->klass
->image
) {
6428 if (!mono_security_core_clr_is_platform_image (method
->klass
->image
)) {
6429 MonoException
*ex
= icall
? mono_get_exception_security () :
6430 mono_get_exception_method_access ();
6431 emit_throw_exception (cfg
, ex
);
6438 if (header
->code_size
== 0)
6441 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
6446 if (cfg
->method
== method
)
6447 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
6449 for (n
= 0; n
< header
->num_locals
; ++n
) {
6450 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
6455 /* We force the vtable variable here for all shared methods
6456 for the possibility that they might show up in a stack
6457 trace where their exact instantiation is needed. */
6458 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
6459 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
6460 mini_method_get_context (method
)->method_inst
||
6461 method
->klass
->valuetype
) {
6462 mono_get_vtable_var (cfg
);
6464 /* FIXME: Is there a better way to do this?
6465 We need the variable live for the duration
6466 of the whole method. */
6467 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
6471 /* add a check for this != NULL to inlined methods */
6472 if (is_virtual_call
) {
6475 NEW_ARGLOAD (cfg
, arg_ins
, 0);
6476 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
6477 MONO_EMIT_NEW_CHECK_THIS (cfg
, arg_ins
->dreg
);
6480 skip_dead_blocks
= !dont_verify
;
6481 if (skip_dead_blocks
) {
6482 original_bb
= bb
= mono_basic_block_split (method
, &error
);
6483 if (!mono_error_ok (&error
)) {
6484 mono_error_cleanup (&error
);
6490 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6491 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
6494 start_new_bblock
= 0;
6497 if (cfg
->method
== method
)
6498 cfg
->real_offset
= ip
- header
->code
;
6500 cfg
->real_offset
= inline_offset
;
6505 if (start_new_bblock
) {
6506 bblock
->cil_length
= ip
- bblock
->cil_code
;
6507 if (start_new_bblock
== 2) {
6508 g_assert (ip
== tblock
->cil_code
);
6510 GET_BBLOCK (cfg
, tblock
, ip
);
6512 bblock
->next_bb
= tblock
;
6515 start_new_bblock
= 0;
6516 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
6517 if (cfg
->verbose_level
> 3)
6518 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
6519 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
6523 g_slist_free (class_inits
);
6526 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
6527 link_bblock (cfg
, bblock
, tblock
);
6528 if (sp
!= stack_start
) {
6529 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6531 CHECK_UNVERIFIABLE (cfg
);
6533 bblock
->next_bb
= tblock
;
6536 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
6537 if (cfg
->verbose_level
> 3)
6538 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
6539 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
6542 g_slist_free (class_inits
);
6547 if (skip_dead_blocks
) {
6548 int ip_offset
= ip
- header
->code
;
6550 if (ip_offset
== bb
->end
)
6554 int op_size
= mono_opcode_size (ip
, end
);
6555 g_assert (op_size
> 0); /*The BB formation pass must catch all bad ops*/
6557 if (cfg
->verbose_level
> 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset
);
6559 if (ip_offset
+ op_size
== bb
->end
) {
6560 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6561 MONO_ADD_INS (bblock
, ins
);
6562 start_new_bblock
= 1;
6570 * Sequence points are points where the debugger can place a breakpoint.
6571 * Currently, we generate these automatically at points where the IL
6574 if (seq_points
&& ((sp
== stack_start
) || (sym_seq_points
&& mono_bitset_test_fast (seq_point_locs
, ip
- header
->code
)))) {
6576 * Make methods interruptable at the beginning, and at the targets of
6577 * backward branches.
6578 * Also, do this at the start of every bblock in methods with clauses too,
6579 * to be able to handle instructions with inprecise control flow like
6581 * Backward branches are handled at the end of method-to-ir ().
6583 gboolean intr_loc
= ip
== header
->code
|| (!cfg
->cbb
->last_ins
&& cfg
->header
->num_clauses
);
6585 /* Avoid sequence points on empty IL like .volatile */
6586 // FIXME: Enable this
6587 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6588 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, intr_loc
);
6589 MONO_ADD_INS (cfg
->cbb
, ins
);
6592 bblock
->real_offset
= cfg
->real_offset
;
6594 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
6595 guint32 cil_offset
= ip
- header
->code
;
6596 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
6598 /* TODO: Use an increment here */
6599 #if defined(TARGET_X86)
6600 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
6601 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
6603 MONO_ADD_INS (cfg
->cbb
, ins
);
6605 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
6606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
6610 if (cfg
->verbose_level
> 3)
6611 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
6615 if (seq_points
&& !sym_seq_points
&& sp
!= stack_start
) {
6617 * The C# compiler uses these nops to notify the JIT that it should
6618 * insert seq points.
6620 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, FALSE
);
6621 MONO_ADD_INS (cfg
->cbb
, ins
);
6623 if (cfg
->keep_cil_nops
)
6624 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
6626 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6628 MONO_ADD_INS (bblock
, ins
);
6631 if (should_insert_brekpoint (cfg
->method
)) {
6632 ins
= mono_emit_jit_icall (cfg
, mono_debugger_agent_user_break
, NULL
);
6634 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6637 MONO_ADD_INS (bblock
, ins
);
6643 CHECK_STACK_OVF (1);
6644 n
= (*ip
)-CEE_LDARG_0
;
6646 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
6654 CHECK_STACK_OVF (1);
6655 n
= (*ip
)-CEE_LDLOC_0
;
6657 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
6666 n
= (*ip
)-CEE_STLOC_0
;
6669 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
6671 emit_stloc_ir (cfg
, sp
, header
, n
);
6678 CHECK_STACK_OVF (1);
6681 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
6687 CHECK_STACK_OVF (1);
6690 NEW_ARGLOADA (cfg
, ins
, n
);
6691 MONO_ADD_INS (cfg
->cbb
, ins
);
6701 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
6703 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
6708 CHECK_STACK_OVF (1);
6711 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
6715 case CEE_LDLOCA_S
: {
6716 unsigned char *tmp_ip
;
6718 CHECK_STACK_OVF (1);
6719 CHECK_LOCAL (ip
[1]);
6721 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
6727 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
6736 CHECK_LOCAL (ip
[1]);
6737 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
6739 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
6744 CHECK_STACK_OVF (1);
6745 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
6746 ins
->type
= STACK_OBJ
;
6751 CHECK_STACK_OVF (1);
6752 EMIT_NEW_ICONST (cfg
, ins
, -1);
6765 CHECK_STACK_OVF (1);
6766 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
6772 CHECK_STACK_OVF (1);
6774 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
6780 CHECK_STACK_OVF (1);
6781 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
6787 CHECK_STACK_OVF (1);
6788 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6789 ins
->type
= STACK_I8
;
6790 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6792 ins
->inst_l
= (gint64
)read64 (ip
);
6793 MONO_ADD_INS (bblock
, ins
);
6799 gboolean use_aotconst
= FALSE
;
6801 #ifdef TARGET_POWERPC
6802 /* FIXME: Clean this up */
6803 if (cfg
->compile_aot
)
6804 use_aotconst
= TRUE
;
6807 /* FIXME: we should really allocate this only late in the compilation process */
6808 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
6810 CHECK_STACK_OVF (1);
6816 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
6818 dreg
= alloc_freg (cfg
);
6819 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
6820 ins
->type
= STACK_R8
;
6822 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
6823 ins
->type
= STACK_R8
;
6824 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6826 MONO_ADD_INS (bblock
, ins
);
6836 gboolean use_aotconst
= FALSE
;
6838 #ifdef TARGET_POWERPC
6839 /* FIXME: Clean this up */
6840 if (cfg
->compile_aot
)
6841 use_aotconst
= TRUE
;
6844 /* FIXME: we should really allocate this only late in the compilation process */
6845 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
6847 CHECK_STACK_OVF (1);
6853 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
6855 dreg
= alloc_freg (cfg
);
6856 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
6857 ins
->type
= STACK_R8
;
6859 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
6860 ins
->type
= STACK_R8
;
6861 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6863 MONO_ADD_INS (bblock
, ins
);
6872 MonoInst
*temp
, *store
;
6874 CHECK_STACK_OVF (1);
6878 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
6879 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
6881 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6884 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6897 if (sp
[0]->type
== STACK_R8
)
6898 /* we need to pop the value from the x86 FP stack */
6899 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
6905 INLINE_FAILURE ("jmp");
6906 GSHAREDVT_FAILURE (*ip
);
6909 if (stack_start
!= sp
)
6911 token
= read32 (ip
+ 1);
6912 /* FIXME: check the signature matches */
6913 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6915 if (!cmethod
|| mono_loader_get_last_error ())
6918 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
6919 GENERIC_SHARING_FAILURE (CEE_JMP
);
6921 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6922 CHECK_CFG_EXCEPTION
;
6924 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6926 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
6929 /* Handle tail calls similarly to calls */
6930 n
= fsig
->param_count
+ fsig
->hasthis
;
6932 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6933 call
->method
= cmethod
;
6934 call
->tail_call
= TRUE
;
6935 call
->signature
= mono_method_signature (cmethod
);
6936 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6937 call
->inst
.inst_p0
= cmethod
;
6938 for (i
= 0; i
< n
; ++i
)
6939 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6941 mono_arch_emit_call (cfg
, call
);
6942 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
6945 for (i
= 0; i
< num_args
; ++i
)
6946 /* Prevent arguments from being optimized away */
6947 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6949 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6950 ins
= (MonoInst
*)call
;
6951 ins
->inst_p0
= cmethod
;
6952 MONO_ADD_INS (bblock
, ins
);
6956 start_new_bblock
= 1;
6961 case CEE_CALLVIRT
: {
6962 MonoInst
*addr
= NULL
;
6963 MonoMethodSignature
*fsig
= NULL
;
6965 int virtual = *ip
== CEE_CALLVIRT
;
6966 int calli
= *ip
== CEE_CALLI
;
6967 gboolean pass_imt_from_rgctx
= FALSE
;
6968 MonoInst
*imt_arg
= NULL
;
6969 gboolean pass_vtable
= FALSE
;
6970 gboolean pass_mrgctx
= FALSE
;
6971 MonoInst
*vtable_arg
= NULL
;
6972 gboolean check_this
= FALSE
;
6973 gboolean supported_tail_call
= FALSE
;
6974 gboolean need_seq_point
= FALSE
;
6977 token
= read32 (ip
+ 1);
6980 GSHAREDVT_FAILURE (*ip
);
6985 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
6986 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
6988 fsig
= mono_metadata_parse_signature (image
, token
);
6990 n
= fsig
->param_count
+ fsig
->hasthis
;
6992 if (method
->dynamic
&& fsig
->pinvoke
) {
6996 * This is a call through a function pointer using a pinvoke
6997 * signature. Have to create a wrapper and call that instead.
6998 * FIXME: This is very slow, need to create a wrapper at JIT time
6999 * instead based on the signature.
7001 EMIT_NEW_IMAGECONST (cfg
, args
[0], method
->klass
->image
);
7002 EMIT_NEW_PCONST (cfg
, args
[1], fsig
);
7004 addr
= mono_emit_jit_icall (cfg
, mono_get_native_calli_wrapper
, args
);
7007 MonoMethod
*cil_method
;
7009 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7010 if (constrained_call
&& cfg
->verbose_level
> 2)
7011 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call
));
7012 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
7013 cil_method
= cmethod
;
7014 if (constrained_call
&& !((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
||
7015 constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) &&
7016 cfg
->generic_sharing_context
)) {
7017 cmethod
= mono_get_method_constrained_with_method (image
, cil_method
, constrained_call
, generic_context
);
7019 } else if (constrained_call
) {
7020 if (cfg
->verbose_level
> 2)
7021 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call
));
7023 GSHAREDVT_FAILURE (*ip
);
7025 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
7027 * This is needed since get_method_constrained can't find
7028 * the method in klass representing a type var.
7029 * The type var is guaranteed to be a reference type in this
7032 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7033 cil_method
= cmethod
;
7034 g_assert (!cmethod
->klass
->valuetype
);
7036 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
7039 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7040 cil_method
= cmethod
;
7043 if (!cmethod
|| mono_loader_get_last_error ())
7045 if (!dont_verify
&& !cfg
->skip_visibility
) {
7046 MonoMethod
*target_method
= cil_method
;
7047 if (method
->is_inflated
) {
7048 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
7050 if (!mono_method_can_access_method (method_definition
, target_method
) &&
7051 !mono_method_can_access_method (method
, cil_method
))
7052 METHOD_ACCESS_FAILURE
;
7055 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
7056 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
7058 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
7059 /* MS.NET seems to silently convert this to a callvirt */
7064 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7065 * converts to a callvirt.
7067 * tests/bug-515884.il is an example of this behavior
7069 const int test_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
| METHOD_ATTRIBUTE_STATIC
;
7070 const int expected_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
;
7071 if (!virtual && cmethod
->klass
->marshalbyref
&& (cmethod
->flags
& test_flags
) == expected_flags
&& cfg
->method
->wrapper_type
== MONO_WRAPPER_NONE
)
7075 if (!cmethod
->klass
->inited
)
7076 if (!mono_class_init (cmethod
->klass
))
7077 TYPE_LOAD_ERROR (cmethod
->klass
);
7079 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
7080 mini_class_is_system_array (cmethod
->klass
)) {
7081 array_rank
= cmethod
->klass
->rank
;
7082 fsig
= mono_method_signature (cmethod
);
7084 fsig
= mono_method_signature (cmethod
);
7089 if (fsig
->pinvoke
) {
7090 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
7091 check_for_pending_exc
, FALSE
);
7092 fsig
= mono_method_signature (wrapper
);
7093 } else if (constrained_call
) {
7094 fsig
= mono_method_signature (cmethod
);
7096 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
7100 mono_save_token_info (cfg
, image
, token
, cil_method
);
7102 if (!MONO_TYPE_IS_VOID (fsig
->ret
) && !sym_seq_points
) {
7104 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7105 * foo (bar (), baz ())
7106 * works correctly. MS does this also:
7107 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7108 * The problem with this approach is that the debugger will stop after all calls returning a value,
7109 * even for simple cases, like:
7112 /* Special case a few common successor opcodes */
7113 if (!(ip
+ 5 < end
&& ip
[5] == CEE_POP
))
7114 need_seq_point
= TRUE
;
7117 n
= fsig
->param_count
+ fsig
->hasthis
;
7119 /* Don't support calls made using type arguments for now */
7121 if (cfg->gsharedvt) {
7122 if (mini_is_gsharedvt_signature (cfg, fsig))
7123 GSHAREDVT_FAILURE (*ip);
7127 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7128 if (check_linkdemand (cfg
, method
, cmethod
))
7129 INLINE_FAILURE ("linkdemand");
7130 CHECK_CFG_EXCEPTION
;
7133 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
7134 g_assert_not_reached ();
7137 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
7140 if (!cfg
->generic_sharing_context
&& cmethod
)
7141 g_assert (!mono_method_check_context_used (cmethod
));
7145 //g_assert (!virtual || fsig->hasthis);
7149 if (constrained_call
) {
7151 * We have the `constrained.' prefix opcode.
7153 if (constrained_call
->valuetype
&& (cmethod
->klass
== mono_defaults
.object_class
|| cmethod
->klass
== mono_defaults
.enum_class
->parent
|| cmethod
->klass
== mono_defaults
.enum_class
)) {
7155 * The type parameter is instantiated as a valuetype,
7156 * but that type doesn't override the method we're
7157 * calling, so we need to box `this'.
7159 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &constrained_call
->byval_arg
, sp
[0]->dreg
, 0);
7160 ins
->klass
= constrained_call
;
7161 sp
[0] = handle_box (cfg
, ins
, constrained_call
, mono_class_check_context_used (constrained_call
));
7162 CHECK_CFG_EXCEPTION
;
7163 } else if (!constrained_call
->valuetype
) {
7164 int dreg
= alloc_ireg_ref (cfg
);
7167 * The type parameter is instantiated as a reference
7168 * type. We have a managed pointer on the stack, so
7169 * we need to dereference it here.
7171 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
7172 ins
->type
= STACK_OBJ
;
7175 if (cmethod
->klass
->valuetype
) {
7178 /* Interface method */
7181 mono_class_setup_vtable (constrained_call
);
7182 CHECK_TYPELOAD (constrained_call
);
7183 ioffset
= mono_class_interface_offset (constrained_call
, cmethod
->klass
);
7185 TYPE_LOAD_ERROR (constrained_call
);
7186 slot
= mono_method_get_vtable_slot (cmethod
);
7188 TYPE_LOAD_ERROR (cmethod
->klass
);
7189 cmethod
= constrained_call
->vtable
[ioffset
+ slot
];
7191 if (cmethod
->klass
== mono_defaults
.enum_class
) {
7192 /* Enum implements some interfaces, so treat this as the first case */
7193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &constrained_call
->byval_arg
, sp
[0]->dreg
, 0);
7194 ins
->klass
= constrained_call
;
7195 sp
[0] = handle_box (cfg
, ins
, constrained_call
, mono_class_check_context_used (constrained_call
));
7196 CHECK_CFG_EXCEPTION
;
7201 constrained_call
= NULL
;
7204 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
7207 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_sharable_method (cfg
, cmethod
, fsig
, sp
))) {
7209 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7210 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
7215 CHECK_CFG_EXCEPTION
;
7220 emit_seq_point (cfg
, method
, ip
, FALSE
);
7225 * If the callee is a shared method, then its static cctor
7226 * might not get called after the call was patched.
7228 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
7229 emit_generic_class_init (cfg
, cmethod
->klass
);
7230 CHECK_TYPELOAD (cmethod
->klass
);
7233 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
7234 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
7235 gboolean sharable
= FALSE
;
7237 if (mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7240 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
7241 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
7242 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
7244 sharable
= sharing_enabled
&& context_sharable
;
7248 * Pass vtable iff target method might
7249 * be shared, which means that sharing
7250 * is enabled for its class and its
7251 * context is sharable (and it's not a
7254 if (sharable
&& !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
7258 if (cmethod
&& mini_method_get_context (cmethod
) &&
7259 mini_method_get_context (cmethod
)->method_inst
) {
7260 g_assert (!pass_vtable
);
7262 if (mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7265 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
7266 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
7267 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
7269 if (sharing_enabled
&& context_sharable
)
7271 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (cfg
, mono_method_signature (cmethod
)))
7276 if (cfg
->generic_sharing_context
&& cmethod
) {
7277 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
7279 context_used
= mono_method_check_context_used (cmethod
);
7281 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
7282 /* Generic method interface
7283 calls are resolved via a
7284 helper function and don't
7286 if (!cmethod_context
|| !cmethod_context
->method_inst
)
7287 pass_imt_from_rgctx
= TRUE
;
7291 * If a shared method calls another
7292 * shared method then the caller must
7293 * have a generic sharing context
7294 * because the magic trampoline
7295 * requires it. FIXME: We shouldn't
7296 * have to force the vtable/mrgctx
7297 * variable here. Instead there
7298 * should be a flag in the cfg to
7299 * request a generic sharing context.
7302 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
7303 mono_get_vtable_var (cfg
);
7308 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7310 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7312 CHECK_TYPELOAD (cmethod
->klass
);
7313 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7318 g_assert (!vtable_arg
);
7320 if (!cfg
->compile_aot
) {
7322 * emit_get_rgctx_method () calls mono_class_vtable () so check
7323 * for type load errors before.
7325 mono_class_setup_vtable (cmethod
->klass
);
7326 CHECK_TYPELOAD (cmethod
->klass
);
7329 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7331 /* !marshalbyref is needed to properly handle generic methods + remoting */
7332 if ((!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
7333 MONO_METHOD_IS_FINAL (cmethod
)) &&
7334 !cmethod
->klass
->marshalbyref
) {
7341 if (pass_imt_from_rgctx
) {
7342 g_assert (!pass_vtable
);
7345 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
7346 cmethod
, MONO_RGCTX_INFO_METHOD
);
7350 MONO_EMIT_NEW_CHECK_THIS (cfg
, sp
[0]->dreg
);
7352 /* Calling virtual generic methods */
7353 if (cmethod
&& virtual &&
7354 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
7355 !(MONO_METHOD_IS_FINAL (cmethod
) &&
7356 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
7357 mono_method_signature (cmethod
)->generic_param_count
) {
7358 MonoInst
*this_temp
, *this_arg_temp
, *store
;
7359 MonoInst
*iargs
[4];
7361 g_assert (mono_method_signature (cmethod
)->is_inflated
);
7363 /* Prevent inlining of methods that contain indirect calls */
7364 INLINE_FAILURE ("virtual generic call");
7366 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (cfg
, fsig
))
7367 GSHAREDVT_FAILURE (*ip
);
7369 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7370 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
&& mono_use_imt
) {
7371 g_assert (!imt_arg
);
7373 g_assert (cmethod
->is_inflated
);
7374 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
7375 cmethod
, MONO_RGCTX_INFO_METHOD
);
7376 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
, NULL
);
7380 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
7381 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
7382 MONO_ADD_INS (bblock
, store
);
7384 /* FIXME: This should be a managed pointer */
7385 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
7387 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
7388 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
7389 cmethod
, MONO_RGCTX_INFO_METHOD
);
7390 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
7391 addr
= mono_emit_jit_icall (cfg
,
7392 mono_helper_compile_generic_method
, iargs
);
7394 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
7396 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, NULL
);
7399 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7400 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7402 CHECK_CFG_EXCEPTION
;
7407 emit_seq_point (cfg
, method
, ip
, FALSE
);
7412 * Implement a workaround for the inherent races involved in locking:
7418 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7419 * try block, the Exit () won't be executed, see:
7420 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7421 * To work around this, we extend such try blocks to include the last x bytes
7422 * of the Monitor.Enter () call.
7424 if (cmethod
&& cmethod
->klass
== mono_defaults
.monitor_class
&& !strcmp (cmethod
->name
, "Enter") && mono_method_signature (cmethod
)->param_count
== 1) {
7425 MonoBasicBlock
*tbb
;
7427 GET_BBLOCK (cfg
, tbb
, ip
+ 5);
7429 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7430 * from Monitor.Enter like ArgumentNullException.
7432 if (tbb
->try_start
&& MONO_REGION_FLAGS(tbb
->region
) == MONO_EXCEPTION_CLAUSE_FINALLY
) {
7433 /* Mark this bblock as needing to be extended */
7434 tbb
->extend_try_block
= TRUE
;
7438 /* Conversion to a JIT intrinsic */
7439 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
7441 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7442 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
7447 CHECK_CFG_EXCEPTION
;
7452 emit_seq_point (cfg
, method
, ip
, FALSE
);
7457 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
7458 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
7459 !disable_inline
&& mono_method_check_inlining (cfg
, cmethod
) &&
7460 !g_list_find (dont_inline
, cmethod
)) {
7462 gboolean always
= FALSE
;
7464 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
7465 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
7466 /* Prevent inlining of methods that call wrappers */
7467 INLINE_FAILURE ("wrapper call");
7468 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
7472 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, always
))) {
7474 cfg
->real_offset
+= 5;
7477 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7478 /* *sp is already set by inline_method */
7482 inline_costs
+= costs
;
7485 emit_seq_point (cfg
, method
, ip
, FALSE
);
7491 * Making generic calls out of gsharedvt methods.
7493 if (cmethod
&& cfg
->gsharedvt
&& mini_is_gsharedvt_signature (cfg
, fsig
)) {
7497 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7498 //GSHAREDVT_FAILURE (*ip);
7499 // disable for possible remoting calls
7500 if (fsig
->hasthis
&& (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
))
7501 GSHAREDVT_FAILURE (*ip
);
7502 // virtual generic calls were disabled earlier
7505 if (cmethod
->klass
->rank
&& cmethod
->klass
->byval_arg
.type
!= MONO_TYPE_SZARRAY
)
7506 /* test_0_multi_dim_arrays () in gshared.cs */
7507 GSHAREDVT_FAILURE (*ip
);
7509 if (virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))
7510 addr
= emit_get_rgctx_method (cfg
, context_used
,
7511 cmethod
, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT
);
7513 addr
= emit_get_rgctx_method (cfg
, context_used
,
7514 cmethod
, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE
);
7515 ins
= emit_gsharedvt_call (cfg
, fsig
, sp
, addr
, cmethod
, imt_arg
, vtable_arg
);
7517 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7518 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7520 CHECK_CFG_EXCEPTION
;
7525 emit_seq_point (cfg
, method
, ip
, FALSE
);
7529 if (virtual && cmethod
&& cfg
->gsharedvt
&& cmethod
->slot
== -1) {
7530 mono_class_setup_vtable (cmethod
->klass
);
7531 if (cmethod
->slot
== -1)
7532 // FIXME: How can this happen ?
7533 GSHAREDVT_FAILURE (*ip
);
7536 inline_costs
+= 10 * num_calls
++;
7538 /* Tail recursion elimination */
7539 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
7540 gboolean has_vtargs
= FALSE
;
7543 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7544 INLINE_FAILURE ("tail call");
7546 /* keep it simple */
7547 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
7548 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
7553 for (i
= 0; i
< n
; ++i
)
7554 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
7555 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7556 MONO_ADD_INS (bblock
, ins
);
7557 tblock
= start_bblock
->out_bb
[0];
7558 link_bblock (cfg
, bblock
, tblock
);
7559 ins
->inst_target_bb
= tblock
;
7560 start_new_bblock
= 1;
7562 /* skip the CEE_RET, too */
7563 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
7573 /* Generic sharing */
7574 /* FIXME: only do this for generic methods if
7575 they are not shared! */
7576 if (context_used
&& !imt_arg
&& !array_rank
&&
7577 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7578 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
7579 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
7580 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
7581 INLINE_FAILURE ("gshared");
7583 g_assert (cfg
->generic_sharing_context
&& cmethod
);
7587 * We are compiling a call to a
7588 * generic method from shared code,
7589 * which means that we have to look up
7590 * the method in the rgctx and do an
7593 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7596 /* Indirect calls */
7598 g_assert (!imt_arg
);
7600 if (*ip
== CEE_CALL
)
7601 g_assert (context_used
);
7602 else if (*ip
== CEE_CALLI
)
7603 g_assert (!vtable_arg
);
7605 /* FIXME: what the hell is this??? */
7606 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
7607 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
7609 /* Prevent inlining of methods with indirect calls */
7610 INLINE_FAILURE ("indirect call");
7613 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, vtable_arg
);
7615 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
7617 * Instead of emitting an indirect call, emit a direct call
7618 * with the contents of the aotconst as the patch info.
7620 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
7622 } else if (addr
->opcode
== OP_GOT_ENTRY
&& addr
->inst_right
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
7623 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_right
->inst_left
, fsig
, sp
);
7626 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
, NULL
);
7629 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7630 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7632 CHECK_CFG_EXCEPTION
;
7637 emit_seq_point (cfg
, method
, ip
, FALSE
);
7645 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
7646 MonoInst
*val
= sp
[fsig
->param_count
];
7648 if (val
->type
== STACK_OBJ
) {
7649 MonoInst
*iargs
[2];
7654 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
7657 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
7658 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, val
->dreg
);
7659 if (cfg
->gen_write_barriers
&& val
->type
== STACK_OBJ
&& !(val
->opcode
== OP_PCONST
&& val
->inst_c0
== 0))
7660 emit_write_barrier (cfg
, addr
, val
, 0);
7661 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
7662 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
7667 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
7668 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
7669 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
7670 CHECK_TYPELOAD (cmethod
->klass
);
7673 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7676 g_assert_not_reached ();
7679 CHECK_CFG_EXCEPTION
;
7683 emit_seq_point (cfg
, method
, ip
, FALSE
);
7687 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
7689 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7690 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7692 CHECK_CFG_EXCEPTION
;
7697 emit_seq_point (cfg
, method
, ip
, FALSE
);
7701 /* Tail prefix / tail call optimization */
7703 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7704 /* FIXME: runtime generic context pointer for jumps? */
7705 /* FIXME: handle this for generic sharing eventually */
7706 supported_tail_call
= cmethod
&&
7707 ((((ins_flag
& MONO_INST_TAILCALL
) && (*ip
== CEE_CALL
))
7708 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7709 && !vtable_arg
&& !cfg
->generic_sharing_context
&& is_supported_tail_call (cfg
, method
, cmethod
, fsig
);
7711 if (supported_tail_call
) {
7714 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7715 INLINE_FAILURE ("tail call");
7717 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7719 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7720 /* Handle tail calls similarly to calls */
7721 call
= mono_emit_call_args (cfg
, mono_method_signature (cmethod
), sp
, FALSE
, FALSE
, TRUE
, FALSE
, FALSE
);
7723 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
7724 call
->tail_call
= TRUE
;
7725 call
->method
= cmethod
;
7726 call
->signature
= mono_method_signature (cmethod
);
7729 * We implement tail calls by storing the actual arguments into the
7730 * argument variables, then emitting a CEE_JMP.
7732 for (i
= 0; i
< n
; ++i
) {
7733 /* Prevent argument from being register allocated */
7734 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
7735 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
7739 ins
= (MonoInst
*)call
;
7740 ins
->inst_p0
= cmethod
;
7741 ins
->inst_p1
= arg_array
[0];
7742 MONO_ADD_INS (bblock
, ins
);
7743 link_bblock (cfg
, bblock
, end_bblock
);
7744 start_new_bblock
= 1;
7746 CHECK_CFG_EXCEPTION
;
7751 // FIXME: Eliminate unreachable epilogs
7754 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7755 * only reachable from this call.
7757 GET_BBLOCK (cfg
, tblock
, ip
);
7758 if (tblock
== bblock
|| tblock
->in_count
== 0)
7764 * Synchronized wrappers.
7765 * It's hard to determine where to replace a method with its synchronized
7766 * wrapper without causing an infinite recursion. The current solution is
7767 * to add the synchronized wrapper in the trampolines, and to
7768 * change the called method to a dummy wrapper, and resolve that wrapper
7769 * to the real method in mono_jit_compile_method ().
7771 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
&& mono_marshal_method_from_wrapper (cfg
->method
) == cmethod
) {
7772 cmethod
= mono_marshal_get_synchronized_inner_wrapper (cmethod
);
7776 INLINE_FAILURE ("call");
7777 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
7778 imt_arg
, vtable_arg
);
7780 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7781 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7783 CHECK_CFG_EXCEPTION
;
7788 emit_seq_point (cfg
, method
, ip
, FALSE
);
7792 if (cfg
->method
!= method
) {
7793 /* return from inlined method */
7795 * If in_count == 0, that means the ret is unreachable due to
7796 * being preceded by a throw. In that case, inline_method () will
7797 * handle setting the return value
7798 * (test case: test_0_inline_throw ()).
7800 if (return_var
&& cfg
->cbb
->in_count
) {
7801 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
7807 if ((method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
|| method
->wrapper_type
== MONO_WRAPPER_NONE
) && target_type_is_incompatible (cfg
, ret_type
, *sp
))
7810 //g_assert (returnvar != -1);
7811 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
7812 cfg
->ret_var_set
= TRUE
;
7816 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
7818 if (seq_points
&& !sym_seq_points
) {
7820 * Place a seq point here too even through the IL stack is not
7821 * empty, so a step over on
7824 * will work correctly.
7826 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
7827 MONO_ADD_INS (cfg
->cbb
, ins
);
7830 g_assert (!return_var
);
7834 if ((method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
|| method
->wrapper_type
== MONO_WRAPPER_NONE
) && target_type_is_incompatible (cfg
, ret_type
, *sp
))
7837 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
7840 if (!cfg
->vret_addr
) {
7843 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
7845 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
7847 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
7848 ins
->klass
= mono_class_from_mono_type (ret_type
);
7851 #ifdef MONO_ARCH_SOFT_FLOAT
7852 if (COMPILE_SOFT_FLOAT (cfg
) && !ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
7853 MonoInst
*iargs
[1];
7857 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
7858 mono_arch_emit_setret (cfg
, method
, conv
);
7860 mono_arch_emit_setret (cfg
, method
, *sp
);
7863 mono_arch_emit_setret (cfg
, method
, *sp
);
7868 if (sp
!= stack_start
)
7870 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7872 ins
->inst_target_bb
= end_bblock
;
7873 MONO_ADD_INS (bblock
, ins
);
7874 link_bblock (cfg
, bblock
, end_bblock
);
7875 start_new_bblock
= 1;
7879 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7881 target
= ip
+ 1 + (signed char)(*ip
);
7883 GET_BBLOCK (cfg
, tblock
, target
);
7884 link_bblock (cfg
, bblock
, tblock
);
7885 ins
->inst_target_bb
= tblock
;
7886 if (sp
!= stack_start
) {
7887 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7889 CHECK_UNVERIFIABLE (cfg
);
7891 MONO_ADD_INS (bblock
, ins
);
7892 start_new_bblock
= 1;
7893 inline_costs
+= BRANCH_COST
;
7907 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
7909 target
= ip
+ 1 + *(signed char*)ip
;
7915 inline_costs
+= BRANCH_COST
;
7919 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7922 target
= ip
+ 4 + (gint32
)read32(ip
);
7924 GET_BBLOCK (cfg
, tblock
, target
);
7925 link_bblock (cfg
, bblock
, tblock
);
7926 ins
->inst_target_bb
= tblock
;
7927 if (sp
!= stack_start
) {
7928 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7930 CHECK_UNVERIFIABLE (cfg
);
7933 MONO_ADD_INS (bblock
, ins
);
7935 start_new_bblock
= 1;
7936 inline_costs
+= BRANCH_COST
;
7943 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
7944 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
7945 guint32 opsize
= is_short
? 1 : 4;
7947 CHECK_OPSIZE (opsize
);
7949 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
7952 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
7957 GET_BBLOCK (cfg
, tblock
, target
);
7958 link_bblock (cfg
, bblock
, tblock
);
7959 GET_BBLOCK (cfg
, tblock
, ip
);
7960 link_bblock (cfg
, bblock
, tblock
);
7962 if (sp
!= stack_start
) {
7963 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7964 CHECK_UNVERIFIABLE (cfg
);
7967 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
7968 cmp
->sreg1
= sp
[0]->dreg
;
7969 type_from_op (cmp
, sp
[0], NULL
);
7972 #if SIZEOF_REGISTER == 4
7973 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
7974 /* Convert it to OP_LCOMPARE */
7975 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
7976 ins
->type
= STACK_I8
;
7977 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
7979 MONO_ADD_INS (bblock
, ins
);
7980 cmp
->opcode
= OP_LCOMPARE
;
7981 cmp
->sreg2
= ins
->dreg
;
7984 MONO_ADD_INS (bblock
, cmp
);
7986 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
7987 type_from_op (ins
, sp
[0], NULL
);
7988 MONO_ADD_INS (bblock
, ins
);
7989 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
7990 GET_BBLOCK (cfg
, tblock
, target
);
7991 ins
->inst_true_bb
= tblock
;
7992 GET_BBLOCK (cfg
, tblock
, ip
);
7993 ins
->inst_false_bb
= tblock
;
7994 start_new_bblock
= 2;
7997 inline_costs
+= BRANCH_COST
;
8012 MONO_INST_NEW (cfg
, ins
, *ip
);
8014 target
= ip
+ 4 + (gint32
)read32(ip
);
8020 inline_costs
+= BRANCH_COST
;
8024 MonoBasicBlock
**targets
;
8025 MonoBasicBlock
*default_bblock
;
8026 MonoJumpInfoBBTable
*table
;
8027 int offset_reg
= alloc_preg (cfg
);
8028 int target_reg
= alloc_preg (cfg
);
8029 int table_reg
= alloc_preg (cfg
);
8030 int sum_reg
= alloc_preg (cfg
);
8031 gboolean use_op_switch
;
8035 n
= read32 (ip
+ 1);
8038 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
8042 CHECK_OPSIZE (n
* sizeof (guint32
));
8043 target
= ip
+ n
* sizeof (guint32
);
8045 GET_BBLOCK (cfg
, default_bblock
, target
);
8046 default_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
8048 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
8049 for (i
= 0; i
< n
; ++i
) {
8050 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
8051 targets
[i
] = tblock
;
8052 targets
[i
]->flags
|= BB_INDIRECT_JUMP_TARGET
;
8056 if (sp
!= stack_start
) {
8058 * Link the current bb with the targets as well, so handle_stack_args
8059 * will set their in_stack correctly.
8061 link_bblock (cfg
, bblock
, default_bblock
);
8062 for (i
= 0; i
< n
; ++i
)
8063 link_bblock (cfg
, bblock
, targets
[i
]);
8065 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8067 CHECK_UNVERIFIABLE (cfg
);
8070 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
8071 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
8074 for (i
= 0; i
< n
; ++i
)
8075 link_bblock (cfg
, bblock
, targets
[i
]);
8077 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
8078 table
->table
= targets
;
8079 table
->table_size
= n
;
8081 use_op_switch
= FALSE
;
8083 /* ARM implements SWITCH statements differently */
8084 /* FIXME: Make it use the generic implementation */
8085 if (!cfg
->compile_aot
)
8086 use_op_switch
= TRUE
;
8089 if (COMPILE_LLVM (cfg
))
8090 use_op_switch
= TRUE
;
8092 cfg
->cbb
->has_jump_table
= 1;
8094 if (use_op_switch
) {
8095 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
8096 ins
->sreg1
= src1
->dreg
;
8097 ins
->inst_p0
= table
;
8098 ins
->inst_many_bb
= targets
;
8099 ins
->klass
= GUINT_TO_POINTER (n
);
8100 MONO_ADD_INS (cfg
->cbb
, ins
);
8102 if (sizeof (gpointer
) == 8)
8103 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
8105 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
8107 #if SIZEOF_REGISTER == 8
8108 /* The upper word might not be zero, and we add it to a 64 bit address later */
8109 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
8112 if (cfg
->compile_aot
) {
8113 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
8115 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
8116 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
8117 ins
->inst_p0
= table
;
8118 ins
->dreg
= table_reg
;
8119 MONO_ADD_INS (cfg
->cbb
, ins
);
8122 /* FIXME: Use load_memindex */
8123 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
8124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
8125 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
8127 start_new_bblock
= 1;
8128 inline_costs
+= (BRANCH_COST
* 2);
8148 dreg
= alloc_freg (cfg
);
8151 dreg
= alloc_lreg (cfg
);
8154 dreg
= alloc_ireg_ref (cfg
);
8157 dreg
= alloc_preg (cfg
);
8160 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
8161 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
8162 ins
->flags
|= ins_flag
;
8164 MONO_ADD_INS (bblock
, ins
);
8166 if (ins
->flags
& MONO_INST_VOLATILE
) {
8167 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8168 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8169 emit_memory_barrier (cfg
, FullBarrier
);
8184 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
8185 ins
->flags
|= ins_flag
;
8188 if (ins
->flags
& MONO_INST_VOLATILE
) {
8189 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8190 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8191 emit_memory_barrier (cfg
, FullBarrier
);
8194 MONO_ADD_INS (bblock
, ins
);
8196 if (cfg
->gen_write_barriers
&& *ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0)))
8197 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
8206 MONO_INST_NEW (cfg
, ins
, (*ip
));
8208 ins
->sreg1
= sp
[0]->dreg
;
8209 ins
->sreg2
= sp
[1]->dreg
;
8210 type_from_op (ins
, sp
[0], sp
[1]);
8212 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
8214 /* Use the immediate opcodes if possible */
8215 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
8216 int imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
8217 if (imm_opcode
!= -1) {
8218 ins
->opcode
= imm_opcode
;
8219 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
8222 sp
[1]->opcode
= OP_NOP
;
8226 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
8228 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8245 MONO_INST_NEW (cfg
, ins
, (*ip
));
8247 ins
->sreg1
= sp
[0]->dreg
;
8248 ins
->sreg2
= sp
[1]->dreg
;
8249 type_from_op (ins
, sp
[0], sp
[1]);
8251 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
8252 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
8254 /* FIXME: Pass opcode to is_inst_imm */
8256 /* Use the immediate opcodes if possible */
8257 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
8260 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
8261 if (imm_opcode
!= -1) {
8262 ins
->opcode
= imm_opcode
;
8263 if (sp
[1]->opcode
== OP_I8CONST
) {
8264 #if SIZEOF_REGISTER == 8
8265 ins
->inst_imm
= sp
[1]->inst_l
;
8267 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
8268 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
8272 ins
->inst_imm
= (gssize
)(sp
[1]->inst_c0
);
8275 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8276 if (sp
[1]->next
== NULL
)
8277 sp
[1]->opcode
= OP_NOP
;
8280 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
8282 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8295 case CEE_CONV_OVF_I8
:
8296 case CEE_CONV_OVF_U8
:
8300 /* Special case this earlier so we have long constants in the IR */
8301 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
8302 int data
= sp
[-1]->inst_c0
;
8303 sp
[-1]->opcode
= OP_I8CONST
;
8304 sp
[-1]->type
= STACK_I8
;
8305 #if SIZEOF_REGISTER == 8
8306 if ((*ip
) == CEE_CONV_U8
)
8307 sp
[-1]->inst_c0
= (guint32
)data
;
8309 sp
[-1]->inst_c0
= data
;
8311 sp
[-1]->inst_ls_word
= data
;
8312 if ((*ip
) == CEE_CONV_U8
)
8313 sp
[-1]->inst_ms_word
= 0;
8315 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
8317 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
8324 case CEE_CONV_OVF_I4
:
8325 case CEE_CONV_OVF_I1
:
8326 case CEE_CONV_OVF_I2
:
8327 case CEE_CONV_OVF_I
:
8328 case CEE_CONV_OVF_U
:
8331 if (sp
[-1]->type
== STACK_R8
) {
8332 ADD_UNOP (CEE_CONV_OVF_I8
);
8339 case CEE_CONV_OVF_U1
:
8340 case CEE_CONV_OVF_U2
:
8341 case CEE_CONV_OVF_U4
:
8344 if (sp
[-1]->type
== STACK_R8
) {
8345 ADD_UNOP (CEE_CONV_OVF_U8
);
8352 case CEE_CONV_OVF_I1_UN
:
8353 case CEE_CONV_OVF_I2_UN
:
8354 case CEE_CONV_OVF_I4_UN
:
8355 case CEE_CONV_OVF_I8_UN
:
8356 case CEE_CONV_OVF_U1_UN
:
8357 case CEE_CONV_OVF_U2_UN
:
8358 case CEE_CONV_OVF_U4_UN
:
8359 case CEE_CONV_OVF_U8_UN
:
8360 case CEE_CONV_OVF_I_UN
:
8361 case CEE_CONV_OVF_U_UN
:
8368 CHECK_CFG_EXCEPTION
;
8372 case CEE_ADD_OVF_UN
:
8374 case CEE_MUL_OVF_UN
:
8376 case CEE_SUB_OVF_UN
:
8382 GSHAREDVT_FAILURE (*ip
);
8385 token
= read32 (ip
+ 1);
8386 klass
= mini_get_class (method
, token
, generic_context
);
8387 CHECK_TYPELOAD (klass
);
8389 if (generic_class_is_reference_type (cfg
, klass
)) {
8390 MonoInst
*store
, *load
;
8391 int dreg
= alloc_ireg_ref (cfg
);
8393 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
8394 load
->flags
|= ins_flag
;
8395 MONO_ADD_INS (cfg
->cbb
, load
);
8397 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
8398 store
->flags
|= ins_flag
;
8399 MONO_ADD_INS (cfg
->cbb
, store
);
8401 if (cfg
->gen_write_barriers
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
)
8402 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
8404 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
8416 token
= read32 (ip
+ 1);
8417 klass
= mini_get_class (method
, token
, generic_context
);
8418 CHECK_TYPELOAD (klass
);
8420 /* Optimize the common ldobj+stloc combination */
8430 loc_index
= ip
[5] - CEE_STLOC_0
;
8437 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
8438 CHECK_LOCAL (loc_index
);
8440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
8441 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
8447 /* Optimize the ldobj+stobj combination */
8448 /* The reference case ends up being a load+store anyway */
8449 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
8454 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
8461 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
8470 CHECK_STACK_OVF (1);
8472 n
= read32 (ip
+ 1);
8474 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
8475 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
8476 ins
->type
= STACK_OBJ
;
8479 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8480 MonoInst
*iargs
[1];
8482 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
8483 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
8485 if (cfg
->opt
& MONO_OPT_SHARED
) {
8486 MonoInst
*iargs
[3];
8488 if (cfg
->compile_aot
) {
8489 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
8491 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8492 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
8493 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
8494 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
8495 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
8497 if (bblock
->out_of_line
) {
8498 MonoInst
*iargs
[2];
8500 if (image
== mono_defaults
.corlib
) {
8502 * Avoid relocations in AOT and save some space by using a
8503 * version of helper_ldstr specialized to mscorlib.
8505 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
8506 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
8508 /* Avoid creating the string object */
8509 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8510 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
8511 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
8515 if (cfg
->compile_aot
) {
8516 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
8518 MONO_ADD_INS (bblock
, ins
);
8521 NEW_PCONST (cfg
, ins
, NULL
);
8522 ins
->type
= STACK_OBJ
;
8523 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
8525 OUT_OF_MEMORY_FAILURE
;
8528 MONO_ADD_INS (bblock
, ins
);
8537 MonoInst
*iargs
[2];
8538 MonoMethodSignature
*fsig
;
8541 MonoInst
*vtable_arg
= NULL
;
8544 token
= read32 (ip
+ 1);
8545 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
8546 if (!cmethod
|| mono_loader_get_last_error ())
8548 fsig
= mono_method_get_signature (cmethod
, image
, token
);
8552 mono_save_token_info (cfg
, image
, token
, cmethod
);
8554 if (!mono_class_init (cmethod
->klass
))
8555 TYPE_LOAD_ERROR (cmethod
->klass
);
8557 if (cfg
->generic_sharing_context
)
8558 context_used
= mono_method_check_context_used (cmethod
);
8560 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
8561 if (check_linkdemand (cfg
, method
, cmethod
))
8562 INLINE_FAILURE ("linkdemand");
8563 CHECK_CFG_EXCEPTION
;
8564 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
8565 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
8568 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
8569 emit_generic_class_init (cfg
, cmethod
->klass
);
8570 CHECK_TYPELOAD (cmethod
->klass
);
8573 if (cmethod
->klass
->valuetype
)
8574 GSHAREDVT_FAILURE (*ip
);
8577 if (cfg->gsharedvt) {
8578 if (mini_is_gsharedvt_variable_signature (sig))
8579 GSHAREDVT_FAILURE (*ip);
8583 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
8584 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
8585 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
8586 mono_class_vtable (cfg
->domain
, cmethod
->klass
);
8587 CHECK_TYPELOAD (cmethod
->klass
);
8589 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
8590 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
8593 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
8594 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
8596 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
8598 CHECK_TYPELOAD (cmethod
->klass
);
8599 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
8604 n
= fsig
->param_count
;
8608 * Generate smaller code for the common newobj <exception> instruction in
8609 * argument checking code.
8611 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
8612 is_exception_class (cmethod
->klass
) && n
<= 2 &&
8613 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
8614 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
8615 MonoInst
*iargs
[3];
8617 g_assert (!vtable_arg
);
8621 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
8624 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
8628 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
8633 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
8636 g_assert_not_reached ();
8644 /* move the args to allow room for 'this' in the first position */
8650 /* check_call_signature () requires sp[0] to be set */
8651 this_ins
.type
= STACK_OBJ
;
8653 if (check_call_signature (cfg
, fsig
, sp
))
8658 if (mini_class_is_system_array (cmethod
->klass
)) {
8659 g_assert (!vtable_arg
);
8661 *sp
= emit_get_rgctx_method (cfg
, context_used
,
8662 cmethod
, MONO_RGCTX_INFO_METHOD
);
8664 /* Avoid varargs in the common case */
8665 if (fsig
->param_count
== 1)
8666 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
8667 else if (fsig
->param_count
== 2)
8668 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
8669 else if (fsig
->param_count
== 3)
8670 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_3
, sp
);
8672 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
8673 } else if (cmethod
->string_ctor
) {
8674 g_assert (!context_used
);
8675 g_assert (!vtable_arg
);
8676 /* we simply pass a null pointer */
8677 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
8678 /* now call the string ctor */
8679 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
, NULL
);
8681 MonoInst
* callvirt_this_arg
= NULL
;
8683 if (cmethod
->klass
->valuetype
) {
8684 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
8685 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
8686 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
8691 * The code generated by mini_emit_virtual_call () expects
8692 * iargs [0] to be a boxed instance, but luckily the vcall
8693 * will be transformed into a normal call there.
8695 } else if (context_used
) {
8696 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, context_used
);
8699 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
8701 CHECK_TYPELOAD (cmethod
->klass
);
8704 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8705 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8706 * As a workaround, we call class cctors before allocating objects.
8708 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8709 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8710 if (cfg
->verbose_level
> 2)
8711 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
8712 class_inits
= g_slist_prepend (class_inits
, vtable
);
8715 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, 0);
8718 CHECK_CFG_EXCEPTION
; /*for handle_alloc*/
8721 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
8723 /* Now call the actual ctor */
8724 /* Avoid virtual calls to ctors if possible */
8725 if (cmethod
->klass
->marshalbyref
)
8726 callvirt_this_arg
= sp
[0];
8729 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_ctor (cfg
, cmethod
, fsig
, sp
))) {
8730 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
8731 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
8736 CHECK_CFG_EXCEPTION
;
8737 } else if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
8738 !disable_inline
&& mono_method_check_inlining (cfg
, cmethod
) &&
8739 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
8740 !g_list_find (dont_inline
, cmethod
)) {
8743 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
8744 cfg
->real_offset
+= 5;
8747 inline_costs
+= costs
- 5;
8749 INLINE_FAILURE ("inline failure");
8750 // FIXME-VT: Clean this up
8751 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (cfg
, fsig
))
8752 GSHAREDVT_FAILURE(*ip
);
8753 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
, NULL
);
8755 } else if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (cfg
, fsig
)) {
8758 addr
= emit_get_rgctx_method (cfg
, context_used
,
8759 cmethod
, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE
);
8760 mono_emit_calli (cfg
, fsig
, sp
, addr
, vtable_arg
);
8761 } else if (context_used
&&
8762 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
8763 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
8764 MonoInst
*cmethod_addr
;
8766 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
8767 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
8769 mono_emit_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
8771 INLINE_FAILURE ("ctor call");
8772 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
,
8773 callvirt_this_arg
, NULL
, vtable_arg
);
8777 if (alloc
== NULL
) {
8779 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
8780 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
8794 token
= read32 (ip
+ 1);
8795 klass
= mini_get_class (method
, token
, generic_context
);
8796 CHECK_TYPELOAD (klass
);
8797 if (sp
[0]->type
!= STACK_OBJ
)
8800 if (cfg
->generic_sharing_context
)
8801 context_used
= mono_class_check_context_used (klass
);
8803 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8804 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
8811 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8814 if (cfg
->compile_aot
)
8815 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8817 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8819 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8820 *sp
++ = mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
8823 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8824 MonoMethod
*mono_castclass
;
8825 MonoInst
*iargs
[1];
8828 mono_castclass
= mono_marshal_get_castclass (klass
);
8831 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
8832 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8833 CHECK_CFG_EXCEPTION
;
8834 g_assert (costs
> 0);
8837 cfg
->real_offset
+= 5;
8842 inline_costs
+= costs
;
8845 ins
= handle_castclass (cfg
, klass
, *sp
, context_used
);
8846 CHECK_CFG_EXCEPTION
;
8856 token
= read32 (ip
+ 1);
8857 klass
= mini_get_class (method
, token
, generic_context
);
8858 CHECK_TYPELOAD (klass
);
8859 if (sp
[0]->type
!= STACK_OBJ
)
8862 if (cfg
->generic_sharing_context
)
8863 context_used
= mono_class_check_context_used (klass
);
8865 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8866 MonoMethod
*mono_isinst
= mono_marshal_get_isinst_with_cache ();
8873 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8876 if (cfg
->compile_aot
)
8877 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8879 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8881 *sp
++ = mono_emit_method_call (cfg
, mono_isinst
, args
, NULL
);
8884 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8885 MonoMethod
*mono_isinst
;
8886 MonoInst
*iargs
[1];
8889 mono_isinst
= mono_marshal_get_isinst (klass
);
8892 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
8893 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8894 CHECK_CFG_EXCEPTION
;
8895 g_assert (costs
> 0);
8898 cfg
->real_offset
+= 5;
8903 inline_costs
+= costs
;
8906 ins
= handle_isinst (cfg
, klass
, *sp
, context_used
);
8907 CHECK_CFG_EXCEPTION
;
8914 case CEE_UNBOX_ANY
: {
8918 token
= read32 (ip
+ 1);
8919 klass
= mini_get_class (method
, token
, generic_context
);
8920 CHECK_TYPELOAD (klass
);
8922 mono_save_token_info (cfg
, image
, token
, klass
);
8924 if (cfg
->generic_sharing_context
)
8925 context_used
= mono_class_check_context_used (klass
);
8927 if (mini_is_gsharedvt_klass (cfg
, klass
))
8928 /* Need to check for nullable types at runtime */
8929 GSHAREDVT_FAILURE (*ip
);
8931 if (generic_class_is_reference_type (cfg
, klass
)) {
8932 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8933 if (!context_used
&& mini_class_has_reference_variant_generic_argument (cfg
, klass
, context_used
)) {
8934 MonoMethod
*mono_castclass
= mono_marshal_get_castclass_with_cache ();
8941 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
8944 /*FIXME AOT support*/
8945 if (cfg
->compile_aot
)
8946 EMIT_NEW_AOTCONST (cfg
, args
[2], MONO_PATCH_INFO_CASTCLASS_CACHE
, NULL
);
8948 EMIT_NEW_PCONST (cfg
, args
[2], mono_domain_alloc0 (cfg
->domain
, sizeof (gpointer
)));
8950 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8951 *sp
++ = mono_emit_method_call (cfg
, mono_castclass
, args
, NULL
);
8954 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
8955 MonoMethod
*mono_castclass
;
8956 MonoInst
*iargs
[1];
8959 mono_castclass
= mono_marshal_get_castclass (klass
);
8962 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
8963 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8964 CHECK_CFG_EXCEPTION
;
8965 g_assert (costs
> 0);
8968 cfg
->real_offset
+= 5;
8972 inline_costs
+= costs
;
8974 ins
= handle_castclass (cfg
, klass
, *sp
, context_used
);
8975 CHECK_CFG_EXCEPTION
;
8983 if (mono_class_is_nullable (klass
)) {
8984 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
8991 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
8997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
9010 token
= read32 (ip
+ 1);
9011 klass
= mini_get_class (method
, token
, generic_context
);
9012 CHECK_TYPELOAD (klass
);
9014 mono_save_token_info (cfg
, image
, token
, klass
);
9016 if (cfg
->generic_sharing_context
)
9017 context_used
= mono_class_check_context_used (klass
);
9019 if (generic_class_is_reference_type (cfg
, klass
)) {
9025 if (klass
== mono_defaults
.void_class
)
9027 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
9029 /* frequent check in generic code: box (struct), brtrue */
9031 // FIXME: LLVM can't handle the inconsistent bb linking
9032 if (!mono_class_is_nullable (klass
) &&
9033 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) &&
9034 (ip
[5] == CEE_BRTRUE
||
9035 ip
[5] == CEE_BRTRUE_S
||
9036 ip
[5] == CEE_BRFALSE
||
9037 ip
[5] == CEE_BRFALSE_S
)) {
9038 gboolean is_true
= ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
;
9040 MonoBasicBlock
*true_bb
, *false_bb
;
9044 if (cfg
->verbose_level
> 3) {
9045 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9046 printf ("<box+brtrue opt>\n");
9054 target
= ip
+ 1 + (signed char)(*ip
);
9061 target
= ip
+ 4 + (gint
)(read32 (ip
));
9065 g_assert_not_reached ();
9069 * We need to link both bblocks, since it is needed for handling stack
9070 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9071 * Branching to only one of them would lead to inconsistencies, so
9072 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9074 GET_BBLOCK (cfg
, true_bb
, target
);
9075 GET_BBLOCK (cfg
, false_bb
, ip
);
9077 mono_link_bblock (cfg
, cfg
->cbb
, true_bb
);
9078 mono_link_bblock (cfg
, cfg
->cbb
, false_bb
);
9080 if (sp
!= stack_start
) {
9081 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
9083 CHECK_UNVERIFIABLE (cfg
);
9086 if (COMPILE_LLVM (cfg
)) {
9087 dreg
= alloc_ireg (cfg
);
9088 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
9089 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, dreg
, is_true
? 0 : 1);
9091 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg
, OP_IBEQ
, true_bb
, false_bb
);
9093 /* The JIT can't eliminate the iconst+compare */
9094 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9095 ins
->inst_target_bb
= is_true
? true_bb
: false_bb
;
9096 MONO_ADD_INS (cfg
->cbb
, ins
);
9099 start_new_bblock
= 1;
9103 *sp
++ = handle_box (cfg
, val
, klass
, context_used
);
9105 CHECK_CFG_EXCEPTION
;
9114 token
= read32 (ip
+ 1);
9115 klass
= mini_get_class (method
, token
, generic_context
);
9116 CHECK_TYPELOAD (klass
);
9118 mono_save_token_info (cfg
, image
, token
, klass
);
9120 if (cfg
->generic_sharing_context
)
9121 context_used
= mono_class_check_context_used (klass
);
9123 if (mono_class_is_nullable (klass
)) {
9126 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
9127 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
9131 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
9144 MonoClassField
*field
;
9147 gboolean is_instance
;
9149 gpointer addr
= NULL
;
9150 gboolean is_special_static
;
9152 MonoInst
*store_val
= NULL
;
9155 is_instance
= (op
== CEE_LDFLD
|| op
== CEE_LDFLDA
|| op
== CEE_STFLD
);
9157 if (op
== CEE_STFLD
) {
9165 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
9167 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
9170 if (op
== CEE_STSFLD
) {
9178 token
= read32 (ip
+ 1);
9179 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
9180 field
= mono_method_get_wrapper_data (method
, token
);
9181 klass
= field
->parent
;
9184 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
9188 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
9189 FIELD_ACCESS_FAILURE
;
9190 mono_class_init (klass
);
9192 if (is_instance
&& *ip
!= CEE_LDFLDA
&& is_magic_tls_access (field
))
9195 /* if the class is Critical then transparent code cannot access it's fields */
9196 if (!is_instance
&& mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
9197 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
9199 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9200 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9201 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9202 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9206 * LDFLD etc. is usable on static fields as well, so convert those cases to
9209 if (is_instance
&& field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
) {
9221 g_assert_not_reached ();
9223 is_instance
= FALSE
;
9226 if (cfg
->generic_sharing_context
)
9227 context_used
= mono_class_check_context_used (klass
);
9231 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
9232 if (op
== CEE_STFLD
) {
9233 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
9235 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
9236 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
9237 MonoInst
*iargs
[5];
9239 GSHAREDVT_FAILURE (op
);
9242 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9243 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
9244 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
9248 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
9249 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
9250 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
9251 CHECK_CFG_EXCEPTION
;
9252 g_assert (costs
> 0);
9254 cfg
->real_offset
+= 5;
9257 inline_costs
+= costs
;
9259 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
9264 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
9266 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
9267 MonoInst
*offset_ins
;
9269 if (cfg
->generic_sharing_context
)
9270 context_used
= mono_class_check_context_used (klass
);
9272 offset_ins
= emit_get_rgctx_field (cfg
, context_used
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9273 dreg
= alloc_ireg_mp (cfg
);
9274 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, sp
[0]->dreg
, offset_ins
->dreg
);
9275 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, dreg
, 0, sp
[1]->dreg
);
9276 // FIXME-VT: wbarriers ?
9278 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
9280 if (sp
[0]->opcode
!= OP_LDADDR
)
9281 store
->flags
|= MONO_INST_FAULT
;
9283 if (cfg
->gen_write_barriers
&& mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
9284 /* insert call to write barrier */
9288 dreg
= alloc_ireg_mp (cfg
);
9289 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
9290 emit_write_barrier (cfg
, ptr
, sp
[1], -1);
9293 store
->flags
|= ins_flag
;
9300 if (is_instance
&& ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
)) {
9301 MonoMethod
*wrapper
= (op
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
9302 MonoInst
*iargs
[4];
9304 GSHAREDVT_FAILURE (op
);
9307 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9308 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
9309 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
9310 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
9311 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
9312 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
9313 CHECK_CFG_EXCEPTION
;
9315 g_assert (costs
> 0);
9317 cfg
->real_offset
+= 5;
9321 inline_costs
+= costs
;
9323 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
9326 } else if (is_instance
) {
9327 if (sp
[0]->type
== STACK_VTYPE
) {
9330 /* Have to compute the address of the variable */
9332 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9334 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9336 g_assert (var
->klass
== klass
);
9338 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
9342 if (op
== CEE_LDFLDA
) {
9343 if (is_magic_tls_access (field
)) {
9344 GSHAREDVT_FAILURE (*ip
);
9346 *sp
++ = create_magic_tls_access (cfg
, field
, &cached_tls_addr
, ins
);
9348 if (sp
[0]->type
== STACK_OBJ
) {
9349 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, sp
[0]->dreg
, 0);
9350 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "NullReferenceException");
9353 dreg
= alloc_ireg_mp (cfg
);
9355 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
9356 MonoInst
*offset_ins
;
9358 offset_ins
= emit_get_rgctx_field (cfg
, context_used
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9359 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, sp
[0]->dreg
, offset_ins
->dreg
);
9361 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
9363 ins
->klass
= mono_class_from_mono_type (field
->type
);
9364 ins
->type
= STACK_MP
;
9370 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
9372 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
9373 MonoInst
*offset_ins
;
9375 offset_ins
= emit_get_rgctx_field (cfg
, context_used
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9376 dreg
= alloc_ireg_mp (cfg
);
9377 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, sp
[0]->dreg
, offset_ins
->dreg
);
9378 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, dreg
, 0);
9380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
9382 load
->flags
|= ins_flag
;
9383 if (sp
[0]->opcode
!= OP_LDADDR
)
9384 load
->flags
|= MONO_INST_FAULT
;
9398 * We can only support shared generic static
9399 * field access on architectures where the
9400 * trampoline code has been extended to handle
9401 * the generic class init.
9403 #ifndef MONO_ARCH_VTABLE_REG
9404 GENERIC_SHARING_FAILURE (op
);
9407 if (cfg
->generic_sharing_context
)
9408 context_used
= mono_class_check_context_used (klass
);
9410 ftype
= mono_field_get_type (field
);
9412 if (ftype
->attrs
& FIELD_ATTRIBUTE_LITERAL
)
9415 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9416 * to be called here.
9418 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
9419 mono_class_vtable (cfg
->domain
, klass
);
9420 CHECK_TYPELOAD (klass
);
9422 mono_domain_lock (cfg
->domain
);
9423 if (cfg
->domain
->special_static_fields
)
9424 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
9425 mono_domain_unlock (cfg
->domain
);
9427 is_special_static
= mono_class_field_is_special_static (field
);
9429 /* Generate IR to compute the field address */
9430 if (is_special_static
&& ((gsize
)addr
& 0x80000000) == 0 && mono_get_thread_intrinsic (cfg
) && !(cfg
->opt
& MONO_OPT_SHARED
) && !context_used
) {
9432 * Fast access to TLS data
9433 * Inline version of get_thread_static_data () in
9437 int idx
, static_data_reg
, array_reg
, dreg
;
9438 MonoInst
*thread_ins
;
9440 GSHAREDVT_FAILURE (op
);
9442 // offset &= 0x7fffffff;
9443 // idx = (offset >> 24) - 1;
9444 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9446 thread_ins
= mono_get_thread_intrinsic (cfg
);
9447 MONO_ADD_INS (cfg
->cbb
, thread_ins
);
9448 static_data_reg
= alloc_ireg (cfg
);
9449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, static_data_reg
, thread_ins
->dreg
, G_STRUCT_OFFSET (MonoInternalThread
, static_data
));
9451 if (cfg
->compile_aot
) {
9452 int offset_reg
, offset2_reg
, idx_reg
;
9454 /* For TLS variables, this will return the TLS offset */
9455 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
9456 offset_reg
= ins
->dreg
;
9457 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset_reg
, offset_reg
, 0x7fffffff);
9458 idx_reg
= alloc_ireg (cfg
);
9459 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_IMM
, idx_reg
, offset_reg
, 24);
9460 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISUB_IMM
, idx_reg
, idx_reg
, 1);
9461 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHL_IMM
, idx_reg
, idx_reg
, sizeof (gpointer
) == 8 ? 3 : 2);
9462 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, static_data_reg
, static_data_reg
, idx_reg
);
9463 array_reg
= alloc_ireg (cfg
);
9464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, 0);
9465 offset2_reg
= alloc_ireg (cfg
);
9466 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset2_reg
, offset_reg
, 0xffffff);
9467 dreg
= alloc_ireg (cfg
);
9468 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, array_reg
, offset2_reg
);
9470 offset
= (gsize
)addr
& 0x7fffffff;
9471 idx
= (offset
>> 24) - 1;
9473 array_reg
= alloc_ireg (cfg
);
9474 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, idx
* sizeof (gpointer
));
9475 dreg
= alloc_ireg (cfg
);
9476 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_ADD_IMM
, dreg
, array_reg
, (offset
& 0xffffff));
9478 } else if ((cfg
->opt
& MONO_OPT_SHARED
) ||
9479 (cfg
->compile_aot
&& is_special_static
) ||
9480 (context_used
&& is_special_static
)) {
9481 MonoInst
*iargs
[2];
9483 g_assert (field
->parent
);
9484 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9486 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
9487 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
9489 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
9491 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
9492 } else if (context_used
) {
9493 MonoInst
*static_data
;
9496 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9497 method->klass->name_space, method->klass->name, method->name,
9498 depth, field->offset);
9501 if (mono_class_needs_cctor_run (klass
, method
))
9502 emit_generic_class_init (cfg
, klass
);
9505 * The pointer we're computing here is
9507 * super_info.static_data + field->offset
9509 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
9510 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
9512 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
9513 MonoInst
*offset_ins
;
9515 offset_ins
= emit_get_rgctx_field (cfg
, context_used
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9516 dreg
= alloc_ireg_mp (cfg
);
9517 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, static_data
->dreg
, offset_ins
->dreg
);
9518 } else if (field
->offset
== 0) {
9521 int addr_reg
= mono_alloc_preg (cfg
);
9522 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
9524 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
9525 MonoInst
*iargs
[2];
9527 g_assert (field
->parent
);
9528 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9529 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
9530 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
9532 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
9534 CHECK_TYPELOAD (klass
);
9536 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
)) {
9537 if (!(g_slist_find (class_inits
, vtable
))) {
9538 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
9539 if (cfg
->verbose_level
> 2)
9540 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
9541 class_inits
= g_slist_prepend (class_inits
, vtable
);
9544 if (cfg
->run_cctors
) {
9546 /* This makes so that inline cannot trigger */
9547 /* .cctors: too many apps depend on them */
9548 /* running with a specific order... */
9549 if (! vtable
->initialized
)
9550 INLINE_FAILURE ("class init");
9551 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
9553 set_exception_object (cfg
, ex
);
9554 goto exception_exit
;
9558 addr
= (char*)mono_vtable_get_static_field_data (vtable
) + field
->offset
;
9560 if (cfg
->compile_aot
)
9561 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
9563 EMIT_NEW_PCONST (cfg
, ins
, addr
);
9565 MonoInst
*iargs
[1];
9566 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
9567 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
9571 /* Generate IR to do the actual load/store operation */
9573 if (op
== CEE_LDSFLDA
) {
9574 ins
->klass
= mono_class_from_mono_type (ftype
);
9575 ins
->type
= STACK_PTR
;
9577 } else if (op
== CEE_STSFLD
) {
9580 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, ftype
, ins
->dreg
, 0, store_val
->dreg
);
9581 store
->flags
|= ins_flag
;
9583 gboolean is_const
= FALSE
;
9584 MonoVTable
*vtable
= NULL
;
9585 gpointer addr
= NULL
;
9587 if (!context_used
) {
9588 vtable
= mono_class_vtable (cfg
->domain
, klass
);
9589 CHECK_TYPELOAD (klass
);
9591 if ((ftype
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
) && (((addr
= mono_aot_readonly_field_override (field
)) != NULL
) ||
9592 (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) && vtable
->initialized
))) {
9593 int ro_type
= ftype
->type
;
9595 addr
= (char*)mono_vtable_get_static_field_data (vtable
) + field
->offset
;
9596 if (ro_type
== MONO_TYPE_VALUETYPE
&& ftype
->data
.klass
->enumtype
) {
9597 ro_type
= mono_class_enum_basetype (ftype
->data
.klass
)->type
;
9600 GSHAREDVT_FAILURE (op
);
9602 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9605 case MONO_TYPE_BOOLEAN
:
9607 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
9611 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
9614 case MONO_TYPE_CHAR
:
9616 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
9620 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
9625 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
9629 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
9635 case MONO_TYPE_FNPTR
:
9636 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
9637 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
9640 case MONO_TYPE_STRING
:
9641 case MONO_TYPE_OBJECT
:
9642 case MONO_TYPE_CLASS
:
9643 case MONO_TYPE_SZARRAY
:
9644 case MONO_TYPE_ARRAY
:
9645 if (!mono_gc_is_moving ()) {
9646 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
9647 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
9655 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
9660 case MONO_TYPE_VALUETYPE
:
9670 CHECK_STACK_OVF (1);
9672 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
9673 load
->flags
|= ins_flag
;
9686 token
= read32 (ip
+ 1);
9687 klass
= mini_get_class (method
, token
, generic_context
);
9688 CHECK_TYPELOAD (klass
);
9689 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9690 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
9691 if (cfg
->gen_write_barriers
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&&
9692 generic_class_is_reference_type (cfg
, klass
)) {
9693 /* insert call to write barrier */
9694 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
9706 const char *data_ptr
;
9708 guint32 field_token
;
9714 token
= read32 (ip
+ 1);
9716 klass
= mini_get_class (method
, token
, generic_context
);
9717 CHECK_TYPELOAD (klass
);
9719 if (cfg
->generic_sharing_context
)
9720 context_used
= mono_class_check_context_used (klass
);
9722 if (sp
[0]->type
== STACK_I8
|| (SIZEOF_VOID_P
== 8 && sp
[0]->type
== STACK_PTR
)) {
9723 MONO_INST_NEW (cfg
, ins
, OP_LCONV_TO_I4
);
9724 ins
->sreg1
= sp
[0]->dreg
;
9725 ins
->type
= STACK_I4
;
9726 ins
->dreg
= alloc_ireg (cfg
);
9727 MONO_ADD_INS (cfg
->cbb
, ins
);
9728 *sp
= mono_decompose_opcode (cfg
, ins
);
9733 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
9734 /* FIXME: we cannot get a managed
9735 allocator because we can't get the
9736 open generic class's vtable. We
9737 have the same problem in
9738 handle_alloc(). This
9739 needs to be solved so that we can
9740 have managed allocs of shared
9743 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9744 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9746 MonoMethod
*managed_alloc
= NULL
;
9748 /* FIXME: Decompose later to help abcrem */
9751 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
9752 array_class
, MONO_RGCTX_INFO_VTABLE
);
9757 ins
= mono_emit_method_call (cfg
, managed_alloc
, args
, NULL
);
9759 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
9761 if (cfg
->opt
& MONO_OPT_SHARED
) {
9762 /* Decompose now to avoid problems with references to the domainvar */
9763 MonoInst
*iargs
[3];
9765 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9766 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9769 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
9771 /* Decompose later since it is needed by abcrem */
9772 MonoClass
*array_type
= mono_array_class_get (klass
, 1);
9773 mono_class_vtable (cfg
->domain
, array_type
);
9774 CHECK_TYPELOAD (array_type
);
9776 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
9777 ins
->dreg
= alloc_ireg_ref (cfg
);
9778 ins
->sreg1
= sp
[0]->dreg
;
9779 ins
->inst_newa_class
= klass
;
9780 ins
->type
= STACK_OBJ
;
9781 ins
->klass
= array_type
;
9782 MONO_ADD_INS (cfg
->cbb
, ins
);
9783 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
9784 cfg
->cbb
->has_array_access
= TRUE
;
9786 /* Needed so mono_emit_load_get_addr () gets called */
9787 mono_get_got_var (cfg
);
9797 * we inline/optimize the initialization sequence if possible.
9798 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9799 * for small sizes open code the memcpy
9800 * ensure the rva field is big enough
9802 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
9803 MonoMethod
*memcpy_method
= get_memcpy_method ();
9804 MonoInst
*iargs
[3];
9805 int add_reg
= alloc_ireg_mp (cfg
);
9807 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
9808 if (cfg
->compile_aot
) {
9809 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
9811 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
9813 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
9814 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9823 if (sp
[0]->type
!= STACK_OBJ
)
9826 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
9827 ins
->dreg
= alloc_preg (cfg
);
9828 ins
->sreg1
= sp
[0]->dreg
;
9829 ins
->type
= STACK_I4
;
9830 /* This flag will be inherited by the decomposition */
9831 ins
->flags
|= MONO_INST_FAULT
;
9832 MONO_ADD_INS (cfg
->cbb
, ins
);
9833 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
9834 cfg
->cbb
->has_array_access
= TRUE
;
9842 if (sp
[0]->type
!= STACK_OBJ
)
9845 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9847 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
9848 CHECK_TYPELOAD (klass
);
9849 /* we need to make sure that this array is exactly the type it needs
9850 * to be for correctness. the wrappers are lax with their usage
9851 * so we need to ignore them here
9853 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
) {
9854 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
9855 mini_emit_check_array_type (cfg
, sp
[0], array_class
);
9856 CHECK_TYPELOAD (array_class
);
9860 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9875 case CEE_LDELEM_REF
: {
9881 if (*ip
== CEE_LDELEM
) {
9883 token
= read32 (ip
+ 1);
9884 klass
= mini_get_class (method
, token
, generic_context
);
9885 CHECK_TYPELOAD (klass
);
9886 mono_class_init (klass
);
9889 klass
= array_access_to_klass (*ip
);
9891 if (sp
[0]->type
!= STACK_OBJ
)
9894 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9896 if (mini_is_gsharedvt_klass (cfg
, klass
)) {
9897 // FIXME-VT: OP_ICONST optimization
9898 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9899 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
9900 ins
->opcode
= OP_LOADV_MEMBASE
;
9901 } else if (sp
[1]->opcode
== OP_ICONST
) {
9902 int array_reg
= sp
[0]->dreg
;
9903 int index_reg
= sp
[1]->dreg
;
9904 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
9906 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
9907 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
9909 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9910 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
9913 if (*ip
== CEE_LDELEM
)
9926 case CEE_STELEM_REF
:
9931 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9933 if (*ip
== CEE_STELEM
) {
9935 token
= read32 (ip
+ 1);
9936 klass
= mini_get_class (method
, token
, generic_context
);
9937 CHECK_TYPELOAD (klass
);
9938 mono_class_init (klass
);
9941 klass
= array_access_to_klass (*ip
);
9943 if (sp
[0]->type
!= STACK_OBJ
)
9946 emit_array_store (cfg
, klass
, sp
, TRUE
);
9948 if (*ip
== CEE_STELEM
)
9955 case CEE_CKFINITE
: {
9959 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
9960 ins
->sreg1
= sp
[0]->dreg
;
9961 ins
->dreg
= alloc_freg (cfg
);
9962 ins
->type
= STACK_R8
;
9963 MONO_ADD_INS (bblock
, ins
);
9965 *sp
++ = mono_decompose_opcode (cfg
, ins
);
9970 case CEE_REFANYVAL
: {
9971 MonoInst
*src_var
, *src
;
9973 int klass_reg
= alloc_preg (cfg
);
9974 int dreg
= alloc_preg (cfg
);
9976 GSHAREDVT_FAILURE (*ip
);
9979 MONO_INST_NEW (cfg
, ins
, *ip
);
9982 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
9983 CHECK_TYPELOAD (klass
);
9984 mono_class_init (klass
);
9986 if (cfg
->generic_sharing_context
)
9987 context_used
= mono_class_check_context_used (klass
);
9990 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9992 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9993 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
9997 MonoInst
*klass_ins
;
9999 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
10000 klass
, MONO_RGCTX_INFO_KLASS
);
10003 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
10004 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
10006 mini_emit_class_check (cfg
, klass_reg
, klass
);
10008 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
10009 ins
->type
= STACK_MP
;
10014 case CEE_MKREFANY
: {
10015 MonoInst
*loc
, *addr
;
10017 GSHAREDVT_FAILURE (*ip
);
10020 MONO_INST_NEW (cfg
, ins
, *ip
);
10023 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
10024 CHECK_TYPELOAD (klass
);
10025 mono_class_init (klass
);
10027 if (cfg
->generic_sharing_context
)
10028 context_used
= mono_class_check_context_used (klass
);
10030 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
10031 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
10033 if (context_used
) {
10034 MonoInst
*const_ins
;
10035 int type_reg
= alloc_preg (cfg
);
10037 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
10038 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
10039 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
10040 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
10041 } else if (cfg
->compile_aot
) {
10042 int const_reg
= alloc_preg (cfg
);
10043 int type_reg
= alloc_preg (cfg
);
10045 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
10046 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
10047 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
10048 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
10050 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
10051 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
10053 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
10055 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
10056 ins
->type
= STACK_VTYPE
;
10057 ins
->klass
= mono_defaults
.typed_reference_class
;
10062 case CEE_LDTOKEN
: {
10064 MonoClass
*handle_class
;
10066 CHECK_STACK_OVF (1);
10069 n
= read32 (ip
+ 1);
10071 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
10072 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
10073 handle
= mono_method_get_wrapper_data (method
, n
);
10074 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
10075 if (handle_class
== mono_defaults
.typehandle_class
)
10076 handle
= &((MonoClass
*)handle
)->byval_arg
;
10079 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
10083 mono_class_init (handle_class
);
10084 if (cfg
->generic_sharing_context
) {
10085 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
10086 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
10087 /* This case handles ldtoken
10088 of an open type, like for
10091 } else if (handle_class
== mono_defaults
.typehandle_class
) {
10092 /* If we get a MONO_TYPE_CLASS
10093 then we need to provide the
10095 instantiation of it. */
10096 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
10099 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
10100 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
10101 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
10102 else if (handle_class
== mono_defaults
.methodhandle_class
)
10103 context_used
= mono_method_check_context_used (handle
);
10105 g_assert_not_reached ();
10108 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
10109 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
10110 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
10111 MonoInst
*addr
, *vtvar
, *iargs
[3];
10112 int method_context_used
;
10114 if (cfg
->generic_sharing_context
)
10115 method_context_used
= mono_method_check_context_used (method
);
10117 method_context_used
= 0;
10119 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
10121 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
10122 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
10123 if (method_context_used
) {
10124 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
10125 method
, MONO_RGCTX_INFO_METHOD
);
10126 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
10128 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
10129 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
10131 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
10133 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
10135 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
10137 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
10138 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
10139 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
10140 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
10141 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
10142 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
10144 mono_class_init (tclass
);
10145 if (context_used
) {
10146 ins
= emit_get_rgctx_klass (cfg
, context_used
,
10147 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
10148 } else if (cfg
->compile_aot
) {
10149 if (method
->wrapper_type
) {
10150 if (mono_class_get (tclass
->image
, tclass
->type_token
) == tclass
&& !generic_context
) {
10151 /* Special case for static synchronized wrappers */
10152 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, tclass
->image
, tclass
->type_token
, generic_context
);
10154 /* FIXME: n is not a normal token */
10155 cfg
->disable_aot
= TRUE
;
10156 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
10159 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
10162 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
10164 ins
->type
= STACK_OBJ
;
10165 ins
->klass
= cmethod
->klass
;
10168 MonoInst
*addr
, *vtvar
;
10170 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
10172 if (context_used
) {
10173 if (handle_class
== mono_defaults
.typehandle_class
) {
10174 ins
= emit_get_rgctx_klass (cfg
, context_used
,
10175 mono_class_from_mono_type (handle
),
10176 MONO_RGCTX_INFO_TYPE
);
10177 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
10178 ins
= emit_get_rgctx_method (cfg
, context_used
,
10179 handle
, MONO_RGCTX_INFO_METHOD
);
10180 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
10181 ins
= emit_get_rgctx_field (cfg
, context_used
,
10182 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
10184 g_assert_not_reached ();
10186 } else if (cfg
->compile_aot
) {
10187 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
10189 EMIT_NEW_PCONST (cfg
, ins
, handle
);
10191 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
10192 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
10193 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
10203 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
10205 ins
->sreg1
= sp
[0]->dreg
;
10207 bblock
->out_of_line
= TRUE
;
10208 MONO_ADD_INS (bblock
, ins
);
10209 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
10210 MONO_ADD_INS (bblock
, ins
);
10213 link_bblock (cfg
, bblock
, end_bblock
);
10214 start_new_bblock
= 1;
10216 case CEE_ENDFINALLY
:
10217 /* mono_save_seq_point_info () depends on this */
10218 if (sp
!= stack_start
)
10219 emit_seq_point (cfg
, method
, ip
, FALSE
);
10220 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
10221 MONO_ADD_INS (bblock
, ins
);
10223 start_new_bblock
= 1;
10226 * Control will leave the method so empty the stack, otherwise
10227 * the next basic block will start with a nonempty stack.
10229 while (sp
!= stack_start
) {
10234 case CEE_LEAVE_S
: {
10237 if (*ip
== CEE_LEAVE
) {
10239 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
10242 target
= ip
+ 2 + (signed char)(ip
[1]);
10245 /* empty the stack */
10246 while (sp
!= stack_start
) {
10251 * If this leave statement is in a catch block, check for a
10252 * pending exception, and rethrow it if necessary.
10253 * We avoid doing this in runtime invoke wrappers, since those are called
10254 * by native code which excepts the wrapper to catch all exceptions.
10256 for (i
= 0; i
< header
->num_clauses
; ++i
) {
10257 MonoExceptionClause
*clause
= &header
->clauses
[i
];
10260 * Use <= in the final comparison to handle clauses with multiple
10261 * leave statements, like in bug #78024.
10262 * The ordering of the exception clauses guarantees that we find the
10263 * innermost clause.
10265 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
) && method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
) {
10267 MonoBasicBlock
*dont_throw
;
10272 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10275 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
10277 NEW_BBLOCK (cfg
, dont_throw
);
10280 * Currently, we always rethrow the abort exception, despite the
10281 * fact that this is not correct. See thread6.cs for an example.
10282 * But propagating the abort exception is more important than
10283 * getting the sematics right.
10285 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
10286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
10287 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
10289 MONO_START_BB (cfg
, dont_throw
);
10294 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
10296 MonoExceptionClause
*clause
;
10298 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
10299 clause
= tmp
->data
;
10300 tblock
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
10302 link_bblock (cfg
, bblock
, tblock
);
10303 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
10304 ins
->inst_target_bb
= tblock
;
10305 ins
->inst_eh_block
= clause
;
10306 MONO_ADD_INS (bblock
, ins
);
10307 bblock
->has_call_handler
= 1;
10308 if (COMPILE_LLVM (cfg
)) {
10309 MonoBasicBlock
*target_bb
;
10312 * Link the finally bblock with the target, since it will
10313 * conceptually branch there.
10314 * FIXME: Have to link the bblock containing the endfinally.
10316 GET_BBLOCK (cfg
, target_bb
, target
);
10317 link_bblock (cfg
, tblock
, target_bb
);
10320 g_list_free (handlers
);
10323 MONO_INST_NEW (cfg
, ins
, OP_BR
);
10324 MONO_ADD_INS (bblock
, ins
);
10325 GET_BBLOCK (cfg
, tblock
, target
);
10326 link_bblock (cfg
, bblock
, tblock
);
10327 ins
->inst_target_bb
= tblock
;
10328 start_new_bblock
= 1;
10330 if (*ip
== CEE_LEAVE
)
10339 * Mono specific opcodes
10341 case MONO_CUSTOM_PREFIX
: {
10343 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10347 case CEE_MONO_ICALL
: {
10349 MonoJitICallInfo
*info
;
10351 token
= read32 (ip
+ 2);
10352 func
= mono_method_get_wrapper_data (method
, token
);
10353 info
= mono_find_jit_icall_by_addr (func
);
10355 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method
, 1));
10358 CHECK_STACK (info
->sig
->param_count
);
10359 sp
-= info
->sig
->param_count
;
10361 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
10362 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
10366 inline_costs
+= 10 * num_calls
++;
10370 case CEE_MONO_LDPTR
: {
10373 CHECK_STACK_OVF (1);
10375 token
= read32 (ip
+ 2);
10377 ptr
= mono_method_get_wrapper_data (method
, token
);
10378 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
10379 MonoJitICallInfo
*callinfo
;
10380 const char *icall_name
;
10382 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
10383 g_assert (icall_name
);
10384 callinfo
= mono_find_jit_icall_by_name (icall_name
);
10385 g_assert (callinfo
);
10387 if (ptr
== callinfo
->func
) {
10388 /* Will be transformed into an AOTCONST later */
10389 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
10395 /* FIXME: Generalize this */
10396 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
10397 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
10402 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
10405 inline_costs
+= 10 * num_calls
++;
10406 /* Can't embed random pointers into AOT code */
10407 cfg
->disable_aot
= 1;
10410 case CEE_MONO_ICALL_ADDR
: {
10411 MonoMethod
*cmethod
;
10414 CHECK_STACK_OVF (1);
10416 token
= read32 (ip
+ 2);
10418 cmethod
= mono_method_get_wrapper_data (method
, token
);
10420 if (cfg
->compile_aot
) {
10421 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
10423 ptr
= mono_lookup_internal_call (cmethod
);
10425 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
10431 case CEE_MONO_VTADDR
: {
10432 MonoInst
*src_var
, *src
;
10438 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
10439 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
10444 case CEE_MONO_NEWOBJ
: {
10445 MonoInst
*iargs
[2];
10447 CHECK_STACK_OVF (1);
10449 token
= read32 (ip
+ 2);
10450 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10451 mono_class_init (klass
);
10452 NEW_DOMAINCONST (cfg
, iargs
[0]);
10453 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
10454 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
10455 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
10456 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
10458 inline_costs
+= 10 * num_calls
++;
10461 case CEE_MONO_OBJADDR
:
10464 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
10465 ins
->dreg
= alloc_ireg_mp (cfg
);
10466 ins
->sreg1
= sp
[0]->dreg
;
10467 ins
->type
= STACK_MP
;
10468 MONO_ADD_INS (cfg
->cbb
, ins
);
10472 case CEE_MONO_LDNATIVEOBJ
:
10474 * Similar to LDOBJ, but instead load the unmanaged
10475 * representation of the vtype to the stack.
10480 token
= read32 (ip
+ 2);
10481 klass
= mono_method_get_wrapper_data (method
, token
);
10482 g_assert (klass
->valuetype
);
10483 mono_class_init (klass
);
10486 MonoInst
*src
, *dest
, *temp
;
10489 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
10490 temp
->backend
.is_pinvoke
= 1;
10491 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
10492 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
10494 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
10495 dest
->type
= STACK_VTYPE
;
10496 dest
->klass
= klass
;
10502 case CEE_MONO_RETOBJ
: {
10504 * Same as RET, but return the native representation of a vtype
10507 g_assert (cfg
->ret
);
10508 g_assert (mono_method_signature (method
)->pinvoke
);
10513 token
= read32 (ip
+ 2);
10514 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10516 if (!cfg
->vret_addr
) {
10517 g_assert (cfg
->ret_var_is_local
);
10519 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
10521 EMIT_NEW_RETLOADA (cfg
, ins
);
10523 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
10525 if (sp
!= stack_start
)
10528 MONO_INST_NEW (cfg
, ins
, OP_BR
);
10529 ins
->inst_target_bb
= end_bblock
;
10530 MONO_ADD_INS (bblock
, ins
);
10531 link_bblock (cfg
, bblock
, end_bblock
);
10532 start_new_bblock
= 1;
10536 case CEE_MONO_CISINST
:
10537 case CEE_MONO_CCASTCLASS
: {
10542 token
= read32 (ip
+ 2);
10543 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10544 if (ip
[1] == CEE_MONO_CISINST
)
10545 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
10547 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
10553 case CEE_MONO_SAVE_LMF
:
10554 case CEE_MONO_RESTORE_LMF
:
10555 #ifdef MONO_ARCH_HAVE_LMF_OPS
10556 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
10557 MONO_ADD_INS (bblock
, ins
);
10558 cfg
->need_lmf_area
= TRUE
;
10562 case CEE_MONO_CLASSCONST
:
10563 CHECK_STACK_OVF (1);
10565 token
= read32 (ip
+ 2);
10566 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
10569 inline_costs
+= 10 * num_calls
++;
10571 case CEE_MONO_NOT_TAKEN
:
10572 bblock
->out_of_line
= TRUE
;
10576 CHECK_STACK_OVF (1);
10578 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
10579 ins
->dreg
= alloc_preg (cfg
);
10580 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
10581 ins
->type
= STACK_PTR
;
10582 MONO_ADD_INS (bblock
, ins
);
10586 case CEE_MONO_DYN_CALL
: {
10587 MonoCallInst
*call
;
10589 /* It would be easier to call a trampoline, but that would put an
10590 * extra frame on the stack, confusing exception handling. So
10591 * implement it inline using an opcode for now.
10594 if (!cfg
->dyn_call_var
) {
10595 cfg
->dyn_call_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
10596 /* prevent it from being register allocated */
10597 cfg
->dyn_call_var
->flags
|= MONO_INST_INDIRECT
;
10600 /* Has to use a call inst since it local regalloc expects it */
10601 MONO_INST_NEW_CALL (cfg
, call
, OP_DYN_CALL
);
10602 ins
= (MonoInst
*)call
;
10604 ins
->sreg1
= sp
[0]->dreg
;
10605 ins
->sreg2
= sp
[1]->dreg
;
10606 MONO_ADD_INS (bblock
, ins
);
10608 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10609 cfg
->param_area
= MAX (cfg
->param_area
, MONO_ARCH_DYN_CALL_PARAM_AREA
);
10613 inline_costs
+= 10 * num_calls
++;
10617 case CEE_MONO_MEMORY_BARRIER
: {
10619 emit_memory_barrier (cfg
, (int)read32 (ip
+ 1));
10623 case CEE_MONO_JIT_ATTACH
: {
10624 MonoInst
*args
[16];
10625 MonoInst
*ad_ins
, *lmf_ins
;
10626 MonoBasicBlock
*next_bb
= NULL
;
10628 cfg
->orig_domain_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
10630 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
10631 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->orig_domain_var
->dreg
, ins
->dreg
);
10637 ad_ins
= mono_get_domain_intrinsic (cfg
);
10638 lmf_ins
= mono_get_lmf_intrinsic (cfg
);
10641 #ifdef MONO_ARCH_HAVE_TLS_GET
10642 if (MONO_ARCH_HAVE_TLS_GET
&& ad_ins
&& lmf_ins
) {
10643 NEW_BBLOCK (cfg
, next_bb
);
10645 MONO_ADD_INS (cfg
->cbb
, ad_ins
);
10646 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, ad_ins
->dreg
, 0);
10647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, next_bb
);
10649 MONO_ADD_INS (cfg
->cbb
, lmf_ins
);
10650 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, lmf_ins
->dreg
, 0);
10651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, next_bb
);
10655 if (cfg
->compile_aot
) {
10656 /* AOT code is only used in the root domain */
10657 EMIT_NEW_PCONST (cfg
, args
[0], NULL
);
10659 EMIT_NEW_PCONST (cfg
, args
[0], cfg
->domain
);
10661 ins
= mono_emit_jit_icall (cfg
, mono_jit_thread_attach
, args
);
10662 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->orig_domain_var
->dreg
, ins
->dreg
);
10665 MONO_START_BB (cfg
, next_bb
);
10671 case CEE_MONO_JIT_DETACH
: {
10672 MonoInst
*args
[16];
10674 /* Restore the original domain */
10675 dreg
= alloc_ireg (cfg
);
10676 EMIT_NEW_UNALU (cfg
, args
[0], OP_MOVE
, dreg
, cfg
->orig_domain_var
->dreg
);
10677 mono_emit_jit_icall (cfg
, mono_jit_set_domain
, args
);
10682 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
10688 case CEE_PREFIX1
: {
10691 case CEE_ARGLIST
: {
10692 /* somewhat similar to LDTOKEN */
10693 MonoInst
*addr
, *vtvar
;
10694 CHECK_STACK_OVF (1);
10695 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
10697 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
10698 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
10700 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
10701 ins
->type
= STACK_VTYPE
;
10702 ins
->klass
= mono_defaults
.argumenthandle_class
;
10715 * The following transforms:
10716 * CEE_CEQ into OP_CEQ
10717 * CEE_CGT into OP_CGT
10718 * CEE_CGT_UN into OP_CGT_UN
10719 * CEE_CLT into OP_CLT
10720 * CEE_CLT_UN into OP_CLT_UN
10722 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
10724 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
10726 cmp
->sreg1
= sp
[0]->dreg
;
10727 cmp
->sreg2
= sp
[1]->dreg
;
10728 type_from_op (cmp
, sp
[0], sp
[1]);
10730 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_VOID_P
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
10731 cmp
->opcode
= OP_LCOMPARE
;
10732 else if (sp
[0]->type
== STACK_R8
)
10733 cmp
->opcode
= OP_FCOMPARE
;
10735 cmp
->opcode
= OP_ICOMPARE
;
10736 MONO_ADD_INS (bblock
, cmp
);
10737 ins
->type
= STACK_I4
;
10738 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
10739 type_from_op (ins
, sp
[0], sp
[1]);
10741 if (cmp
->opcode
== OP_FCOMPARE
) {
10743 * The backends expect the fceq opcodes to do the
10746 cmp
->opcode
= OP_NOP
;
10747 ins
->sreg1
= cmp
->sreg1
;
10748 ins
->sreg2
= cmp
->sreg2
;
10750 MONO_ADD_INS (bblock
, ins
);
10756 MonoInst
*argconst
;
10757 MonoMethod
*cil_method
;
10759 GSHAREDVT_FAILURE (*ip
);
10761 CHECK_STACK_OVF (1);
10763 n
= read32 (ip
+ 2);
10764 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10765 if (!cmethod
|| mono_loader_get_last_error ())
10767 mono_class_init (cmethod
->klass
);
10769 mono_save_token_info (cfg
, image
, n
, cmethod
);
10771 if (cfg
->generic_sharing_context
)
10772 context_used
= mono_method_check_context_used (cmethod
);
10774 cil_method
= cmethod
;
10775 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
10776 METHOD_ACCESS_FAILURE
;
10778 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
10779 if (check_linkdemand (cfg
, method
, cmethod
))
10780 INLINE_FAILURE ("linkdemand");
10781 CHECK_CFG_EXCEPTION
;
10782 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
10783 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
10787 * Optimize the common case of ldftn+delegate creation
10789 if ((sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
10790 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
10791 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
10792 MonoInst
*target_ins
;
10793 MonoMethod
*invoke
;
10794 int invoke_context_used
= 0;
10796 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
10797 if (!invoke
|| !mono_method_signature (invoke
))
10800 if (cfg
->generic_sharing_context
)
10801 invoke_context_used
= mono_method_check_context_used (invoke
);
10803 target_ins
= sp
[-1];
10805 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
10806 ensure_method_is_allowed_to_call_method (cfg
, method
, ctor_method
, bblock
, ip
);
10808 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
)) {
10809 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10810 if (mono_method_signature (invoke
)->param_count
== mono_method_signature (cmethod
)->param_count
) {
10811 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, target_ins
->dreg
, 0);
10812 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "ArgumentException");
10816 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10817 /* FIXME: SGEN support */
10818 if (invoke_context_used
== 0) {
10820 if (cfg
->verbose_level
> 3)
10821 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
10823 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
, context_used
);
10824 CHECK_CFG_EXCEPTION
;
10833 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
10834 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
10838 inline_costs
+= 10 * num_calls
++;
10841 case CEE_LDVIRTFTN
: {
10842 MonoInst
*args
[2];
10844 GSHAREDVT_FAILURE (*ip
);
10848 n
= read32 (ip
+ 2);
10849 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10850 if (!cmethod
|| mono_loader_get_last_error ())
10852 mono_class_init (cmethod
->klass
);
10854 if (cfg
->generic_sharing_context
)
10855 context_used
= mono_method_check_context_used (cmethod
);
10857 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
10858 if (check_linkdemand (cfg
, method
, cmethod
))
10859 INLINE_FAILURE ("linkdemand");
10860 CHECK_CFG_EXCEPTION
;
10861 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
10862 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
10868 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
10869 cmethod
, MONO_RGCTX_INFO_METHOD
);
10872 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
10874 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
10877 inline_costs
+= 10 * num_calls
++;
10881 CHECK_STACK_OVF (1);
10883 n
= read16 (ip
+ 2);
10885 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
10890 CHECK_STACK_OVF (1);
10892 n
= read16 (ip
+ 2);
10894 NEW_ARGLOADA (cfg
, ins
, n
);
10895 MONO_ADD_INS (cfg
->cbb
, ins
);
10903 n
= read16 (ip
+ 2);
10905 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
10907 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
10911 CHECK_STACK_OVF (1);
10913 n
= read16 (ip
+ 2);
10915 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
10920 unsigned char *tmp_ip
;
10921 CHECK_STACK_OVF (1);
10923 n
= read16 (ip
+ 2);
10926 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
10932 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
10941 n
= read16 (ip
+ 2);
10943 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
10945 emit_stloc_ir (cfg
, sp
, header
, n
);
10952 if (sp
!= stack_start
)
10954 if (cfg
->method
!= method
)
10956 * Inlining this into a loop in a parent could lead to
10957 * stack overflows which is different behavior than the
10958 * non-inlined case, thus disable inlining in this case.
10960 goto inline_failure
;
10962 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
10963 ins
->dreg
= alloc_preg (cfg
);
10964 ins
->sreg1
= sp
[0]->dreg
;
10965 ins
->type
= STACK_PTR
;
10966 MONO_ADD_INS (cfg
->cbb
, ins
);
10968 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10970 ins
->flags
|= MONO_INST_INIT
;
10975 case CEE_ENDFILTER
: {
10976 MonoExceptionClause
*clause
, *nearest
;
10977 int cc
, nearest_num
;
10981 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
10983 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
10984 ins
->sreg1
= (*sp
)->dreg
;
10985 MONO_ADD_INS (bblock
, ins
);
10986 start_new_bblock
= 1;
10991 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
10992 clause
= &header
->clauses
[cc
];
10993 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
10994 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
10995 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
11000 g_assert (nearest
);
11001 if ((ip
- header
->code
) != nearest
->handler_offset
)
11006 case CEE_UNALIGNED_
:
11007 ins_flag
|= MONO_INST_UNALIGNED
;
11008 /* FIXME: record alignment? we can assume 1 for now */
11012 case CEE_VOLATILE_
:
11013 ins_flag
|= MONO_INST_VOLATILE
;
11017 ins_flag
|= MONO_INST_TAILCALL
;
11018 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
11019 /* Can't inline tail calls at this time */
11020 inline_costs
+= 100000;
11027 token
= read32 (ip
+ 2);
11028 klass
= mini_get_class (method
, token
, generic_context
);
11029 CHECK_TYPELOAD (klass
);
11030 if (generic_class_is_reference_type (cfg
, klass
))
11031 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
11033 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
11037 case CEE_CONSTRAINED_
:
11039 token
= read32 (ip
+ 2);
11040 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
11041 constrained_call
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
11043 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
11044 CHECK_TYPELOAD (constrained_call
);
11048 case CEE_INITBLK
: {
11049 MonoInst
*iargs
[3];
11053 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
11054 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
11055 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
11056 /* emit_memset only works when val == 0 */
11057 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
11059 iargs
[0] = sp
[0];
11060 iargs
[1] = sp
[1];
11061 iargs
[2] = sp
[2];
11062 if (ip
[1] == CEE_CPBLK
) {
11063 MonoMethod
*memcpy_method
= get_memcpy_method ();
11064 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
11066 MonoMethod
*memset_method
= get_memset_method ();
11067 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
11077 ins_flag
|= MONO_INST_NOTYPECHECK
;
11079 ins_flag
|= MONO_INST_NORANGECHECK
;
11080 /* we ignore the no-nullcheck for now since we
11081 * really do it explicitly only when doing callvirt->call
11085 case CEE_RETHROW
: {
11087 int handler_offset
= -1;
11089 for (i
= 0; i
< header
->num_clauses
; ++i
) {
11090 MonoExceptionClause
*clause
= &header
->clauses
[i
];
11091 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
11092 handler_offset
= clause
->handler_offset
;
11097 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
11099 g_assert (handler_offset
!= -1);
11101 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
11102 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
11103 ins
->sreg1
= load
->dreg
;
11104 MONO_ADD_INS (bblock
, ins
);
11106 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
11107 MONO_ADD_INS (bblock
, ins
);
11110 link_bblock (cfg
, bblock
, end_bblock
);
11111 start_new_bblock
= 1;
11119 GSHAREDVT_FAILURE (*ip
);
11121 CHECK_STACK_OVF (1);
11123 token
= read32 (ip
+ 2);
11124 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
&& !method
->klass
->image
->dynamic
&& !generic_context
) {
11125 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
11126 val
= mono_type_size (type
, &ialign
);
11128 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
11129 CHECK_TYPELOAD (klass
);
11130 mono_class_init (klass
);
11131 val
= mono_type_size (&klass
->byval_arg
, &ialign
);
11133 EMIT_NEW_ICONST (cfg
, ins
, val
);
11138 case CEE_REFANYTYPE
: {
11139 MonoInst
*src_var
, *src
;
11141 GSHAREDVT_FAILURE (*ip
);
11147 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
11149 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
11150 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
11151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
11156 case CEE_READONLY_
:
11169 g_warning ("opcode 0xfe 0x%02x not handled", ip
[1]);
11179 g_warning ("opcode 0x%02x not handled", *ip
);
11183 if (start_new_bblock
!= 1)
11186 bblock
->cil_length
= ip
- bblock
->cil_code
;
11187 if (bblock
->next_bb
) {
11188 /* This could already be set because of inlining, #693905 */
11189 MonoBasicBlock
*bb
= bblock
;
11191 while (bb
->next_bb
)
11193 bb
->next_bb
= end_bblock
;
11195 bblock
->next_bb
= end_bblock
;
11198 if (cfg
->method
== method
&& cfg
->domainvar
) {
11200 MonoInst
*get_domain
;
11202 cfg
->cbb
= init_localsbb
;
11204 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
11205 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
11208 get_domain
->dreg
= alloc_preg (cfg
);
11209 MONO_ADD_INS (cfg
->cbb
, get_domain
);
11211 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
11212 MONO_ADD_INS (cfg
->cbb
, store
);
11215 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11216 if (cfg
->compile_aot
)
11217 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11218 mono_get_got_var (cfg
);
11221 if (cfg
->method
== method
&& cfg
->got_var
)
11222 mono_emit_load_got_addr (cfg
);
11227 cfg
->cbb
= init_localsbb
;
11229 for (i
= 0; i
< header
->num_locals
; ++i
) {
11230 MonoType
*ptype
= header
->locals
[i
];
11231 int t
= ptype
->type
;
11232 dreg
= cfg
->locals
[i
]->dreg
;
11234 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
11235 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
11236 if (ptype
->byref
) {
11237 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
11238 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
11239 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
11240 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
11241 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
11242 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
11243 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
11244 ins
->type
= STACK_R8
;
11245 ins
->inst_p0
= (void*)&r8_0
;
11246 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
11247 MONO_ADD_INS (init_localsbb
, ins
);
11248 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
11249 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
11250 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
11251 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
11252 } else if (((t
== MONO_TYPE_VAR
) || (t
== MONO_TYPE_MVAR
)) && mini_type_var_is_vt (cfg
, ptype
)) {
11253 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
11255 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
11260 if (cfg
->init_ref_vars
&& cfg
->method
== method
) {
11261 /* Emit initialization for ref vars */
11262 // FIXME: Avoid duplication initialization for IL locals.
11263 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11264 MonoInst
*ins
= cfg
->varinfo
[i
];
11266 if (ins
->opcode
== OP_LOCAL
&& ins
->type
== STACK_OBJ
)
11267 MONO_EMIT_NEW_PCONST (cfg
, ins
->dreg
, NULL
);
11272 MonoBasicBlock
*bb
;
11275 * Make seq points at backward branch targets interruptable.
11277 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
11278 if (bb
->code
&& bb
->in_count
> 1 && bb
->code
->opcode
== OP_SEQ_POINT
)
11279 bb
->code
->flags
|= MONO_INST_SINGLE_STEP_LOC
;
11282 /* Add a sequence point for method entry/exit events */
11284 NEW_SEQ_POINT (cfg
, ins
, METHOD_ENTRY_IL_OFFSET
, FALSE
);
11285 MONO_ADD_INS (init_localsbb
, ins
);
11286 NEW_SEQ_POINT (cfg
, ins
, METHOD_EXIT_IL_OFFSET
, FALSE
);
11287 MONO_ADD_INS (cfg
->bb_exit
, ins
);
11292 if (cfg
->method
== method
) {
11293 MonoBasicBlock
*bb
;
11294 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
11295 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
11297 mono_create_spvar_for_region (cfg
, bb
->region
);
11298 if (cfg
->verbose_level
> 2)
11299 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
11303 g_slist_free (class_inits
);
11304 dont_inline
= g_list_remove (dont_inline
, method
);
11306 if (inline_costs
< 0) {
11309 /* Method is too large */
11310 mname
= mono_method_full_name (method
, TRUE
);
11311 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INVALID_PROGRAM
);
11312 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
11314 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
11315 mono_basic_block_free (original_bb
);
11319 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
11320 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
11322 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
11323 mono_basic_block_free (original_bb
);
11324 return inline_costs
;
11327 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
11334 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
11338 set_exception_type_from_invalid_il (cfg
, method
, ip
);
11342 g_slist_free (class_inits
);
11343 mono_basic_block_free (original_bb
);
11344 dont_inline
= g_list_remove (dont_inline
, method
);
11345 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
11350 store_membase_reg_to_store_membase_imm (int opcode
)
11353 case OP_STORE_MEMBASE_REG
:
11354 return OP_STORE_MEMBASE_IMM
;
11355 case OP_STOREI1_MEMBASE_REG
:
11356 return OP_STOREI1_MEMBASE_IMM
;
11357 case OP_STOREI2_MEMBASE_REG
:
11358 return OP_STOREI2_MEMBASE_IMM
;
11359 case OP_STOREI4_MEMBASE_REG
:
11360 return OP_STOREI4_MEMBASE_IMM
;
11361 case OP_STOREI8_MEMBASE_REG
:
11362 return OP_STOREI8_MEMBASE_IMM
;
11364 g_assert_not_reached ();
11370 #endif /* DISABLE_JIT */
11373 mono_op_to_op_imm (int opcode
)
11377 return OP_IADD_IMM
;
11379 return OP_ISUB_IMM
;
11381 return OP_IDIV_IMM
;
11383 return OP_IDIV_UN_IMM
;
11385 return OP_IREM_IMM
;
11387 return OP_IREM_UN_IMM
;
11389 return OP_IMUL_IMM
;
11391 return OP_IAND_IMM
;
11395 return OP_IXOR_IMM
;
11397 return OP_ISHL_IMM
;
11399 return OP_ISHR_IMM
;
11401 return OP_ISHR_UN_IMM
;
11404 return OP_LADD_IMM
;
11406 return OP_LSUB_IMM
;
11408 return OP_LAND_IMM
;
11412 return OP_LXOR_IMM
;
11414 return OP_LSHL_IMM
;
11416 return OP_LSHR_IMM
;
11418 return OP_LSHR_UN_IMM
;
11421 return OP_COMPARE_IMM
;
11423 return OP_ICOMPARE_IMM
;
11425 return OP_LCOMPARE_IMM
;
11427 case OP_STORE_MEMBASE_REG
:
11428 return OP_STORE_MEMBASE_IMM
;
11429 case OP_STOREI1_MEMBASE_REG
:
11430 return OP_STOREI1_MEMBASE_IMM
;
11431 case OP_STOREI2_MEMBASE_REG
:
11432 return OP_STOREI2_MEMBASE_IMM
;
11433 case OP_STOREI4_MEMBASE_REG
:
11434 return OP_STOREI4_MEMBASE_IMM
;
11436 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11438 return OP_X86_PUSH_IMM
;
11439 case OP_X86_COMPARE_MEMBASE_REG
:
11440 return OP_X86_COMPARE_MEMBASE_IMM
;
11442 #if defined(TARGET_AMD64)
11443 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
11444 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
11446 case OP_VOIDCALL_REG
:
11447 return OP_VOIDCALL
;
11455 return OP_LOCALLOC_IMM
;
11462 ldind_to_load_membase (int opcode
)
11466 return OP_LOADI1_MEMBASE
;
11468 return OP_LOADU1_MEMBASE
;
11470 return OP_LOADI2_MEMBASE
;
11472 return OP_LOADU2_MEMBASE
;
11474 return OP_LOADI4_MEMBASE
;
11476 return OP_LOADU4_MEMBASE
;
11478 return OP_LOAD_MEMBASE
;
11479 case CEE_LDIND_REF
:
11480 return OP_LOAD_MEMBASE
;
11482 return OP_LOADI8_MEMBASE
;
11484 return OP_LOADR4_MEMBASE
;
11486 return OP_LOADR8_MEMBASE
;
11488 g_assert_not_reached ();
11495 stind_to_store_membase (int opcode
)
11499 return OP_STOREI1_MEMBASE_REG
;
11501 return OP_STOREI2_MEMBASE_REG
;
11503 return OP_STOREI4_MEMBASE_REG
;
11505 case CEE_STIND_REF
:
11506 return OP_STORE_MEMBASE_REG
;
11508 return OP_STOREI8_MEMBASE_REG
;
11510 return OP_STORER4_MEMBASE_REG
;
11512 return OP_STORER8_MEMBASE_REG
;
11514 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM variant,
 * or return -1 when the target architecture has no such form. Currently
 * only x86/amd64 support these opcodes.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return a read-modify-write _MEMBASE opcode which combines OPCODE with a
 * store of its result described by STORE_OPCODE, allowing a load+op+store
 * sequence to be fused into one instruction on x86/amd64. Returns -1 when
 * no fused form exists for this store width/opcode combination.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	/* 32 bit ops reuse the x86 opcodes */
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return a fused opcode which performs OPCODE and stores its result
 * directly to memory (per STORE_OPCODE), e.g. the x86 SETcc-to-membase
 * forms for the compare-result opcodes. Returns -1 when no fusion exists.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
	/* FIXME: Add more opcodes */
	switch (opcode) {
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fall through */
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
#endif
	}

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which fuses a memory load (described by LOAD_OPCODE)
 * of OPCODE's first source operand into the instruction itself, or -1 when
 * no such fused form exists. Only x86/amd64 provide these opcodes.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode which fuses a memory load (described by LOAD_OPCODE)
 * of OPCODE's second source operand into the instruction itself, or -1 when
 * no such fused form exists. Only x86/amd64 provide these opcodes; on amd64
 * the fusion is selected by the load width (32 vs 64 bit).
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are emulated
 * on this architecture (long shifts on 32 bit targets without native long
 * shift support, and mul/div/rem where MONO_ARCH_EMULATE_* is defined), so
 * the caller does not create an immediate form of an emulated opcode.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
11827 #ifndef DISABLE_JIT
11830 * mono_handle_global_vregs:
11832 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11836 mono_handle_global_vregs (MonoCompile
*cfg
)
11838 gint32
*vreg_to_bb
;
11839 MonoBasicBlock
*bb
;
11842 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
11844 #ifdef MONO_ARCH_SIMD_INTRINSICS
11845 if (cfg
->uses_simd_intrinsics
)
11846 mono_simd_simplify_indirection (cfg
);
11849 /* Find local vregs used in more than one bb */
11850 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
11851 MonoInst
*ins
= bb
->code
;
11852 int block_num
= bb
->block_num
;
11854 if (cfg
->verbose_level
> 2)
11855 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
11858 for (; ins
; ins
= ins
->next
) {
11859 const char *spec
= INS_INFO (ins
->opcode
);
11860 int regtype
= 0, regindex
;
11863 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11864 mono_print_ins (ins
);
11866 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
11868 for (regindex
= 0; regindex
< 4; regindex
++) {
11871 if (regindex
== 0) {
11872 regtype
= spec
[MONO_INST_DEST
];
11873 if (regtype
== ' ')
11876 } else if (regindex
== 1) {
11877 regtype
= spec
[MONO_INST_SRC1
];
11878 if (regtype
== ' ')
11881 } else if (regindex
== 2) {
11882 regtype
= spec
[MONO_INST_SRC2
];
11883 if (regtype
== ' ')
11886 } else if (regindex
== 3) {
11887 regtype
= spec
[MONO_INST_SRC3
];
11888 if (regtype
== ' ')
11893 #if SIZEOF_REGISTER == 4
11894 /* In the LLVM case, the long opcodes are not decomposed */
11895 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
11897 * Since some instructions reference the original long vreg,
11898 * and some reference the two component vregs, it is quite hard
11899 * to determine when it needs to be global. So be conservative.
11901 if (!get_vreg_to_inst (cfg
, vreg
)) {
11902 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
11904 if (cfg
->verbose_level
> 2)
11905 printf ("LONG VREG R%d made global.\n", vreg
);
11909 * Make the component vregs volatile since the optimizations can
11910 * get confused otherwise.
11912 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
11913 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
11917 g_assert (vreg
!= -1);
11919 prev_bb
= vreg_to_bb
[vreg
];
11920 if (prev_bb
== 0) {
11921 /* 0 is a valid block num */
11922 vreg_to_bb
[vreg
] = block_num
+ 1;
11923 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
11924 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
11927 if (!get_vreg_to_inst (cfg
, vreg
)) {
11928 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11929 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
11933 if (vreg_is_ref (cfg
, vreg
))
11934 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
, vreg
);
11936 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
11939 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
11942 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
11945 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
11948 g_assert_not_reached ();
11952 /* Flag as having been used in more than one bb */
11953 vreg_to_bb
[vreg
] = -1;
11959 /* If a variable is used in only one bblock, convert it into a local vreg */
11960 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11961 MonoInst
*var
= cfg
->varinfo
[i
];
11962 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
11964 switch (var
->type
) {
11970 #if SIZEOF_REGISTER == 8
11973 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11974 /* Enabling this screws up the fp stack on x86 */
11977 /* Arguments are implicitly global */
11978 /* Putting R4 vars into registers doesn't work currently */
11979 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
11981 * Make that the variable's liveness interval doesn't contain a call, since
11982 * that would cause the lvreg to be spilled, making the whole optimization
11985 /* This is too slow for JIT compilation */
11987 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
11989 int def_index
, call_index
, ins_index
;
11990 gboolean spilled
= FALSE
;
11995 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
11996 const char *spec
= INS_INFO (ins
->opcode
);
11998 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
11999 def_index
= ins_index
;
12001 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
12002 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
12003 if (call_index
> def_index
) {
12009 if (MONO_IS_CALL (ins
))
12010 call_index
= ins_index
;
12020 if (G_UNLIKELY (cfg
->verbose_level
> 2))
12021 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
12022 var
->flags
|= MONO_INST_IS_DEAD
;
12023 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
12030 * Compress the varinfo and vars tables so the liveness computation is faster and
12031 * takes up less space.
12034 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
12035 MonoInst
*var
= cfg
->varinfo
[i
];
12036 if (pos
< i
&& cfg
->locals_start
== i
)
12037 cfg
->locals_start
= pos
;
12038 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
12040 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
12041 cfg
->varinfo
[pos
]->inst_c0
= pos
;
12042 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
12043 cfg
->vars
[pos
].idx
= pos
;
12044 #if SIZEOF_REGISTER == 4
12045 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
12046 /* Modify the two component vars too */
12049 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
12050 var1
->inst_c0
= pos
;
12051 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
12052 var1
->inst_c0
= pos
;
12059 cfg
->num_varinfo
= pos
;
12060 if (cfg
->locals_start
> cfg
->num_varinfo
)
12061 cfg
->locals_start
= cfg
->num_varinfo
;
12065 * mono_spill_global_vars:
12067 * Generate spill code for variables which are not allocated to registers,
12068 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12069 * code is generated which could be optimized by the local optimization passes.
12072 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
12074 MonoBasicBlock
*bb
;
12076 int orig_next_vreg
;
12077 guint32
*vreg_to_lvreg
;
12079 guint32 i
, lvregs_len
;
12080 gboolean dest_has_lvreg
= FALSE
;
12081 guint32 stacktypes
[128];
12082 MonoInst
**live_range_start
, **live_range_end
;
12083 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
12085 *need_local_opts
= FALSE
;
12087 memset (spec2
, 0, sizeof (spec2
));
12089 /* FIXME: Move this function to mini.c */
12090 stacktypes
['i'] = STACK_PTR
;
12091 stacktypes
['l'] = STACK_I8
;
12092 stacktypes
['f'] = STACK_R8
;
12093 #ifdef MONO_ARCH_SIMD_INTRINSICS
12094 stacktypes
['x'] = STACK_VTYPE
;
12097 #if SIZEOF_REGISTER == 4
12098 /* Create MonoInsts for longs */
12099 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12100 MonoInst
*ins
= cfg
->varinfo
[i
];
12102 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
12103 switch (ins
->type
) {
12108 if (ins
->type
== STACK_R8
&& !COMPILE_SOFT_FLOAT (cfg
))
12111 g_assert (ins
->opcode
== OP_REGOFFSET
);
12113 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
12115 tree
->opcode
= OP_REGOFFSET
;
12116 tree
->inst_basereg
= ins
->inst_basereg
;
12117 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
12119 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
12121 tree
->opcode
= OP_REGOFFSET
;
12122 tree
->inst_basereg
= ins
->inst_basereg
;
12123 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
12133 if (cfg
->compute_gc_maps
) {
12134 /* registers need liveness info even for !non refs */
12135 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12136 MonoInst
*ins
= cfg
->varinfo
[i
];
12138 if (ins
->opcode
== OP_REGVAR
)
12139 ins
->flags
|= MONO_INST_GC_TRACK
;
12143 /* FIXME: widening and truncation */
12146 * As an optimization, when a variable allocated to the stack is first loaded into
12147 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12148 * the variable again.
12150 orig_next_vreg
= cfg
->next_vreg
;
12151 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
12152 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
12156 * These arrays contain the first and last instructions accessing a given
12158 * Since we emit bblocks in the same order we process them here, and we
12159 * don't split live ranges, these will precisely describe the live range of
12160 * the variable, i.e. the instruction range where a valid value can be found
12161 * in the variables location.
12162 * The live range is computed using the liveness info computed by the liveness pass.
12163 * We can't use vmv->range, since that is an abstract live range, and we need
12164 * one which is instruction precise.
12165 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12167 /* FIXME: Only do this if debugging info is requested */
12168 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
12169 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
12170 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
12171 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
12173 /* Add spill loads/stores */
12174 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
12177 if (cfg
->verbose_level
> 2)
12178 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
12180 /* Clear vreg_to_lvreg array */
12181 for (i
= 0; i
< lvregs_len
; i
++)
12182 vreg_to_lvreg
[lvregs
[i
]] = 0;
12186 MONO_BB_FOR_EACH_INS (bb
, ins
) {
12187 const char *spec
= INS_INFO (ins
->opcode
);
12188 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
12189 gboolean store
, no_lvreg
;
12190 int sregs
[MONO_MAX_SRC_REGS
];
12192 if (G_UNLIKELY (cfg
->verbose_level
> 2))
12193 mono_print_ins (ins
);
12195 if (ins
->opcode
== OP_NOP
)
12199 * We handle LDADDR here as well, since it can only be decomposed
12200 * when variable addresses are known.
12202 if (ins
->opcode
== OP_LDADDR
) {
12203 MonoInst
*var
= ins
->inst_p0
;
12205 if (var
->opcode
== OP_VTARG_ADDR
) {
12206 /* Happens on SPARC/S390 where vtypes are passed by reference */
12207 MonoInst
*vtaddr
= var
->inst_left
;
12208 if (vtaddr
->opcode
== OP_REGVAR
) {
12209 ins
->opcode
= OP_MOVE
;
12210 ins
->sreg1
= vtaddr
->dreg
;
12212 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
12213 ins
->opcode
= OP_LOAD_MEMBASE
;
12214 ins
->inst_basereg
= vtaddr
->inst_basereg
;
12215 ins
->inst_offset
= vtaddr
->inst_offset
;
12219 g_assert (var
->opcode
== OP_REGOFFSET
);
12221 ins
->opcode
= OP_ADD_IMM
;
12222 ins
->sreg1
= var
->inst_basereg
;
12223 ins
->inst_imm
= var
->inst_offset
;
12226 *need_local_opts
= TRUE
;
12227 spec
= INS_INFO (ins
->opcode
);
12230 if (ins
->opcode
< MONO_CEE_LAST
) {
12231 mono_print_ins (ins
);
12232 g_assert_not_reached ();
12236 * Store opcodes have destbasereg in the dreg, but in reality, it is an
12240 if (MONO_IS_STORE_MEMBASE (ins
)) {
12241 tmp_reg
= ins
->dreg
;
12242 ins
->dreg
= ins
->sreg2
;
12243 ins
->sreg2
= tmp_reg
;
12246 spec2
[MONO_INST_DEST
] = ' ';
12247 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
12248 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
12249 spec2
[MONO_INST_SRC3
] = ' ';
12251 } else if (MONO_IS_STORE_MEMINDEX (ins
))
12252 g_assert_not_reached ();
12257 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
12258 printf ("\t %.3s %d", spec
, ins
->dreg
);
12259 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
12260 for (srcindex
= 0; srcindex
< num_sregs
; ++srcindex
)
12261 printf (" %d", sregs
[srcindex
]);
12268 regtype
= spec
[MONO_INST_DEST
];
12269 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
12272 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
12273 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
12274 MonoInst
*store_ins
;
12276 MonoInst
*def_ins
= ins
;
12277 int dreg
= ins
->dreg
; /* The original vreg */
12279 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
12281 if (var
->opcode
== OP_REGVAR
) {
12282 ins
->dreg
= var
->dreg
;
12283 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
12285 * Instead of emitting a load+store, use a _membase opcode.
12287 g_assert (var
->opcode
== OP_REGOFFSET
);
12288 if (ins
->opcode
== OP_MOVE
) {
12292 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
12293 ins
->inst_basereg
= var
->inst_basereg
;
12294 ins
->inst_offset
= var
->inst_offset
;
12297 spec
= INS_INFO (ins
->opcode
);
12301 g_assert (var
->opcode
== OP_REGOFFSET
);
12303 prev_dreg
= ins
->dreg
;
12305 /* Invalidate any previous lvreg for this vreg */
12306 vreg_to_lvreg
[ins
->dreg
] = 0;
12310 if (COMPILE_SOFT_FLOAT (cfg
) && store_opcode
== OP_STORER8_MEMBASE_REG
) {
12312 store_opcode
= OP_STOREI8_MEMBASE_REG
;
12315 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
12317 if (regtype
== 'l') {
12318 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
12319 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12320 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
12321 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12322 def_ins
= store_ins
;
12325 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
12327 /* Try to fuse the store into the instruction itself */
12328 /* FIXME: Add more instructions */
12329 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
12330 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
12331 ins
->inst_imm
= ins
->inst_c0
;
12332 ins
->inst_destbasereg
= var
->inst_basereg
;
12333 ins
->inst_offset
= var
->inst_offset
;
12334 spec
= INS_INFO (ins
->opcode
);
12335 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
12336 ins
->opcode
= store_opcode
;
12337 ins
->inst_destbasereg
= var
->inst_basereg
;
12338 ins
->inst_offset
= var
->inst_offset
;
12342 tmp_reg
= ins
->dreg
;
12343 ins
->dreg
= ins
->sreg2
;
12344 ins
->sreg2
= tmp_reg
;
12347 spec2
[MONO_INST_DEST
] = ' ';
12348 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
12349 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
12350 spec2
[MONO_INST_SRC3
] = ' ';
12352 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
12353 // FIXME: The backends expect the base reg to be in inst_basereg
12354 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
12356 ins
->inst_basereg
= var
->inst_basereg
;
12357 ins
->inst_offset
= var
->inst_offset
;
12358 spec
= INS_INFO (ins
->opcode
);
12360 /* printf ("INS: "); mono_print_ins (ins); */
12361 /* Create a store instruction */
12362 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
12364 /* Insert it after the instruction */
12365 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12367 def_ins
= store_ins
;
12370 * We can't assign ins->dreg to var->dreg here, since the
12371 * sregs could use it. So set a flag, and do it after
12374 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
12375 dest_has_lvreg
= TRUE
;
12380 if (def_ins
&& !live_range_start
[dreg
]) {
12381 live_range_start
[dreg
] = def_ins
;
12382 live_range_start_bb
[dreg
] = bb
;
12385 if (cfg
->compute_gc_maps
&& def_ins
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12388 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_DEF
);
12389 tmp
->inst_c1
= dreg
;
12390 mono_bblock_insert_after_ins (bb
, def_ins
, tmp
);
12397 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
12398 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
12399 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
12400 sreg
= sregs
[srcindex
];
12402 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
12403 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
12404 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
12405 MonoInst
*use_ins
= ins
;
12406 MonoInst
*load_ins
;
12407 guint32 load_opcode
;
12409 if (var
->opcode
== OP_REGVAR
) {
12410 sregs
[srcindex
] = var
->dreg
;
12411 //mono_inst_set_src_registers (ins, sregs);
12412 live_range_end
[sreg
] = use_ins
;
12413 live_range_end_bb
[sreg
] = bb
;
12415 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12418 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
12419 /* var->dreg is a hreg */
12420 tmp
->inst_c1
= sreg
;
12421 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
12427 g_assert (var
->opcode
== OP_REGOFFSET
);
12429 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
12431 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
12433 if (vreg_to_lvreg
[sreg
]) {
12434 g_assert (vreg_to_lvreg
[sreg
] != -1);
12436 /* The variable is already loaded to an lvreg */
12437 if (G_UNLIKELY (cfg
->verbose_level
> 2))
12438 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
12439 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
12440 //mono_inst_set_src_registers (ins, sregs);
12444 /* Try to fuse the load into the instruction */
12445 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
12446 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
12447 sregs
[0] = var
->inst_basereg
;
12448 //mono_inst_set_src_registers (ins, sregs);
12449 ins
->inst_offset
= var
->inst_offset
;
12450 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
12451 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
12452 sregs
[1] = var
->inst_basereg
;
12453 //mono_inst_set_src_registers (ins, sregs);
12454 ins
->inst_offset
= var
->inst_offset
;
12456 if (MONO_IS_REAL_MOVE (ins
)) {
12457 ins
->opcode
= OP_NOP
;
12460 //printf ("%d ", srcindex); mono_print_ins (ins);
12462 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
12464 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
12465 if (var
->dreg
== prev_dreg
) {
12467 * sreg refers to the value loaded by the load
12468 * emitted below, but we need to use ins->dreg
12469 * since it refers to the store emitted earlier.
12473 g_assert (sreg
!= -1);
12474 vreg_to_lvreg
[var
->dreg
] = sreg
;
12475 g_assert (lvregs_len
< 1024);
12476 lvregs
[lvregs_len
++] = var
->dreg
;
12480 sregs
[srcindex
] = sreg
;
12481 //mono_inst_set_src_registers (ins, sregs);
12483 if (regtype
== 'l') {
12484 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
12485 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12486 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
12487 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12488 use_ins
= load_ins
;
12491 #if SIZEOF_REGISTER == 4
12492 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
12494 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
12495 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12496 use_ins
= load_ins
;
12500 if (var
->dreg
< orig_next_vreg
) {
12501 live_range_end
[var
->dreg
] = use_ins
;
12502 live_range_end_bb
[var
->dreg
] = bb
;
12505 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12508 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
12509 tmp
->inst_c1
= var
->dreg
;
12510 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
12514 mono_inst_set_src_registers (ins
, sregs
);
12516 if (dest_has_lvreg
) {
12517 g_assert (ins
->dreg
!= -1);
12518 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
12519 g_assert (lvregs_len
< 1024);
12520 lvregs
[lvregs_len
++] = prev_dreg
;
12521 dest_has_lvreg
= FALSE
;
12525 tmp_reg
= ins
->dreg
;
12526 ins
->dreg
= ins
->sreg2
;
12527 ins
->sreg2
= tmp_reg
;
12530 if (MONO_IS_CALL (ins
)) {
12531 /* Clear vreg_to_lvreg array */
12532 for (i
= 0; i
< lvregs_len
; i
++)
12533 vreg_to_lvreg
[lvregs
[i
]] = 0;
12535 } else if (ins
->opcode
== OP_NOP
) {
12537 MONO_INST_NULLIFY_SREGS (ins
);
12540 if (cfg
->verbose_level
> 2)
12541 mono_print_ins_index (1, ins
);
12544 /* Extend the live range based on the liveness info */
12545 if (cfg
->compute_precise_live_ranges
&& bb
->live_out_set
&& bb
->code
) {
12546 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12547 MonoMethodVar
*vi
= MONO_VARINFO (cfg
, i
);
12549 if (vreg_is_volatile (cfg
, vi
->vreg
))
12550 /* The liveness info is incomplete */
12553 if (mono_bitset_test_fast (bb
->live_in_set
, i
) && !live_range_start
[vi
->vreg
]) {
12554 /* Live from at least the first ins of this bb */
12555 live_range_start
[vi
->vreg
] = bb
->code
;
12556 live_range_start_bb
[vi
->vreg
] = bb
;
12559 if (mono_bitset_test_fast (bb
->live_out_set
, i
)) {
12560 /* Live at least until the last ins of this bb */
12561 live_range_end
[vi
->vreg
] = bb
->last_ins
;
12562 live_range_end_bb
[vi
->vreg
] = bb
;
12568 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12570 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12571 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12573 if (cfg
->compute_precise_live_ranges
&& cfg
->comp_done
& MONO_COMP_LIVENESS
) {
12574 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
12575 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
12578 if (live_range_start
[vreg
]) {
12579 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
12581 ins
->inst_c1
= vreg
;
12582 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
12584 if (live_range_end
[vreg
]) {
12585 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
12587 ins
->inst_c1
= vreg
;
12588 if (live_range_end
[vreg
] == live_range_end_bb
[vreg
]->last_ins
)
12589 mono_add_ins_to_end (live_range_end_bb
[vreg
], ins
);
12591 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
12597 g_free (live_range_start
);
12598 g_free (live_range_end
);
12599 g_free (live_range_start_bb
);
12600 g_free (live_range_end_bb
);
12605 * - use 'iadd' instead of 'int_add'
12606 * - handling ovf opcodes: decompose in method_to_ir.
12607 * - unify iregs/fregs
12608 * -> partly done, the missing parts are:
12609 * - a more complete unification would involve unifying the hregs as well, so
12610 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12611 * would no longer map to the machine hregs, so the code generators would need to
12612 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12613 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12614 * fp/non-fp branches speeds it up by about 15%.
12615 * - use sext/zext opcodes instead of shifts
12617 * - get rid of TEMPLOADs if possible and use vregs instead
12618 * - clean up usage of OP_P/OP_ opcodes
12619 * - cleanup usage of DUMMY_USE
12620 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12622 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12623 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12624 * - make sure handle_stack_args () is called before the branch is emitted
12625 * - when the new IR is done, get rid of all unused stuff
12626 * - COMPARE/BEQ as separate instructions or unify them ?
12627 * - keeping them separate allows specialized compare instructions like
12628 * compare_imm, compare_membase
12629 * - most back ends unify fp compare+branch, fp compare+ceq
12630 * - integrate mono_save_args into inline_method
12631 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12632 * - handle long shift opts on 32 bit platforms somehow: they require
12633 * 3 sregs (2 for arg1 and 1 for arg2)
12634 * - make byref a 'normal' type.
12635 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12636 * variable if needed.
12637 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12638 * like inline_method.
12639 * - remove inlining restrictions
12640 * - fix LNEG and enable cfold of INEG
12641 * - generalize x86 optimizations like ldelema as a peephole optimization
12642 * - add store_mem_imm for amd64
12643 * - optimize the loading of the interruption flag in the managed->native wrappers
12644 * - avoid special handling of OP_NOP in passes
12645 * - move code inserting instructions into one function/macro.
12646 * - try a coalescing phase after liveness analysis
12647 * - add float -> vreg conversion + local optimizations on !x86
12648 * - figure out how to handle decomposed branches during optimizations, ie.
12649 * compare+branch, op_jump_table+op_br etc.
12650 * - promote RuntimeXHandles to vregs
12651 * - vtype cleanups:
12652 * - add a NEW_VARLOADA_VREG macro
12653 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12654 * accessing vtype fields.
12655 * - get rid of I8CONST on 64 bit platforms
12656 * - dealing with the increase in code size due to branches created during opcode
12658 * - use extended basic blocks
12659 * - all parts of the JIT
12660 * - handle_global_vregs () && local regalloc
12661 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12662 * - sources of increase in code size:
12665 * - isinst and castclass
12666 * - lvregs not allocated to global registers even if used multiple times
12667 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12669 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12670 * - add all micro optimizations from the old JIT
12671 * - put tree optimizations into the deadce pass
12672 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12673 * specific function.
12674 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12675 * fcompare + branchCC.
12676 * - create a helper function for allocating a stack slot, taking into account
12677 * MONO_CFG_HAS_SPILLUP.
12679 * - merge the ia64 switch changes.
12680 * - optimize mono_regstate2_alloc_int/float.
12681 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12682 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12683 * parts of the tree could be separated by other instructions, killing the tree
12684 * arguments, or stores killing loads etc. Also, should we fold loads into other
12685 * instructions if the result of the load is used multiple times ?
12686 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12687 * - LAST MERGE: 108395.
12688 * - when returning vtypes in registers, generate IR and append it to the end of the
12689 * last bb instead of doing it in the epilog.
12690 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12698 - When to decompose opcodes:
12699 - earlier: this makes some optimizations hard to implement, since the low level IR
12700 no longer contains the necessary information. But it is easier to do.
12701 - later: harder to implement, enables more optimizations.
12702 - Branches inside bblocks:
12703 - created when decomposing complex opcodes.
12704 - branches to another bblock: harmless, but not tracked by the branch
12705 optimizations, so need to branch to a label at the start of the bblock.
12706 - branches to inside the same bblock: very problematic, trips up the local
12707 reg allocator. Can be fixed by splitting the current bblock, but that is a
12708 complex operation, since some local vregs can become global vregs etc.
12709 - Local/global vregs:
12710 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12711 local register allocator.
12712 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12713 structure, created by mono_create_var (). Assigned to hregs or the stack by
12714 the global register allocator.
12715 - When to do optimizations like alu->alu_imm:
12716 - earlier -> saves work later on since the IR will be smaller/simpler
12717 - later -> can work on more instructions
12718 - Handling of valuetypes:
12719 - When a vtype is pushed on the stack, a new temporary is created, an
12720 instruction computing its address (LDADDR) is emitted and pushed on
12721 the stack. Need to optimize cases when the vtype is used immediately as in
12722 argument passing, stloc etc.
12723 - Instead of the to_end stuff in the old JIT, simply call the function handling
12724 the values on the stack before emitting the last instruction of the bb.
12727 #endif /* DISABLE_JIT */