2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
/* Heuristic weight assigned to a branch when costing code for the inliner. */
61 #define BRANCH_COST 100
/* Upper size bound for a callee to be considered for inlining.
 * NOTE(review): units (IL bytes vs. instruction count) are not visible in
 * this listing — confirm against the full source. */
62 #define INLINE_LENGTH_LIMIT 20
/* Record an inlining failure: only acts when we are currently compiling an
 * inlined callee (cfg->method != method) that is not a wrapper.
 * NOTE(review): the listing truncates the macro before the action and the
 * "} while (0)" terminator — confirm against the full source. */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of the current method if an exception has been recorded on the
 * CFG. NOTE(review): macro body is truncated in this listing — the statement
 * executed on the condition and the terminator are not visible. */
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on the CFG, with a message naming both the
 * inaccessible callee (cil_method) and the calling method, then jump to the
 * shared exception_exit label. The temporary name strings are freed before
 * the jump. */
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
/* Record a FieldAccessException on the CFG, naming the inaccessible field
 * and the calling method, then jump to the shared exception_exit label.
 * Mirrors METHOD_ACCESS_FAILURE above. */
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
/* Abort generic code sharing for the current method: when compiling with a
 * generic sharing context, optionally trace the failing opcode (verbosity
 * > 2), record MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to
 * exception_exit so the method is recompiled without sharing. */
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whether 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode
);
102 static int stind_to_store_membase (int opcode
);
104 int mono_op_to_op_imm (int opcode
);
105 int mono_op_to_op_imm_noemul (int opcode
);
107 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
108 void mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
);
109 void mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
);
111 /* helper methods signature */
112 extern MonoMethodSignature
*helper_sig_class_init_trampoline
;
113 extern MonoMethodSignature
*helper_sig_domain_get
;
114 extern MonoMethodSignature
*helper_sig_generic_class_init_trampoline
;
115 extern MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
;
116 extern MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
;
119 * Instruction metadata
124 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
130 #if SIZEOF_REGISTER == 8
135 /* keep in sync with the enum in mini.h */
138 #include "mini-ops.h"
142 extern GHashTable
*jit_icall_name_hash
;
/* Initialize a variable-info record: the first-use position starts out at
 * the sentinel bblock id 0xffff, i.e. "not used yet".
 * NOTE(review): the macro is truncated in this listing — remaining
 * initialization (and use of the 'id' parameter) is not visible. */
144 #define MONO_INIT_VARINFO(vi,id) do { \
145 (vi)->range.first_use.pos.bid = 0xffff; \
151 mono_alloc_ireg (MonoCompile
*cfg
)
153 return alloc_ireg (cfg
);
157 mono_alloc_freg (MonoCompile
*cfg
)
159 return alloc_freg (cfg
);
163 mono_alloc_preg (MonoCompile
*cfg
)
165 return alloc_preg (cfg
);
169 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
171 return alloc_dreg (cfg
, stack_type
);
175 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
181 switch (type
->type
) {
184 case MONO_TYPE_BOOLEAN
:
196 case MONO_TYPE_FNPTR
:
198 case MONO_TYPE_CLASS
:
199 case MONO_TYPE_STRING
:
200 case MONO_TYPE_OBJECT
:
201 case MONO_TYPE_SZARRAY
:
202 case MONO_TYPE_ARRAY
:
206 #if SIZEOF_REGISTER == 8
215 case MONO_TYPE_VALUETYPE
:
216 if (type
->data
.klass
->enumtype
) {
217 type
= mono_class_enum_basetype (type
->data
.klass
);
220 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
223 case MONO_TYPE_TYPEDBYREF
:
225 case MONO_TYPE_GENERICINST
:
226 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
230 g_assert (cfg
->generic_sharing_context
);
233 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
239 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
244 printf ("\n%s %d: [IN: ", msg
, bb
->block_num
);
245 for (i
= 0; i
< bb
->in_count
; ++i
)
246 printf (" BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
248 for (i
= 0; i
< bb
->out_count
; ++i
)
249 printf (" BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
251 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
252 mono_print_ins_index (-1, tree
);
256 * Can't put this at the beginning, since other files reference stuff from this
261 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
263 #define GET_BBLOCK(cfg,tblock,ip) do { \
264 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
266 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
267 NEW_BBLOCK (cfg, (tblock)); \
268 (tblock)->cil_code = (ip); \
269 ADD_BBLOCK (cfg, (tblock)); \
273 #if defined(__i386__) || defined(__x86_64__)
274 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
275 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
276 (dest)->dreg = alloc_preg ((cfg)); \
277 (dest)->sreg1 = (sr1); \
278 (dest)->sreg2 = (sr2); \
279 (dest)->inst_imm = (imm); \
280 (dest)->backend.shift_amount = (shift); \
281 MONO_ADD_INS ((cfg)->cbb, (dest)); \
285 #if SIZEOF_REGISTER == 8
286 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
287 /* FIXME: Need to add many more cases */ \
288 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
290 int dr = alloc_preg (cfg); \
291 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
292 (ins)->sreg2 = widen->dreg; \
296 #define ADD_WIDEN_OP(ins, arg1, arg2)
299 #define ADD_BINOP(op) do { \
300 MONO_INST_NEW (cfg, ins, (op)); \
302 ins->sreg1 = sp [0]->dreg; \
303 ins->sreg2 = sp [1]->dreg; \
304 type_from_op (ins, sp [0], sp [1]); \
306 /* Have to insert a widening op */ \
307 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
308 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
309 MONO_ADD_INS ((cfg)->cbb, (ins)); \
311 mono_decompose_opcode ((cfg), (ins)); \
314 #define ADD_UNOP(op) do { \
315 MONO_INST_NEW (cfg, ins, (op)); \
317 ins->sreg1 = sp [0]->dreg; \
318 type_from_op (ins, sp [0], NULL); \
320 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
321 MONO_ADD_INS ((cfg)->cbb, (ins)); \
323 mono_decompose_opcode (cfg, ins); \
326 #define ADD_BINCOND(next_block) do { \
329 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
330 cmp->sreg1 = sp [0]->dreg; \
331 cmp->sreg2 = sp [1]->dreg; \
332 type_from_op (cmp, sp [0], sp [1]); \
334 type_from_op (ins, sp [0], sp [1]); \
335 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
336 GET_BBLOCK (cfg, tblock, target); \
337 link_bblock (cfg, bblock, tblock); \
338 ins->inst_true_bb = tblock; \
339 if ((next_block)) { \
340 link_bblock (cfg, bblock, (next_block)); \
341 ins->inst_false_bb = (next_block); \
342 start_new_bblock = 1; \
344 GET_BBLOCK (cfg, tblock, ip); \
345 link_bblock (cfg, bblock, tblock); \
346 ins->inst_false_bb = tblock; \
347 start_new_bblock = 2; \
349 if (sp != stack_start) { \
350 handle_stack_args (cfg, stack_start, sp - stack_start); \
351 CHECK_UNVERIFIABLE (cfg); \
353 MONO_ADD_INS (bblock, cmp); \
354 MONO_ADD_INS (bblock, ins); \
358 * link_bblock: Links two basic blocks
360 * links two basic blocks in the control flow graph, the 'from'
361 * argument is the starting block and the 'to' argument is the block
362 * the control flow transfers to after 'from'.
365 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
367 MonoBasicBlock
**newa
;
371 if (from
->cil_code
) {
373 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
375 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
378 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
380 printf ("edge from entry to exit\n");
385 for (i
= 0; i
< from
->out_count
; ++i
) {
386 if (to
== from
->out_bb
[i
]) {
392 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
393 for (i
= 0; i
< from
->out_count
; ++i
) {
394 newa
[i
] = from
->out_bb
[i
];
402 for (i
= 0; i
< to
->in_count
; ++i
) {
403 if (from
== to
->in_bb
[i
]) {
409 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
410 for (i
= 0; i
< to
->in_count
; ++i
) {
411 newa
[i
] = to
->in_bb
[i
];
420 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
422 link_bblock (cfg
, from
, to
);
426 * mono_find_block_region:
428 * We mark each basic block with a region ID. We use that to avoid BB
429 * optimizations when blocks are in different regions.
432 * A region token that encodes where this region is, and information
433 * about the clause owner for this block.
435 * The region encodes the try/catch/filter clause that owns this block
436 * as well as the type. -1 is a special value that represents a block
437 * that is in none of try/catch/filter.
440 mono_find_block_region (MonoCompile
*cfg
, int offset
)
442 MonoMethod
*method
= cfg
->method
;
443 MonoMethodHeader
*header
= mono_method_get_header (method
);
444 MonoExceptionClause
*clause
;
447 /* first search for handlers and filters */
448 for (i
= 0; i
< header
->num_clauses
; ++i
) {
449 clause
= &header
->clauses
[i
];
450 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
451 (offset
< (clause
->handler_offset
)))
452 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
454 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
455 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
456 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
457 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
458 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
460 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
464 /* search the try blocks */
465 for (i
= 0; i
< header
->num_clauses
; ++i
) {
466 clause
= &header
->clauses
[i
];
467 if (MONO_OFFSET_IN_CLAUSE (clause
, offset
))
468 return ((i
+ 1) << 8) | clause
->flags
;
475 mono_find_final_block (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *target
, int type
)
477 MonoMethod
*method
= cfg
->method
;
478 MonoMethodHeader
*header
= mono_method_get_header (method
);
479 MonoExceptionClause
*clause
;
480 MonoBasicBlock
*handler
;
484 for (i
= 0; i
< header
->num_clauses
; ++i
) {
485 clause
= &header
->clauses
[i
];
486 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
487 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
488 if (clause
->flags
== type
) {
489 handler
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
491 res
= g_list_append (res
, handler
);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer variable registered for REGION, creating a new
 * int-typed OP_LOCAL and registering it in cfg->spvars on first use.
 * NOTE(review): the early return taken when the lookup succeeds is not
 * visible in this truncated listing — confirm against the full source.
 */
499 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
/* Reuse an existing spvar for this region if one was already created. */
503 var
= g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
/* Not found: create a fresh int-typed local for this region. */
507 var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
508 /* prevent it from being register allocated */
509 var
->flags
|= MONO_INST_INDIRECT
;
/* Remember it so later lookups for the same region reuse this variable. */
511 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
515 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
517 return g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception variable for IL offset OFFSET, creating a new
 * object-typed OP_LOCAL and registering it in cfg->exvars on first use.
 * Mirrors mono_create_spvar_for_region () above.
 * NOTE(review): the early return taken when the lookup succeeds is not
 * visible in this truncated listing — confirm against the full source.
 */
521 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
/* Reuse an existing exvar for this offset if one was already created. */
525 var
= g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
/* Not found: create a fresh object-typed local to hold the exception. */
529 var
= mono_compile_create_var (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
);
530 /* prevent it from being register allocated */
531 var
->flags
|= MONO_INST_INDIRECT
;
/* Remember it so later lookups for the same offset reuse this variable. */
533 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
539 * Returns the type used in the eval stack when @type is loaded.
540 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
543 type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
547 inst
->klass
= klass
= mono_class_from_mono_type (type
);
549 inst
->type
= STACK_MP
;
554 switch (type
->type
) {
556 inst
->type
= STACK_INV
;
560 case MONO_TYPE_BOOLEAN
:
566 inst
->type
= STACK_I4
;
571 case MONO_TYPE_FNPTR
:
572 inst
->type
= STACK_PTR
;
574 case MONO_TYPE_CLASS
:
575 case MONO_TYPE_STRING
:
576 case MONO_TYPE_OBJECT
:
577 case MONO_TYPE_SZARRAY
:
578 case MONO_TYPE_ARRAY
:
579 inst
->type
= STACK_OBJ
;
583 inst
->type
= STACK_I8
;
587 inst
->type
= STACK_R8
;
589 case MONO_TYPE_VALUETYPE
:
590 if (type
->data
.klass
->enumtype
) {
591 type
= mono_class_enum_basetype (type
->data
.klass
);
595 inst
->type
= STACK_VTYPE
;
598 case MONO_TYPE_TYPEDBYREF
:
599 inst
->klass
= mono_defaults
.typed_reference_class
;
600 inst
->type
= STACK_VTYPE
;
602 case MONO_TYPE_GENERICINST
:
603 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
606 case MONO_TYPE_MVAR
:
607 /* FIXME: all the arguments must be references for now,
608 * later look inside cfg and see if the arg num is
611 g_assert (cfg
->generic_sharing_context
);
612 inst
->type
= STACK_OBJ
;
615 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
620 * The following tables are used to quickly validate the IL code in type_from_op ().
623 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
624 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
625 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
626 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
627 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
628 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
629 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
630 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
631 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
636 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
639 /* reduce the size of this table */
641 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
642 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
643 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
644 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
645 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
646 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
647 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
648 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
649 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
653 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
654 /* Inv i L p F & O vt */
656 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
657 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
658 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
659 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
660 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
661 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
662 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
665 /* reduce the size of this table */
667 shift_table
[STACK_MAX
] [STACK_MAX
] = {
668 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
669 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
670 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
671 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
672 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
673 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
674 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
675 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
679 * Tables to map from the non-specific opcode to the matching
680 * type-specific opcode.
682 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
684 binops_op_map
[STACK_MAX
] = {
685 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
688 /* handles from CEE_NEG to CEE_CONV_U8 */
690 unops_op_map
[STACK_MAX
] = {
691 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
694 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
696 ovfops_op_map
[STACK_MAX
] = {
697 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
700 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
702 ovf2ops_op_map
[STACK_MAX
] = {
703 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
706 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
708 ovf3ops_op_map
[STACK_MAX
] = {
709 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
712 /* handles from CEE_BEQ to CEE_BLT_UN */
714 beqops_op_map
[STACK_MAX
] = {
715 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
718 /* handles from CEE_CEQ to CEE_CLT_UN */
720 ceqops_op_map
[STACK_MAX
] = {
721 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
725 * Sets ins->type (the type on the eval stack) according to the
726 * type of the opcode and the arguments to it.
727 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
729 * FIXME: this function sets ins->type unconditionally in some cases, but
730 * it should set it to invalid for some types (a conv.x on an object)
733 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
735 switch (ins
->opcode
) {
742 /* FIXME: check unverifiable args for STACK_MP */
743 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
744 ins
->opcode
+= binops_op_map
[ins
->type
];
751 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
752 ins
->opcode
+= binops_op_map
[ins
->type
];
757 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
758 ins
->opcode
+= binops_op_map
[ins
->type
];
763 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
764 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
765 ins
->opcode
= OP_LCOMPARE
;
766 else if (src1
->type
== STACK_R8
)
767 ins
->opcode
= OP_FCOMPARE
;
769 ins
->opcode
= OP_ICOMPARE
;
771 case OP_ICOMPARE_IMM
:
772 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
773 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
774 ins
->opcode
= OP_LCOMPARE_IMM
;
786 ins
->opcode
+= beqops_op_map
[src1
->type
];
789 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
790 ins
->opcode
+= ceqops_op_map
[src1
->type
];
796 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
797 ins
->opcode
+= ceqops_op_map
[src1
->type
];
801 ins
->type
= neg_table
[src1
->type
];
802 ins
->opcode
+= unops_op_map
[ins
->type
];
805 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
806 ins
->type
= src1
->type
;
808 ins
->type
= STACK_INV
;
809 ins
->opcode
+= unops_op_map
[ins
->type
];
815 ins
->type
= STACK_I4
;
816 ins
->opcode
+= unops_op_map
[src1
->type
];
819 ins
->type
= STACK_R8
;
820 switch (src1
->type
) {
823 ins
->opcode
= OP_ICONV_TO_R_UN
;
826 ins
->opcode
= OP_LCONV_TO_R_UN
;
830 case CEE_CONV_OVF_I1
:
831 case CEE_CONV_OVF_U1
:
832 case CEE_CONV_OVF_I2
:
833 case CEE_CONV_OVF_U2
:
834 case CEE_CONV_OVF_I4
:
835 case CEE_CONV_OVF_U4
:
836 ins
->type
= STACK_I4
;
837 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
839 case CEE_CONV_OVF_I_UN
:
840 case CEE_CONV_OVF_U_UN
:
841 ins
->type
= STACK_PTR
;
842 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
844 case CEE_CONV_OVF_I1_UN
:
845 case CEE_CONV_OVF_I2_UN
:
846 case CEE_CONV_OVF_I4_UN
:
847 case CEE_CONV_OVF_U1_UN
:
848 case CEE_CONV_OVF_U2_UN
:
849 case CEE_CONV_OVF_U4_UN
:
850 ins
->type
= STACK_I4
;
851 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
854 ins
->type
= STACK_PTR
;
855 switch (src1
->type
) {
857 ins
->opcode
= OP_ICONV_TO_U
;
861 #if SIZEOF_REGISTER == 8
862 ins
->opcode
= OP_LCONV_TO_U
;
864 ins
->opcode
= OP_MOVE
;
868 ins
->opcode
= OP_LCONV_TO_U
;
871 ins
->opcode
= OP_FCONV_TO_U
;
877 ins
->type
= STACK_I8
;
878 ins
->opcode
+= unops_op_map
[src1
->type
];
880 case CEE_CONV_OVF_I8
:
881 case CEE_CONV_OVF_U8
:
882 ins
->type
= STACK_I8
;
883 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
885 case CEE_CONV_OVF_U8_UN
:
886 case CEE_CONV_OVF_I8_UN
:
887 ins
->type
= STACK_I8
;
888 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
892 ins
->type
= STACK_R8
;
893 ins
->opcode
+= unops_op_map
[src1
->type
];
896 ins
->type
= STACK_R8
;
900 ins
->type
= STACK_I4
;
901 ins
->opcode
+= ovfops_op_map
[src1
->type
];
906 ins
->type
= STACK_PTR
;
907 ins
->opcode
+= ovfops_op_map
[src1
->type
];
915 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
916 ins
->opcode
+= ovfops_op_map
[src1
->type
];
917 if (ins
->type
== STACK_R8
)
918 ins
->type
= STACK_INV
;
920 case OP_LOAD_MEMBASE
:
921 ins
->type
= STACK_PTR
;
923 case OP_LOADI1_MEMBASE
:
924 case OP_LOADU1_MEMBASE
:
925 case OP_LOADI2_MEMBASE
:
926 case OP_LOADU2_MEMBASE
:
927 case OP_LOADI4_MEMBASE
:
928 case OP_LOADU4_MEMBASE
:
929 ins
->type
= STACK_PTR
;
931 case OP_LOADI8_MEMBASE
:
932 ins
->type
= STACK_I8
;
934 case OP_LOADR4_MEMBASE
:
935 case OP_LOADR8_MEMBASE
:
936 ins
->type
= STACK_R8
;
939 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
943 if (ins
->type
== STACK_MP
)
944 ins
->klass
= mono_defaults
.object_class
;
949 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
955 param_table
[STACK_MAX
] [STACK_MAX
] = {
960 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
964 switch (args
->type
) {
974 for (i
= 0; i
< sig
->param_count
; ++i
) {
975 switch (args
[i
].type
) {
979 if (!sig
->params
[i
]->byref
)
983 if (sig
->params
[i
]->byref
)
985 switch (sig
->params
[i
]->type
) {
986 case MONO_TYPE_CLASS
:
987 case MONO_TYPE_STRING
:
988 case MONO_TYPE_OBJECT
:
989 case MONO_TYPE_SZARRAY
:
990 case MONO_TYPE_ARRAY
:
997 if (sig
->params
[i
]->byref
)
999 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1008 /*if (!param_table [args [i].type] [sig->params [i]->type])
1016 * When we need a pointer to the current domain many times in a method, we
1017 * call mono_domain_get() once and we store the result in a local variable.
1018 * This function returns the variable that represents the MonoDomain*.
/*
 * mono_get_domainvar:
 *
 *   Return the local variable that caches the result of mono_domain_get ()
 * for this method, lazily creating an int-typed OP_LOCAL on first request
 * (see the explanatory comment above).
 */
1020 inline static MonoInst
*
1021 mono_get_domainvar (MonoCompile
*cfg
)
/* Lazily allocate the cache variable on first use. */
1023 if (!cfg
->domainvar
)
1024 cfg
->domainvar
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1025 return cfg
->domainvar
;
1029 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *
 *   Return the variable holding the Global Offset Table address for AOT
 * compilation, lazily creating it on first use. Only meaningful on
 * architectures defining MONO_ARCH_NEED_GOT_VAR and when compiling AOT.
 * NOTE(review): the returns taken on the non-AOT / non-GOT-arch paths are
 * not visible in this truncated listing — confirm against the full source.
 */
1032 inline static MonoInst
*
1033 mono_get_got_var (MonoCompile
*cfg
)
1035 #ifdef MONO_ARCH_NEED_GOT_VAR
/* The GOT var is only needed when compiling ahead-of-time. */
1036 if (!cfg
->compile_aot
)
/* Lazily allocate the int-typed local on first request. */
1038 if (!cfg
->got_var
) {
1039 cfg
->got_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1041 return cfg
->got_var
;
/*
 * mono_get_vtable_var:
 *
 *   Return the variable holding the runtime generic context / vtable for
 * generic-shared code, lazily creating it on first use. Only valid when a
 * generic sharing context is active (asserted below).
 */
1048 mono_get_vtable_var (MonoCompile
*cfg
)
1050 g_assert (cfg
->generic_sharing_context
);
/* Lazily allocate the rgctx variable on first request. */
1052 if (!cfg
->rgctx_var
) {
1053 cfg
->rgctx_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1054 /* force the var to be stack allocated */
1055 cfg
->rgctx_var
->flags
|= MONO_INST_INDIRECT
;
1058 return cfg
->rgctx_var
;
/*
 * type_from_stack_type:
 *
 *   Map the evaluation-stack type of INS back to a MonoType: the fixed
 * stack kinds map to the corresponding mono_defaults classes, while the
 * managed-pointer and value-type cases derive the type from ins->klass.
 * Unhandled stack types abort via g_error ().
 * NOTE(review): some case labels (e.g. the one guarding the this_arg
 * branch) are missing from this truncated listing — confirm against the
 * full source.
 */
1062 type_from_stack_type (MonoInst
*ins
) {
1063 switch (ins
->type
) {
1064 case STACK_I4
: return &mono_defaults
.int32_class
->byval_arg
;
1065 case STACK_I8
: return &mono_defaults
.int64_class
->byval_arg
;
1066 case STACK_PTR
: return &mono_defaults
.int_class
->byval_arg
;
1067 case STACK_R8
: return &mono_defaults
.double_class
->byval_arg
;
1069 return &ins
->klass
->this_arg
;
1070 case STACK_OBJ
: return &mono_defaults
.object_class
->byval_arg
;
1071 case STACK_VTYPE
: return &ins
->klass
->byval_arg
;
1073 g_error ("stack type %d to monotype not handled\n", ins
->type
);
1078 static G_GNUC_UNUSED
int
1079 type_to_stack_type (MonoType
*t
)
1081 switch (mono_type_get_underlying_type (t
)->type
) {
1084 case MONO_TYPE_BOOLEAN
:
1087 case MONO_TYPE_CHAR
:
1094 case MONO_TYPE_FNPTR
:
1096 case MONO_TYPE_CLASS
:
1097 case MONO_TYPE_STRING
:
1098 case MONO_TYPE_OBJECT
:
1099 case MONO_TYPE_SZARRAY
:
1100 case MONO_TYPE_ARRAY
:
1108 case MONO_TYPE_VALUETYPE
:
1109 case MONO_TYPE_TYPEDBYREF
:
1111 case MONO_TYPE_GENERICINST
:
1112 if (mono_type_generic_inst_is_valuetype (t
))
1118 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Map a CIL array-access opcode (the CEE_LDELEM / CEE_STELEM family) to
 * the MonoClass of the element the opcode reads or writes; unknown opcodes
 * abort via g_assert_not_reached ().
 * NOTE(review): most case labels are missing from this truncated listing —
 * only the returned element classes and the REF cases are visible; confirm
 * the opcode-to-class pairing against the full source.
 */
1125 array_access_to_klass (int opcode
)
1129 return mono_defaults
.byte_class
;
1131 return mono_defaults
.uint16_class
;
1134 return mono_defaults
.int_class
;
1137 return mono_defaults
.sbyte_class
;
1140 return mono_defaults
.int16_class
;
1143 return mono_defaults
.int32_class
;
1145 return mono_defaults
.uint32_class
;
1148 return mono_defaults
.int64_class
;
1151 return mono_defaults
.single_class
;
1154 return mono_defaults
.double_class
;
1155 case CEE_LDELEM_REF
:
1156 case CEE_STELEM_REF
:
1157 return mono_defaults
.object_class
;
1159 g_assert_not_reached ();
1165 * We try to share variables when possible
1168 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1173 /* inlining can result in deeper stacks */
1174 if (slot
>= mono_method_get_header (cfg
->method
)->max_stack
)
1175 return mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1177 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1179 switch (ins
->type
) {
1186 if ((vnum
= cfg
->intvars
[pos
]))
1187 return cfg
->varinfo
[vnum
];
1188 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1189 cfg
->intvars
[pos
] = res
->inst_c0
;
1192 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
/*
 * mono_save_token_info:
 *
 *   For AOT compilation, record the (IMAGE, TOKEN) pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve the item from
 * just the image and token. Skipped when a generic context is set or the
 * token has no metadata table (wrapper references) — see the original
 * comments below.
 */
1198 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1201 * Don't use this if a generic_context is set, since that means AOT can't
1202 * look up the method using just the image+token.
1203 * table == 0 means this is a reference made from a wrapper.
1205 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
/* Allocate the record from the compile mempool: it lives as long as CFG. */
1206 MonoJumpInfoToken
*jump_info_token
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1207 jump_info_token
->image
= image
;
1208 jump_info_token
->token
= token
;
1209 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1214 * This function is called to handle items that are left on the evaluation stack
1215 * at basic block boundaries. What happens is that we save the values to local variables
1216 * and we reload them later when first entering the target basic block (with the
1217 * handle_loaded_temps () function).
1218 * A single joint point will use the same variables (stored in the array bb->out_stack or
1219 * bb->in_stack, if the basic block is before or after the joint point).
1221 * This function needs to be called _before_ emitting the last instruction of
1222 * the bb (i.e. before emitting a branch).
1223 * If the stack merge fails at a join point, cfg->unverifiable is set.
1226 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1229 MonoBasicBlock
*bb
= cfg
->cbb
;
1230 MonoBasicBlock
*outb
;
1231 MonoInst
*inst
, **locals
;
1236 if (cfg
->verbose_level
> 3)
1237 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
1238 if (!bb
->out_scount
) {
1239 bb
->out_scount
= count
;
1240 //printf ("bblock %d has out:", bb->block_num);
1242 for (i
= 0; i
< bb
->out_count
; ++i
) {
1243 outb
= bb
->out_bb
[i
];
1244 /* exception handlers are linked, but they should not be considered for stack args */
1245 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1247 //printf (" %d", outb->block_num);
1248 if (outb
->in_stack
) {
1250 bb
->out_stack
= outb
->in_stack
;
1256 bb
->out_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1257 for (i
= 0; i
< count
; ++i
) {
1259 * try to reuse temps already allocated for this purpouse, if they occupy the same
1260 * stack slot and if they are of the same type.
1261 * This won't cause conflicts since if 'local' is used to
1262 * store one of the values in the in_stack of a bblock, then
1263 * the same variable will be used for the same outgoing stack
1265 * This doesn't work when inlining methods, since the bblocks
1266 * in the inlined methods do not inherit their in_stack from
1267 * the bblock they are inlined to. See bug #58863 for an
1270 if (cfg
->inlined_method
)
1271 bb
->out_stack
[i
] = mono_compile_create_var (cfg
, type_from_stack_type (sp
[i
]), OP_LOCAL
);
1273 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
1278 for (i
= 0; i
< bb
->out_count
; ++i
) {
1279 outb
= bb
->out_bb
[i
];
1280 /* exception handlers are linked, but they should not be considered for stack args */
1281 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1283 if (outb
->in_scount
) {
1284 if (outb
->in_scount
!= bb
->out_scount
) {
1285 cfg
->unverifiable
= TRUE
;
1288 continue; /* check they are the same locals */
1290 outb
->in_scount
= count
;
1291 outb
->in_stack
= bb
->out_stack
;
1294 locals
= bb
->out_stack
;
1296 for (i
= 0; i
< count
; ++i
) {
1297 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1298 inst
->cil_code
= sp
[i
]->cil_code
;
1299 sp
[i
] = locals
[i
];
1300 if (cfg
->verbose_level
> 3)
1301 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1305 * It is possible that the out bblocks already have in_stack assigned, and
1306 * the in_stacks differ. In this case, we will store to all the different
1313 /* Find a bblock which has a different in_stack */
1315 while (bindex
< bb
->out_count
) {
1316 outb
= bb
->out_bb
[bindex
];
1317 /* exception handlers are linked, but they should not be considered for stack args */
1318 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1322 if (outb
->in_stack
!= locals
) {
1323 for (i
= 0; i
< count
; ++i
) {
1324 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1325 inst
->cil_code
= sp
[i
]->cil_code
;
1326 sp
[i
] = locals
[i
];
1327 if (cfg
->verbose_level
> 3)
1328 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1330 locals
= outb
->in_stack
;
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
static void
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* The interface id is not known until load time: materialize it as an
		 * AOT-patched constant and index the offsets table with it.
		 * MONO_PATCH_INFO_ADJUSTED_IID is pre-scaled/biased by the AOT compiler
		 * so a single pointer add yields the slot address — confirm against the
		 * AOT patch resolver. */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
	} else {
		/* The offsets array lives at negative offsets from the vtable pointer,
		 * entry i at -((i + 1) * sizeof (void*)). */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Load the per-class interface bitmap (one bit per interface id) */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* The interface id is unknown at compile time, so compute the byte
		 * index (iid >> 3) and the bit mask (1 << (iid & 7)) at runtime. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* The interface id is a JIT-time constant: test the bit with a
		 * constant byte offset and a constant mask. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Same as mini_emit_load_intf_bit_reg_class, but the bitmap is read from
	 * the vtable instead of the class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* Runtime computation of byte index (iid >> 3) and mask (1 << (iid & 7)) */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* JIT-time constant interface id: constant offset and mask */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which checks whenever the interface id of @klass is smaller than
 * than the value given by max_iid_reg.
 */
static void
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	if (cfg->compile_aot) {
		/* Interface id is resolved at load time via an AOT patch */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
	}
	else
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	/* On failure either branch (isinst) or throw (castclass) */
	if (false_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
}
/* Same as above, but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* max_interface_id is an unsigned 16 bit field, hence the LOADU2 */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/* Same as above, but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	/* max_interface_id is an unsigned 16 bit field, hence the LOADU2 */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
/*
 * Emit an isinst-style check: branch to "true_target" if the class in
 * "klass_reg" is (a subclass of) "klass", to "false_target" when the
 * supertable depth already rules it out; falls through otherwise
 * (the caller emits the fall-through handling).
 */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	/* Only classes deeper than the default supertable size need the explicit
	 * depth check; shallower supertables are always fully populated. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* Compare supertypes [klass->idepth - 1] against klass */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/*
 * Emit an interface check against the vtable in "vtable_reg": branch to
 * "true_target" when the interface bit is set; otherwise branch to
 * "false_target" (isinst) or throw InvalidCastException (castclass,
 * when no true_target is given).
 */
static void
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_reg = alloc_preg (cfg);

	/* Reject interface ids beyond the vtable's bitmap before indexing it */
	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
static void
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_bit_reg = alloc_preg (cfg);

	/* Reject interface ids beyond the class's bitmap before indexing it */
	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * Emit code which throws InvalidCastException unless the class in "klass_reg"
 * is exactly "klass" (identity test, no hierarchy walk).
 */
static void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* The class pointer is not known at AOT time: load it via a patch */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
/*
 * Compare the class in "klass_reg" against "klass" and branch to "target"
 * using the caller-supplied branch opcode (e.g. OP_PBEQ / OP_PBNE_UN).
 */
static void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		/* The class pointer is not known at AOT time: load it via a patch */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
/*
 * Emit a castclass check of the class in "klass_reg" against "klass",
 * throwing InvalidCastException on mismatch. "obj_reg" holds the object
 * itself (used only for the SZARRAY bounds check; pass -1 to skip it).
 * "object_is_null" is the target for the branches that succeed trivially.
 */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		/* Array cast: ranks must match exactly, then the element classes
		 * are checked recursively. */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[] also accepts arrays of any reference type, but not
			 * arrays of enums/valuetypes: accept when the element's parent is
			 * not System.ValueType, otherwise require it to be an enum. */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			/* ValueType[]: element must be ValueType itself or an enum */
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		/* Non-array: walk the supertypes table like mini_emit_isninst_cast,
		 * but throw instead of branching on failure. */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check (cfg, stype, klass);
	}
}
/*
 * Emit inline code which zeroes "size" bytes at destreg + offset.
 * Only val == 0 is supported (asserted below). Wider stores are used
 * when alignment permits; otherwise falls back to byte stores.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	/* Small, sufficiently aligned sizes: a single immediate store */
	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* Align to 8 with a 4 byte store, then use 8 byte stores */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* Remaining tail: 4, 2, then 1 byte stores */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
1678 #endif /* DISABLE_JIT */
/*
 * Emit inline code which copies "size" bytes from srcreg + soffset to
 * destreg + doffset, using the widest loads/stores the alignment allows.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* Remaining tail: 4, 2, then 1 byte copies */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
/*
 * Map a method's return type to the call opcode family to use:
 * plain CALL for ints/pointers/objects, LCALL for 64 bit ints,
 * FCALL for floats, VCALL for valuetypes, VOIDCALL for void.
 * "calli" selects the indirect (_REG) variant, "virt" the VIRT variant.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	/* Resolve shared generic parameters to their basic type first */
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums are handled as their underlying integral type */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic type definition */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* Nothing can be stored into void */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 */
static int
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
{
	MonoType *simple_type;
	int i;

	if (sig->hasthis) {
		/* 'this' must be an object, managed pointer or native pointer */
		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
			return 1;
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
				return 1;
			continue;
		}
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
handle_enum:
		switch (simple_type->type) {
		case MONO_TYPE_VOID:
			return 1;
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
				return 1;
			continue;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
				return 1;
			continue;
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
				return 1;
			continue;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			if (args [i]->type != STACK_I8)
				return 1;
			continue;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			if (args [i]->type != STACK_R8)
				return 1;
			continue;
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* Enums are checked as their underlying integral type */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
				goto handle_enum;
			}
			if (args [i]->type != STACK_VTYPE)
				return 1;
			continue;
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
				return 1;
			continue;
		case MONO_TYPE_GENERICINST:
			/* Re-check against the generic type definition */
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
			goto handle_enum;
		default:
			g_error ("unknown type 0x%02x in check_call_signature",
				 simple_type->type);
		}
	}
	return 0;
}
/*
 * Map a CALLVIRT-family opcode to the corresponding direct CALL opcode
 * (used when a virtual call can be statically dispatched).
 * NOTE(review): the per-family cases were dropped by extraction; restored
 * from the matching callvirt_to_call_membase mapping below — confirm.
 */
static int
callvirt_to_call (int opcode)
{
	switch (opcode) {
	case OP_CALLVIRT:
		return OP_CALL;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL;
	case OP_FCALLVIRT:
		return OP_FCALL;
	case OP_LCALLVIRT:
		return OP_LCALL;
	case OP_VCALLVIRT:
		return OP_VCALL;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
/*
 * Map a CALLVIRT-family opcode to the corresponding CALL_MEMBASE opcode
 * (the callee address is loaded from [basereg + offset], i.e. a vtable slot).
 */
static int
callvirt_to_call_membase (int opcode)
{
	switch (opcode) {
	case OP_CALLVIRT:
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
	case OP_FCALLVIRT:
		return OP_FCALL_MEMBASE;
	case OP_LCALLVIRT:
		return OP_LCALL_MEMBASE;
	case OP_VCALLVIRT:
		return OP_VCALL_MEMBASE;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT discriminator (the target method, or the caller-supplied
 * imt_arg) to an interface call, either in the architecture's dedicated
 * IMT register or via the arch-specific fallback.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* The method pointer is not known at AOT time: load via a patch */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
/*
 * Allocate a MonoJumpInfo patch descriptor from the given mempool and fill
 * in its ip, type and target. No ownership transfer: freed with the pool.
 */
static MonoJumpInfo*
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	/* NOTE(review): the ip/type assignments were dropped by extraction and
	 * restored here — confirm field names against MonoJumpInfo. */
	ji->ip.i = ip;
	ji->type = type;
	ji->data.target = target;

	return ji;
}
2083 inline static MonoInst
*
2084 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
);
/*
 * Create a MonoCallInst for calling "sig" with "args": pick the call opcode
 * from the return type, set up the (vtype) return value, convert R4 args on
 * soft-float targets, and let the backend lower the argument passing.
 * Returns the call instruction; the caller adds it to the current bblock.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Valuetype return: allocate a local to hold it and pass its address */
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	/*
	 * If the call has a float argument, we would need to do an r8->r4 conversion using
	 * an icall, but that cannot be done during the call sequence since it would clobber
	 * the call registers + the stack. So we do it before emitting the call.
	 */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *t;
		MonoInst *in = call->args [i];

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = &mono_defaults.int_class->byval_arg;
		t = mono_type_get_underlying_type (t);

		if (!t->byref && t->type == MONO_TYPE_R4) {
			MonoInst *iargs [1];
			MonoInst *conv;

			iargs [0] = in;
			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

			/* The result will be in an int vreg */
			call->args [i] = conv;
		}
	}
#endif

	/* Backend lowering of the argument passing (registers/stack) */
	mono_arch_emit_call (cfg, call);

	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
/*
 * Emit an indirect call through the function pointer in "addr".
 */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
{
	MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);

	/* The callee address is read from sreg1 by the *CALL_REG opcodes */
	call->inst.sreg1 = addr->dreg;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * Like mono_emit_calli, but also passes the runtime generic context in the
 * architecture's dedicated RGCTX register. Only available when the backend
 * defines MONO_ARCH_RGCTX_REG.
 */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	MonoCallInst *call;
	int rgctx_reg = mono_alloc_preg (cfg);

	/* Copy the rgctx into a fresh vreg, then pin it to the RGCTX register */
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;

	return (MonoInst*)call;
#else
	g_assert_not_reached ();
	return NULL;
#endif
}
/*
 * Emit a (possibly virtual) call to "method". When "this" is non-NULL the
 * call is virtual: delegate Invoke, non-virtual/sealed devirtualization and
 * interface (IMT) dispatch are all handled here. "imt_arg" optionally carries
 * the IMT discriminator for generic virtual/interface calls.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);

	/* Non-virtual calls on transparent-proxy candidates must go through the
	 * remoting check wrapper */
	if (this && sig->hasthis &&
	    (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
	    !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	} else {
		call->method = method;
	}
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor) {
				cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
				MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
				MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
			}

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
			MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
			MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		/* True virtual dispatch: call through a vtable/IMT slot */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			/* NOTE(review): upstream guards this with the mono_use_imt
			 * runtime flag (dropped by extraction) — confirm. */
			if (mono_use_imt) {
				/* IMT slots sit at negative offsets before the vtable */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* Non-IMT interface dispatch via the interface offsets table */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				/* Generic virtual method: pass the IMT discriminator too */
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * Like mono_emit_method_call_full, but additionally passes "vtable_arg"
 * (the runtime generic context) in the dedicated RGCTX register.
 */
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
								  MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
	int rgctx_reg = 0;
	MonoInst *ins;
	MonoCallInst *call;

	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Copy the rgctx into a fresh vreg before emitting the call */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
#else
		NOT_IMPLEMENTED;
#endif
	}
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;
	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Pin the copied rgctx to the RGCTX register at the call site */
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
#else
		NOT_IMPLEMENTED;
#endif
	}

	return ins;
}
/*
 * Convenience wrapper: call "method" using its own signature, with no
 * IMT argument.
 */
static inline MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
{
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
}
/*
 * Emit a direct call to the native function "func" with signature "sig".
 * The address is stored in the call's fptr field and resolved at emit time.
 */
inline static MonoInst*
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
					   MonoInst **args)
{
	MonoCallInst *call;

	g_assert (sig);

	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
	/* NOTE(review): the fptr assignment was dropped by extraction and
	 * restored here — confirm. */
	call->fptr = func;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * Emit a call to the registered JIT icall whose implementation address is
 * "func"; the call goes through the icall's wrapper, not to func directly.
 */
inline static MonoInst*
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
{
	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);

	/* The icall must have been registered beforehand */
	g_assert (info);

	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
}
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
{
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	MonoInst *ins;

	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	/* Lazily created; keyed and valued by the patch info itself */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
	return ins;
}
2416 get_memcpy_method (void)
2418 static MonoMethod
*memcpy_method
= NULL
;
2419 if (!memcpy_method
) {
2420 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2422 g_error ("Old corlib found. Install a new one");
2424 return memcpy_method
;
2428 * Emit code to copy a valuetype of type @klass whose address is stored in
2429 * @src->dreg to memory whose address is stored at @dest->dreg.
2432 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2434 MonoInst
*iargs
[3];
2437 MonoMethod
*memcpy_method
;
2441 * This check breaks with spilled vars... need to handle it during verification anyway.
2442 * g_assert (klass && klass == src->klass && klass == dest->klass);
2446 n
= mono_class_native_size (klass
, &align
);
2448 n
= mono_class_value_size (klass
, &align
);
2450 #if HAVE_WRITE_BARRIERS
2451 /* if native is true there should be no references in the struct */
2452 if (klass
->has_references
&& !native
) {
2453 /* Avoid barriers when storing to the stack */
2454 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2455 (dest
->opcode
== OP_LDADDR
))) {
2458 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2460 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2465 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2466 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2467 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2471 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2473 memcpy_method
= get_memcpy_method ();
2474 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2479 get_memset_method (void)
2481 static MonoMethod
*memset_method
= NULL
;
2482 if (!memset_method
) {
2483 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2485 g_error ("Old corlib found. Install a new one");
2487 return memset_method
;
2491 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2493 MonoInst
*iargs
[3];
2496 MonoMethod
*memset_method
;
2498 /* FIXME: Optimize this for the case when dest is an LDADDR */
2500 mono_class_init (klass
);
2501 n
= mono_class_value_size (klass
, &align
);
2503 if (n
<= sizeof (gpointer
) * 5) {
2504 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2507 memset_method
= get_memset_method ();
2509 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2510 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2511 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2516 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2518 MonoInst
*this = NULL
;
2520 g_assert (cfg
->generic_sharing_context
);
2522 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2523 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2524 !method
->klass
->valuetype
)
2525 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2527 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2528 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2531 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2533 mrgctx_loc
= mono_get_vtable_var (cfg
);
2534 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2537 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2538 MonoInst
*vtable_loc
, *vtable_var
;
2542 vtable_loc
= mono_get_vtable_var (cfg
);
2543 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2545 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2546 MonoInst
*mrgctx_var
= vtable_var
;
2549 vtable_reg
= alloc_preg (cfg
);
2550 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2551 vtable_var
->type
= STACK_PTR
;
2557 int vtable_reg
, res_reg
;
2559 vtable_reg
= alloc_preg (cfg
);
2560 res_reg
= alloc_preg (cfg
);
2561 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2566 static MonoJumpInfoRgctxEntry
*
2567 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2569 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2570 res
->method
= method
;
2571 res
->in_mrgctx
= in_mrgctx
;
2572 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2573 res
->data
->type
= patch_type
;
2574 res
->data
->data
.target
= patch_data
;
2575 res
->info_type
= info_type
;
2580 static inline MonoInst
*
2581 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2583 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2587 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2588 MonoClass
*klass
, int rgctx_type
)
2590 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2591 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2593 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2597 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2598 MonoMethod
*cmethod
, int rgctx_type
)
2600 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2601 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2603 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2607 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2608 MonoClassField
*field
, int rgctx_type
)
2610 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2611 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2613 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2617 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2619 int vtable_reg
= alloc_preg (cfg
);
2620 int context_used
= 0;
2622 if (cfg
->generic_sharing_context
)
2623 context_used
= mono_class_check_context_used (array_class
);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2627 if (cfg
->opt
& MONO_OPT_SHARED
) {
2628 int class_reg
= alloc_preg (cfg
);
2629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2630 if (cfg
->compile_aot
) {
2631 int klass_reg
= alloc_preg (cfg
);
2632 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
2633 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
2635 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
2637 } else if (context_used
) {
2638 MonoInst
*vtable_ins
;
2640 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2641 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2643 if (cfg
->compile_aot
) {
2644 int vt_reg
= alloc_preg (cfg
);
2645 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2646 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2648 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2652 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2656 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
2658 if (mini_get_debug_options ()->better_cast_details
) {
2659 int to_klass_reg
= alloc_preg (cfg
);
2660 int vtable_reg
= alloc_preg (cfg
);
2661 int klass_reg
= alloc_preg (cfg
);
2662 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2665 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
2669 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2673 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
2674 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
2675 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
2680 reset_cast_details (MonoCompile
*cfg
)
2682 /* Reset the variables holding the cast details */
2683 if (mini_get_debug_options ()->better_cast_details
) {
2684 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2686 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2687 /* It is enough to reset the from field */
2688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
2693 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2694 * generic code is generated.
2697 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
2699 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
2702 MonoInst
*rgctx
, *addr
;
2704 /* FIXME: What if the class is shared? We might not
2705 have to get the address of the method from the
2707 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2708 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2710 rgctx
= emit_get_rgctx (cfg
, method
, context_used
);
2712 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2714 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2719 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
2723 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
2724 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2725 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2726 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
2728 obj_reg
= sp
[0]->dreg
;
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2732 /* FIXME: generics */
2733 g_assert (klass
->rank
== 0);
2736 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
2737 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
2743 MonoInst
*element_class
;
2745 /* This assertion is from the unboxcast insn */
2746 g_assert (klass
->rank
== 0);
2748 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
2749 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
2751 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
2752 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2754 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
2755 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
2756 reset_cast_details (cfg
);
2759 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_PTR
), obj_reg
, sizeof (MonoObject
));
2760 MONO_ADD_INS (cfg
->cbb
, add
);
2761 add
->type
= STACK_MP
;
2768 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
)
2770 MonoInst
*iargs
[2];
2773 if (cfg
->opt
& MONO_OPT_SHARED
) {
2774 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2775 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
2777 alloc_ftn
= mono_object_new
;
2778 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
2779 /* This happens often in argument checking code, eg. throw new FooException... */
2780 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2781 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
2782 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
2784 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2785 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
2788 if (managed_alloc
) {
2789 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2790 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2792 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
2794 guint32 lw
= vtable
->klass
->instance_size
;
2795 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
2796 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
2797 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
2800 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2804 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2808 handle_alloc_from_inst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*data_inst
,
2811 MonoInst
*iargs
[2];
2812 MonoMethod
*managed_alloc
= NULL
;
2816 FIXME: we cannot get managed_alloc here because we can't get
2817 the class's vtable (because it's not a closed class)
2819 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2820 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2823 if (cfg
->opt
& MONO_OPT_SHARED
) {
2824 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2825 iargs
[1] = data_inst
;
2826 alloc_ftn
= mono_object_new
;
2828 if (managed_alloc
) {
2829 iargs
[0] = data_inst
;
2830 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2833 iargs
[0] = data_inst
;
2834 alloc_ftn
= mono_object_new_specific
;
2837 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2841 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
)
2843 MonoInst
*alloc
, *ins
;
2845 if (mono_class_is_nullable (klass
)) {
2846 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2847 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2850 alloc
= handle_alloc (cfg
, klass
, TRUE
);
2852 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2858 handle_box_from_inst (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
, MonoInst
*data_inst
)
2860 MonoInst
*alloc
, *ins
;
2862 if (mono_class_is_nullable (klass
)) {
2863 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2864 /* FIXME: What if the class is shared? We might not
2865 have to get the method address from the RGCTX. */
2866 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2867 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2868 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2870 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2872 alloc
= handle_alloc_from_inst (cfg
, klass
, data_inst
, TRUE
);
2874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2881 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
2883 MonoBasicBlock
*is_null_bb
;
2884 int obj_reg
= src
->dreg
;
2885 int vtable_reg
= alloc_preg (cfg
);
2887 NEW_BBLOCK (cfg
, is_null_bb
);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
2892 save_cast_details (cfg
, klass
, obj_reg
);
2894 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2896 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
2898 int klass_reg
= alloc_preg (cfg
);
2900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2902 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
2903 /* the remoting code is broken, access the class for now */
2905 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
2906 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2909 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
2911 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2914 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, is_null_bb
);
2918 MONO_START_BB (cfg
, is_null_bb
);
2920 reset_cast_details (cfg
);
2926 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
2929 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
2930 int obj_reg
= src
->dreg
;
2931 int vtable_reg
= alloc_preg (cfg
);
2932 int res_reg
= alloc_preg (cfg
);
2934 NEW_BBLOCK (cfg
, is_null_bb
);
2935 NEW_BBLOCK (cfg
, false_bb
);
2936 NEW_BBLOCK (cfg
, end_bb
);
2938 /* Do the assignment at the beginning, so the other assignment can be if converted */
2939 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
2940 ins
->type
= STACK_OBJ
;
2943 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
2946 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2948 /* the is_null_bb target simply copies the input register to the output */
2949 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
2951 int klass_reg
= alloc_preg (cfg
);
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2956 int rank_reg
= alloc_preg (cfg
);
2957 int eclass_reg
= alloc_preg (cfg
);
2959 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2960 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
2961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
2964 if (klass
->cast_class
== mono_defaults
.object_class
) {
2965 int parent_reg
= alloc_preg (cfg
);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
2967 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
2968 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2970 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
2971 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
2972 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2974 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
2975 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
2977 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2978 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2980 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
2981 /* Check that the object is a vector too */
2982 int bounds_reg
= alloc_preg (cfg
);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
2984 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
2985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
2988 /* the is_null_bb target simply copies the input register to the output */
2989 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2991 } else if (mono_class_is_nullable (klass
)) {
2992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2993 /* the is_null_bb target simply copies the input register to the output */
2994 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
2996 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
2997 /* the remoting code is broken, access the class for now */
2999 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3000 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3003 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3009 /* the is_null_bb target simply copies the input register to the output */
3010 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, is_null_bb
);
3015 MONO_START_BB (cfg
, false_bb
);
3017 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3020 MONO_START_BB (cfg
, is_null_bb
);
3022 MONO_START_BB (cfg
, end_bb
);
3028 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3030 /* This opcode takes as input an object reference and a class, and returns:
3031 0) if the object is an instance of the class,
3032 1) if the object is not instance of the class,
3033 2) if the object is a proxy whose type cannot be determined */
3036 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3037 int obj_reg
= src
->dreg
;
3038 int dreg
= alloc_ireg (cfg
);
3040 int klass_reg
= alloc_preg (cfg
);
3042 NEW_BBLOCK (cfg
, true_bb
);
3043 NEW_BBLOCK (cfg
, false_bb
);
3044 NEW_BBLOCK (cfg
, false2_bb
);
3045 NEW_BBLOCK (cfg
, end_bb
);
3046 NEW_BBLOCK (cfg
, no_proxy_bb
);
3048 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3049 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3051 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3052 NEW_BBLOCK (cfg
, interface_fail_bb
);
3054 tmp_reg
= alloc_preg (cfg
);
3055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3056 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3057 MONO_START_BB (cfg
, interface_fail_bb
);
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3060 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3062 tmp_reg
= alloc_preg (cfg
);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3064 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3067 tmp_reg
= alloc_preg (cfg
);
3068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3071 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3072 tmp_reg
= alloc_preg (cfg
);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3076 tmp_reg
= alloc_preg (cfg
);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3078 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3081 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3084 MONO_START_BB (cfg
, no_proxy_bb
);
3086 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3089 MONO_START_BB (cfg
, false_bb
);
3091 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3094 MONO_START_BB (cfg
, false2_bb
);
3096 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3099 MONO_START_BB (cfg
, true_bb
);
3101 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3103 MONO_START_BB (cfg
, end_bb
);
3106 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3108 ins
->type
= STACK_I4
;
3114 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3116 /* This opcode takes as input an object reference and a class, and returns:
3117 0) if the object is an instance of the class,
3118 1) if the object is a proxy whose type cannot be determined
3119 an InvalidCastException exception is thrown otherwhise*/
3122 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3123 int obj_reg
= src
->dreg
;
3124 int dreg
= alloc_ireg (cfg
);
3125 int tmp_reg
= alloc_preg (cfg
);
3126 int klass_reg
= alloc_preg (cfg
);
3128 NEW_BBLOCK (cfg
, end_bb
);
3129 NEW_BBLOCK (cfg
, ok_result_bb
);
3131 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3134 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3135 NEW_BBLOCK (cfg
, interface_fail_bb
);
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3138 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3139 MONO_START_BB (cfg
, interface_fail_bb
);
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3142 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3144 tmp_reg
= alloc_preg (cfg
);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3147 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3149 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3153 NEW_BBLOCK (cfg
, no_proxy_bb
);
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3157 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3159 tmp_reg
= alloc_preg (cfg
);
3160 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3163 tmp_reg
= alloc_preg (cfg
);
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3165 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3168 NEW_BBLOCK (cfg
, fail_1_bb
);
3170 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3172 MONO_START_BB (cfg
, fail_1_bb
);
3174 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3177 MONO_START_BB (cfg
, no_proxy_bb
);
3179 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3182 MONO_START_BB (cfg
, ok_result_bb
);
3184 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3186 MONO_START_BB (cfg
, end_bb
);
3189 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3191 ins
->type
= STACK_I4
;
3196 static G_GNUC_UNUSED MonoInst
*
3197 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
)
3199 gpointer
*trampoline
;
3200 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3204 obj
= handle_alloc (cfg
, klass
, FALSE
);
3206 /* Inline the contents of mono_delegate_ctor */
3208 /* Set target field */
3209 /* Optimize away setting of NULL target */
3210 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0))
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3213 /* Set method field */
3214 EMIT_NEW_METHODCONST (cfg
, method_ins
, method
);
3215 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3218 * To avoid looking up the compiled code belonging to the target method
3219 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3220 * store it, and we fill it after the method has been compiled.
3222 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3223 MonoInst
*code_slot_ins
;
3225 domain
= mono_domain_get ();
3226 mono_domain_lock (domain
);
3227 if (!domain_jit_info (domain
)->method_code_hash
)
3228 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3229 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3231 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3232 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3234 mono_domain_unlock (domain
);
3236 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3237 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3240 /* Set invoke_impl field */
3241 if (cfg
->compile_aot
) {
3242 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3244 trampoline
= mono_create_delegate_trampoline (klass
);
3245 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3247 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3249 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3255 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3257 MonoJitICallInfo
*info
;
3259 /* Need to register the icall so it gets an icall wrapper */
3260 info
= mono_get_array_new_va_icall (rank
);
3262 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3264 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3265 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
3269 mono_emit_load_got_addr (MonoCompile
*cfg
)
3271 MonoInst
*getaddr
, *dummy_use
;
3273 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3276 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3277 getaddr
->dreg
= cfg
->got_var
->dreg
;
3279 /* Add it to the start of the first bblock */
3280 if (cfg
->bb_entry
->code
) {
3281 getaddr
->next
= cfg
->bb_entry
->code
;
3282 cfg
->bb_entry
->code
= getaddr
;
3285 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3287 cfg
->got_var_allocated
= TRUE
;
3290 * Add a dummy use to keep the got_var alive, since real uses might
3291 * only be generated by the back ends.
3292 * Add it to end_bblock, so the variable's lifetime covers the whole
3294 * It would be better to make the usage of the got var explicit in all
3295 * cases when the backend needs it (i.e. calls, throw etc.), so this
3296 * wouldn't be needed.
3298 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3299 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3302 static int inline_limit
;
3303 static gboolean inline_limit_inited
;
3306 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3308 MonoMethodHeader
*header
;
3310 #ifdef MONO_ARCH_SOFT_FLOAT
3311 MonoMethodSignature
*sig
= mono_method_signature (method
);
3315 if (cfg
->generic_sharing_context
)
3318 #ifdef MONO_ARCH_HAVE_LMF_OPS
3319 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3320 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3321 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3325 if (method
->is_inflated
)
3326 /* Avoid inflating the header */
3327 header
= mono_method_get_header (((MonoMethodInflated
*)method
)->declaring
);
3329 header
= mono_method_get_header (method
);
3331 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_RUNTIME
) ||
3332 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3333 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3334 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3335 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
) ||
3336 (method
->klass
->marshalbyref
) ||
3337 !header
|| header
->num_clauses
)
3340 /* also consider num_locals? */
3341 /* Do the size check early to avoid creating vtables */
3342 if (!inline_limit_inited
) {
3343 if (getenv ("MONO_INLINELIMIT"))
3344 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3346 inline_limit
= INLINE_LENGTH_LIMIT
;
3347 inline_limit_inited
= TRUE
;
3349 if (header
->code_size
>= inline_limit
)
3353 * if we can initialize the class of the method right away, we do,
3354 * otherwise we don't allow inlining if the class needs initialization,
3355 * since it would mean inserting a call to mono_runtime_class_init()
3356 * inside the inlined code
3358 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3359 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3360 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3361 if (!method
->klass
->runtime_info
)
3362 /* No vtable created yet */
3364 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3367 /* This makes so that inline cannot trigger */
3368 /* .cctors: too many apps depend on them */
3369 /* running with a specific order... */
3370 if (! vtable
->initialized
)
3372 mono_runtime_class_init (vtable
);
3374 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3375 if (!method
->klass
->runtime_info
)
3376 /* No vtable created yet */
3378 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3381 if (!vtable
->initialized
)
3386 * If we're compiling for shared code
3387 * the cctor will need to be run at aot method load time, for example,
3388 * or at the end of the compilation of the inlining method.
3390 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
3395 * CAS - do not inline methods with declarative security
3396 * Note: this has to be before any possible return TRUE;
3398 if (mono_method_has_declsec (method
))
3401 #ifdef MONO_ARCH_SOFT_FLOAT
3403 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3405 for (i
= 0; i
< sig
->param_count
; ++i
)
3406 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3414 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
3416 if (vtable
->initialized
&& !cfg
->compile_aot
)
3419 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
3422 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
3425 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
3426 /* The initialization is already done before the method is called */
3433 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
)
3437 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
3439 mono_class_init (klass
);
3440 size
= mono_class_array_element_size (klass
);
3442 mult_reg
= alloc_preg (cfg
);
3443 array_reg
= arr
->dreg
;
3444 index_reg
= index
->dreg
;
3446 #if SIZEOF_REGISTER == 8
3447 /* The array reg is 64 bits but the index reg is only 32 */
3448 index2_reg
= alloc_preg (cfg
);
3449 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3451 if (index
->type
== STACK_I8
) {
3452 index2_reg
= alloc_preg (cfg
);
3453 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3455 index2_reg
= index_reg
;
3459 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
3461 #if defined(__i386__) || defined(__x86_64__)
3462 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
3463 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3465 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
3466 ins
->type
= STACK_PTR
;
3472 add_reg
= alloc_preg (cfg
);
3474 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
3475 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
3476 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3477 ins
->type
= STACK_PTR
;
3478 MONO_ADD_INS (cfg
->cbb
, ins
);
3483 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3485 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
3487 int bounds_reg
= alloc_preg (cfg
);
3488 int add_reg
= alloc_preg (cfg
);
3489 int mult_reg
= alloc_preg (cfg
);
3490 int mult2_reg
= alloc_preg (cfg
);
3491 int low1_reg
= alloc_preg (cfg
);
3492 int low2_reg
= alloc_preg (cfg
);
3493 int high1_reg
= alloc_preg (cfg
);
3494 int high2_reg
= alloc_preg (cfg
);
3495 int realidx1_reg
= alloc_preg (cfg
);
3496 int realidx2_reg
= alloc_preg (cfg
);
3497 int sum_reg
= alloc_preg (cfg
);
3502 mono_class_init (klass
);
3503 size
= mono_class_array_element_size (klass
);
3505 index1
= index_ins1
->dreg
;
3506 index2
= index_ins2
->dreg
;
3508 /* range checking */
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
3510 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
3513 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3514 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
3515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
3516 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3517 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
3518 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
3521 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3522 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
3523 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
3524 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3525 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
3526 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3528 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
3529 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
3530 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
3531 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
3532 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3534 ins
->type
= STACK_MP
;
3536 MONO_ADD_INS (cfg
->cbb
, ins
);
3543 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
3547 MonoMethod
*addr_method
;
3550 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
3553 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1]);
3555 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3556 /* emit_ldelema_2 depends on OP_LMUL */
3557 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
3558 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
3562 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
3563 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
3564 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
3570 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
3572 MonoInst
*ins
= NULL
;
3574 static MonoClass
*runtime_helpers_class
= NULL
;
3575 if (! runtime_helpers_class
)
3576 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
3577 "System.Runtime.CompilerServices", "RuntimeHelpers");
3579 if (cmethod
->klass
== mono_defaults
.string_class
) {
3580 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
3581 int dreg
= alloc_ireg (cfg
);
3582 int index_reg
= alloc_preg (cfg
);
3583 int mult_reg
= alloc_preg (cfg
);
3584 int add_reg
= alloc_preg (cfg
);
3586 #if SIZEOF_REGISTER == 8
3587 /* The array reg is 64 bits but the index reg is only 32 */
3588 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
3590 index_reg
= args
[1]->dreg
;
3592 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
3594 #if defined(__i386__) || defined(__x86_64__)
3595 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
3596 add_reg
= ins
->dreg
;
3597 /* Avoid a warning */
3599 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3602 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
3603 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3604 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3605 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
3607 type_from_op (ins
, NULL
, NULL
);
3609 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3610 int dreg
= alloc_ireg (cfg
);
3611 /* Decompose later to allow more optimizations */
3612 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
3613 ins
->type
= STACK_I4
;
3614 cfg
->cbb
->has_array_access
= TRUE
;
3615 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
3618 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
3619 int mult_reg
= alloc_preg (cfg
);
3620 int add_reg
= alloc_preg (cfg
);
3622 /* The corlib functions check for oob already. */
3623 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
3624 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3625 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
3628 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
3630 if (strcmp (cmethod
->name
, "GetType") == 0) {
3631 int dreg
= alloc_preg (cfg
);
3632 int vt_reg
= alloc_preg (cfg
);
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3634 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
3635 type_from_op (ins
, NULL
, NULL
);
3638 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3639 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0) {
3640 int dreg
= alloc_ireg (cfg
);
3641 int t1
= alloc_ireg (cfg
);
3643 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
3644 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
3645 ins
->type
= STACK_I4
;
3649 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
3650 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
3651 MONO_ADD_INS (cfg
->cbb
, ins
);
3655 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
3656 if (cmethod
->name
[0] != 'g')
3659 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
3660 int dreg
= alloc_ireg (cfg
);
3661 int vtable_reg
= alloc_preg (cfg
);
3662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
3663 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3664 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
3665 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3666 type_from_op (ins
, NULL
, NULL
);
3669 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3670 int dreg
= alloc_ireg (cfg
);
3672 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
3673 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
3674 type_from_op (ins
, NULL
, NULL
);
3679 } else if (cmethod
->klass
== runtime_helpers_class
) {
3681 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
3682 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
3686 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
3687 if (strcmp (cmethod
->name
, "get_CurrentThread") == 0 && (ins
= mono_arch_get_thread_intrinsic (cfg
))) {
3688 ins
->dreg
= alloc_preg (cfg
);
3689 ins
->type
= STACK_OBJ
;
3690 MONO_ADD_INS (cfg
->cbb
, ins
);
3692 } else if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
3693 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
3694 MONO_ADD_INS (cfg
->cbb
, ins
);
3696 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
3697 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
3698 MONO_ADD_INS (cfg
->cbb
, ins
);
3701 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
3702 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3703 if (strcmp (cmethod
->name
, "Enter") == 0) {
3706 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
3707 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3708 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3709 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3711 return (MonoInst
*)call
;
3712 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
3715 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
3716 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3717 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3718 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3720 return (MonoInst
*)call
;
3722 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3723 MonoMethod
*fast_method
= NULL
;
3725 /* Avoid infinite recursion */
3726 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
3727 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
3728 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
3731 if (strcmp (cmethod
->name
, "Enter") == 0 ||
3732 strcmp (cmethod
->name
, "Exit") == 0)
3733 fast_method
= mono_monitor_get_fast_path (cmethod
);
3737 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
3739 } else if (mini_class_is_system_array (cmethod
->klass
) &&
3740 strcmp (cmethod
->name
, "GetGenericValueImpl") == 0) {
3741 MonoInst
*addr
, *store
, *load
;
3742 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[1]);
3744 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1]);
3745 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
3748 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
3749 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
3750 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
3753 #if SIZEOF_REGISTER == 8
3754 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
3755 /* 64 bit reads are already atomic */
3756 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
3757 ins
->dreg
= mono_alloc_preg (cfg
);
3758 ins
->inst_basereg
= args
[0]->dreg
;
3759 ins
->inst_offset
= 0;
3760 MONO_ADD_INS (cfg
->cbb
, ins
);
3764 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3765 if (strcmp (cmethod
->name
, "Increment") == 0) {
3766 MonoInst
*ins_iconst
;
3769 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3770 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3771 #if SIZEOF_REGISTER == 8
3772 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3773 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3776 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3777 ins_iconst
->inst_c0
= 1;
3778 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3779 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3781 MONO_INST_NEW (cfg
, ins
, opcode
);
3782 ins
->dreg
= mono_alloc_ireg (cfg
);
3783 ins
->inst_basereg
= args
[0]->dreg
;
3784 ins
->inst_offset
= 0;
3785 ins
->sreg2
= ins_iconst
->dreg
;
3786 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3787 MONO_ADD_INS (cfg
->cbb
, ins
);
3789 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
3790 MonoInst
*ins_iconst
;
3793 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3794 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3795 #if SIZEOF_REGISTER == 8
3796 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3797 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3800 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3801 ins_iconst
->inst_c0
= -1;
3802 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3803 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3805 MONO_INST_NEW (cfg
, ins
, opcode
);
3806 ins
->dreg
= mono_alloc_ireg (cfg
);
3807 ins
->inst_basereg
= args
[0]->dreg
;
3808 ins
->inst_offset
= 0;
3809 ins
->sreg2
= ins_iconst
->dreg
;
3810 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3811 MONO_ADD_INS (cfg
->cbb
, ins
);
3813 } else if (strcmp (cmethod
->name
, "Add") == 0) {
3816 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3817 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3818 #if SIZEOF_REGISTER == 8
3819 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3820 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3824 MONO_INST_NEW (cfg
, ins
, opcode
);
3825 ins
->dreg
= mono_alloc_ireg (cfg
);
3826 ins
->inst_basereg
= args
[0]->dreg
;
3827 ins
->inst_offset
= 0;
3828 ins
->sreg2
= args
[1]->dreg
;
3829 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3830 MONO_ADD_INS (cfg
->cbb
, ins
);
3833 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3835 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3836 if (strcmp (cmethod
->name
, "Exchange") == 0) {
3839 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3840 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3841 #if SIZEOF_REGISTER == 8
3842 else if ((fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
3843 (fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3844 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3845 opcode
= OP_ATOMIC_EXCHANGE_I8
;
3847 else if ((fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3848 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3849 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3854 MONO_INST_NEW (cfg
, ins
, opcode
);
3855 ins
->dreg
= mono_alloc_ireg (cfg
);
3856 ins
->inst_basereg
= args
[0]->dreg
;
3857 ins
->inst_offset
= 0;
3858 ins
->sreg2
= args
[1]->dreg
;
3859 MONO_ADD_INS (cfg
->cbb
, ins
);
3861 switch (fsig
->params
[0]->type
) {
3863 ins
->type
= STACK_I4
;
3867 ins
->type
= STACK_I8
;
3869 case MONO_TYPE_OBJECT
:
3870 ins
->type
= STACK_OBJ
;
3873 g_assert_not_reached ();
3876 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3878 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3880 * Can't implement CompareExchange methods this way since they have
3881 * three arguments. We can implement one of the common cases, where the new
3882 * value is a constant.
3884 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
3885 if ((fsig
->params
[1]->type
== MONO_TYPE_I4
||
3886 (sizeof (gpointer
) == 4 && fsig
->params
[1]->type
== MONO_TYPE_I
))
3887 && args
[2]->opcode
== OP_ICONST
) {
3888 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_IMM_I4
);
3889 ins
->dreg
= alloc_ireg (cfg
);
3890 ins
->sreg1
= args
[0]->dreg
;
3891 ins
->sreg2
= args
[1]->dreg
;
3892 ins
->backend
.data
= GINT_TO_POINTER (args
[2]->inst_c0
);
3893 ins
->type
= STACK_I4
;
3894 MONO_ADD_INS (cfg
->cbb
, ins
);
3896 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3898 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3902 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
3903 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
3904 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
3905 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
3906 MONO_ADD_INS (cfg
->cbb
, ins
);
3909 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
3910 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
3911 #ifdef PLATFORM_WIN32
3912 EMIT_NEW_ICONST (cfg
, ins
, 1);
3914 EMIT_NEW_ICONST (cfg
, ins
, 0);
3918 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
3920 * There is general branches code for Min/Max, but it does not work for
3922 * http://everything2.com/?node_id=1051618
3926 #ifdef MONO_ARCH_SIMD_INTRINSICS
3927 if (cfg
->opt
& MONO_OPT_SIMD
) {
3928 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
3934 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
3938 * This entry point could be used later for arbitrary method
3941 inline static MonoInst
*
3942 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
3943 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
3945 if (method
->klass
== mono_defaults
.string_class
) {
3946 /* managed string allocation support */
3947 if (strcmp (method
->name
, "InternalAllocateStr") == 0) {
3948 MonoInst
*iargs
[2];
3949 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3950 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
3953 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3954 iargs
[1] = args
[0];
3955 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
3962 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
3964 MonoInst
*store
, *temp
;
3967 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3968 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
3971 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3972 * would be different than the MonoInst's used to represent arguments, and
3973 * the ldelema implementation can't deal with that.
3974 * Solution: When ldelema is used on an inline argument, create a var for
3975 * it, emit ldelema on that var, and emit the saving code below in
3976 * inline_method () if needed.
3978 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
3979 cfg
->args
[i
] = temp
;
3980 /* This uses cfg->args [i] which is set by the preceeding line */
3981 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
3982 store
->cil_code
= sp
[0]->cil_code
;
3987 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3988 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
3990 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3992 check_inline_called_method_name_limit (MonoMethod
*called_method
)
3995 static char *limit
= NULL
;
3997 if (limit
== NULL
) {
3998 char *limit_string
= getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4000 if (limit_string
!= NULL
)
4001 limit
= limit_string
;
4003 limit
= (char *) "";
4006 if (limit
[0] != '\0') {
4007 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
4009 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
4010 g_free (called_method_name
);
4012 //return (strncmp_result <= 0);
4013 return (strncmp_result
== 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Returns TRUE when inlining into CALLER_METHOD is allowed, i.e. when no
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT prefix is set or the caller's full
 * name starts with that prefix. The limit is read from the environment once
 * and cached.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4051 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4052 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_allways
)
4054 MonoInst
*ins
, *rvar
= NULL
;
4055 MonoMethodHeader
*cheader
;
4056 MonoBasicBlock
*ebblock
, *sbblock
;
4058 MonoMethod
*prev_inlined_method
;
4059 MonoInst
**prev_locals
, **prev_args
;
4060 MonoType
**prev_arg_types
;
4061 guint prev_real_offset
;
4062 GHashTable
*prev_cbb_hash
;
4063 MonoBasicBlock
**prev_cil_offset_to_bb
;
4064 MonoBasicBlock
*prev_cbb
;
4065 unsigned char* prev_cil_start
;
4066 guint32 prev_cil_offset_to_bb_len
;
4067 MonoMethod
*prev_current_method
;
4068 MonoGenericContext
*prev_generic_context
;
4069 gboolean ret_var_set
, prev_ret_var_set
;
4071 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4073 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4074 if ((! inline_allways
) && ! check_inline_called_method_name_limit (cmethod
))
4077 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4078 if ((! inline_allways
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4082 if (cfg
->verbose_level
> 2)
4083 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4085 if (!cmethod
->inline_info
) {
4086 mono_jit_stats
.inlineable_methods
++;
4087 cmethod
->inline_info
= 1;
4089 /* allocate space to store the return value */
4090 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4091 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4094 /* allocate local variables */
4095 cheader
= mono_method_get_header (cmethod
);
4096 prev_locals
= cfg
->locals
;
4097 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4098 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4099 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4101 /* allocate start and end blocks */
4102 /* This is needed so if the inline is aborted, we can clean up */
4103 NEW_BBLOCK (cfg
, sbblock
);
4104 sbblock
->real_offset
= real_offset
;
4106 NEW_BBLOCK (cfg
, ebblock
);
4107 ebblock
->block_num
= cfg
->num_bblocks
++;
4108 ebblock
->real_offset
= real_offset
;
4110 prev_args
= cfg
->args
;
4111 prev_arg_types
= cfg
->arg_types
;
4112 prev_inlined_method
= cfg
->inlined_method
;
4113 cfg
->inlined_method
= cmethod
;
4114 cfg
->ret_var_set
= FALSE
;
4115 prev_real_offset
= cfg
->real_offset
;
4116 prev_cbb_hash
= cfg
->cbb_hash
;
4117 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4118 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4119 prev_cil_start
= cfg
->cil_start
;
4120 prev_cbb
= cfg
->cbb
;
4121 prev_current_method
= cfg
->current_method
;
4122 prev_generic_context
= cfg
->generic_context
;
4123 prev_ret_var_set
= cfg
->ret_var_set
;
4125 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, *ip
== CEE_CALLVIRT
);
4127 ret_var_set
= cfg
->ret_var_set
;
4129 cfg
->inlined_method
= prev_inlined_method
;
4130 cfg
->real_offset
= prev_real_offset
;
4131 cfg
->cbb_hash
= prev_cbb_hash
;
4132 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4133 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4134 cfg
->cil_start
= prev_cil_start
;
4135 cfg
->locals
= prev_locals
;
4136 cfg
->args
= prev_args
;
4137 cfg
->arg_types
= prev_arg_types
;
4138 cfg
->current_method
= prev_current_method
;
4139 cfg
->generic_context
= prev_generic_context
;
4140 cfg
->ret_var_set
= prev_ret_var_set
;
4142 if ((costs
>= 0 && costs
< 60) || inline_allways
) {
4143 if (cfg
->verbose_level
> 2)
4144 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4146 mono_jit_stats
.inlined_methods
++;
4148 /* always add some code to avoid block split failures */
4149 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4150 MONO_ADD_INS (prev_cbb
, ins
);
4152 prev_cbb
->next_bb
= sbblock
;
4153 link_bblock (cfg
, prev_cbb
, sbblock
);
4156 * Get rid of the begin and end bblocks if possible to aid local
4159 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4161 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4162 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
4164 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4165 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4166 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4168 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4169 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4170 cfg
->cbb
= prev_cbb
;
4178 * If the inlined method contains only a throw, then the ret var is not
4179 * set, so set it to a dummy value.
4182 static double r8_0
= 0.0;
4184 switch (rvar
->type
) {
4186 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4189 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4194 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4197 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4198 ins
->type
= STACK_R8
;
4199 ins
->inst_p0
= (void*)&r8_0
;
4200 ins
->dreg
= rvar
->dreg
;
4201 MONO_ADD_INS (cfg
->cbb
, ins
);
4204 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (fsig
->ret
));
4207 g_assert_not_reached ();
4211 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
4216 if (cfg
->verbose_level
> 2)
4217 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
4218 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4219 mono_loader_clear_error ();
4221 /* This gets rid of the newly added bblocks */
4222 cfg
->cbb
= prev_cbb
;
4228 * Some of these comments may well be out-of-date.
4229 * Design decisions: we do a single pass over the IL code (and we do bblock
4230 * splitting/merging in the few cases when it's required: a back jump to an IL
4231 * address that was not already seen as bblock starting point).
4232 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4233 * Complex operations are decomposed in simpler ones right away. We need to let the
4234 * arch-specific code peek and poke inside this process somehow (except when the
4235 * optimizations can take advantage of the full semantic info of coarse opcodes).
4236 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4237 * MonoInst->opcode initially is the IL opcode or some simplification of that
4238 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4239 * opcode with value bigger than OP_LAST.
4240 * At this point the IR can be handed over to an interpreter, a dumb code generator
4241 * or to the optimizing code generator that will translate it to SSA form.
4243 * Profiling directed optimizations.
4244 * We may compile by default with few or no optimizations and instrument the code
4245 * or the user may indicate what methods to optimize the most either in a config file
4246 * or through repeated runs where the compiler applies offline the optimizations to
4247 * each method and then decides if it was worth it.
/* Validation helpers used by mono_method_to_ir (); each bails out via
 * UNVERIFIED (or load_error) when the IL stream is malformed. They rely on
 * locals of that function (sp, stack_start, header, num_args, ip, end). */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4263 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
4265 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
4267 return b
== NULL
|| b
== bb
;
4271 get_basic_blocks (MonoCompile
*cfg
, MonoMethodHeader
* header
, guint real_offset
, unsigned char *start
, unsigned char *end
, unsigned char **pos
)
4273 unsigned char *ip
= start
;
4274 unsigned char *target
;
4277 MonoBasicBlock
*bblock
;
4278 const MonoOpcode
*opcode
;
4281 cli_addr
= ip
- start
;
4282 i
= mono_opcode_value ((const guint8
**)&ip
, end
);
4285 opcode
= &mono_opcodes
[i
];
4286 switch (opcode
->argument
) {
4287 case MonoInlineNone
:
4290 case MonoInlineString
:
4291 case MonoInlineType
:
4292 case MonoInlineField
:
4293 case MonoInlineMethod
:
4296 case MonoShortInlineR
:
4303 case MonoShortInlineVar
:
4304 case MonoShortInlineI
:
4307 case MonoShortInlineBrTarget
:
4308 target
= start
+ cli_addr
+ 2 + (signed char)ip
[1];
4309 GET_BBLOCK (cfg
, bblock
, target
);
4312 GET_BBLOCK (cfg
, bblock
, ip
);
4314 case MonoInlineBrTarget
:
4315 target
= start
+ cli_addr
+ 5 + (gint32
)read32 (ip
+ 1);
4316 GET_BBLOCK (cfg
, bblock
, target
);
4319 GET_BBLOCK (cfg
, bblock
, ip
);
4321 case MonoInlineSwitch
: {
4322 guint32 n
= read32 (ip
+ 1);
4325 cli_addr
+= 5 + 4 * n
;
4326 target
= start
+ cli_addr
;
4327 GET_BBLOCK (cfg
, bblock
, target
);
4329 for (j
= 0; j
< n
; ++j
) {
4330 target
= start
+ cli_addr
+ (gint32
)read32 (ip
);
4331 GET_BBLOCK (cfg
, bblock
, target
);
4341 g_assert_not_reached ();
4344 if (i
== CEE_THROW
) {
4345 unsigned char *bb_start
= ip
- 1;
4347 /* Find the start of the bblock containing the throw */
4349 while ((bb_start
>= start
) && !bblock
) {
4350 bblock
= cfg
->cil_offset_to_bb
[(bb_start
) - start
];
4354 bblock
->out_of_line
= 1;
4363 static inline MonoMethod
*
4364 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4368 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
)
4369 return mono_method_get_wrapper_data (m
, token
);
4371 method
= mono_get_method_full (m
->klass
->image
, token
, klass
, context
);
4376 static inline MonoMethod
*
4377 mini_get_method (MonoCompile
*cfg
, MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4379 MonoMethod
*method
= mini_get_method_allow_open (m
, token
, klass
, context
);
4381 if (method
&& cfg
&& !cfg
->generic_sharing_context
&& mono_class_is_open_constructed_type (&method
->klass
->byval_arg
))
4387 static inline MonoClass
*
4388 mini_get_class (MonoMethod
*method
, guint32 token
, MonoGenericContext
*context
)
4392 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
4393 klass
= mono_method_get_wrapper_data (method
, token
);
4395 klass
= mono_class_get_full (method
->klass
->image
, token
, context
);
4397 mono_class_init (klass
);
4402 * Returns TRUE if the JIT should abort inlining because "callee"
4403 * is influenced by security attributes.
4406 gboolean
check_linkdemand (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
4410 if ((cfg
->method
!= caller
) && mono_method_has_declsec (callee
)) {
4414 result
= mono_declsec_linkdemand (cfg
->domain
, caller
, callee
);
4415 if (result
== MONO_JIT_SECURITY_OK
)
4418 if (result
== MONO_JIT_LINKDEMAND_ECMA
) {
4419 /* Generate code to throw a SecurityException before the actual call/link */
4420 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
4423 NEW_ICONST (cfg
, args
[0], 4);
4424 NEW_METHODCONST (cfg
, args
[1], caller
);
4425 mono_emit_method_call (cfg
, secman
->linkdemandsecurityexception
, args
, NULL
);
4426 } else if (cfg
->exception_type
== MONO_EXCEPTION_NONE
) {
4427 /* don't hide previous results */
4428 cfg
->exception_type
= MONO_EXCEPTION_SECURITY_LINKDEMAND
;
4429 cfg
->exception_data
= result
;
4437 method_access_exception (void)
4439 static MonoMethod
*method
= NULL
;
4442 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
4443 method
= mono_class_get_method_from_name (secman
->securitymanager
,
4444 "MethodAccessException", 2);
4451 emit_throw_method_access_exception (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
,
4452 MonoBasicBlock
*bblock
, unsigned char *ip
)
4454 MonoMethod
*thrower
= method_access_exception ();
4457 EMIT_NEW_METHODCONST (cfg
, args
[0], caller
);
4458 EMIT_NEW_METHODCONST (cfg
, args
[1], callee
);
4459 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
4463 field_access_exception (void)
4465 static MonoMethod
*method
= NULL
;
4468 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
4469 method
= mono_class_get_method_from_name (secman
->securitymanager
,
4470 "FieldAccessException", 2);
4477 emit_throw_field_access_exception (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
,
4478 MonoBasicBlock
*bblock
, unsigned char *ip
)
4480 MonoMethod
*thrower
= field_access_exception ();
4483 EMIT_NEW_METHODCONST (cfg
, args
[0], caller
);
4484 EMIT_NEW_METHODCONST (cfg
, args
[1], field
);
4485 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
4489 * Return the original method is a wrapper is specified. We can only access
4490 * the custom attributes from the original method.
4493 get_original_method (MonoMethod
*method
)
4495 if (method
->wrapper_type
== MONO_WRAPPER_NONE
)
4498 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4499 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
)
4502 /* in other cases we need to find the original method */
4503 return mono_marshal_method_from_wrapper (method
);
4507 ensure_method_is_allowed_to_access_field (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
,
4508 MonoBasicBlock
*bblock
, unsigned char *ip
)
4510 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4511 if (mono_security_core_clr_class_level (mono_field_get_parent (field
)) != MONO_SECURITY_CORE_CLR_CRITICAL
)
4514 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4515 caller
= get_original_method (caller
);
4519 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4520 if (mono_security_core_clr_method_level (caller
, TRUE
) == MONO_SECURITY_CORE_CLR_TRANSPARENT
)
4521 emit_throw_field_access_exception (cfg
, caller
, field
, bblock
, ip
);
4525 ensure_method_is_allowed_to_call_method (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
,
4526 MonoBasicBlock
*bblock
, unsigned char *ip
)
4528 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4529 if (mono_security_core_clr_method_level (callee
, TRUE
) != MONO_SECURITY_CORE_CLR_CRITICAL
)
4532 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4533 caller
= get_original_method (caller
);
4537 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4538 if (mono_security_core_clr_method_level (caller
, TRUE
) == MONO_SECURITY_CORE_CLR_TRANSPARENT
)
4539 emit_throw_method_access_exception (cfg
, caller
, callee
, bblock
, ip
);
4543 * Check that the IL instructions at ip are the array initialization
4544 * sequence and return the pointer to the data and the size.
4547 initialize_array_data (MonoMethod
*method
, gboolean aot
, unsigned char *ip
, MonoClass
*klass
, guint32 len
, int *out_size
, guint32
*out_field_token
)
4550 * newarr[System.Int32]
4552 * ldtoken field valuetype ...
4553 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4555 if (ip
[0] == CEE_DUP
&& ip
[1] == CEE_LDTOKEN
&& ip
[5] == 0x4 && ip
[6] == CEE_CALL
) {
4556 guint32 token
= read32 (ip
+ 7);
4557 guint32 field_token
= read32 (ip
+ 2);
4558 guint32 field_index
= field_token
& 0xffffff;
4560 const char *data_ptr
;
4562 MonoMethod
*cmethod
;
4563 MonoClass
*dummy_class
;
4564 MonoClassField
*field
= mono_field_from_token (method
->klass
->image
, field_token
, &dummy_class
, NULL
);
4570 *out_field_token
= field_token
;
4572 cmethod
= mini_get_method (NULL
, method
, token
, NULL
, NULL
);
4575 if (strcmp (cmethod
->name
, "InitializeArray") || strcmp (cmethod
->klass
->name
, "RuntimeHelpers") || cmethod
->klass
->image
!= mono_defaults
.corlib
)
4577 switch (mono_type_get_underlying_type (&klass
->byval_arg
)->type
) {
4578 case MONO_TYPE_BOOLEAN
:
4582 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4583 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4584 case MONO_TYPE_CHAR
:
4594 return NULL
; /* stupid ARM FP swapped format */
4604 if (size
> mono_type_size (field
->type
, &dummy_align
))
4607 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4608 if (!method
->klass
->image
->dynamic
) {
4609 field_index
= read32 (ip
+ 2) & 0xffffff;
4610 mono_metadata_field_info (method
->klass
->image
, field_index
- 1, NULL
, &rva
, NULL
);
4611 data_ptr
= mono_image_rva_map (method
->klass
->image
, rva
);
4612 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4613 /* for aot code we do the lookup on load */
4614 if (aot
&& data_ptr
)
4615 return GUINT_TO_POINTER (rva
);
4617 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4619 data_ptr
= mono_field_get_data (field
);
4627 set_exception_type_from_invalid_il (MonoCompile
*cfg
, MonoMethod
*method
, unsigned char *ip
)
4629 char *method_fname
= mono_method_full_name (method
, TRUE
);
4632 if (mono_method_get_header (method
)->code_size
== 0)
4633 method_code
= g_strdup ("method body is empty.");
4635 method_code
= mono_disasm_code_one (NULL
, method
, ip
, NULL
);
4636 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
4637 cfg
->exception_message
= g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname
, method_code
);
4638 g_free (method_fname
);
4639 g_free (method_code
);
4643 set_exception_object (MonoCompile
*cfg
, MonoException
*exception
)
4645 cfg
->exception_type
= MONO_EXCEPTION_OBJECT_SUPPLIED
;
4646 MONO_GC_REGISTER_ROOT (cfg
->exception_ptr
);
4647 cfg
->exception_ptr
= exception
;
4651 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
4655 if (cfg
->generic_sharing_context
)
4656 type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, &klass
->byval_arg
);
4658 type
= &klass
->byval_arg
;
4659 return MONO_TYPE_IS_REFERENCE (type
);
4663 * mono_decompose_array_access_opts:
4665 * Decompose array access opcodes.
4666 * This should be in decompose.c, but it emits calls so it has to stay here until
4667 * the old JIT is gone.
4670 mono_decompose_array_access_opts (MonoCompile
*cfg
)
4672 MonoBasicBlock
*bb
, *first_bb
;
4675 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4676 * can be executed anytime. It should be run before decompose_long
4680 * Create a dummy bblock and emit code into it so we can use the normal
4681 * code generation macros.
4683 cfg
->cbb
= mono_mempool_alloc0 ((cfg
)->mempool
, sizeof (MonoBasicBlock
));
4684 first_bb
= cfg
->cbb
;
4686 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4688 MonoInst
*prev
= NULL
;
4690 MonoInst
*iargs
[3];
4693 if (!bb
->has_array_access
)
4696 if (cfg
->verbose_level
> 3) mono_print_bb (bb
, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4698 cfg
->cbb
->code
= cfg
->cbb
->last_ins
= NULL
;
4704 for (ins
= bb
->code
; ins
; ins
= ins
->next
) {
4705 switch (ins
->opcode
) {
4707 NEW_LOAD_MEMBASE (cfg
, dest
, OP_LOADI4_MEMBASE
, ins
->dreg
, ins
->sreg1
,
4708 G_STRUCT_OFFSET (MonoArray
, max_length
));
4709 MONO_ADD_INS (cfg
->cbb
, dest
);
4711 case OP_BOUNDS_CHECK
:
4712 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg
, ins
->sreg1
, ins
->inst_imm
, ins
->sreg2
);
4715 if (cfg
->opt
& MONO_OPT_SHARED
) {
4716 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
4717 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], ins
->inst_newa_class
);
4718 MONO_INST_NEW (cfg
, iargs
[2], OP_MOVE
);
4719 iargs
[2]->dreg
= ins
->sreg1
;
4721 dest
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
4722 dest
->dreg
= ins
->dreg
;
4724 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, mono_array_class_get (ins
->inst_newa_class
, 1));
4727 NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4728 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
4729 MONO_INST_NEW (cfg
, iargs
[1], OP_MOVE
);
4730 iargs
[1]->dreg
= ins
->sreg1
;
4732 dest
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, iargs
);
4733 dest
->dreg
= ins
->dreg
;
4737 NEW_LOAD_MEMBASE (cfg
, dest
, OP_LOADI4_MEMBASE
, ins
->dreg
,
4738 ins
->sreg1
, G_STRUCT_OFFSET (MonoString
, length
));
4739 MONO_ADD_INS (cfg
->cbb
, dest
);
4745 g_assert (cfg
->cbb
== first_bb
);
4747 if (cfg
->cbb
->code
|| (cfg
->cbb
!= first_bb
)) {
4748 /* Replace the original instruction with the new code sequence */
4750 mono_replace_ins (cfg
, bb
, ins
, &prev
, first_bb
, cfg
->cbb
);
4751 first_bb
->code
= first_bb
->last_ins
= NULL
;
4752 first_bb
->in_count
= first_bb
->out_count
= 0;
4753 cfg
->cbb
= first_bb
;
4760 if (cfg
->verbose_level
> 3) mono_print_bb (bb
, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
#ifdef MONO_ARCH_SOFT_FLOAT

/**
 * mono_decompose_soft_float:
 *
 *  Soft float support on ARM. We store each double value in a pair of integer vregs,
 * similar to long support on 32 bit platforms. 32 bit float values require special
 * handling when used as locals, arguments, and in calls.
 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 */
void
mono_decompose_soft_float (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		gboolean restart;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				const char *spec = INS_INFO (ins->opcode);

				/* Most fp operations are handled automatically by opcode emulation */

				switch (ins->opcode) {
				case OP_R8CONST: {
					DVal d;
					d.vald = *(double*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_R4CONST: {
					DVal d;
					/* We load the r8 value */
					d.vald = *(float*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_FMOVE:
					ins->opcode = OP_LMOVE;
					break;
				case OP_FGETLOW32:
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 1;
					break;
				case OP_FGETHIGH32:
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 2;
					break;
				case OP_SETFRET: {
					int reg = ins->sreg1;

					ins->opcode = OP_SETLRET;
					ins->dreg = -1;
					ins->sreg1 = reg + 1;
					ins->sreg2 = reg + 2;
					break;
				}
				case OP_LOADR8_MEMBASE:
					ins->opcode = OP_LOADI8_MEMBASE;
					break;
				case OP_STORER8_MEMBASE_REG:
					ins->opcode = OP_STOREI8_MEMBASE_REG;
					break;
				case OP_STORER4_MEMBASE_REG: {
					MonoInst *iargs [2];
					int addr_reg;

					/* Arg 1 is the double value */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					/* Arg 2 is the address to store to */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
					mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
					restart = TRUE;
					break;
				}
				case OP_LOADR4_MEMBASE: {
					MonoInst *iargs [1];
					MonoInst *conv;
					int addr_reg;

					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					break;
				}
				case OP_FCALL:
				case OP_FCALL_REG:
				case OP_FCALL_MEMBASE: {
					MonoCallInst *call = (MonoCallInst*)ins;
					if (call->signature->ret->type == MONO_TYPE_R4) {
						MonoCallInst *call2;
						MonoInst *iargs [1];
						MonoInst *conv;

						/* Convert the call into a call returning an int */
						MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
						memcpy (call2, call, sizeof (MonoCallInst));
						switch (ins->opcode) {
						case OP_FCALL:
							call2->inst.opcode = OP_CALL;
							break;
						case OP_FCALL_REG:
							call2->inst.opcode = OP_CALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							call2->inst.opcode = OP_CALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
						call2->inst.dreg = mono_alloc_ireg (cfg);
						MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);

						/* FIXME: Optimize this */

						/* Emit an r4->r8 conversion */
						EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
						conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
						conv->dreg = ins->dreg;
					} else {
						switch (ins->opcode) {
						case OP_FCALL:
							ins->opcode = OP_LCALL;
							break;
						case OP_FCALL_REG:
							ins->opcode = OP_LCALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							ins->opcode = OP_LCALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
					}
					break;
				}
				case OP_FCOMPARE: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call, *cmp, *br;

					/* Convert fcompare+fbcc to icall+icompare+beq */

					info = mono_find_jit_opcode_emulation (ins->next->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 0;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_INST_NEW (cfg, br, OP_IBNE_UN);
					br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
					br->inst_true_bb = ins->next->inst_true_bb;
					br->inst_false_bb = ins->next->inst_false_bb;
					MONO_ADD_INS (cfg->cbb, br);

					/* The call sequence might include fp ins */
					restart = TRUE;

					/* Skip fbcc or fccc */
					NULLIFY_INS (ins->next);
					break;
				}
				case OP_FCEQ:
				case OP_FCGT:
				case OP_FCGT_UN:
				case OP_FCLT:
				case OP_FCLT_UN: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call;

					/* Convert fccc to icall+icompare+iceq */

					info = mono_find_jit_opcode_emulation (ins->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
					MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);

					/* The call sequence might include fp ins */
					restart = TRUE;
					break;
				}
				case OP_CKFINITE: {
					MonoInst *iargs [2];
					MonoInst *call, *cmp;

					/* Convert to icall+icompare+cond_exc+move */

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 1;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");

					/* Do the assignment if the value is finite */
					MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);

					restart = TRUE;
					break;
				}
				default:
					if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
						mono_print_ins (ins);
						g_assert_not_reached ();
					}
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	}

	mono_decompose_long_opts (cfg);
}

#endif
5053 emit_stloc_ir (MonoCompile
*cfg
, MonoInst
**sp
, MonoMethodHeader
*header
, int n
)
5056 guint32 opcode
= mono_type_to_regmove (cfg
, header
->locals
[n
]);
5057 if ((opcode
== OP_MOVE
) && cfg
->cbb
->last_ins
== sp
[0] &&
5058 ((sp
[0]->opcode
== OP_ICONST
) || (sp
[0]->opcode
== OP_I8CONST
))) {
5059 /* Optimize reg-reg moves away */
5061 * Can't optimize other opcodes, since sp[0] might point to
5062 * the last ins of a decomposed opcode.
5064 sp
[0]->dreg
= (cfg
)->locals
[n
]->dreg
;
5066 EMIT_NEW_LOCSTORE (cfg
, ins
, n
, *sp
);
5071 * ldloca inhibits many optimizations so try to get rid of it in common
5074 static inline unsigned char *
5075 emit_optimized_ldloca_ir (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *end
, int size
)
5084 local
= read16 (ip
+ 2);
5088 if (ip
+ 6 < end
&& (ip
[0] == CEE_PREFIX1
) && (ip
[1] == CEE_INITOBJ
) && ip_in_bb (cfg
, cfg
->cbb
, ip
+ 1)) {
5089 gboolean skip
= FALSE
;
5091 /* From the INITOBJ case */
5092 token
= read32 (ip
+ 2);
5093 klass
= mini_get_class (cfg
->current_method
, token
, cfg
->generic_context
);
5094 CHECK_TYPELOAD (klass
);
5095 if (generic_class_is_reference_type (cfg
, klass
)) {
5096 MONO_EMIT_NEW_PCONST (cfg
, cfg
->locals
[local
]->dreg
, NULL
);
5097 } else if (MONO_TYPE_IS_REFERENCE (&klass
->byval_arg
)) {
5098 MONO_EMIT_NEW_PCONST (cfg
, cfg
->locals
[local
]->dreg
, NULL
);
5099 } else if (MONO_TYPE_ISSTRUCT (&klass
->byval_arg
)) {
5100 MONO_EMIT_NEW_VZERO (cfg
, cfg
->locals
[local
]->dreg
, klass
);
5113 is_exception_class (MonoClass
*class)
5116 if (class == mono_defaults
.exception_class
)
5118 class = class->parent
;
5124 * mono_method_to_ir:
5126 * Translate the .net IL into linear IR.
5129 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5130 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5131 guint inline_offset
, gboolean is_virtual_call
)
5133 MonoInst
*ins
, **sp
, **stack_start
;
5134 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5135 MonoMethod
*cmethod
, *method_definition
;
5136 MonoInst
**arg_array
;
5137 MonoMethodHeader
*header
;
5139 guint32 token
, ins_flag
;
5141 MonoClass
*constrained_call
= NULL
;
5142 unsigned char *ip
, *end
, *target
, *err_pos
;
5143 static double r8_0
= 0.0;
5144 MonoMethodSignature
*sig
;
5145 MonoGenericContext
*generic_context
= NULL
;
5146 MonoGenericContainer
*generic_container
= NULL
;
5147 MonoType
**param_types
;
5148 int i
, n
, start_new_bblock
, dreg
;
5149 int num_calls
= 0, inline_costs
= 0;
5150 int breakpoint_id
= 0;
5152 MonoBoolean security
, pinvoke
;
5153 MonoSecurityManager
* secman
= NULL
;
5154 MonoDeclSecurityActions actions
;
5155 GSList
*class_inits
= NULL
;
5156 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5159 /* serialization and xdomain stuff may need access to private fields and methods */
5160 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5161 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5162 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5163 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5164 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5165 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5167 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5169 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5170 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5171 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5172 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5174 image
= method
->klass
->image
;
5175 header
= mono_method_get_header (method
);
5176 generic_container
= mono_method_get_generic_container (method
);
5177 sig
= mono_method_signature (method
);
5178 num_args
= sig
->hasthis
+ sig
->param_count
;
5179 ip
= (unsigned char*)header
->code
;
5180 cfg
->cil_start
= ip
;
5181 end
= ip
+ header
->code_size
;
5182 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5184 method_definition
= method
;
5185 while (method_definition
->is_inflated
) {
5186 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5187 method_definition
= imethod
->declaring
;
5190 /* SkipVerification is not allowed if core-clr is enabled */
5191 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5193 dont_verify_stloc
= TRUE
;
5196 if (!dont_verify
&& mini_method_verify (cfg
, method_definition
))
5197 goto exception_exit
;
5199 if (mono_debug_using_mono_debugger ())
5200 cfg
->keep_cil_nops
= TRUE
;
5202 if (sig
->is_inflated
)
5203 generic_context
= mono_method_get_context (method
);
5204 else if (generic_container
)
5205 generic_context
= &generic_container
->context
;
5206 cfg
->generic_context
= generic_context
;
5208 if (!cfg
->generic_sharing_context
)
5209 g_assert (!sig
->has_type_parameters
);
5211 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5212 g_assert (method
->is_inflated
);
5213 g_assert (mono_method_get_context (method
)->method_inst
);
5215 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5216 g_assert (sig
->generic_param_count
);
5218 if (cfg
->method
== method
) {
5219 cfg
->real_offset
= 0;
5221 cfg
->real_offset
= inline_offset
;
5224 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5225 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5227 cfg
->current_method
= method
;
5229 if (cfg
->verbose_level
> 2)
5230 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5232 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5234 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5235 for (n
= 0; n
< sig
->param_count
; ++n
)
5236 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5237 cfg
->arg_types
= param_types
;
5239 dont_inline
= g_list_prepend (dont_inline
, method
);
5240 if (cfg
->method
== method
) {
5242 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5243 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5246 NEW_BBLOCK (cfg
, start_bblock
);
5247 cfg
->bb_entry
= start_bblock
;
5248 start_bblock
->cil_code
= NULL
;
5249 start_bblock
->cil_length
= 0;
5252 NEW_BBLOCK (cfg
, end_bblock
);
5253 cfg
->bb_exit
= end_bblock
;
5254 end_bblock
->cil_code
= NULL
;
5255 end_bblock
->cil_length
= 0;
5256 g_assert (cfg
->num_bblocks
== 2);
5258 arg_array
= cfg
->args
;
5260 if (header
->num_clauses
) {
5261 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5262 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5264 /* handle exception clauses */
5265 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5266 MonoBasicBlock
*try_bb
;
5267 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5268 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5269 try_bb
->real_offset
= clause
->try_offset
;
5270 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5271 tblock
->real_offset
= clause
->handler_offset
;
5272 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5274 link_bblock (cfg
, try_bb
, tblock
);
5276 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5277 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5279 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5280 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5281 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5282 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5283 MONO_ADD_INS (tblock
, ins
);
5285 /* todo: is a fault block unsafe to optimize? */
5286 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5287 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5291 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5293 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5295 /* catch and filter blocks get the exception object on the stack */
5296 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5297 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5298 MonoInst
*dummy_use
;
5300 /* mostly like handle_stack_args (), but just sets the input args */
5301 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5302 tblock
->in_scount
= 1;
5303 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5304 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5307 * Add a dummy use for the exvar so its liveness info will be
5311 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5313 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5314 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5315 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5316 tblock
->real_offset
= clause
->data
.filter_offset
;
5317 tblock
->in_scount
= 1;
5318 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5319 /* The filter block shares the exvar with the handler block */
5320 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5321 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5322 MONO_ADD_INS (tblock
, ins
);
5326 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5327 clause
->data
.catch_class
&&
5328 cfg
->generic_sharing_context
&&
5329 mono_class_check_context_used (clause
->data
.catch_class
)) {
5331 * In shared generic code with catch
5332 * clauses containing type variables
5333 * the exception handling code has to
5334 * be able to get to the rgctx.
5335 * Therefore we have to make sure that
5336 * the vtable/mrgctx argument (for
5337 * static or generic methods) or the
5338 * "this" argument (for non-static
5339 * methods) are live.
5341 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5342 mini_method_get_context (method
)->method_inst
||
5343 method
->klass
->valuetype
) {
5344 mono_get_vtable_var (cfg
);
5346 MonoInst
*dummy_use
;
5348 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5353 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5354 cfg
->cbb
= start_bblock
;
5355 cfg
->args
= arg_array
;
5356 mono_save_args (cfg
, sig
, inline_args
);
5359 /* FIRST CODE BLOCK */
5360 NEW_BBLOCK (cfg
, bblock
);
5361 bblock
->cil_code
= ip
;
5365 ADD_BBLOCK (cfg
, bblock
);
5367 if (cfg
->method
== method
) {
5368 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5369 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5370 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5371 MONO_ADD_INS (bblock
, ins
);
5375 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5376 secman
= mono_security_manager_get_methods ();
5378 security
= (secman
&& mono_method_has_declsec (method
));
5379 /* at this point having security doesn't mean we have any code to generate */
5380 if (security
&& (cfg
->method
== method
)) {
5381 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5382 * And we do not want to enter the next section (with allocation) if we
5383 * have nothing to generate */
5384 security
= mono_declsec_get_demands (method
, &actions
);
5387 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5388 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5390 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5391 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5392 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5394 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5395 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5399 mono_custom_attrs_free (custom
);
5402 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5403 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5407 mono_custom_attrs_free (custom
);
5410 /* not a P/Invoke after all */
5415 if ((header
->init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5416 /* we use a separate basic block for the initialization code */
5417 NEW_BBLOCK (cfg
, init_localsbb
);
5418 cfg
->bb_init
= init_localsbb
;
5419 init_localsbb
->real_offset
= cfg
->real_offset
;
5420 start_bblock
->next_bb
= init_localsbb
;
5421 init_localsbb
->next_bb
= bblock
;
5422 link_bblock (cfg
, start_bblock
, init_localsbb
);
5423 link_bblock (cfg
, init_localsbb
, bblock
);
5425 cfg
->cbb
= init_localsbb
;
5427 start_bblock
->next_bb
= bblock
;
5428 link_bblock (cfg
, start_bblock
, bblock
);
5431 /* at this point we know, if security is TRUE, that some code needs to be generated */
5432 if (security
&& (cfg
->method
== method
)) {
5435 mono_jit_stats
.cas_demand_generation
++;
5437 if (actions
.demand
.blob
) {
5438 /* Add code for SecurityAction.Demand */
5439 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5440 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5441 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5442 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5444 if (actions
.noncasdemand
.blob
) {
5445 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5446 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5447 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5448 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5449 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5450 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5452 if (actions
.demandchoice
.blob
) {
5453 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5454 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5455 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5456 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5457 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
5461 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5463 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
5466 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
5467 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
5468 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5469 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5470 if (!(method
->klass
&& method
->klass
->image
&&
5471 mono_security_core_clr_is_platform_image (method
->klass
->image
))) {
5472 emit_throw_method_access_exception (cfg
, method
, wrapped
, bblock
, ip
);
5478 if (header
->code_size
== 0)
5481 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
5486 if (cfg
->method
== method
)
5487 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
5489 for (n
= 0; n
< header
->num_locals
; ++n
) {
5490 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
5495 /* We force the vtable variable here for all shared methods
5496 for the possibility that they might show up in a stack
5497 trace where their exact instantiation is needed. */
5498 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
5499 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5500 mini_method_get_context (method
)->method_inst
||
5501 method
->klass
->valuetype
) {
5502 mono_get_vtable_var (cfg
);
5504 /* FIXME: Is there a better way to do this?
5505 We need the variable live for the duration
5506 of the whole method. */
5507 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
5511 /* add a check for this != NULL to inlined methods */
5512 if (is_virtual_call
) {
5515 NEW_ARGLOAD (cfg
, arg_ins
, 0);
5516 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
5517 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
5518 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, arg_ins
->dreg
);
5519 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, arg_ins
->dreg
);
5522 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5523 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
5526 start_new_bblock
= 0;
5530 if (cfg
->method
== method
)
5531 cfg
->real_offset
= ip
- header
->code
;
5533 cfg
->real_offset
= inline_offset
;
5538 if (start_new_bblock
) {
5539 bblock
->cil_length
= ip
- bblock
->cil_code
;
5540 if (start_new_bblock
== 2) {
5541 g_assert (ip
== tblock
->cil_code
);
5543 GET_BBLOCK (cfg
, tblock
, ip
);
5545 bblock
->next_bb
= tblock
;
5548 start_new_bblock
= 0;
5549 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5550 if (cfg
->verbose_level
> 3)
5551 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5552 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5556 g_slist_free (class_inits
);
5559 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
5560 link_bblock (cfg
, bblock
, tblock
);
5561 if (sp
!= stack_start
) {
5562 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
5564 CHECK_UNVERIFIABLE (cfg
);
5566 bblock
->next_bb
= tblock
;
5569 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5570 if (cfg
->verbose_level
> 3)
5571 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5572 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5575 g_slist_free (class_inits
);
5580 bblock
->real_offset
= cfg
->real_offset
;
5582 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
5583 guint32 cil_offset
= ip
- header
->code
;
5584 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
5586 /* TODO: Use an increment here */
5587 #if defined(__i386__)
5588 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
5589 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
5591 MONO_ADD_INS (cfg
->cbb
, ins
);
5593 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
5594 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
5598 if (cfg
->verbose_level
> 3)
5599 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
5603 if (cfg
->keep_cil_nops
)
5604 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
5606 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5608 MONO_ADD_INS (bblock
, ins
);
5611 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5613 MONO_ADD_INS (bblock
, ins
);
5619 CHECK_STACK_OVF (1);
5620 n
= (*ip
)-CEE_LDARG_0
;
5622 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5630 CHECK_STACK_OVF (1);
5631 n
= (*ip
)-CEE_LDLOC_0
;
5633 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5642 n
= (*ip
)-CEE_STLOC_0
;
5645 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
5647 emit_stloc_ir (cfg
, sp
, header
, n
);
5654 CHECK_STACK_OVF (1);
5657 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5663 CHECK_STACK_OVF (1);
5666 NEW_ARGLOADA (cfg
, ins
, n
);
5667 MONO_ADD_INS (cfg
->cbb
, ins
);
5677 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
5679 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5684 CHECK_STACK_OVF (1);
5687 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5691 case CEE_LDLOCA_S
: {
5692 unsigned char *tmp_ip
;
5694 CHECK_STACK_OVF (1);
5695 CHECK_LOCAL (ip
[1]);
5697 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
5703 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
5712 CHECK_LOCAL (ip
[1]);
5713 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
5715 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
5720 CHECK_STACK_OVF (1);
5721 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
5722 ins
->type
= STACK_OBJ
;
5727 CHECK_STACK_OVF (1);
5728 EMIT_NEW_ICONST (cfg
, ins
, -1);
5741 CHECK_STACK_OVF (1);
5742 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
5748 CHECK_STACK_OVF (1);
5750 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
5756 CHECK_STACK_OVF (1);
5757 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
5763 CHECK_STACK_OVF (1);
5764 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
5765 ins
->type
= STACK_I8
;
5766 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
5768 ins
->inst_l
= (gint64
)read64 (ip
);
5769 MONO_ADD_INS (bblock
, ins
);
5775 /* FIXME: we should really allocate this only late in the compilation process */
5776 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
5778 CHECK_STACK_OVF (1);
5779 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
5780 ins
->type
= STACK_R8
;
5781 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5785 MONO_ADD_INS (bblock
, ins
);
5793 /* FIXME: we should really allocate this only late in the compilation process */
5794 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
5796 CHECK_STACK_OVF (1);
5797 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
5798 ins
->type
= STACK_R8
;
5799 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5803 MONO_ADD_INS (bblock
, ins
);
5810 MonoInst
*temp
, *store
;
5812 CHECK_STACK_OVF (1);
5816 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
5817 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
5819 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5822 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5835 if (sp
[0]->type
== STACK_R8
)
5836 /* we need to pop the value from the x86 FP stack */
5837 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
5846 if (stack_start
!= sp
)
5848 token
= read32 (ip
+ 1);
5849 /* FIXME: check the signature matches */
5850 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5855 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
5856 GENERIC_SHARING_FAILURE (CEE_JMP
);
5858 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5859 CHECK_CFG_EXCEPTION
;
5863 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
5866 /* Handle tail calls similarly to calls */
5867 n
= fsig
->param_count
+ fsig
->hasthis
;
5869 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
5870 call
->method
= cmethod
;
5871 call
->tail_call
= TRUE
;
5872 call
->signature
= mono_method_signature (cmethod
);
5873 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
5874 call
->inst
.inst_p0
= cmethod
;
5875 for (i
= 0; i
< n
; ++i
)
5876 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
5878 mono_arch_emit_call (cfg
, call
);
5879 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
5882 for (i
= 0; i
< num_args
; ++i
)
5883 /* Prevent arguments from being optimized away */
5884 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
5886 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
5887 ins
= (MonoInst
*)call
;
5888 ins
->inst_p0
= cmethod
;
5889 MONO_ADD_INS (bblock
, ins
);
5893 start_new_bblock
= 1;
5898 case CEE_CALLVIRT
: {
5899 MonoInst
*addr
= NULL
;
5900 MonoMethodSignature
*fsig
= NULL
;
5902 int virtual = *ip
== CEE_CALLVIRT
;
5903 int calli
= *ip
== CEE_CALLI
;
5904 gboolean pass_imt_from_rgctx
= FALSE
;
5905 MonoInst
*imt_arg
= NULL
;
5906 gboolean pass_vtable
= FALSE
;
5907 gboolean pass_mrgctx
= FALSE
;
5908 MonoInst
*vtable_arg
= NULL
;
5909 gboolean check_this
= FALSE
;
5912 token
= read32 (ip
+ 1);
5919 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
5920 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
5922 fsig
= mono_metadata_parse_signature (image
, token
);
5924 n
= fsig
->param_count
+ fsig
->hasthis
;
5926 MonoMethod
*cil_method
;
5928 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
5929 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
5930 cil_method
= cmethod
;
5931 } else if (constrained_call
) {
5932 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
5934 * This is needed since get_method_constrained can't find
5935 * the method in klass representing a type var.
5936 * The type var is guaranteed to be a reference type in this
5939 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5940 cil_method
= cmethod
;
5941 g_assert (!cmethod
->klass
->valuetype
);
5943 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
5946 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5947 cil_method
= cmethod
;
5952 if (!dont_verify
&& !cfg
->skip_visibility
) {
5953 MonoMethod
*target_method
= cil_method
;
5954 if (method
->is_inflated
) {
5955 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
5957 if (!mono_method_can_access_method (method_definition
, target_method
) &&
5958 !mono_method_can_access_method (method
, cil_method
))
5959 METHOD_ACCESS_FAILURE
;
5962 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
5963 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
5965 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
5966 /* MS.NET seems to silently convert this to a callvirt */
5969 if (!cmethod
->klass
->inited
)
5970 if (!mono_class_init (cmethod
->klass
))
5973 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
5974 mini_class_is_system_array (cmethod
->klass
)) {
5975 array_rank
= cmethod
->klass
->rank
;
5976 fsig
= mono_method_signature (cmethod
);
5978 if (mono_method_signature (cmethod
)->pinvoke
) {
5979 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
5980 check_for_pending_exc
, FALSE
);
5981 fsig
= mono_method_signature (wrapper
);
5982 } else if (constrained_call
) {
5983 fsig
= mono_method_signature (cmethod
);
5985 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
5989 mono_save_token_info (cfg
, image
, token
, cil_method
);
5991 n
= fsig
->param_count
+ fsig
->hasthis
;
5993 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
5994 if (check_linkdemand (cfg
, method
, cmethod
))
5996 CHECK_CFG_EXCEPTION
;
5999 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
6000 g_assert_not_reached ();
6003 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
6006 if (!cfg
->generic_sharing_context
&& cmethod
)
6007 g_assert (!mono_method_check_context_used (cmethod
));
6011 //g_assert (!virtual || fsig->hasthis);
6015 if (constrained_call
) {
6017 * We have the `constrained.' prefix opcode.
6019 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
6023 * The type parameter is instantiated as a valuetype,
6024 * but that type doesn't override the method we're
6025 * calling, so we need to box `this'.
6027 dreg
= alloc_dreg (cfg
, STACK_VTYPE
);
6028 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADV_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6029 ins
->klass
= constrained_call
;
6030 sp
[0] = handle_box (cfg
, ins
, constrained_call
);
6031 } else if (!constrained_call
->valuetype
) {
6032 int dreg
= alloc_preg (cfg
);
6035 * The type parameter is instantiated as a reference
6036 * type. We have a managed pointer on the stack, so
6037 * we need to dereference it here.
6039 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6040 ins
->type
= STACK_OBJ
;
6042 } else if (cmethod
->klass
->valuetype
)
6044 constrained_call
= NULL
;
6047 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6051 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6052 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6053 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6054 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6055 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6058 * Pass vtable iff target method might
6059 * be shared, which means that sharing
6060 * is enabled for its class and its
6061 * context is sharable (and it's not a
6064 if (sharing_enabled
&& context_sharable
&&
6065 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6069 if (cmethod
&& mini_method_get_context (cmethod
) &&
6070 mini_method_get_context (cmethod
)->method_inst
) {
6071 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6072 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6073 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6075 g_assert (!pass_vtable
);
6077 if (sharing_enabled
&& context_sharable
)
6081 if (cfg
->generic_sharing_context
&& cmethod
) {
6082 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6084 context_used
= mono_method_check_context_used (cmethod
);
6086 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6087 /* Generic method interface
6088 calls are resolved via a
6089 helper function and don't
6091 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6092 pass_imt_from_rgctx
= TRUE
;
6096 * If a shared method calls another
6097 * shared method then the caller must
6098 * have a generic sharing context
6099 * because the magic trampoline
6100 * requires it. FIXME: We shouldn't
6101 * have to force the vtable/mrgctx
6102 * variable here. Instead there
6103 * should be a flag in the cfg to
6104 * request a generic sharing context.
6107 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6108 mono_get_vtable_var (cfg
);
6113 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6115 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6117 CHECK_TYPELOAD (cmethod
->klass
);
6118 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6123 g_assert (!vtable_arg
);
6126 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6128 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
6131 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6132 MONO_METHOD_IS_FINAL (cmethod
)) {
6139 if (pass_imt_from_rgctx
) {
6140 g_assert (!pass_vtable
);
6143 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6144 cmethod
, MONO_RGCTX_INFO_METHOD
);
6150 MONO_INST_NEW (cfg
, check
, OP_CHECK_THIS
);
6151 check
->sreg1
= sp
[0]->dreg
;
6152 MONO_ADD_INS (cfg
->cbb
, check
);
6155 /* Calling virtual generic methods */
6156 if (cmethod
&& virtual &&
6157 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6158 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6159 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6160 mono_method_signature (cmethod
)->generic_param_count
) {
6161 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6162 MonoInst
*iargs
[4];
6164 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6166 /* Prevent inlining of methods that contain indirect calls */
6169 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6170 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
) {
6171 g_assert (!imt_arg
);
6173 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6174 cmethod
, MONO_RGCTX_INFO_METHOD
);
6177 g_assert (cmethod
->is_inflated
);
6178 EMIT_NEW_METHODCONST (cfg
, imt_arg
, cmethod
);
6180 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
);
6184 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6185 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6186 MONO_ADD_INS (bblock
, store
);
6188 /* FIXME: This should be a managed pointer */
6189 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6191 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6193 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6194 cmethod
, MONO_RGCTX_INFO_METHOD
);
6195 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6196 addr
= mono_emit_jit_icall (cfg
,
6197 mono_helper_compile_generic_method
, iargs
);
6199 EMIT_NEW_METHODCONST (cfg
, iargs
[1], cmethod
);
6200 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6201 addr
= mono_emit_jit_icall (cfg
, mono_helper_compile_generic_method
, iargs
);
6204 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6206 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6209 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6218 /* FIXME: runtime generic context pointer for jumps? */
6219 /* FIXME: handle this for generic sharing eventually */
6220 if ((ins_flag
& MONO_INST_TAILCALL
) && !cfg
->generic_sharing_context
&& !vtable_arg
&& cmethod
&& (*ip
== CEE_CALL
) &&
6221 (mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)))) {
6224 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6227 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6228 call
->tail_call
= TRUE
;
6229 call
->method
= cmethod
;
6230 call
->signature
= mono_method_signature (cmethod
);
6233 /* Handle tail calls similarly to calls */
6234 call
->inst
.opcode
= OP_TAILCALL
;
6236 mono_arch_emit_call (cfg
, call
);
6239 * We implement tail calls by storing the actual arguments into the
6240 * argument variables, then emitting a CEE_JMP.
6242 for (i
= 0; i
< n
; ++i
) {
6243 /* Prevent argument from being register allocated */
6244 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6245 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6249 ins
= (MonoInst
*)call
;
6250 ins
->inst_p0
= cmethod
;
6251 ins
->inst_p1
= arg_array
[0];
6252 MONO_ADD_INS (bblock
, ins
);
6253 link_bblock (cfg
, bblock
, end_bblock
);
6254 start_new_bblock
= 1;
6255 /* skip CEE_RET as well */
6261 /* Conversion to a JIT intrinsic */
6262 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6263 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6264 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6275 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6276 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6277 mono_method_check_inlining (cfg
, cmethod
) &&
6278 !g_list_find (dont_inline
, cmethod
)) {
6280 gboolean allways
= FALSE
;
6282 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6283 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6284 /* Prevent inlining of methods that call wrappers */
6286 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6290 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, allways
))) {
6292 cfg
->real_offset
+= 5;
6295 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6296 /* *sp is already set by inline_method */
6299 inline_costs
+= costs
;
6305 inline_costs
+= 10 * num_calls
++;
6307 /* Tail recursion elimination */
6308 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6309 gboolean has_vtargs
= FALSE
;
6312 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6315 /* keep it simple */
6316 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6317 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6322 for (i
= 0; i
< n
; ++i
)
6323 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6324 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6325 MONO_ADD_INS (bblock
, ins
);
6326 tblock
= start_bblock
->out_bb
[0];
6327 link_bblock (cfg
, bblock
, tblock
);
6328 ins
->inst_target_bb
= tblock
;
6329 start_new_bblock
= 1;
6331 /* skip the CEE_RET, too */
6332 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6342 /* Generic sharing */
6343 /* FIXME: only do this for generic methods if
6344 they are not shared! */
6345 if (context_used
&& !imt_arg
&& !array_rank
&&
6346 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6347 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6348 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6349 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
6352 g_assert (cfg
->generic_sharing_context
&& cmethod
);
6356 * We are compiling a call to a
6357 * generic method from shared code,
6358 * which means that we have to look up
6359 * the method in the rgctx and do an
6362 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
6365 /* Indirect calls */
6367 g_assert (!imt_arg
);
6369 if (*ip
== CEE_CALL
)
6370 g_assert (context_used
);
6371 else if (*ip
== CEE_CALLI
)
6372 g_assert (!vtable_arg
);
6374 /* FIXME: what the hell is this??? */
6375 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
6376 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
6378 /* Prevent inlining of methods with indirect calls */
6382 #ifdef MONO_ARCH_RGCTX_REG
6384 int rgctx_reg
= mono_alloc_preg (cfg
);
6386 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
6387 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6388 call
= (MonoCallInst
*)ins
;
6389 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
6390 cfg
->uses_rgctx_reg
= TRUE
;
6395 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6397 * Instead of emitting an indirect call, emit a direct call
6398 * with the contents of the aotconst as the patch info.
6400 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
6403 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6406 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6407 if (fsig
->pinvoke
&& !fsig
->ret
->byref
) {
6411 * Native code might return non register sized integers
6412 * without initializing the upper bits.
6414 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
6415 case OP_LOADI1_MEMBASE
:
6416 widen_op
= OP_ICONV_TO_I1
;
6418 case OP_LOADU1_MEMBASE
:
6419 widen_op
= OP_ICONV_TO_U1
;
6421 case OP_LOADI2_MEMBASE
:
6422 widen_op
= OP_ICONV_TO_I2
;
6424 case OP_LOADU2_MEMBASE
:
6425 widen_op
= OP_ICONV_TO_U2
;
6431 if (widen_op
!= -1) {
6432 int dreg
= alloc_preg (cfg
);
6435 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
6436 widen
->type
= ins
->type
;
6453 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
6454 if (sp
[fsig
->param_count
]->type
== STACK_OBJ
) {
6455 MonoInst
*iargs
[2];
6458 iargs
[1] = sp
[fsig
->param_count
];
6460 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
6463 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
6464 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, sp
[fsig
->param_count
]->dreg
);
6465 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
6466 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6468 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
6471 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
6472 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
6473 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
6476 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6479 g_assert_not_reached ();
6487 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
6489 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6500 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
6502 } else if (imt_arg
) {
6503 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, imt_arg
);
6505 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, NULL
);
6508 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6516 if (cfg
->method
!= method
) {
6517 /* return from inlined method */
6519 * If in_count == 0, that means the ret is unreachable due to
6520 * being preceeded by a throw. In that case, inline_method () will
6521 * handle setting the return value
6522 * (test case: test_0_inline_throw ()).
6524 if (return_var
&& cfg
->cbb
->in_count
) {
6528 //g_assert (returnvar != -1);
6529 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
6530 cfg
->ret_var_set
= TRUE
;
6534 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
6536 g_assert (!return_var
);
6539 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
6542 if (!cfg
->vret_addr
) {
6545 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
6547 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
6549 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
6550 ins
->klass
= mono_class_from_mono_type (ret_type
);
6553 #ifdef MONO_ARCH_SOFT_FLOAT
6554 if (!ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
6555 MonoInst
*iargs
[1];
6559 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
6560 mono_arch_emit_setret (cfg
, method
, conv
);
6562 mono_arch_emit_setret (cfg
, method
, *sp
);
6565 mono_arch_emit_setret (cfg
, method
, *sp
);
6570 if (sp
!= stack_start
)
6572 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6574 ins
->inst_target_bb
= end_bblock
;
6575 MONO_ADD_INS (bblock
, ins
);
6576 link_bblock (cfg
, bblock
, end_bblock
);
6577 start_new_bblock
= 1;
6581 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6583 target
= ip
+ 1 + (signed char)(*ip
);
6585 GET_BBLOCK (cfg
, tblock
, target
);
6586 link_bblock (cfg
, bblock
, tblock
);
6587 ins
->inst_target_bb
= tblock
;
6588 if (sp
!= stack_start
) {
6589 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6591 CHECK_UNVERIFIABLE (cfg
);
6593 MONO_ADD_INS (bblock
, ins
);
6594 start_new_bblock
= 1;
6595 inline_costs
+= BRANCH_COST
;
6609 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
6611 target
= ip
+ 1 + *(signed char*)ip
;
6617 inline_costs
+= BRANCH_COST
;
6621 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6624 target
= ip
+ 4 + (gint32
)read32(ip
);
6626 GET_BBLOCK (cfg
, tblock
, target
);
6627 link_bblock (cfg
, bblock
, tblock
);
6628 ins
->inst_target_bb
= tblock
;
6629 if (sp
!= stack_start
) {
6630 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6632 CHECK_UNVERIFIABLE (cfg
);
6635 MONO_ADD_INS (bblock
, ins
);
6637 start_new_bblock
= 1;
6638 inline_costs
+= BRANCH_COST
;
6645 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
6646 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
6647 guint32 opsize
= is_short
? 1 : 4;
6649 CHECK_OPSIZE (opsize
);
6651 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
6654 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
6659 GET_BBLOCK (cfg
, tblock
, target
);
6660 link_bblock (cfg
, bblock
, tblock
);
6661 GET_BBLOCK (cfg
, tblock
, ip
);
6662 link_bblock (cfg
, bblock
, tblock
);
6664 if (sp
!= stack_start
) {
6665 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6666 CHECK_UNVERIFIABLE (cfg
);
6669 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
6670 cmp
->sreg1
= sp
[0]->dreg
;
6671 type_from_op (cmp
, sp
[0], NULL
);
6674 #if SIZEOF_REGISTER == 4
6675 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
6676 /* Convert it to OP_LCOMPARE */
6677 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6678 ins
->type
= STACK_I8
;
6679 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6681 MONO_ADD_INS (bblock
, ins
);
6682 cmp
->opcode
= OP_LCOMPARE
;
6683 cmp
->sreg2
= ins
->dreg
;
6686 MONO_ADD_INS (bblock
, cmp
);
6688 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
6689 type_from_op (ins
, sp
[0], NULL
);
6690 MONO_ADD_INS (bblock
, ins
);
6691 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
6692 GET_BBLOCK (cfg
, tblock
, target
);
6693 ins
->inst_true_bb
= tblock
;
6694 GET_BBLOCK (cfg
, tblock
, ip
);
6695 ins
->inst_false_bb
= tblock
;
6696 start_new_bblock
= 2;
6699 inline_costs
+= BRANCH_COST
;
6714 MONO_INST_NEW (cfg
, ins
, *ip
);
6716 target
= ip
+ 4 + (gint32
)read32(ip
);
6722 inline_costs
+= BRANCH_COST
;
6726 MonoBasicBlock
**targets
;
6727 MonoBasicBlock
*default_bblock
;
6728 MonoJumpInfoBBTable
*table
;
6729 int offset_reg
= alloc_preg (cfg
);
6730 int target_reg
= alloc_preg (cfg
);
6731 int table_reg
= alloc_preg (cfg
);
6732 int sum_reg
= alloc_preg (cfg
);
6733 gboolean use_op_switch
;
6737 n
= read32 (ip
+ 1);
6740 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
6744 CHECK_OPSIZE (n
* sizeof (guint32
));
6745 target
= ip
+ n
* sizeof (guint32
);
6747 GET_BBLOCK (cfg
, default_bblock
, target
);
6749 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
6750 for (i
= 0; i
< n
; ++i
) {
6751 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
6752 targets
[i
] = tblock
;
6756 if (sp
!= stack_start
) {
6758 * Link the current bb with the targets as well, so handle_stack_args
6759 * will set their in_stack correctly.
6761 link_bblock (cfg
, bblock
, default_bblock
);
6762 for (i
= 0; i
< n
; ++i
)
6763 link_bblock (cfg
, bblock
, targets
[i
]);
6765 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6767 CHECK_UNVERIFIABLE (cfg
);
6770 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
6771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
6774 for (i
= 0; i
< n
; ++i
)
6775 link_bblock (cfg
, bblock
, targets
[i
]);
6777 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
6778 table
->table
= targets
;
6779 table
->table_size
= n
;
6781 use_op_switch
= FALSE
;
6783 /* ARM implements SWITCH statements differently */
6784 /* FIXME: Make it use the generic implementation */
6785 if (!cfg
->compile_aot
)
6786 use_op_switch
= TRUE
;
6789 if (use_op_switch
) {
6790 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
6791 ins
->sreg1
= src1
->dreg
;
6792 ins
->inst_p0
= table
;
6793 ins
->inst_many_bb
= targets
;
6794 ins
->klass
= GUINT_TO_POINTER (n
);
6795 MONO_ADD_INS (cfg
->cbb
, ins
);
6797 if (sizeof (gpointer
) == 8)
6798 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
6800 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
6802 #if SIZEOF_REGISTER == 8
6803 /* The upper word might not be zero, and we add it to a 64 bit address later */
6804 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
6807 if (cfg
->compile_aot
) {
6808 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
6810 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
6811 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
6812 ins
->inst_p0
= table
;
6813 ins
->dreg
= table_reg
;
6814 MONO_ADD_INS (cfg
->cbb
, ins
);
6817 /* FIXME: Use load_memindex */
6818 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
6819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
6820 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
6822 start_new_bblock
= 1;
6823 inline_costs
+= (BRANCH_COST
* 2);
6843 dreg
= alloc_freg (cfg
);
6846 dreg
= alloc_lreg (cfg
);
6849 dreg
= alloc_preg (cfg
);
6852 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
6853 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
6854 ins
->flags
|= ins_flag
;
6856 MONO_ADD_INS (bblock
, ins
);
6871 #if HAVE_WRITE_BARRIERS
6872 if (*ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0))) {
6873 /* insert call to write barrier */
6874 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
6875 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
6882 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
6883 ins
->flags
|= ins_flag
;
6885 MONO_ADD_INS (bblock
, ins
);
6893 MONO_INST_NEW (cfg
, ins
, (*ip
));
6895 ins
->sreg1
= sp
[0]->dreg
;
6896 ins
->sreg2
= sp
[1]->dreg
;
6897 type_from_op (ins
, sp
[0], sp
[1]);
6899 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
6901 /* Use the immediate opcodes if possible */
6902 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
6903 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
6904 if (imm_opcode
!= -1) {
6905 ins
->opcode
= imm_opcode
;
6906 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
6909 sp
[1]->opcode
= OP_NOP
;
6913 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
6916 mono_decompose_opcode (cfg
, ins
);
6933 MONO_INST_NEW (cfg
, ins
, (*ip
));
6935 ins
->sreg1
= sp
[0]->dreg
;
6936 ins
->sreg2
= sp
[1]->dreg
;
6937 type_from_op (ins
, sp
[0], sp
[1]);
6939 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
6940 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
6942 /* FIXME: Pass opcode to is_inst_imm */
6944 /* Use the immediate opcodes if possible */
6945 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
6948 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
6949 if (imm_opcode
!= -1) {
6950 ins
->opcode
= imm_opcode
;
6951 if (sp
[1]->opcode
== OP_I8CONST
) {
6952 #if SIZEOF_REGISTER == 8
6953 ins
->inst_imm
= sp
[1]->inst_l
;
6955 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
6956 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
6960 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
6963 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6964 if (sp
[1]->next
== NULL
)
6965 sp
[1]->opcode
= OP_NOP
;
6968 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
6971 mono_decompose_opcode (cfg
, ins
);
6984 case CEE_CONV_OVF_I8
:
6985 case CEE_CONV_OVF_U8
:
6989 /* Special case this earlier so we have long constants in the IR */
6990 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
6991 int data
= sp
[-1]->inst_c0
;
6992 sp
[-1]->opcode
= OP_I8CONST
;
6993 sp
[-1]->type
= STACK_I8
;
6994 #if SIZEOF_REGISTER == 8
6995 if ((*ip
) == CEE_CONV_U8
)
6996 sp
[-1]->inst_c0
= (guint32
)data
;
6998 sp
[-1]->inst_c0
= data
;
7000 sp
[-1]->inst_ls_word
= data
;
7001 if ((*ip
) == CEE_CONV_U8
)
7002 sp
[-1]->inst_ms_word
= 0;
7004 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7006 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7013 case CEE_CONV_OVF_I4
:
7014 case CEE_CONV_OVF_I1
:
7015 case CEE_CONV_OVF_I2
:
7016 case CEE_CONV_OVF_I
:
7017 case CEE_CONV_OVF_U
:
7020 if (sp
[-1]->type
== STACK_R8
) {
7021 ADD_UNOP (CEE_CONV_OVF_I8
);
7028 case CEE_CONV_OVF_U1
:
7029 case CEE_CONV_OVF_U2
:
7030 case CEE_CONV_OVF_U4
:
7033 if (sp
[-1]->type
== STACK_R8
) {
7034 ADD_UNOP (CEE_CONV_OVF_U8
);
7041 case CEE_CONV_OVF_I1_UN
:
7042 case CEE_CONV_OVF_I2_UN
:
7043 case CEE_CONV_OVF_I4_UN
:
7044 case CEE_CONV_OVF_I8_UN
:
7045 case CEE_CONV_OVF_U1_UN
:
7046 case CEE_CONV_OVF_U2_UN
:
7047 case CEE_CONV_OVF_U4_UN
:
7048 case CEE_CONV_OVF_U8_UN
:
7049 case CEE_CONV_OVF_I_UN
:
7050 case CEE_CONV_OVF_U_UN
:
7060 case CEE_ADD_OVF_UN
:
7062 case CEE_MUL_OVF_UN
:
7064 case CEE_SUB_OVF_UN
:
7072 token
= read32 (ip
+ 1);
7073 klass
= mini_get_class (method
, token
, generic_context
);
7074 CHECK_TYPELOAD (klass
);
7076 if (generic_class_is_reference_type (cfg
, klass
)) {
7077 MonoInst
*store
, *load
;
7078 int dreg
= alloc_preg (cfg
);
7080 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7081 load
->flags
|= ins_flag
;
7082 MONO_ADD_INS (cfg
->cbb
, load
);
7084 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7085 store
->flags
|= ins_flag
;
7086 MONO_ADD_INS (cfg
->cbb
, store
);
7088 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7100 token
= read32 (ip
+ 1);
7101 klass
= mini_get_class (method
, token
, generic_context
);
7102 CHECK_TYPELOAD (klass
);
7104 /* Optimize the common ldobj+stloc combination */
7114 loc_index
= ip
[5] - CEE_STLOC_0
;
7121 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7122 CHECK_LOCAL (loc_index
);
7124 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7125 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7131 /* Optimize the ldobj+stobj combination */
7132 /* The reference case ends up being a load+store anyway */
7133 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 9) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7138 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7145 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7154 CHECK_STACK_OVF (1);
7156 n
= read32 (ip
+ 1);
7158 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7159 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7160 ins
->type
= STACK_OBJ
;
7163 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7164 MonoInst
*iargs
[1];
7166 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7167 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7169 if (cfg
->opt
& MONO_OPT_SHARED
) {
7170 MonoInst
*iargs
[3];
7172 if (cfg
->compile_aot
) {
7173 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7175 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7176 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7177 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7178 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7179 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7181 if (bblock
->out_of_line
) {
7182 MonoInst
*iargs
[2];
7184 if (image
== mono_defaults
.corlib
) {
7186 * Avoid relocations in AOT and save some space by using a
7187 * version of helper_ldstr specialized to mscorlib.
7189 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7190 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7192 /* Avoid creating the string object */
7193 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7194 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7195 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7199 if (cfg
->compile_aot
) {
7200 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7202 MONO_ADD_INS (bblock
, ins
);
7205 NEW_PCONST (cfg
, ins
, NULL
);
7206 ins
->type
= STACK_OBJ
;
7207 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7209 MONO_ADD_INS (bblock
, ins
);
7218 MonoInst
*iargs
[2];
7219 MonoMethodSignature
*fsig
;
7222 MonoInst
*vtable_arg
= NULL
;
7225 token
= read32 (ip
+ 1);
7226 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7229 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7231 mono_save_token_info (cfg
, image
, token
, cmethod
);
7233 if (!mono_class_init (cmethod
->klass
))
7236 if (cfg
->generic_sharing_context
)
7237 context_used
= mono_method_check_context_used (cmethod
);
7239 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7240 if (check_linkdemand (cfg
, method
, cmethod
))
7242 CHECK_CFG_EXCEPTION
;
7243 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7244 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7247 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7248 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7249 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7251 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7252 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7254 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
7258 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7259 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7261 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7263 CHECK_TYPELOAD (cmethod
->klass
);
7264 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7269 n
= fsig
->param_count
;
7273 * Generate smaller code for the common newobj <exception> instruction in
7274 * argument checking code.
7276 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7277 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7278 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7279 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7280 MonoInst
*iargs
[3];
7282 g_assert (!vtable_arg
);
7286 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
7289 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
7293 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
7298 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
7301 g_assert_not_reached ();
7309 /* move the args to allow room for 'this' in the first position */
7315 /* check_call_signature () requires sp[0] to be set */
7316 this_ins
.type
= STACK_OBJ
;
7318 if (check_call_signature (cfg
, fsig
, sp
))
7323 if (mini_class_is_system_array (cmethod
->klass
)) {
7324 g_assert (!vtable_arg
);
7327 *sp
= emit_get_rgctx_method (cfg
, context_used
,
7328 cmethod
, MONO_RGCTX_INFO_METHOD
);
7330 EMIT_NEW_METHODCONST (cfg
, *sp
, cmethod
);
7333 /* Avoid varargs in the common case */
7334 if (fsig
->param_count
== 1)
7335 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
7336 else if (fsig
->param_count
== 2)
7337 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
7339 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
7340 } else if (cmethod
->string_ctor
) {
7341 g_assert (!context_used
);
7342 g_assert (!vtable_arg
);
7343 /* we simply pass a null pointer */
7344 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
7345 /* now call the string ctor */
7346 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
);
7348 MonoInst
* callvirt_this_arg
= NULL
;
7350 if (cmethod
->klass
->valuetype
) {
7351 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
7352 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
7353 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
7358 * The code generated by mini_emit_virtual_call () expects
7359 * iargs [0] to be a boxed instance, but luckily the vcall
7360 * will be transformed into a normal call there.
7362 } else if (context_used
) {
7366 if (cfg
->opt
& MONO_OPT_SHARED
)
7367 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7369 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7370 data
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, rgctx_info
);
7372 alloc
= handle_alloc_from_inst (cfg
, cmethod
->klass
, data
, FALSE
);
7375 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7377 CHECK_TYPELOAD (cmethod
->klass
);
7380 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7381 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7382 * As a workaround, we call class cctors before allocating objects.
7384 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7385 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7386 if (cfg
->verbose_level
> 2)
7387 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
7388 class_inits
= g_slist_prepend (class_inits
, vtable
);
7391 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
);
7396 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
7398 /* Now call the actual ctor */
7399 /* Avoid virtual calls to ctors if possible */
7400 if (cmethod
->klass
->marshalbyref
)
7401 callvirt_this_arg
= sp
[0];
7403 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
7404 mono_method_check_inlining (cfg
, cmethod
) &&
7405 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
7406 !g_list_find (dont_inline
, cmethod
)) {
7409 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
7410 cfg
->real_offset
+= 5;
7413 inline_costs
+= costs
- 5;
7416 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
);
7418 } else if (context_used
&&
7419 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7420 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
7421 MonoInst
*cmethod_addr
;
7423 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
7424 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7426 mono_emit_rgctx_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
7429 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
,
7430 callvirt_this_arg
, NULL
, vtable_arg
);
7431 if (mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && ((MonoCallInst
*)ins
)->method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)
7432 GENERIC_SHARING_FAILURE (*ip
);
7436 if (alloc
== NULL
) {
7438 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
7439 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
7453 token
= read32 (ip
+ 1);
7454 klass
= mini_get_class (method
, token
, generic_context
);
7455 CHECK_TYPELOAD (klass
);
7456 if (sp
[0]->type
!= STACK_OBJ
)
7459 if (cfg
->generic_sharing_context
)
7460 context_used
= mono_class_check_context_used (klass
);
7469 args
[1] = emit_get_rgctx_klass (cfg
, context_used
,
7470 klass
, MONO_RGCTX_INFO_KLASS
);
7472 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
7476 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7477 MonoMethod
*mono_castclass
;
7478 MonoInst
*iargs
[1];
7481 mono_castclass
= mono_marshal_get_castclass (klass
);
7484 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7485 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7486 g_assert (costs
> 0);
7489 cfg
->real_offset
+= 5;
7494 inline_costs
+= costs
;
7497 ins
= handle_castclass (cfg
, klass
, *sp
);
7507 token
= read32 (ip
+ 1);
7508 klass
= mini_get_class (method
, token
, generic_context
);
7509 CHECK_TYPELOAD (klass
);
7510 if (sp
[0]->type
!= STACK_OBJ
)
7513 if (cfg
->generic_sharing_context
)
7514 context_used
= mono_class_check_context_used (klass
);
7523 args
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7525 *sp
= mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
7529 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7530 MonoMethod
*mono_isinst
;
7531 MonoInst
*iargs
[1];
7534 mono_isinst
= mono_marshal_get_isinst (klass
);
7537 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
7538 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7539 g_assert (costs
> 0);
7542 cfg
->real_offset
+= 5;
7547 inline_costs
+= costs
;
7550 ins
= handle_isinst (cfg
, klass
, *sp
);
7557 case CEE_UNBOX_ANY
: {
7561 token
= read32 (ip
+ 1);
7562 klass
= mini_get_class (method
, token
, generic_context
);
7563 CHECK_TYPELOAD (klass
);
7565 mono_save_token_info (cfg
, image
, token
, klass
);
7567 if (cfg
->generic_sharing_context
)
7568 context_used
= mono_class_check_context_used (klass
);
7570 if (generic_class_is_reference_type (cfg
, klass
)) {
7573 MonoInst
*iargs
[2];
7578 iargs
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7579 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, iargs
);
7583 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7584 MonoMethod
*mono_castclass
;
7585 MonoInst
*iargs
[1];
7588 mono_castclass
= mono_marshal_get_castclass (klass
);
7591 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7592 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7594 g_assert (costs
> 0);
7597 cfg
->real_offset
+= 5;
7601 inline_costs
+= costs
;
7603 ins
= handle_castclass (cfg
, klass
, *sp
);
7611 if (mono_class_is_nullable (klass
)) {
7612 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7619 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7625 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7638 token
= read32 (ip
+ 1);
7639 klass
= mini_get_class (method
, token
, generic_context
);
7640 CHECK_TYPELOAD (klass
);
7642 mono_save_token_info (cfg
, image
, token
, klass
);
7644 if (cfg
->generic_sharing_context
)
7645 context_used
= mono_class_check_context_used (klass
);
7647 if (generic_class_is_reference_type (cfg
, klass
)) {
7653 if (klass
== mono_defaults
.void_class
)
7655 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
7657 /* frequent check in generic code: box (struct), brtrue */
7658 if (!mono_class_is_nullable (klass
) &&
7659 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) && (ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
)) {
7660 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7662 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7663 if (*ip
== CEE_BRTRUE_S
) {
7666 target
= ip
+ 1 + (signed char)(*ip
);
7671 target
= ip
+ 4 + (gint
)(read32 (ip
));
7674 GET_BBLOCK (cfg
, tblock
, target
);
7675 link_bblock (cfg
, bblock
, tblock
);
7676 ins
->inst_target_bb
= tblock
;
7677 GET_BBLOCK (cfg
, tblock
, ip
);
7679 * This leads to some inconsistency, since the two bblocks are
7680 * not really connected, but it is needed for handling stack
7681 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7682 * FIXME: This should only be needed if sp != stack_start, but that
7683 * doesn't work for some reason (test failure in mcs/tests on x86).
7685 link_bblock (cfg
, bblock
, tblock
);
7686 if (sp
!= stack_start
) {
7687 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7689 CHECK_UNVERIFIABLE (cfg
);
7691 MONO_ADD_INS (bblock
, ins
);
7692 start_new_bblock
= 1;
7700 if (cfg
->opt
& MONO_OPT_SHARED
)
7701 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7703 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7704 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
7705 *sp
++ = handle_box_from_inst (cfg
, val
, klass
, context_used
, data
);
7707 *sp
++ = handle_box (cfg
, val
, klass
);
7718 token
= read32 (ip
+ 1);
7719 klass
= mini_get_class (method
, token
, generic_context
);
7720 CHECK_TYPELOAD (klass
);
7722 mono_save_token_info (cfg
, image
, token
, klass
);
7724 if (cfg
->generic_sharing_context
)
7725 context_used
= mono_class_check_context_used (klass
);
7727 if (mono_class_is_nullable (klass
)) {
7730 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7731 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
7735 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7745 MonoClassField
*field
;
7749 if (*ip
== CEE_STFLD
) {
7756 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
7758 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
7761 token
= read32 (ip
+ 1);
7762 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7763 field
= mono_method_get_wrapper_data (method
, token
);
7764 klass
= field
->parent
;
7767 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
7771 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
7772 FIELD_ACCESS_FAILURE
;
7773 mono_class_init (klass
);
7775 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7776 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7777 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7778 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7781 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
7782 if (*ip
== CEE_STFLD
) {
7783 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
7785 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7786 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
7787 MonoInst
*iargs
[5];
7790 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7791 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7792 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
7796 if (cfg
->opt
& MONO_OPT_INLINE
) {
7797 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
7798 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7799 g_assert (costs
> 0);
7801 cfg
->real_offset
+= 5;
7804 inline_costs
+= costs
;
7806 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
7811 #if HAVE_WRITE_BARRIERS
7812 if (mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
7813 /* insert call to write barrier */
7814 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7815 MonoInst
*iargs
[2];
7818 dreg
= alloc_preg (cfg
);
7819 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7821 mono_emit_method_call (cfg
, write_barrier
, iargs
, NULL
);
7825 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
7827 store
->flags
|= ins_flag
;
7834 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7835 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
7836 MonoInst
*iargs
[4];
7839 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7840 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7841 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
7842 if ((cfg
->opt
& MONO_OPT_INLINE
) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper
)->ret
)) {
7843 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
7844 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7846 g_assert (costs
> 0);
7848 cfg
->real_offset
+= 5;
7852 inline_costs
+= costs
;
7854 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
7858 if (sp
[0]->type
== STACK_VTYPE
) {
7861 /* Have to compute the address of the variable */
7863 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
7865 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
7867 g_assert (var
->klass
== klass
);
7869 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
7873 if (*ip
== CEE_LDFLDA
) {
7874 dreg
= alloc_preg (cfg
);
7876 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7877 ins
->klass
= mono_class_from_mono_type (field
->type
);
7878 ins
->type
= STACK_MP
;
7883 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
7884 load
->flags
|= ins_flag
;
7895 MonoClassField
*field
;
7896 gpointer addr
= NULL
;
7897 gboolean is_special_static
;
7900 token
= read32 (ip
+ 1);
7902 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7903 field
= mono_method_get_wrapper_data (method
, token
);
7904 klass
= field
->parent
;
7907 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
7910 mono_class_init (klass
);
7911 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
7912 FIELD_ACCESS_FAILURE
;
7914 /* if the class is Critical then transparent code cannot access its fields */
7915 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
7916 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
7919 * We can only support shared generic static
7920 * field access on architectures where the
7921 * trampoline code has been extended to handle
7922 * the generic class init.
7924 #ifndef MONO_ARCH_VTABLE_REG
7925 GENERIC_SHARING_FAILURE (*ip
);
7928 if (cfg
->generic_sharing_context
)
7929 context_used
= mono_class_check_context_used (klass
);
7931 g_assert (!(field
->type
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
7933 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7934 * to be called here.
7936 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
7937 mono_class_vtable (cfg
->domain
, klass
);
7938 CHECK_TYPELOAD (klass
);
7940 mono_domain_lock (cfg
->domain
);
7941 if (cfg
->domain
->special_static_fields
)
7942 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
7943 mono_domain_unlock (cfg
->domain
);
7945 is_special_static
= mono_class_field_is_special_static (field
);
7947 /* Generate IR to compute the field address */
7949 if ((cfg
->opt
& MONO_OPT_SHARED
) ||
7950 (cfg
->compile_aot
&& is_special_static
) ||
7951 (context_used
&& is_special_static
)) {
7952 MonoInst
*iargs
[2];
7954 g_assert (field
->parent
);
7955 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7957 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
7958 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
7960 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
7962 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
7963 } else if (context_used
) {
7964 MonoInst
*static_data
;
7967 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7968 method->klass->name_space, method->klass->name, method->name,
7969 depth, field->offset);
7972 if (mono_class_needs_cctor_run (klass
, method
)) {
7976 vtable
= emit_get_rgctx_klass (cfg
, context_used
,
7977 klass
, MONO_RGCTX_INFO_VTABLE
);
7979 // FIXME: This doesn't work since it tries to pass the argument
7980 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7982 * The vtable pointer is always passed in a register regardless of
7983 * the calling convention, so assign it manually, and make a call
7984 * using a signature without parameters.
7986 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable
);
7987 #ifdef MONO_ARCH_VTABLE_REG
7988 mono_call_inst_add_outarg_reg (cfg
, call
, vtable
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
7989 cfg
->uses_vtable_reg
= TRUE
;
7996 * The pointer we're computing here is
7998 * super_info.static_data + field->offset
8000 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
8001 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
8003 if (field
->offset
== 0) {
8006 int addr_reg
= mono_alloc_preg (cfg
);
8007 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
8009 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
8010 MonoInst
*iargs
[2];
8012 g_assert (field
->parent
);
8013 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8014 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8015 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8017 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
8019 CHECK_TYPELOAD (klass
);
8021 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8022 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8023 if (cfg
->verbose_level
> 2)
8024 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
8025 class_inits
= g_slist_prepend (class_inits
, vtable
);
8027 if (cfg
->run_cctors
) {
8029 /* This makes it so that inlining cannot trigger */
8030 /* .cctors: too many apps depend on them */
8031 /* running with a specific order... */
8032 if (! vtable
->initialized
)
8034 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
8036 set_exception_object (cfg
, ex
);
8037 goto exception_exit
;
8041 addr
= (char*)vtable
->data
+ field
->offset
;
8043 if (cfg
->compile_aot
)
8044 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8046 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8049 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8050 * This could be later optimized to do just a couple of
8051 * memory dereferences with constant offsets.
8053 MonoInst
*iargs
[1];
8054 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8055 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8059 /* Generate IR to do the actual load/store operation */
8061 if (*ip
== CEE_LDSFLDA
) {
8062 ins
->klass
= mono_class_from_mono_type (field
->type
);
8063 ins
->type
= STACK_PTR
;
8065 } else if (*ip
== CEE_STSFLD
) {
8070 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, ins
->dreg
, 0, sp
[0]->dreg
);
8071 store
->flags
|= ins_flag
;
8073 gboolean is_const
= FALSE
;
8074 MonoVTable
*vtable
= NULL
;
8076 if (!context_used
) {
8077 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8078 CHECK_TYPELOAD (klass
);
8080 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8081 vtable
->initialized
&& (field
->type
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8082 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8083 int ro_type
= field
->type
->type
;
8084 if (ro_type
== MONO_TYPE_VALUETYPE
&& field
->type
->data
.klass
->enumtype
) {
8085 ro_type
= mono_class_enum_basetype (field
->type
->data
.klass
)->type
;
8087 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8090 case MONO_TYPE_BOOLEAN
:
8092 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8096 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8099 case MONO_TYPE_CHAR
:
8101 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8105 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8110 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8114 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8117 #ifndef HAVE_MOVING_COLLECTOR
8120 case MONO_TYPE_STRING
:
8121 case MONO_TYPE_OBJECT
:
8122 case MONO_TYPE_CLASS
:
8123 case MONO_TYPE_SZARRAY
:
8125 case MONO_TYPE_FNPTR
:
8126 case MONO_TYPE_ARRAY
:
8127 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8128 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8134 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8139 case MONO_TYPE_VALUETYPE
:
8149 CHECK_STACK_OVF (1);
8151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8152 load
->flags
|= ins_flag
;
8165 token
= read32 (ip
+ 1);
8166 klass
= mini_get_class (method
, token
, generic_context
);
8167 CHECK_TYPELOAD (klass
);
8168 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8169 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8180 const char *data_ptr
;
8182 guint32 field_token
;
8188 token
= read32 (ip
+ 1);
8190 klass
= mini_get_class (method
, token
, generic_context
);
8191 CHECK_TYPELOAD (klass
);
8193 if (cfg
->generic_sharing_context
)
8194 context_used
= mono_class_check_context_used (klass
);
8199 /* FIXME: Decompose later to help abcrem */
8202 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
8203 mono_array_class_get (klass
, 1), MONO_RGCTX_INFO_VTABLE
);
8208 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
8210 if (cfg
->opt
& MONO_OPT_SHARED
) {
8211 /* Decompose now to avoid problems with references to the domainvar */
8212 MonoInst
*iargs
[3];
8214 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8215 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8218 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
8220 /* Decompose later since it is needed by abcrem */
8221 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
8222 ins
->dreg
= alloc_preg (cfg
);
8223 ins
->sreg1
= sp
[0]->dreg
;
8224 ins
->inst_newa_class
= klass
;
8225 ins
->type
= STACK_OBJ
;
8227 MONO_ADD_INS (cfg
->cbb
, ins
);
8228 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8229 cfg
->cbb
->has_array_access
= TRUE
;
8231 /* Needed so mono_emit_load_get_addr () gets called */
8232 mono_get_got_var (cfg
);
8242 * we inline/optimize the initialization sequence if possible.
8243 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8244 * for small sizes open code the memcpy
8245 * ensure the rva field is big enough
8247 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
8248 MonoMethod
*memcpy_method
= get_memcpy_method ();
8249 MonoInst
*iargs
[3];
8250 int add_reg
= alloc_preg (cfg
);
8252 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
8253 if (cfg
->compile_aot
) {
8254 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
8256 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
8258 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
8259 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
8268 if (sp
[0]->type
!= STACK_OBJ
)
8271 dreg
= alloc_preg (cfg
);
8272 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
8273 ins
->dreg
= alloc_preg (cfg
);
8274 ins
->sreg1
= sp
[0]->dreg
;
8275 ins
->type
= STACK_I4
;
8276 MONO_ADD_INS (cfg
->cbb
, ins
);
8277 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8278 cfg
->cbb
->has_array_access
= TRUE
;
8286 if (sp
[0]->type
!= STACK_OBJ
)
8289 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8291 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
8292 CHECK_TYPELOAD (klass
);
8293 /* we need to make sure that this array is exactly the type it needs
8294 * to be for correctness. the wrappers are lax with their usage
8295 * so we need to ignore them here
8297 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
)
8298 mini_emit_check_array_type (cfg
, sp
[0], mono_array_class_get (klass
, 1));
8301 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8305 case CEE_LDELEM_ANY
:
8316 case CEE_LDELEM_REF
: {
8322 if (*ip
== CEE_LDELEM_ANY
) {
8324 token
= read32 (ip
+ 1);
8325 klass
= mini_get_class (method
, token
, generic_context
);
8326 CHECK_TYPELOAD (klass
);
8327 mono_class_init (klass
);
8330 klass
= array_access_to_klass (*ip
);
8332 if (sp
[0]->type
!= STACK_OBJ
)
8335 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8337 if (sp
[1]->opcode
== OP_ICONST
) {
8338 int array_reg
= sp
[0]->dreg
;
8339 int index_reg
= sp
[1]->dreg
;
8340 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8342 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
8345 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8346 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
8349 if (*ip
== CEE_LDELEM_ANY
)
8362 case CEE_STELEM_REF
:
8363 case CEE_STELEM_ANY
: {
8369 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8371 if (*ip
== CEE_STELEM_ANY
) {
8373 token
= read32 (ip
+ 1);
8374 klass
= mini_get_class (method
, token
, generic_context
);
8375 CHECK_TYPELOAD (klass
);
8376 mono_class_init (klass
);
8379 klass
= array_access_to_klass (*ip
);
8381 if (sp
[0]->type
!= STACK_OBJ
)
8384 /* storing a NULL doesn't need any of the complex checks in stelemref */
8385 if (generic_class_is_reference_type (cfg
, klass
) &&
8386 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
8387 MonoMethod
* helper
= mono_marshal_get_stelemref ();
8388 MonoInst
*iargs
[3];
8390 if (sp
[0]->type
!= STACK_OBJ
)
8392 if (sp
[2]->type
!= STACK_OBJ
)
8399 mono_emit_method_call (cfg
, helper
, iargs
, NULL
);
8401 if (sp
[1]->opcode
== OP_ICONST
) {
8402 int array_reg
= sp
[0]->dreg
;
8403 int index_reg
= sp
[1]->dreg
;
8404 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8406 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8407 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
8409 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8410 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
8414 if (*ip
== CEE_STELEM_ANY
)
8421 case CEE_CKFINITE
: {
8425 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
8426 ins
->sreg1
= sp
[0]->dreg
;
8427 ins
->dreg
= alloc_freg (cfg
);
8428 ins
->type
= STACK_R8
;
8429 MONO_ADD_INS (bblock
, ins
);
8432 mono_decompose_opcode (cfg
, ins
);
8437 case CEE_REFANYVAL
: {
8438 MonoInst
*src_var
, *src
;
8440 int klass_reg
= alloc_preg (cfg
);
8441 int dreg
= alloc_preg (cfg
);
8444 MONO_INST_NEW (cfg
, ins
, *ip
);
8447 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8448 CHECK_TYPELOAD (klass
);
8449 mono_class_init (klass
);
8451 if (cfg
->generic_sharing_context
)
8452 context_used
= mono_class_check_context_used (klass
);
8455 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8457 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8458 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
8459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
8462 MonoInst
*klass_ins
;
8464 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
8465 klass
, MONO_RGCTX_INFO_KLASS
);
8468 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
8469 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
8471 mini_emit_class_check (cfg
, klass_reg
, klass
);
8473 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
8474 ins
->type
= STACK_MP
;
8479 case CEE_MKREFANY
: {
8480 MonoInst
*loc
, *addr
;
8483 MONO_INST_NEW (cfg
, ins
, *ip
);
8486 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8487 CHECK_TYPELOAD (klass
);
8488 mono_class_init (klass
);
8490 if (cfg
->generic_sharing_context
)
8491 context_used
= mono_class_check_context_used (klass
);
8493 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
8494 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
8497 MonoInst
*const_ins
;
8498 int type_reg
= alloc_preg (cfg
);
8500 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
8501 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
8502 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8503 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8504 } else if (cfg
->compile_aot
) {
8505 int const_reg
= alloc_preg (cfg
);
8506 int type_reg
= alloc_preg (cfg
);
8508 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
8509 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
8510 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8511 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8513 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
8514 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
8516 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
8518 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
8519 ins
->type
= STACK_VTYPE
;
8520 ins
->klass
= mono_defaults
.typed_reference_class
;
8527 MonoClass
*handle_class
;
8529 CHECK_STACK_OVF (1);
8532 n
= read32 (ip
+ 1);
8534 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
8535 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
8536 handle
= mono_method_get_wrapper_data (method
, n
);
8537 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
8538 if (handle_class
== mono_defaults
.typehandle_class
)
8539 handle
= &((MonoClass
*)handle
)->byval_arg
;
8542 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
8546 mono_class_init (handle_class
);
8547 if (cfg
->generic_sharing_context
) {
8548 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
8549 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
8550 /* This case handles ldtoken
8551 of an open type, like for
8554 } else if (handle_class
== mono_defaults
.typehandle_class
) {
8555 /* If we get a MONO_TYPE_CLASS
8556 then we need to provide the
8558 instantiation of it. */
8559 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
8562 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
8563 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
8564 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
8565 else if (handle_class
== mono_defaults
.methodhandle_class
)
8566 context_used
= mono_method_check_context_used (handle
);
8568 g_assert_not_reached ();
8571 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
8572 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
8573 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
8574 MonoInst
*addr
, *vtvar
, *iargs
[3];
8575 int method_context_used
;
8577 if (cfg
->generic_sharing_context
)
8578 method_context_used
= mono_method_check_context_used (method
);
8580 method_context_used
= 0;
8582 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8584 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8585 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
8586 if (method_context_used
) {
8587 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
8588 method
, MONO_RGCTX_INFO_METHOD
);
8589 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
8591 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
8592 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
8594 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8596 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8598 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8600 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8601 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
8602 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
8603 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
8604 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
8605 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
8607 mono_class_init (tclass
);
8609 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8610 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
8611 } else if (cfg
->compile_aot
) {
8612 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
8614 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
8616 ins
->type
= STACK_OBJ
;
8617 ins
->klass
= cmethod
->klass
;
8620 MonoInst
*addr
, *vtvar
;
8622 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8625 if (handle_class
== mono_defaults
.typehandle_class
) {
8626 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8627 mono_class_from_mono_type (handle
),
8628 MONO_RGCTX_INFO_TYPE
);
8629 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
8630 ins
= emit_get_rgctx_method (cfg
, context_used
,
8631 handle
, MONO_RGCTX_INFO_METHOD
);
8632 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
8633 ins
= emit_get_rgctx_field (cfg
, context_used
,
8634 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
8636 g_assert_not_reached ();
8638 } else if (cfg
->compile_aot
) {
8639 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
8641 EMIT_NEW_PCONST (cfg
, ins
, handle
);
8643 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8644 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8645 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8655 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
8657 ins
->sreg1
= sp
[0]->dreg
;
8659 bblock
->out_of_line
= TRUE
;
8660 MONO_ADD_INS (bblock
, ins
);
8661 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
8662 MONO_ADD_INS (bblock
, ins
);
8665 link_bblock (cfg
, bblock
, end_bblock
);
8666 start_new_bblock
= 1;
8668 case CEE_ENDFINALLY
:
8669 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
8670 MONO_ADD_INS (bblock
, ins
);
8672 start_new_bblock
= 1;
8675 * Control will leave the method so empty the stack, otherwise
8676 * the next basic block will start with a nonempty stack.
8678 while (sp
!= stack_start
) {
8686 if (*ip
== CEE_LEAVE
) {
8688 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
8691 target
= ip
+ 2 + (signed char)(ip
[1]);
8694 /* empty the stack */
8695 while (sp
!= stack_start
) {
8700 * If this leave statement is in a catch block, check for a
8701 * pending exception, and rethrow it if necessary.
8703 for (i
= 0; i
< header
->num_clauses
; ++i
) {
8704 MonoExceptionClause
*clause
= &header
->clauses
[i
];
8707 * Use <= in the final comparison to handle clauses with multiple
8708 * leave statements, like in bug #78024.
8709 * The ordering of the exception clauses guarantees that we find the
8712 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
)) {
8714 MonoBasicBlock
*dont_throw
;
8719 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8722 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
8724 NEW_BBLOCK (cfg
, dont_throw
);
8727 * Currently, we always rethrow the abort exception, despite the
8728 * fact that this is not correct. See thread6.cs for an example.
8729 * But propagating the abort exception is more important than
8730 * getting the semantics right.
8732 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
8733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
8734 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
8736 MONO_START_BB (cfg
, dont_throw
);
8741 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
8743 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
8745 link_bblock (cfg
, bblock
, tblock
);
8746 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
8747 ins
->inst_target_bb
= tblock
;
8748 MONO_ADD_INS (bblock
, ins
);
8750 g_list_free (handlers
);
8753 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8754 MONO_ADD_INS (bblock
, ins
);
8755 GET_BBLOCK (cfg
, tblock
, target
);
8756 link_bblock (cfg
, bblock
, tblock
);
8757 ins
->inst_target_bb
= tblock
;
8758 start_new_bblock
= 1;
8760 if (*ip
== CEE_LEAVE
)
8769 * Mono specific opcodes
8771 case MONO_CUSTOM_PREFIX
: {
8773 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
8777 case CEE_MONO_ICALL
: {
8779 MonoJitICallInfo
*info
;
8781 token
= read32 (ip
+ 2);
8782 func
= mono_method_get_wrapper_data (method
, token
);
8783 info
= mono_find_jit_icall_by_addr (func
);
8786 CHECK_STACK (info
->sig
->param_count
);
8787 sp
-= info
->sig
->param_count
;
8789 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
8790 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
8794 inline_costs
+= 10 * num_calls
++;
8798 case CEE_MONO_LDPTR
: {
8801 CHECK_STACK_OVF (1);
8803 token
= read32 (ip
+ 2);
8805 ptr
= mono_method_get_wrapper_data (method
, token
);
8806 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
8807 MonoJitICallInfo
*callinfo
;
8808 const char *icall_name
;
8810 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
8811 g_assert (icall_name
);
8812 callinfo
= mono_find_jit_icall_by_name (icall_name
);
8813 g_assert (callinfo
);
8815 if (ptr
== callinfo
->func
) {
8816 /* Will be transformed into an AOTCONST later */
8817 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8823 /* FIXME: Generalize this */
8824 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
8825 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
8830 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8833 inline_costs
+= 10 * num_calls
++;
8834 /* Can't embed random pointers into AOT code */
8835 cfg
->disable_aot
= 1;
8838 case CEE_MONO_ICALL_ADDR
: {
8839 MonoMethod
*cmethod
;
8842 CHECK_STACK_OVF (1);
8844 token
= read32 (ip
+ 2);
8846 cmethod
= mono_method_get_wrapper_data (method
, token
);
8848 if (cfg
->compile_aot
) {
8849 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
8851 ptr
= mono_lookup_internal_call (cmethod
);
8853 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8859 case CEE_MONO_VTADDR
: {
8860 MonoInst
*src_var
, *src
;
8866 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8867 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
8872 case CEE_MONO_NEWOBJ
: {
8873 MonoInst
*iargs
[2];
8875 CHECK_STACK_OVF (1);
8877 token
= read32 (ip
+ 2);
8878 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8879 mono_class_init (klass
);
8880 NEW_DOMAINCONST (cfg
, iargs
[0]);
8881 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
8882 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8883 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
8884 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
8886 inline_costs
+= 10 * num_calls
++;
8889 case CEE_MONO_OBJADDR
:
8892 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
8893 ins
->dreg
= alloc_preg (cfg
);
8894 ins
->sreg1
= sp
[0]->dreg
;
8895 ins
->type
= STACK_MP
;
8896 MONO_ADD_INS (cfg
->cbb
, ins
);
8900 case CEE_MONO_LDNATIVEOBJ
:
8902 * Similar to LDOBJ, but instead load the unmanaged
8903 * representation of the vtype to the stack.
8908 token
= read32 (ip
+ 2);
8909 klass
= mono_method_get_wrapper_data (method
, token
);
8910 g_assert (klass
->valuetype
);
8911 mono_class_init (klass
);
8914 MonoInst
*src
, *dest
, *temp
;
8917 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
8918 temp
->backend
.is_pinvoke
= 1;
8919 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
8920 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
8922 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
8923 dest
->type
= STACK_VTYPE
;
8924 dest
->klass
= klass
;
8930 case CEE_MONO_RETOBJ
: {
8932 * Same as RET, but return the native representation of a vtype
8935 g_assert (cfg
->ret
);
8936 g_assert (mono_method_signature (method
)->pinvoke
);
8941 token
= read32 (ip
+ 2);
8942 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8944 if (!cfg
->vret_addr
) {
8945 g_assert (cfg
->ret_var_is_local
);
8947 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
8949 EMIT_NEW_RETLOADA (cfg
, ins
);
8951 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
8953 if (sp
!= stack_start
)
8956 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8957 ins
->inst_target_bb
= end_bblock
;
8958 MONO_ADD_INS (bblock
, ins
);
8959 link_bblock (cfg
, bblock
, end_bblock
);
8960 start_new_bblock
= 1;
8964 case CEE_MONO_CISINST
:
8965 case CEE_MONO_CCASTCLASS
: {
8970 token
= read32 (ip
+ 2);
8971 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
8972 if (ip
[1] == CEE_MONO_CISINST
)
8973 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
8975 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
8981 case CEE_MONO_SAVE_LMF
:
8982 case CEE_MONO_RESTORE_LMF
:
8983 #ifdef MONO_ARCH_HAVE_LMF_OPS
8984 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
8985 MONO_ADD_INS (bblock
, ins
);
8986 cfg
->need_lmf_area
= TRUE
;
8990 case CEE_MONO_CLASSCONST
:
8991 CHECK_STACK_OVF (1);
8993 token
= read32 (ip
+ 2);
8994 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
8997 inline_costs
+= 10 * num_calls
++;
8999 case CEE_MONO_NOT_TAKEN
:
9000 bblock
->out_of_line
= TRUE
;
9004 CHECK_STACK_OVF (1);
9006 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
9007 ins
->dreg
= alloc_preg (cfg
);
9008 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
9009 ins
->type
= STACK_PTR
;
9010 MONO_ADD_INS (bblock
, ins
);
9015 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
9025 /* somewhat similar to LDTOKEN */
9026 MonoInst
*addr
, *vtvar
;
9027 CHECK_STACK_OVF (1);
9028 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
9030 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9031 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
9033 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9034 ins
->type
= STACK_VTYPE
;
9035 ins
->klass
= mono_defaults
.argumenthandle_class
;
9048 * The following transforms:
9049 * CEE_CEQ into OP_CEQ
9050 * CEE_CGT into OP_CGT
9051 * CEE_CGT_UN into OP_CGT_UN
9052 * CEE_CLT into OP_CLT
9053 * CEE_CLT_UN into OP_CLT_UN
9055 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9057 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9059 cmp
->sreg1
= sp
[0]->dreg
;
9060 cmp
->sreg2
= sp
[1]->dreg
;
9061 type_from_op (cmp
, sp
[0], sp
[1]);
9063 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9064 cmp
->opcode
= OP_LCOMPARE
;
9065 else if (sp
[0]->type
== STACK_R8
)
9066 cmp
->opcode
= OP_FCOMPARE
;
9068 cmp
->opcode
= OP_ICOMPARE
;
9069 MONO_ADD_INS (bblock
, cmp
);
9070 ins
->type
= STACK_I4
;
9071 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9072 type_from_op (ins
, sp
[0], sp
[1]);
9074 if (cmp
->opcode
== OP_FCOMPARE
) {
9076 * The backends expect the fceq opcodes to do the
9079 cmp
->opcode
= OP_NOP
;
9080 ins
->sreg1
= cmp
->sreg1
;
9081 ins
->sreg2
= cmp
->sreg2
;
9083 MONO_ADD_INS (bblock
, ins
);
9090 MonoMethod
*cil_method
;
9091 gboolean needs_static_rgctx_invoke
;
9093 CHECK_STACK_OVF (1);
9095 n
= read32 (ip
+ 2);
9096 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9099 mono_class_init (cmethod
->klass
);
9101 mono_save_token_info (cfg
, image
, n
, cmethod
);
9103 if (cfg
->generic_sharing_context
)
9104 context_used
= mono_method_check_context_used (cmethod
);
9106 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
9108 cil_method
= cmethod
;
9109 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
9110 METHOD_ACCESS_FAILURE
;
9112 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9113 if (check_linkdemand (cfg
, method
, cmethod
))
9115 CHECK_CFG_EXCEPTION
;
9116 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9117 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9121 * Optimize the common case of ldftn+delegate creation
9123 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9124 /* FIXME: SGEN support */
9125 /* FIXME: handle shared static generic methods */
9126 /* FIXME: handle this in shared code */
9127 if (!needs_static_rgctx_invoke
&& !context_used
&& (sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
9128 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
9129 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
9130 MonoInst
*target_ins
;
9133 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
9134 if (!invoke
|| !mono_method_signature (invoke
))
9138 if (cfg
->verbose_level
> 3)
9139 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9140 target_ins
= sp
[-1];
9142 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
);
9151 if (needs_static_rgctx_invoke
)
9152 cmethod
= mono_marshal_get_static_rgctx_invoke (cmethod
);
9154 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
9155 } else if (needs_static_rgctx_invoke
) {
9156 EMIT_NEW_METHODCONST (cfg
, argconst
, mono_marshal_get_static_rgctx_invoke (cmethod
));
9158 EMIT_NEW_METHODCONST (cfg
, argconst
, cmethod
);
9160 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
9164 inline_costs
+= 10 * num_calls
++;
9167 case CEE_LDVIRTFTN
: {
9172 n
= read32 (ip
+ 2);
9173 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9176 mono_class_init (cmethod
->klass
);
9178 if (cfg
->generic_sharing_context
)
9179 context_used
= mono_method_check_context_used (cmethod
);
9181 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9182 if (check_linkdemand (cfg
, method
, cmethod
))
9184 CHECK_CFG_EXCEPTION
;
9185 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9186 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9193 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
9194 cmethod
, MONO_RGCTX_INFO_METHOD
);
9195 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
9197 EMIT_NEW_METHODCONST (cfg
, args
[1], cmethod
);
9198 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
9202 inline_costs
+= 10 * num_calls
++;
9206 CHECK_STACK_OVF (1);
9208 n
= read16 (ip
+ 2);
9210 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
9215 CHECK_STACK_OVF (1);
9217 n
= read16 (ip
+ 2);
9219 NEW_ARGLOADA (cfg
, ins
, n
);
9220 MONO_ADD_INS (cfg
->cbb
, ins
);
9228 n
= read16 (ip
+ 2);
9230 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
9232 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
9236 CHECK_STACK_OVF (1);
9238 n
= read16 (ip
+ 2);
9240 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
9245 unsigned char *tmp_ip
;
9246 CHECK_STACK_OVF (1);
9248 n
= read16 (ip
+ 2);
9251 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
9257 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
9266 n
= read16 (ip
+ 2);
9268 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
9270 emit_stloc_ir (cfg
, sp
, header
, n
);
9277 if (sp
!= stack_start
)
9279 if (cfg
->method
!= method
)
9281 * Inlining this into a loop in a parent could lead to
9282 * stack overflows which is different behavior than the
9283 * non-inlined case, thus disable inlining in this case.
9285 goto inline_failure
;
9287 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
9288 ins
->dreg
= alloc_preg (cfg
);
9289 ins
->sreg1
= sp
[0]->dreg
;
9290 ins
->type
= STACK_PTR
;
9291 MONO_ADD_INS (cfg
->cbb
, ins
);
9293 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
9294 if (header
->init_locals
)
9295 ins
->flags
|= MONO_INST_INIT
;
9300 case CEE_ENDFILTER
: {
9301 MonoExceptionClause
*clause
, *nearest
;
9302 int cc
, nearest_num
;
9306 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
9308 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
9309 ins
->sreg1
= (*sp
)->dreg
;
9310 MONO_ADD_INS (bblock
, ins
);
9311 start_new_bblock
= 1;
9316 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
9317 clause
= &header
->clauses
[cc
];
9318 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
9319 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
9320 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
9326 if ((ip
- header
->code
) != nearest
->handler_offset
)
9331 case CEE_UNALIGNED_
:
9332 ins_flag
|= MONO_INST_UNALIGNED
;
9333 /* FIXME: record alignment? we can assume 1 for now */
9338 ins_flag
|= MONO_INST_VOLATILE
;
9342 ins_flag
|= MONO_INST_TAILCALL
;
9343 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
9344 /* Can't inline tail calls at this time */
9345 inline_costs
+= 100000;
9352 token
= read32 (ip
+ 2);
9353 klass
= mini_get_class (method
, token
, generic_context
);
9354 CHECK_TYPELOAD (klass
);
9355 if (generic_class_is_reference_type (cfg
, klass
))
9356 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
9358 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
9362 case CEE_CONSTRAINED_
:
9364 token
= read32 (ip
+ 2);
9365 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
9366 CHECK_TYPELOAD (constrained_call
);
9371 MonoInst
*iargs
[3];
9375 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
9376 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
9377 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
9378 /* emit_memset only works when val == 0 */
9379 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
9384 if (ip
[1] == CEE_CPBLK
) {
9385 MonoMethod
*memcpy_method
= get_memcpy_method ();
9386 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9388 MonoMethod
*memset_method
= get_memset_method ();
9389 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
9399 ins_flag
|= MONO_INST_NOTYPECHECK
;
9401 ins_flag
|= MONO_INST_NORANGECHECK
;
9402 /* we ignore the no-nullcheck for now since we
9403 * really do it explicitly only when doing callvirt->call
9409 int handler_offset
= -1;
9411 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9412 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9413 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
9414 handler_offset
= clause
->handler_offset
;
9419 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
9421 g_assert (handler_offset
!= -1);
9423 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
9424 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
9425 ins
->sreg1
= load
->dreg
;
9426 MONO_ADD_INS (bblock
, ins
);
9428 link_bblock (cfg
, bblock
, end_bblock
);
9429 start_new_bblock
= 1;
9437 CHECK_STACK_OVF (1);
9439 token
= read32 (ip
+ 2);
9440 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
) {
9441 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
9442 token
= mono_type_size (type
, &ialign
);
9444 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
9445 CHECK_TYPELOAD (klass
);
9446 mono_class_init (klass
);
9447 token
= mono_class_value_size (klass
, &align
);
9449 EMIT_NEW_ICONST (cfg
, ins
, token
);
9454 case CEE_REFANYTYPE
: {
9455 MonoInst
*src_var
, *src
;
9461 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9463 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9464 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9465 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
9475 g_error ("opcode 0xfe 0x%02x not handled", ip
[1]);
9480 g_error ("opcode 0x%02x not handled", *ip
);
9483 if (start_new_bblock
!= 1)
9486 bblock
->cil_length
= ip
- bblock
->cil_code
;
9487 bblock
->next_bb
= end_bblock
;
9489 if (cfg
->method
== method
&& cfg
->domainvar
) {
9491 MonoInst
*get_domain
;
9493 cfg
->cbb
= init_localsbb
;
9495 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
9496 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
9499 get_domain
->dreg
= alloc_preg (cfg
);
9500 MONO_ADD_INS (cfg
->cbb
, get_domain
);
9502 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
9503 MONO_ADD_INS (cfg
->cbb
, store
);
9506 if (cfg
->method
== method
&& cfg
->got_var
)
9507 mono_emit_load_got_addr (cfg
);
9509 if (header
->init_locals
) {
9512 cfg
->cbb
= init_localsbb
;
9514 for (i
= 0; i
< header
->num_locals
; ++i
) {
9515 MonoType
*ptype
= header
->locals
[i
];
9516 int t
= ptype
->type
;
9517 dreg
= cfg
->locals
[i
]->dreg
;
9519 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
9520 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
9522 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9523 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
9524 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9525 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
9526 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9527 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
9528 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
9529 ins
->type
= STACK_R8
;
9530 ins
->inst_p0
= (void*)&r8_0
;
9531 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
9532 MONO_ADD_INS (init_localsbb
, ins
);
9533 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
9534 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
9535 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
9536 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
9538 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9545 if (cfg
->method
== method
) {
9547 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
9548 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
9550 mono_create_spvar_for_region (cfg
, bb
->region
);
9551 if (cfg
->verbose_level
> 2)
9552 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
9556 g_slist_free (class_inits
);
9557 dont_inline
= g_list_remove (dont_inline
, method
);
9559 if (inline_costs
< 0) {
9562 /* Method is too large */
9563 mname
= mono_method_full_name (method
, TRUE
);
9564 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
9565 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
9570 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
9571 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
9573 return inline_costs
;
9576 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
9577 g_slist_free (class_inits
);
9578 dont_inline
= g_list_remove (dont_inline
, method
);
9582 g_slist_free (class_inits
);
9583 dont_inline
= g_list_remove (dont_inline
, method
);
9587 g_slist_free (class_inits
);
9588 dont_inline
= g_list_remove (dont_inline
, method
);
9589 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
9593 g_slist_free (class_inits
);
9594 dont_inline
= g_list_remove (dont_inline
, method
);
9595 set_exception_type_from_invalid_il (cfg
, method
, ip
);
9600 store_membase_reg_to_store_membase_imm (int opcode
)
9603 case OP_STORE_MEMBASE_REG
:
9604 return OP_STORE_MEMBASE_IMM
;
9605 case OP_STOREI1_MEMBASE_REG
:
9606 return OP_STOREI1_MEMBASE_IMM
;
9607 case OP_STOREI2_MEMBASE_REG
:
9608 return OP_STOREI2_MEMBASE_IMM
;
9609 case OP_STOREI4_MEMBASE_REG
:
9610 return OP_STOREI4_MEMBASE_IMM
;
9611 case OP_STOREI8_MEMBASE_REG
:
9612 return OP_STOREI8_MEMBASE_IMM
;
9614 g_assert_not_reached ();
9620 #endif /* DISABLE_JIT */
9623 mono_op_to_op_imm (int opcode
)
9633 return OP_IDIV_UN_IMM
;
9637 return OP_IREM_UN_IMM
;
9651 return OP_ISHR_UN_IMM
;
9668 return OP_LSHR_UN_IMM
;
9671 return OP_COMPARE_IMM
;
9673 return OP_ICOMPARE_IMM
;
9675 return OP_LCOMPARE_IMM
;
9677 case OP_STORE_MEMBASE_REG
:
9678 return OP_STORE_MEMBASE_IMM
;
9679 case OP_STOREI1_MEMBASE_REG
:
9680 return OP_STOREI1_MEMBASE_IMM
;
9681 case OP_STOREI2_MEMBASE_REG
:
9682 return OP_STOREI2_MEMBASE_IMM
;
9683 case OP_STOREI4_MEMBASE_REG
:
9684 return OP_STOREI4_MEMBASE_IMM
;
9686 #if defined(__i386__) || defined (__x86_64__)
9688 return OP_X86_PUSH_IMM
;
9689 case OP_X86_COMPARE_MEMBASE_REG
:
9690 return OP_X86_COMPARE_MEMBASE_IMM
;
9692 #if defined(__x86_64__)
9693 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
9694 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
9696 case OP_VOIDCALL_REG
:
9705 return OP_LOCALLOC_IMM
;
9712 ldind_to_load_membase (int opcode
)
9716 return OP_LOADI1_MEMBASE
;
9718 return OP_LOADU1_MEMBASE
;
9720 return OP_LOADI2_MEMBASE
;
9722 return OP_LOADU2_MEMBASE
;
9724 return OP_LOADI4_MEMBASE
;
9726 return OP_LOADU4_MEMBASE
;
9728 return OP_LOAD_MEMBASE
;
9730 return OP_LOAD_MEMBASE
;
9732 return OP_LOADI8_MEMBASE
;
9734 return OP_LOADR4_MEMBASE
;
9736 return OP_LOADR8_MEMBASE
;
9738 g_assert_not_reached ();
9745 stind_to_store_membase (int opcode
)
9749 return OP_STOREI1_MEMBASE_REG
;
9751 return OP_STOREI2_MEMBASE_REG
;
9753 return OP_STOREI4_MEMBASE_REG
;
9756 return OP_STORE_MEMBASE_REG
;
9758 return OP_STOREI8_MEMBASE_REG
;
9760 return OP_STORER4_MEMBASE_REG
;
9762 return OP_STORER8_MEMBASE_REG
;
9764 g_assert_not_reached ();
9771 mono_load_membase_to_load_mem (int opcode
)
9773 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9774 #if defined(__i386__) || defined(__x86_64__)
9776 case OP_LOAD_MEMBASE
:
9778 case OP_LOADU1_MEMBASE
:
9779 return OP_LOADU1_MEM
;
9780 case OP_LOADU2_MEMBASE
:
9781 return OP_LOADU2_MEM
;
9782 case OP_LOADI4_MEMBASE
:
9783 return OP_LOADI4_MEM
;
9784 case OP_LOADU4_MEMBASE
:
9785 return OP_LOADU4_MEM
;
9786 #if SIZEOF_REGISTER == 8
9787 case OP_LOADI8_MEMBASE
:
9788 return OP_LOADI8_MEM
;
9797 op_to_op_dest_membase (int store_opcode
, int opcode
)
9799 #if defined(__i386__)
9800 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
9805 return OP_X86_ADD_MEMBASE_REG
;
9807 return OP_X86_SUB_MEMBASE_REG
;
9809 return OP_X86_AND_MEMBASE_REG
;
9811 return OP_X86_OR_MEMBASE_REG
;
9813 return OP_X86_XOR_MEMBASE_REG
;
9816 return OP_X86_ADD_MEMBASE_IMM
;
9819 return OP_X86_SUB_MEMBASE_IMM
;
9822 return OP_X86_AND_MEMBASE_IMM
;
9825 return OP_X86_OR_MEMBASE_IMM
;
9828 return OP_X86_XOR_MEMBASE_IMM
;
9834 #if defined(__x86_64__)
9835 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
9840 return OP_X86_ADD_MEMBASE_REG
;
9842 return OP_X86_SUB_MEMBASE_REG
;
9844 return OP_X86_AND_MEMBASE_REG
;
9846 return OP_X86_OR_MEMBASE_REG
;
9848 return OP_X86_XOR_MEMBASE_REG
;
9850 return OP_X86_ADD_MEMBASE_IMM
;
9852 return OP_X86_SUB_MEMBASE_IMM
;
9854 return OP_X86_AND_MEMBASE_IMM
;
9856 return OP_X86_OR_MEMBASE_IMM
;
9858 return OP_X86_XOR_MEMBASE_IMM
;
9860 return OP_AMD64_ADD_MEMBASE_REG
;
9862 return OP_AMD64_SUB_MEMBASE_REG
;
9864 return OP_AMD64_AND_MEMBASE_REG
;
9866 return OP_AMD64_OR_MEMBASE_REG
;
9868 return OP_AMD64_XOR_MEMBASE_REG
;
9871 return OP_AMD64_ADD_MEMBASE_IMM
;
9874 return OP_AMD64_SUB_MEMBASE_IMM
;
9877 return OP_AMD64_AND_MEMBASE_IMM
;
9880 return OP_AMD64_OR_MEMBASE_IMM
;
9883 return OP_AMD64_XOR_MEMBASE_IMM
;
9893 op_to_op_store_membase (int store_opcode
, int opcode
)
9895 #if defined(__i386__) || defined(__x86_64__)
9898 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
9899 return OP_X86_SETEQ_MEMBASE
;
9901 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
9902 return OP_X86_SETNE_MEMBASE
;
9910 op_to_op_src1_membase (int load_opcode
, int opcode
)
9913 /* FIXME: This has sign extension issues */
9915 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9916 return OP_X86_COMPARE_MEMBASE8_IMM;
9919 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
9924 return OP_X86_PUSH_MEMBASE
;
9925 case OP_COMPARE_IMM
:
9926 case OP_ICOMPARE_IMM
:
9927 return OP_X86_COMPARE_MEMBASE_IMM
;
9930 return OP_X86_COMPARE_MEMBASE_REG
;
9935 /* FIXME: This has sign extension issues */
9937 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9938 return OP_X86_COMPARE_MEMBASE8_IMM;
9943 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
9944 return OP_X86_PUSH_MEMBASE
;
9946 /* FIXME: This only works for 32 bit immediates
9947 case OP_COMPARE_IMM:
9948 case OP_LCOMPARE_IMM:
9949 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9950 return OP_AMD64_COMPARE_MEMBASE_IMM;
9952 case OP_ICOMPARE_IMM
:
9953 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9954 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
9958 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
9959 return OP_AMD64_COMPARE_MEMBASE_REG
;
9962 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9963 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
9972 op_to_op_src2_membase (int load_opcode
, int opcode
)
9975 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
9981 return OP_X86_COMPARE_REG_MEMBASE
;
9983 return OP_X86_ADD_REG_MEMBASE
;
9985 return OP_X86_SUB_REG_MEMBASE
;
9987 return OP_X86_AND_REG_MEMBASE
;
9989 return OP_X86_OR_REG_MEMBASE
;
9991 return OP_X86_XOR_REG_MEMBASE
;
9998 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
9999 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
10003 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10004 return OP_AMD64_COMPARE_REG_MEMBASE
;
10007 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10008 return OP_X86_ADD_REG_MEMBASE
;
10010 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10011 return OP_X86_SUB_REG_MEMBASE
;
10013 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10014 return OP_X86_AND_REG_MEMBASE
;
10016 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10017 return OP_X86_OR_REG_MEMBASE
;
10019 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10020 return OP_X86_XOR_REG_MEMBASE
;
10022 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10023 return OP_AMD64_ADD_REG_MEMBASE
;
10025 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10026 return OP_AMD64_SUB_REG_MEMBASE
;
10028 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10029 return OP_AMD64_AND_REG_MEMBASE
;
10031 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10032 return OP_AMD64_OR_REG_MEMBASE
;
10034 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10035 return OP_AMD64_XOR_REG_MEMBASE
;
10043 mono_op_to_op_imm_noemul (int opcode
)
10046 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10051 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10059 return mono_op_to_op_imm (opcode
);
10063 #ifndef DISABLE_JIT
10066 * mono_handle_global_vregs:
10068 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10072 mono_handle_global_vregs (MonoCompile
*cfg
)
10074 gint32
*vreg_to_bb
;
10075 MonoBasicBlock
*bb
;
10078 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
10080 #ifdef MONO_ARCH_SIMD_INTRINSICS
10081 if (cfg
->uses_simd_intrinsics
)
10082 mono_simd_simplify_indirection (cfg
);
10085 /* Find local vregs used in more than one bb */
10086 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10087 MonoInst
*ins
= bb
->code
;
10088 int block_num
= bb
->block_num
;
10090 if (cfg
->verbose_level
> 2)
10091 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
10094 for (; ins
; ins
= ins
->next
) {
10095 const char *spec
= INS_INFO (ins
->opcode
);
10096 int regtype
, regindex
;
10099 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10100 mono_print_ins (ins
);
10102 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
10104 for (regindex
= 0; regindex
< 3; regindex
++) {
10107 if (regindex
== 0) {
10108 regtype
= spec
[MONO_INST_DEST
];
10109 if (regtype
== ' ')
10112 } else if (regindex
== 1) {
10113 regtype
= spec
[MONO_INST_SRC1
];
10114 if (regtype
== ' ')
10118 regtype
= spec
[MONO_INST_SRC2
];
10119 if (regtype
== ' ')
10124 #if SIZEOF_REGISTER == 4
10125 if (regtype
== 'l') {
10127 * Since some instructions reference the original long vreg,
10128 * and some reference the two component vregs, it is quite hard
10129 * to determine when it needs to be global. So be conservative.
10131 if (!get_vreg_to_inst (cfg
, vreg
)) {
10132 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10134 if (cfg
->verbose_level
> 2)
10135 printf ("LONG VREG R%d made global.\n", vreg
);
10139 * Make the component vregs volatile since the optimizations can
10140 * get confused otherwise.
10142 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
10143 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
10147 g_assert (vreg
!= -1);
10149 prev_bb
= vreg_to_bb
[vreg
];
10150 if (prev_bb
== 0) {
10151 /* 0 is a valid block num */
10152 vreg_to_bb
[vreg
] = block_num
+ 1;
10153 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
10154 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
10157 if (!get_vreg_to_inst (cfg
, vreg
)) {
10158 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10159 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
10163 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
10166 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
10169 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
10172 g_assert_not_reached ();
10176 /* Flag as having been used in more than one bb */
10177 vreg_to_bb
[vreg
] = -1;
10183 /* If a variable is used in only one bblock, convert it into a local vreg */
10184 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10185 MonoInst
*var
= cfg
->varinfo
[i
];
10186 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
10188 switch (var
->type
) {
10194 #if SIZEOF_REGISTER == 8
10197 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10198 /* Enabling this screws up the fp stack on x86 */
10201 /* Arguments are implicitly global */
10202 /* Putting R4 vars into registers doesn't work currently */
10203 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
10205 * Make that the variable's liveness interval doesn't contain a call, since
10206 * that would cause the lvreg to be spilled, making the whole optimization
10209 /* This is too slow for JIT compilation */
10211 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
10213 int def_index
, call_index
, ins_index
;
10214 gboolean spilled
= FALSE
;
10219 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
10220 const char *spec
= INS_INFO (ins
->opcode
);
10222 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
10223 def_index
= ins_index
;
10225 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
10226 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
10227 if (call_index
> def_index
) {
10233 if (MONO_IS_CALL (ins
))
10234 call_index
= ins_index
;
10244 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10245 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
10246 var
->flags
|= MONO_INST_IS_DEAD
;
10247 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
10254 * Compress the varinfo and vars tables so the liveness computation is faster and
10255 * takes up less space.
10258 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10259 MonoInst
*var
= cfg
->varinfo
[i
];
10260 if (pos
< i
&& cfg
->locals_start
== i
)
10261 cfg
->locals_start
= pos
;
10262 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
10264 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
10265 cfg
->varinfo
[pos
]->inst_c0
= pos
;
10266 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
10267 cfg
->vars
[pos
].idx
= pos
;
10268 #if SIZEOF_REGISTER == 4
10269 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
10270 /* Modify the two component vars too */
10273 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
10274 var1
->inst_c0
= pos
;
10275 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
10276 var1
->inst_c0
= pos
;
10283 cfg
->num_varinfo
= pos
;
10284 if (cfg
->locals_start
> cfg
->num_varinfo
)
10285 cfg
->locals_start
= cfg
->num_varinfo
;
10289 * mono_spill_global_vars:
10291 * Generate spill code for variables which are not allocated to registers,
10292 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10293 * code is generated which could be optimized by the local optimization passes.
10296 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
10298 MonoBasicBlock
*bb
;
10300 int orig_next_vreg
;
10301 guint32
*vreg_to_lvreg
;
10303 guint32 i
, lvregs_len
;
10304 gboolean dest_has_lvreg
= FALSE
;
10305 guint32 stacktypes
[128];
10306 MonoInst
**live_range_start
, **live_range_end
;
10307 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
10309 *need_local_opts
= FALSE
;
10311 memset (spec2
, 0, sizeof (spec2
));
10313 /* FIXME: Move this function to mini.c */
10314 stacktypes
['i'] = STACK_PTR
;
10315 stacktypes
['l'] = STACK_I8
;
10316 stacktypes
['f'] = STACK_R8
;
10317 #ifdef MONO_ARCH_SIMD_INTRINSICS
10318 stacktypes
['x'] = STACK_VTYPE
;
10321 #if SIZEOF_REGISTER == 4
10322 /* Create MonoInsts for longs */
10323 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10324 MonoInst
*ins
= cfg
->varinfo
[i
];
10326 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
10327 switch (ins
->type
) {
10328 #ifdef MONO_ARCH_SOFT_FLOAT
10334 g_assert (ins
->opcode
== OP_REGOFFSET
);
10336 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
10338 tree
->opcode
= OP_REGOFFSET
;
10339 tree
->inst_basereg
= ins
->inst_basereg
;
10340 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
10342 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
10344 tree
->opcode
= OP_REGOFFSET
;
10345 tree
->inst_basereg
= ins
->inst_basereg
;
10346 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
10356 /* FIXME: widening and truncation */
10359 * As an optimization, when a variable allocated to the stack is first loaded into
10360 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10361 * the variable again.
10363 orig_next_vreg
= cfg
->next_vreg
;
10364 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
10365 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
10369 * These arrays contain the first and last instructions accessing a given
10371 * Since we emit bblocks in the same order we process them here, and we
10372 * don't split live ranges, these will precisely describe the live range of
10373 * the variable, i.e. the instruction range where a valid value can be found
10374 * in the variables location.
10376 /* FIXME: Only do this if debugging info is requested */
10377 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10378 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10379 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10380 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10382 /* Add spill loads/stores */
10383 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10386 if (cfg
->verbose_level
> 2)
10387 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
10389 /* Clear vreg_to_lvreg array */
10390 for (i
= 0; i
< lvregs_len
; i
++)
10391 vreg_to_lvreg
[lvregs
[i
]] = 0;
10395 MONO_BB_FOR_EACH_INS (bb
, ins
) {
10396 const char *spec
= INS_INFO (ins
->opcode
);
10397 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
;
10398 gboolean store
, no_lvreg
;
10400 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10401 mono_print_ins (ins
);
10403 if (ins
->opcode
== OP_NOP
)
10407 * We handle LDADDR here as well, since it can only be decomposed
10408 * when variable addresses are known.
10410 if (ins
->opcode
== OP_LDADDR
) {
10411 MonoInst
*var
= ins
->inst_p0
;
10413 if (var
->opcode
== OP_VTARG_ADDR
) {
10414 /* Happens on SPARC/S390 where vtypes are passed by reference */
10415 MonoInst
*vtaddr
= var
->inst_left
;
10416 if (vtaddr
->opcode
== OP_REGVAR
) {
10417 ins
->opcode
= OP_MOVE
;
10418 ins
->sreg1
= vtaddr
->dreg
;
10420 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
10421 ins
->opcode
= OP_LOAD_MEMBASE
;
10422 ins
->inst_basereg
= vtaddr
->inst_basereg
;
10423 ins
->inst_offset
= vtaddr
->inst_offset
;
10427 g_assert (var
->opcode
== OP_REGOFFSET
);
10429 ins
->opcode
= OP_ADD_IMM
;
10430 ins
->sreg1
= var
->inst_basereg
;
10431 ins
->inst_imm
= var
->inst_offset
;
10434 *need_local_opts
= TRUE
;
10435 spec
= INS_INFO (ins
->opcode
);
10438 if (ins
->opcode
< MONO_CEE_LAST
) {
10439 mono_print_ins (ins
);
10440 g_assert_not_reached ();
10444 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10448 if (MONO_IS_STORE_MEMBASE (ins
)) {
10449 tmp_reg
= ins
->dreg
;
10450 ins
->dreg
= ins
->sreg2
;
10451 ins
->sreg2
= tmp_reg
;
10454 spec2
[MONO_INST_DEST
] = ' ';
10455 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10456 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10458 } else if (MONO_IS_STORE_MEMINDEX (ins
))
10459 g_assert_not_reached ();
10464 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10465 printf ("\t %.3s %d %d %d\n", spec
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
10470 regtype
= spec
[MONO_INST_DEST
];
10471 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
10474 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
10475 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
10476 MonoInst
*store_ins
;
10478 MonoInst
*def_ins
= ins
;
10479 int dreg
= ins
->dreg
; /* The original vreg */
10481 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
10483 if (var
->opcode
== OP_REGVAR
) {
10484 ins
->dreg
= var
->dreg
;
10485 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
10487 * Instead of emitting a load+store, use a _membase opcode.
10489 g_assert (var
->opcode
== OP_REGOFFSET
);
10490 if (ins
->opcode
== OP_MOVE
) {
10494 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
10495 ins
->inst_basereg
= var
->inst_basereg
;
10496 ins
->inst_offset
= var
->inst_offset
;
10499 spec
= INS_INFO (ins
->opcode
);
10503 g_assert (var
->opcode
== OP_REGOFFSET
);
10505 prev_dreg
= ins
->dreg
;
10507 /* Invalidate any previous lvreg for this vreg */
10508 vreg_to_lvreg
[ins
->dreg
] = 0;
10512 #ifdef MONO_ARCH_SOFT_FLOAT
10513 if (store_opcode
== OP_STORER8_MEMBASE_REG
) {
10515 store_opcode
= OP_STOREI8_MEMBASE_REG
;
10519 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10521 if (regtype
== 'l') {
10522 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
10523 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10524 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
10525 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10526 def_ins
= store_ins
;
10529 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
10531 /* Try to fuse the store into the instruction itself */
10532 /* FIXME: Add more instructions */
10533 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
10534 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
10535 ins
->inst_imm
= ins
->inst_c0
;
10536 ins
->inst_destbasereg
= var
->inst_basereg
;
10537 ins
->inst_offset
= var
->inst_offset
;
10538 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
10539 ins
->opcode
= store_opcode
;
10540 ins
->inst_destbasereg
= var
->inst_basereg
;
10541 ins
->inst_offset
= var
->inst_offset
;
10545 tmp_reg
= ins
->dreg
;
10546 ins
->dreg
= ins
->sreg2
;
10547 ins
->sreg2
= tmp_reg
;
10550 spec2
[MONO_INST_DEST
] = ' ';
10551 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10552 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10554 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
10555 // FIXME: The backends expect the base reg to be in inst_basereg
10556 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
10558 ins
->inst_basereg
= var
->inst_basereg
;
10559 ins
->inst_offset
= var
->inst_offset
;
10560 spec
= INS_INFO (ins
->opcode
);
10562 /* printf ("INS: "); mono_print_ins (ins); */
10563 /* Create a store instruction */
10564 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
10566 /* Insert it after the instruction */
10567 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10569 def_ins
= store_ins
;
10572 * We can't assign ins->dreg to var->dreg here, since the
10573 * sregs could use it. So set a flag, and do it after
10576 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
10577 dest_has_lvreg
= TRUE
;
10582 if (def_ins
&& !live_range_start
[dreg
]) {
10583 live_range_start
[dreg
] = def_ins
;
10584 live_range_start_bb
[dreg
] = bb
;
10591 for (srcindex
= 0; srcindex
< 2; ++srcindex
) {
10592 regtype
= spec
[(srcindex
== 0) ? MONO_INST_SRC1
: MONO_INST_SRC2
];
10593 sreg
= srcindex
== 0 ? ins
->sreg1
: ins
->sreg2
;
10595 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
10596 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
10597 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
10598 MonoInst
*use_ins
= ins
;
10599 MonoInst
*load_ins
;
10600 guint32 load_opcode
;
10602 if (var
->opcode
== OP_REGVAR
) {
10604 ins
->sreg1
= var
->dreg
;
10606 ins
->sreg2
= var
->dreg
;
10607 live_range_end
[var
->dreg
] = use_ins
;
10608 live_range_end_bb
[var
->dreg
] = bb
;
10612 g_assert (var
->opcode
== OP_REGOFFSET
);
10614 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
10616 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
10618 if (vreg_to_lvreg
[sreg
]) {
10619 /* The variable is already loaded to an lvreg */
10620 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10621 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
10623 ins
->sreg1
= vreg_to_lvreg
[sreg
];
10625 ins
->sreg2
= vreg_to_lvreg
[sreg
];
10629 /* Try to fuse the load into the instruction */
10630 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
10631 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
10632 ins
->inst_basereg
= var
->inst_basereg
;
10633 ins
->inst_offset
= var
->inst_offset
;
10634 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
10635 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
10636 ins
->sreg2
= var
->inst_basereg
;
10637 ins
->inst_offset
= var
->inst_offset
;
10639 if (MONO_IS_REAL_MOVE (ins
)) {
10640 ins
->opcode
= OP_NOP
;
10643 //printf ("%d ", srcindex); mono_print_ins (ins);
10645 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10647 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
10648 if (var
->dreg
== prev_dreg
) {
10650 * sreg refers to the value loaded by the load
10651 * emitted below, but we need to use ins->dreg
10652 * since it refers to the store emitted earlier.
10656 vreg_to_lvreg
[var
->dreg
] = sreg
;
10657 g_assert (lvregs_len
< 1024);
10658 lvregs
[lvregs_len
++] = var
->dreg
;
10667 if (regtype
== 'l') {
10668 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
10669 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10670 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
10671 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10672 use_ins
= load_ins
;
10675 #if SIZEOF_REGISTER == 4
10676 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
10678 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
10679 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10680 use_ins
= load_ins
;
10684 if (var
->dreg
< orig_next_vreg
) {
10685 live_range_end
[var
->dreg
] = use_ins
;
10686 live_range_end_bb
[var
->dreg
] = bb
;
10691 if (dest_has_lvreg
) {
10692 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
10693 g_assert (lvregs_len
< 1024);
10694 lvregs
[lvregs_len
++] = prev_dreg
;
10695 dest_has_lvreg
= FALSE
;
10699 tmp_reg
= ins
->dreg
;
10700 ins
->dreg
= ins
->sreg2
;
10701 ins
->sreg2
= tmp_reg
;
10704 if (MONO_IS_CALL (ins
)) {
10705 /* Clear vreg_to_lvreg array */
10706 for (i
= 0; i
< lvregs_len
; i
++)
10707 vreg_to_lvreg
[lvregs
[i
]] = 0;
10711 if (cfg
->verbose_level
> 2)
10712 mono_print_ins_index (1, ins
);
10716 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10718 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10719 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10721 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10722 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
10725 if (live_range_start
[vreg
]) {
10726 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
10728 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
10730 if (live_range_end
[vreg
]) {
10731 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
10733 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
10738 g_free (live_range_start
);
10739 g_free (live_range_end
);
10740 g_free (live_range_start_bb
);
10741 g_free (live_range_end_bb
);
10746 * - use 'iadd' instead of 'int_add'
10747 * - handling ovf opcodes: decompose in method_to_ir.
10748 * - unify iregs/fregs
10749 * -> partly done, the missing parts are:
10750 * - a more complete unification would involve unifying the hregs as well, so
10751 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10752 * would no longer map to the machine hregs, so the code generators would need to
10753 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10754 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10755 * fp/non-fp branches speeds it up by about 15%.
10756 * - use sext/zext opcodes instead of shifts
10758 * - get rid of TEMPLOADs if possible and use vregs instead
10759 * - clean up usage of OP_P/OP_ opcodes
10760 * - cleanup usage of DUMMY_USE
10761 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10763 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10764 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10765 * - make sure handle_stack_args () is called before the branch is emitted
10766 * - when the new IR is done, get rid of all unused stuff
10767 * - COMPARE/BEQ as separate instructions or unify them ?
10768 * - keeping them separate allows specialized compare instructions like
10769 * compare_imm, compare_membase
10770 * - most back ends unify fp compare+branch, fp compare+ceq
10771 * - integrate mono_save_args into inline_method
10772 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10773 * - handle long shift opts on 32 bit platforms somehow: they require
10774 * 3 sregs (2 for arg1 and 1 for arg2)
10775 * - make byref a 'normal' type.
10776 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10777 * variable if needed.
10778 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10779 * like inline_method.
10780 * - remove inlining restrictions
10781 * - fix LNEG and enable cfold of INEG
10782 * - generalize x86 optimizations like ldelema as a peephole optimization
10783 * - add store_mem_imm for amd64
10784 * - optimize the loading of the interruption flag in the managed->native wrappers
10785 * - avoid special handling of OP_NOP in passes
10786 * - move code inserting instructions into one function/macro.
10787 * - try a coalescing phase after liveness analysis
10788 * - add float -> vreg conversion + local optimizations on !x86
10789 * - figure out how to handle decomposed branches during optimizations, ie.
10790 * compare+branch, op_jump_table+op_br etc.
10791 * - promote RuntimeXHandles to vregs
10792 * - vtype cleanups:
10793 * - add a NEW_VARLOADA_VREG macro
10794 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10795 * accessing vtype fields.
10796 * - get rid of I8CONST on 64 bit platforms
10797 * - dealing with the increase in code size due to branches created during opcode
10799 * - use extended basic blocks
10800 * - all parts of the JIT
10801 * - handle_global_vregs () && local regalloc
10802 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10803 * - sources of increase in code size:
10806 * - isinst and castclass
10807 * - lvregs not allocated to global registers even if used multiple times
10808 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10810 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10811 * - add all micro optimizations from the old JIT
10812 * - put tree optimizations into the deadce pass
10813 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10814 * specific function.
10815 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10816 * fcompare + branchCC.
10817 * - create a helper function for allocating a stack slot, taking into account
10818 * MONO_CFG_HAS_SPILLUP.
10820 * - merge the ia64 switch changes.
10821 * - optimize mono_regstate2_alloc_int/float.
10822 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10823 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10824 * parts of the tree could be separated by other instructions, killing the tree
10825 * arguments, or stores killing loads etc. Also, should we fold loads into other
10826 * instructions if the result of the load is used multiple times ?
10827 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10828 * - LAST MERGE: 108395.
10829 * - when returning vtypes in registers, generate IR and append it to the end of the
10830 * last bb instead of doing it in the epilog.
10831 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10839 - When to decompose opcodes:
10840 - earlier: this makes some optimizations hard to implement, since the low level IR
10841 no longer contains the necessary information. But it is easier to do.
10842 - later: harder to implement, enables more optimizations.
10843 - Branches inside bblocks:
10844 - created when decomposing complex opcodes.
10845 - branches to another bblock: harmless, but not tracked by the branch
10846 optimizations, so need to branch to a label at the start of the bblock.
10847 - branches to inside the same bblock: very problematic, trips up the local
10848 reg allocator. Can be fixed by splitting the current bblock, but that is a
10849 complex operation, since some local vregs can become global vregs etc.
10850 - Local/global vregs:
10851 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10852 local register allocator.
10853 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10854 structure, created by mono_create_var (). Assigned to hregs or the stack by
10855 the global register allocator.
10856 - When to do optimizations like alu->alu_imm:
10857 - earlier -> saves work later on since the IR will be smaller/simpler
10858 - later -> can work on more instructions
10859 - Handling of valuetypes:
10860 - When a vtype is pushed on the stack, a new temporary is created, an
10861 instruction computing its address (LDADDR) is emitted and pushed on
10862 the stack. Need to optimize cases when the vtype is used immediately as in
10863 argument passing, stloc etc.
10864 - Instead of the to_end stuff in the old JIT, simply call the function handling
10865 the values on the stack before emitting the last instruction of the bb.
10868 #endif /* DISABLE_JIT */