2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
/*
 * Error-exit helper macros used throughout the CIL-to-IR conversion.
 * Each one records an exception on the MonoCompile and jumps to the
 * function-local `exception_exit` label.
 * NOTE(review): the closing "} while (0)" lines (and some interior lines)
 * of these macros are not visible in this chunk — confirm against the
 * full file before editing.
 */

/* Abort inlining: only meaningful when compiling an inlined callee
 * (cfg->method != method) that is not a wrapper. */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\

/* Bail out of IR generation if an exception has already been recorded. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\

/* Record a MethodAccessException naming both methods, then bail out. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\

/* Record a FieldAccessException naming the field and method, then bail out. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\

/* When compiling with generic sharing, mark the opcode as unshareable
 * (optionally tracing it) and bail out; the method is then compiled
 * without sharing. */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;	\
			goto exception_exit;	\

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in
 * this file (CIL ldind/stind -> load/store-membase, reg/imm variants). */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);

/* helper methods signature: trampoline signatures shared across the JIT,
 * defined elsewhere (hence extern). */
extern MonoMethodSignature *helper_sig_class_init_trampoline;
extern MonoMethodSignature *helper_sig_domain_get;
extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
extern MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/*
 * Instruction metadata: per-opcode register-descriptor tables built by
 * expanding mini-ops.h through the MINI_OP/MINI_OP3 macros.
 * NOTE(review): several lines of this region (the first table's
 * declaration, #undef/#endif lines) are not visible in this chunk.
 */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
/* Redefined so the next expansion of mini-ops.h yields, per opcode, the
 * count of source registers instead of the register descriptors. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 ins_sreg_counts [] = {
#include "mini-ops.h"
/* Initialize liveness-range bookkeeping for variable-info 'vi'. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 * Copy the three source virtual registers from 'regs' into 'ins'.
 * 'regs' must have at least 3 elements; unused slots presumably hold -1
 * — TODO confirm against callers.
 * NOTE(review): return type and braces are not visible in this chunk.
 */
mono_inst_set_src_registers (MonoInst *ins, int *regs)
	ins->sreg1 = regs [0];
	ins->sreg2 = regs [1];
	ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer virtual register in 'cfg'.
 * NOTE(review): return type line not visible in this chunk. */
mono_alloc_ireg (MonoCompile *cfg)
	return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point virtual register. */
mono_alloc_freg (MonoCompile *cfg)
	return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized virtual register. */
mono_alloc_preg (MonoCompile *cfg)
	return alloc_preg (cfg);
/* Public wrapper: allocate a destination virtual register suitable for a
 * value of the given evaluation-stack type (STACK_I4/I8/R8/...). */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
	return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the move opcode used to copy values of that type
 * between virtual registers (presumably OP_MOVE/OP_LMOVE/OP_FMOVE/
 * OP_VMOVE — the return statements are not visible in this chunk; TODO
 * confirm). Enums are reduced to their underlying base type, and generic
 * instances to their container class, before classification.
 * NOTE(review): many case labels and bodies are missing from this chunk;
 * the structure below is fragmentary.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
	switch (type->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
	case MONO_TYPE_VALUETYPE:
		/* enums move like their underlying integral type */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		/* SIMD value types get special handling */
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		/* classify a generic instance by its container class */
		type = &type->data.generic_class->container_class->byval_arg;
		/* type variables only reach here under generic sharing */
		g_assert (cfg->generic_sharing_context);
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: print a basic block to stdout — its number, incoming and
 * outgoing edges (with each neighbour's depth-first number), and every
 * instruction in the block via mono_print_ins_index ().
 * NOTE(review): declarations of 'i'/'tree' and the "OUT:" half of the
 * printf framing are not visible in this chunk.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
/*
 * IR-emission helper macros. Can't put this at the beginning, since other
 * files reference stuff from this region.
 * NOTE(review): several interior/closing lines of these macros are not
 * visible in this chunk.
 */

/* Signal unverifiable IL: break into the debugger if requested, else jump
 * to the 'unverified' label. */
#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)

/* Fetch (or lazily create and register) the basic block starting at IL
 * offset 'ip', via the cil_offset_to_bb cache. */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \

#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_preg ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \

#if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand mixed with a PTR operand
 * before a binary op. */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
/* 32-bit targets need no widening. */
#define ADD_WIDEN_OP(ins, arg1, arg2)

/* Pop two stack values, emit the type-specialized binary op, push the
 * (possibly decomposed) result. */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		/* Have to insert a widening op */ \
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \

/* Pop one stack value, emit the type-specialized unary op, push result. */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \

/* Emit a compare + conditional branch; wire up true/false successor
 * blocks and spill the evaluation stack at the block boundary. */
#define ADD_BINCOND(next_block) do {	\
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		if (sp != stack_start) {	\
		    handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
/*
 * link_bblock: Links two basic blocks
 *
 * links two basic blocks in the control flow graph, the 'from'
 * argument is the starting block and the 'to' argument is the block
 * the control flow ends to after 'from'.
 *
 * The edge is added to both from->out_bb and to->in_bb; the loops over
 * the existing arrays first check for a duplicate edge, then the arrays
 * are grown by one via mempool allocation and copied.
 * NOTE(review): return type, braces, the tracing guard, and the lines
 * that store 'to'/'from' into the new arrays and update the counts are
 * not visible in this chunk.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock * to)
	MonoBasicBlock **newa;

	/* debug tracing of the edge being added (guard not visible here) */
	if (from->cil_code) {
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
			printf ("edge from entry to exit\n");

	/* don't add the edge twice: bail if 'to' is already a successor */
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
	/* grow from->out_bb by one slot and copy the old entries */
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
	for (i = 0; i < from->out_count; ++i) {
		newa [i] = from->out_bb [i];

	/* same dance for the predecessor list of 'to' */
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
	newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
	for (i = 0; i < to->in_count; ++i) {
		newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock * to)
	link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 * We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 *   about the clause owner for this block.
 *
 * The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter.
 *
 * Token layout (from the returns below): ((clause_index + 1) << 8)
 * OR'ed with a MONO_REGION_* kind and the clause flags.
 * NOTE(review): return type, braces, declaration of 'i', and the final
 * "not in any clause" return are not visible in this chunk.
 */
mono_find_block_region (MonoCompile *cfg, int offset)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* inside the filter expression of a filter clause? */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;

		/* inside the handler: finally / fault / catch */
		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;

		/* inside the protected (try) range itself */
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
/*
 * Collect the exception clauses of kind 'type' whose protected range
 * contains 'ip' but not 'target' — i.e. the clauses that a branch from
 * ip to target would leave (presumably used to run finally/fault
 * handlers on leave — TODO confirm against callers).
 * Returns the accumulated GList ('res'; its declaration, initialization
 * and the final return are not visible in this chunk).
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* clause covers ip but not the branch target => it is exited */
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type)
				res = g_list_append (res, clause);
/*
 * Get (or lazily create) the stack-pointer spill variable for an
 * exception-handling region, cached in cfg->spvars keyed by region id.
 * NOTE(review): return type, braces, the early-return when the lookup
 * hits, and the final return are not visible in this chunk.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));

	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at the given IL
 * offset; returns NULL when none has been created yet. */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get (or lazily create) the object-typed variable that holds the
 * in-flight exception for the handler at IL 'offset', cached in
 * cfg->exvars. Mirrors mono_create_spvar_for_region () above.
 * NOTE(review): return type, braces, the early-return on lookup hit,
 * and the final return are not visible in this chunk.
 */
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));

	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 *
 * Sets inst->type to a STACK_* classification (and inst->klass) for the
 * given MonoType. Enums are reduced to their base type and generic
 * instances to their container class before classification.
 * NOTE(review): many case labels, 'break's, and the byref early-return
 * path are missing from this chunk; the switch below is fragmentary.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
	inst->klass = klass = mono_class_from_mono_type (type);
	/* byref values are managed pointers on the stack */
	inst->type = STACK_MP;

	switch (type->type) {
		inst->type = STACK_INV;
	case MONO_TYPE_BOOLEAN:
		inst->type = STACK_I4;
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		inst->type = STACK_I8;
		inst->type = STACK_R8;
	case MONO_TYPE_VALUETYPE:
		/* enums load as their underlying integral type */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		inst->type = STACK_VTYPE;
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		inst->type = STACK_OBJ;
		g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 * Two-dimensional tables are indexed by [src1->type][src2->type] using the
 * STACK_* enum; STACK_INV marks an invalid operand combination.
 * NOTE(review): the declaration lines of several tables (storage class /
 * element type, and e.g. the neg_table header) are not visible in this chunk.
 */
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}

/* neg_table: result stack type of a negation, per operand stack type
 * (declaration line not visible in this chunk). */
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV

/* reduce the size of this table */
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}

/* bin_comp_table: non-zero when the two operand types are comparable;
 * the specific non-zero values distinguish comparison families (see the
 * "& 1" test in type_from_op). NOTE(review): the first (Inv) row is not
 * visible in this chunk. */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt */
	{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */

/* reduce the size of this table */
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}

/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode. Each entry is the delta added to the generic
 * opcode, indexed by the operand's STACK_* type.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD

/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG

/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2

/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN

/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1

/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ

/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 *
 * The generic opcode in ins->opcode is also specialized in place by
 * adding the delta from the *_op_map tables above (or replaced outright
 * for compares/conversions).
 * NOTE(review): most case labels, 'break's and the return type are not
 * visible in this chunk; the switch below is fragmentary.
 */
type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
	switch (ins->opcode) {
		/* --- arithmetic binops (add/sub/mul/div/rem) --- */
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* --- integer-only binops (and/or/xor) --- */
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* --- shifts: result type follows the shifted operand --- */
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		/* --- compare: pick long/float/int compare by operand type --- */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
			ins->opcode = OP_ICOMPARE;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		/* --- conditional branches (beq..blt.un) --- */
		ins->opcode += beqops_op_map [src1->type];
		/* --- ceq family --- */
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* --- cgt/clt family: low bit of the table selects validity --- */
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		/* --- neg --- */
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		/* --- not: only integral/pointer operands are valid --- */
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		/* --- conv to small int --- */
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		/* --- conv.r.un --- */
		ins->type = STACK_R8;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_R_UN;
			ins->opcode = OP_LCONV_TO_R_UN;
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* --- conv.u: pointer-width unsigned conversion --- */
		ins->type = STACK_PTR;
		switch (src1->type) {
			ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_REGISTER == 8
			ins->opcode = OP_LCONV_TO_U;
			/* on 32 bit, pointer-sized values need no conversion */
			ins->opcode = OP_MOVE;
			ins->opcode = OP_LCONV_TO_U;
			ins->opcode = OP_FCONV_TO_U;
		/* --- conv to 64 bit --- */
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		/* --- conv to float --- */
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		ins->type = STACK_R8;
		/* --- conv.u2/u1 etc. via ovfops map --- */
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
		/* --- conv.i/conv.u: native-int result --- */
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
		/* --- add/sub/mul.ovf: numeric table, no float result allowed --- */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
	case OP_LOADR4_MEMBASE:
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);

	/* managed pointers with no better class info default to object */
	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
/* Stack types produced by the ldind.* opcode family, in opcode order
 * (declaration line not visible in this chunk). */
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ

/* Argument-compatibility table for check_values_to_signature () below
 * (contents not visible in this chunk). */
param_table [STACK_MAX] [STACK_MAX] = {
/*
 * Validate that the evaluation-stack types in 'args' are compatible with
 * the parameter types of 'sig' (and presumably the 'this' type) —
 * return statements are not visible in this chunk; TODO confirm the
 * 0/1 result convention against callers.
 * NOTE(review): most case labels, bodies and returns are missing here;
 * the structure below is fragmentary.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	switch (args->type) {
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
			/* managed pointers require a byref parameter... */
			if (!sig->params [i]->byref)
			/* ...and object/int parameters must not be byref */
			if (sig->params [i]->byref)
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
			if (sig->params [i]->byref)
			/* float stack entries must match an R4/R8 parameter */
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
		/*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 * The variable is created lazily on first request and cached on the cfg.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;
/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling. Only used on architectures that define
 * MONO_ARCH_NEED_GOT_VAR; otherwise (and when not compiling AOT) the
 * early-return path — not visible in this chunk — presumably returns
 * NULL. The variable is created lazily and cached on the cfg.
 */
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->got_var;
/*
 * Return the variable holding the runtime generic context / vtable used
 * by generic sharing; only valid when cfg->generic_sharing_context is
 * set. Created lazily, cached in cfg->rgctx_var.
 */
mono_get_vtable_var (MonoCompile *cfg)
	g_assert (cfg->generic_sharing_context);

	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_INDIRECT;

	return cfg->rgctx_var;
/*
 * Map an instruction's evaluation-stack type (STACK_*) back to a
 * MonoType*. STACK_MP and STACK_VTYPE use the instruction's klass;
 * the rest map to fixed mono_defaults classes.
 * NOTE(review): return type and the STACK_MP case label are not visible
 * in this chunk.
 */
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
		/* managed pointer: the byref form of the pointed-to class */
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
	g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its STACK_* evaluation-stack classification, after
 * stripping enum wrappers via mono_type_get_underlying_type ().
 * NOTE(review): the switch header, all return statements, and many case
 * labels are not visible in this chunk; the structure is fragmentary.
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
	t = mono_type_get_underlying_type (t);
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
	case MONO_TYPE_GENERICINST:
		/* generic instances split on valuetype-ness */
		if (mono_type_generic_inst_is_valuetype (t))
	g_assert_not_reached ();
/*
 * Map a CIL array-access opcode (ldelem.*/stelem.* family — only the
 * *_REF labels are visible in this chunk) to the element MonoClass it
 * operates on.
 * NOTE(review): return type, switch header and most case labels are not
 * visible in this chunk.
 */
array_access_to_klass (int opcode)
		return mono_defaults.byte_class;
		return mono_defaults.uint16_class;
		return mono_defaults.int_class;
		return mono_defaults.sbyte_class;
		return mono_defaults.int16_class;
		return mono_defaults.int32_class;
		return mono_defaults.uint32_class;
		return mono_defaults.int64_class;
		return mono_defaults.single_class;
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	g_assert_not_reached ();
/*
 * We try to share variables when possible: spill variables for the same
 * (stack slot, stack type) pair are cached in cfg->intvars and reused
 * across basic-block boundaries; slots beyond the method's declared
 * max_stack (possible due to inlining) always get a fresh variable.
 * NOTE(review): return type, declarations of pos/vnum/res, the switch
 * case labels and the final return are not visible in this chunk.
 */
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* cache index: one entry per (type, slot) pair */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
		/* cache hit: reuse the previously created variable */
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		/* non-cacheable types always get a fresh variable */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * Record an image+token pair for 'key' in cfg->token_info_hash so the
 * AOT compiler can later resolve the item from metadata alone.
 */
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 *
 * NOTE(review): return type, braces, declarations of i/bindex, several
 * continue/break lines and loop-increment lines are not visible in this
 * chunk; the structure below is fragmentary.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;

	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);

	/* First exit from this bb: establish bb->out_stack, preferring a
	 * successor's already-assigned in_stack so the join shares vars. */
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				bb->out_stack = outb->in_stack;

		/* no successor had an in_stack: allocate fresh spill slots */
		bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
		for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpouse, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * slot as well.
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 * example.
			 */
			if (cfg->inlined_method)
				bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);

	/* Propagate out_stack to each successor's in_stack; a join with a
	 * different stack depth makes the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
			continue; /* check they are the same locals */
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;

	/* Spill every stack item into its variable and replace the stack
	 * entry with the variable's load. */
	locals = bb->out_stack;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	/* Find a bblock which has a different in_stack */
	while (bindex < bb->out_count) {
		outb = bb->out_bb [bindex];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER) {
		if (outb->in_stack != locals) {
			for (i = 0; i < count; ++i) {
				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
				inst->cil_code = sp [i]->cil_code;
				sp [i] = locals [i];
				if (cfg->verbose_level > 3)
					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
			locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
/*
 * Load interface_offsets [klass->interface_id] into intf_reg.
 * The interface-offsets array is stored in memory immediately before the
 * vtable, hence the negative membase offset in the non-AOT path.
 * NOTE(review): this fragment is missing interior lines (braces/else arm)
 * dropped by extraction; comments describe only the visible statements.
 */
1359 mini_emit_load_intf_reg_vtable (MonoCompile
*cfg
, int intf_reg
, int vtable_reg
, MonoClass
*klass
)
/* AOT path: the adjusted IID is a load-time patch constant, added to the vtable pointer. */
1361 if (cfg
->compile_aot
) {
1362 int ioffset_reg
= alloc_preg (cfg
);
1363 int iid_reg
= alloc_preg (cfg
);
1365 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_ADJUSTED_IID
);
1366 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ioffset_reg
, iid_reg
, vtable_reg
);
1367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, ioffset_reg
, 0);
/* JIT path: interface_id is known now, so a fixed negative offset suffices. */
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, vtable_reg
, -((klass
->interface_id
+ 1) * SIZEOF_VOID_P
));
1375 mini_emit_interface_bitmap_check (MonoCompile
*cfg
, int intf_bit_reg
, int base_reg
, int offset
, MonoClass
*klass
)
1377 int ibitmap_reg
= alloc_preg (cfg
);
1378 #ifdef COMPRESSED_INTERFACE_BITMAP
1380 MonoInst
*res
, *ins
;
1381 NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, ibitmap_reg
, base_reg
, offset
);
1382 MONO_ADD_INS (cfg
->cbb
, ins
);
1384 if (cfg
->compile_aot
)
1385 EMIT_NEW_AOTCONST (cfg
, args
[1], MONO_PATCH_INFO_IID
, klass
);
1387 EMIT_NEW_ICONST (cfg
, args
[1], klass
->interface_id
);
1388 res
= mono_emit_jit_icall (cfg
, mono_class_interface_match
, args
);
1389 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, intf_bit_reg
, res
->dreg
);
1391 int ibitmap_byte_reg
= alloc_preg (cfg
);
1393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, base_reg
, offset
);
1395 if (cfg
->compile_aot
) {
1396 int iid_reg
= alloc_preg (cfg
);
1397 int shifted_iid_reg
= alloc_preg (cfg
);
1398 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1399 int masked_iid_reg
= alloc_preg (cfg
);
1400 int iid_one_bit_reg
= alloc_preg (cfg
);
1401 int iid_bit_reg
= alloc_preg (cfg
);
1402 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1404 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, masked_iid_reg
, iid_reg
, 7);
1407 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1408 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1409 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1418 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1419 * stored in "klass_reg" implements the interface "klass".
/*
 * Set intf_bit_reg to nonzero iff the MonoClass in klass_reg implements
 * the interface "klass": delegates to the bitmap check using the
 * interface_bitmap field of MonoClass.
 */
1422 mini_emit_load_intf_bit_reg_class (MonoCompile
*cfg
, int intf_bit_reg
, int klass_reg
, MonoClass
*klass
)
1424 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, interface_bitmap
), klass
);
1428 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1429 * stored in "vtable_reg" implements the interface "klass".
/*
 * Set intf_bit_reg to nonzero iff the MonoVTable in vtable_reg implements
 * the interface "klass": same as the _class variant, but reads the
 * interface_bitmap field of MonoVTable.
 */
1432 mini_emit_load_intf_bit_reg_vtable (MonoCompile
*cfg
, int intf_bit_reg
, int vtable_reg
, MonoClass
*klass
)
1434 mini_emit_interface_bitmap_check (cfg
, intf_bit_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, interface_bitmap
), klass
);
1438 * Emit code which checks whether the interface id of @klass is smaller than
1439 * the value given by max_iid_reg.
/*
 * Compare klass's interface id against max_iid_reg; on max_iid < iid either
 * branch to false_target or (presumably when false_target is NULL — the
 * selecting line was dropped by extraction, TODO confirm) raise
 * InvalidCastException.
 */
1442 mini_emit_max_iid_check (MonoCompile
*cfg
, int max_iid_reg
, MonoClass
*klass
,
1443 MonoBasicBlock
*false_target
)
/* AOT: the IID is not known at compile time, load it from a patch constant. */
1445 if (cfg
->compile_aot
) {
1446 int iid_reg
= alloc_preg (cfg
);
1447 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1448 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, max_iid_reg
, iid_reg
);
/* JIT: compare against the immediate interface_id. */
1451 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, max_iid_reg
, klass
->interface_id
);
1453 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1455 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1458 /* Same as above, but obtains max_iid from a vtable */
/*
 * Same as mini_emit_max_iid_check, but loads max_interface_id from the
 * MonoVTable pointed to by vtable_reg (16-bit unsigned load).
 */
1460 mini_emit_max_iid_check_vtable (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
,
1461 MonoBasicBlock
*false_target
)
1463 int max_iid_reg
= alloc_preg (cfg
);
1465 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, max_interface_id
));
1466 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1469 /* Same as above, but obtains max_iid from a klass */
/*
 * Same as mini_emit_max_iid_check, but loads max_interface_id from the
 * MonoClass pointed to by klass_reg (16-bit unsigned load).
 */
1471 mini_emit_max_iid_check_class (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
,
1472 MonoBasicBlock
*false_target
)
1474 int max_iid_reg
= alloc_preg (cfg
);
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, max_interface_id
));
1477 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1481 mini_emit_isninst_cast_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_ins
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1483 int idepth_reg
= alloc_preg (cfg
);
1484 int stypes_reg
= alloc_preg (cfg
);
1485 int stype
= alloc_preg (cfg
);
1487 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1488 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1489 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1495 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, klass_ins
->dreg
);
1496 } else if (cfg
->compile_aot
) {
1497 int const_reg
= alloc_preg (cfg
);
1498 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1499 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, const_reg
);
1501 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, stype
, klass
);
1503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, true_target
);
/*
 * Convenience wrapper over mini_emit_isninst_cast_inst with no
 * runtime MonoInst for the class (klass_ins == NULL).
 */
1507 mini_emit_isninst_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1509 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, NULL
, false_target
, true_target
);
/*
 * Emit an interface cast check against the vtable in vtable_reg:
 * first the max-iid range check, then the interface-bitmap bit test.
 * On a set bit either branch to true_target or (presumably when the
 * targets are NULL — the selecting line was dropped by extraction,
 * TODO confirm) fall through to the InvalidCastException path.
 */
1513 mini_emit_iface_cast (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1515 int intf_reg
= alloc_preg (cfg
);
1517 mini_emit_max_iid_check_vtable (cfg
, vtable_reg
, klass
, false_target
);
1518 mini_emit_load_intf_bit_reg_vtable (cfg
, intf_reg
, vtable_reg
, klass
);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_reg
, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1523 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
/*
 * Variant of mini_emit_iface_cast that takes a register holding the
 * MonoClass rather than the MonoVTable; identical check structure.
 */
1530 mini_emit_iface_class_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1532 int intf_bit_reg
= alloc_preg (cfg
);
1534 mini_emit_max_iid_check_class (cfg
, klass_reg
, klass
, false_target
);
1535 mini_emit_load_intf_bit_reg_class (cfg
, intf_bit_reg
, klass_reg
, klass
);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_bit_reg
, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1540 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
/*
 * Emit an exact-class check: compare klass_reg against the class and raise
 * InvalidCastException on mismatch. Three compare forms are visible:
 * against klass_inst->dreg (runtime class instance), against an AOT class
 * constant, or against the immediate klass pointer.
 * NOTE(review): the leading `if (...)` header selecting the klass_inst arm
 * was dropped by extraction — presumably `if (klass_inst)`; TODO confirm.
 */
1544 mini_emit_class_check_inst (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
)
1547 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_inst
->dreg
);
1548 } else if (cfg
->compile_aot
) {
1549 int const_reg
= alloc_preg (cfg
);
1550 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1551 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1555 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
/*
 * Convenience wrapper over mini_emit_class_check_inst with no runtime
 * class instance (klass_inst == NULL).
 */
1559 mini_emit_class_check (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
)
1561 return mini_emit_class_check_inst (cfg
, klass_reg
, klass
, NULL
);
/*
 * Compare klass_reg with klass (AOT constant or immediate) and emit a
 * conditional branch to target using the caller-supplied branch_op.
 */
1565 mini_emit_class_check_branch (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, int branch_op
, MonoBasicBlock
*target
)
1567 if (cfg
->compile_aot
) {
1568 int const_reg
= alloc_preg (cfg
);
1569 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1570 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1572 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, branch_op
, target
);
1578 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
);
1581 mini_emit_castclass_inst (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoInst
*klass_inst
, MonoBasicBlock
*object_is_null
)
1584 int rank_reg
= alloc_preg (cfg
);
1585 int eclass_reg
= alloc_preg (cfg
);
1587 g_assert (!klass_inst
);
1588 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, rank
));
1589 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
1590 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1591 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1592 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
1593 if (klass
->cast_class
== mono_defaults
.object_class
) {
1594 int parent_reg
= alloc_preg (cfg
);
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
1596 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, object_is_null
);
1597 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1598 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
1599 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, object_is_null
);
1600 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1601 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
1602 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1603 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
1604 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, NULL
, NULL
);
1606 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1607 mini_emit_castclass (cfg
, -1, eclass_reg
, klass
->cast_class
, object_is_null
);
1610 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
) && (obj_reg
!= -1)) {
1611 /* Check that the object is a vector too */
1612 int bounds_reg
= alloc_preg (cfg
);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
1614 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
1615 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1618 int idepth_reg
= alloc_preg (cfg
);
1619 int stypes_reg
= alloc_preg (cfg
);
1620 int stype
= alloc_preg (cfg
);
1622 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1623 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1624 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1625 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1629 mini_emit_class_check_inst (cfg
, stype
, klass
, klass_inst
);
/*
 * Convenience wrapper over mini_emit_castclass_inst with no runtime
 * class instance (klass_inst == NULL).
 */
1634 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
)
1636 return mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, NULL
, object_is_null
);
1640 mini_emit_memset (MonoCompile
*cfg
, int destreg
, int offset
, int size
, int val
, int align
)
1644 g_assert (val
== 0);
1649 if ((size
<= 4) && (size
<= align
)) {
1652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, destreg
, offset
, val
);
1655 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI2_MEMBASE_IMM
, destreg
, offset
, val
);
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI4_MEMBASE_IMM
, destreg
, offset
, val
);
1660 #if SIZEOF_REGISTER == 8
1662 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI8_MEMBASE_IMM
, destreg
, offset
, val
);
1668 val_reg
= alloc_preg (cfg
);
1670 if (SIZEOF_REGISTER
== 8)
1671 MONO_EMIT_NEW_I8CONST (cfg
, val_reg
, val
);
1673 MONO_EMIT_NEW_ICONST (cfg
, val_reg
, val
);
1676 /* This could be optimized further if necessary */
1678 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1685 #if !NO_UNALIGNED_ACCESS
1686 if (SIZEOF_REGISTER
== 8) {
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, offset
, val_reg
);
1701 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, val_reg
);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1718 mini_emit_memcpy (MonoCompile
*cfg
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
1725 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1726 g_assert (size
< 10000);
1729 /* This could be optimized further if necessary */
1731 cur_reg
= alloc_preg (cfg
);
1732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1733 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1740 #if !NO_UNALIGNED_ACCESS
1741 if (SIZEOF_REGISTER
== 8) {
1743 cur_reg
= alloc_preg (cfg
);
1744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI8_MEMBASE
, cur_reg
, srcreg
, soffset
);
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1754 cur_reg
= alloc_preg (cfg
);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, cur_reg
, srcreg
, soffset
);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1762 cur_reg
= alloc_preg (cfg
);
1763 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, cur_reg
, srcreg
, soffset
);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1770 cur_reg
= alloc_preg (cfg
);
1771 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1780 ret_type_to_call_opcode (MonoType
*type
, int calli
, int virt
, MonoGenericSharingContext
*gsctx
)
1783 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1786 type
= mini_get_basic_type_from_generic (gsctx
, type
);
1787 switch (type
->type
) {
1788 case MONO_TYPE_VOID
:
1789 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALLVIRT
: OP_VOIDCALL
;
1792 case MONO_TYPE_BOOLEAN
:
1795 case MONO_TYPE_CHAR
:
1798 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1802 case MONO_TYPE_FNPTR
:
1803 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1804 case MONO_TYPE_CLASS
:
1805 case MONO_TYPE_STRING
:
1806 case MONO_TYPE_OBJECT
:
1807 case MONO_TYPE_SZARRAY
:
1808 case MONO_TYPE_ARRAY
:
1809 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1812 return calli
? OP_LCALL_REG
: virt
? OP_LCALLVIRT
: OP_LCALL
;
1815 return calli
? OP_FCALL_REG
: virt
? OP_FCALLVIRT
: OP_FCALL
;
1816 case MONO_TYPE_VALUETYPE
:
1817 if (type
->data
.klass
->enumtype
) {
1818 type
= mono_class_enum_basetype (type
->data
.klass
);
1821 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1822 case MONO_TYPE_TYPEDBYREF
:
1823 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1824 case MONO_TYPE_GENERICINST
:
1825 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
1828 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
1834 * target_type_is_incompatible:
1835 * @cfg: MonoCompile context
1837 * Check that the item @arg on the evaluation stack can be stored
1838 * in the target type (can be a local, or field, etc).
1839 * The cfg arg can be used to check if we need verification or just
1842 * Returns: non-0 value if arg can't be stored on a target.
1845 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1847 MonoType
*simple_type
;
1850 if (target
->byref
) {
1851 /* FIXME: check that the pointed to types match */
1852 if (arg
->type
== STACK_MP
)
1853 return arg
->klass
!= mono_class_from_mono_type (target
);
1854 if (arg
->type
== STACK_PTR
)
1859 simple_type
= mono_type_get_underlying_type (target
);
1860 switch (simple_type
->type
) {
1861 case MONO_TYPE_VOID
:
1865 case MONO_TYPE_BOOLEAN
:
1868 case MONO_TYPE_CHAR
:
1871 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1875 /* STACK_MP is needed when setting pinned locals */
1876 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1881 case MONO_TYPE_FNPTR
:
1882 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1885 case MONO_TYPE_CLASS
:
1886 case MONO_TYPE_STRING
:
1887 case MONO_TYPE_OBJECT
:
1888 case MONO_TYPE_SZARRAY
:
1889 case MONO_TYPE_ARRAY
:
1890 if (arg
->type
!= STACK_OBJ
)
1892 /* FIXME: check type compatibility */
1896 if (arg
->type
!= STACK_I8
)
1901 if (arg
->type
!= STACK_R8
)
1904 case MONO_TYPE_VALUETYPE
:
1905 if (arg
->type
!= STACK_VTYPE
)
1907 klass
= mono_class_from_mono_type (simple_type
);
1908 if (klass
!= arg
->klass
)
1911 case MONO_TYPE_TYPEDBYREF
:
1912 if (arg
->type
!= STACK_VTYPE
)
1914 klass
= mono_class_from_mono_type (simple_type
);
1915 if (klass
!= arg
->klass
)
1918 case MONO_TYPE_GENERICINST
:
1919 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
1920 if (arg
->type
!= STACK_VTYPE
)
1922 klass
= mono_class_from_mono_type (simple_type
);
1923 if (klass
!= arg
->klass
)
1927 if (arg
->type
!= STACK_OBJ
)
1929 /* FIXME: check type compatibility */
1933 case MONO_TYPE_MVAR
:
1934 /* FIXME: all the arguments must be references for now,
1935 * later look inside cfg and see if the arg num is
1936 * really a reference
1938 g_assert (cfg
->generic_sharing_context
);
1939 if (arg
->type
!= STACK_OBJ
)
1943 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
1949 * Prepare arguments for passing to a function call.
1950 * Return a non-zero value if the arguments can't be passed to the given
1952 * The type checks are not yet complete and some conversions may need
1953 * casts on 32 or 64 bit architectures.
1955 * FIXME: implement this using target_type_is_incompatible ()
1958 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
1960 MonoType
*simple_type
;
1964 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
1968 for (i
= 0; i
< sig
->param_count
; ++i
) {
1969 if (sig
->params
[i
]->byref
) {
1970 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
1974 simple_type
= sig
->params
[i
];
1975 simple_type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, simple_type
);
1977 switch (simple_type
->type
) {
1978 case MONO_TYPE_VOID
:
1983 case MONO_TYPE_BOOLEAN
:
1986 case MONO_TYPE_CHAR
:
1989 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
1995 case MONO_TYPE_FNPTR
:
1996 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
1999 case MONO_TYPE_CLASS
:
2000 case MONO_TYPE_STRING
:
2001 case MONO_TYPE_OBJECT
:
2002 case MONO_TYPE_SZARRAY
:
2003 case MONO_TYPE_ARRAY
:
2004 if (args
[i
]->type
!= STACK_OBJ
)
2009 if (args
[i
]->type
!= STACK_I8
)
2014 if (args
[i
]->type
!= STACK_R8
)
2017 case MONO_TYPE_VALUETYPE
:
2018 if (simple_type
->data
.klass
->enumtype
) {
2019 simple_type
= mono_class_enum_basetype (simple_type
->data
.klass
);
2022 if (args
[i
]->type
!= STACK_VTYPE
)
2025 case MONO_TYPE_TYPEDBYREF
:
2026 if (args
[i
]->type
!= STACK_VTYPE
)
2029 case MONO_TYPE_GENERICINST
:
2030 simple_type
= &simple_type
->data
.generic_class
->container_class
->byval_arg
;
2034 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Map an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL opcode.
 * NOTE(review): most switch cases were dropped by extraction; only the
 * OP_VOIDCALLVIRT case label and the unreachable default are visible.
 */
2042 callvirt_to_call (int opcode
)
2047 case OP_VOIDCALLVIRT
:
2056 g_assert_not_reached ();
/*
 * Map an OP_*CALLVIRT opcode to its OP_*CALL_MEMBASE counterpart (call
 * through a [basereg + offset] slot); asserts on unknown opcodes.
 * NOTE(review): most case labels were dropped by extraction; the returns
 * for CALL/FCALL/LCALL/VCALL variants are visible without their labels.
 */
2063 callvirt_to_call_membase (int opcode
)
2067 return OP_CALL_MEMBASE
;
2068 case OP_VOIDCALLVIRT
:
2069 return OP_VOIDCALL_MEMBASE
;
2071 return OP_FCALL_MEMBASE
;
2073 return OP_LCALL_MEMBASE
;
2075 return OP_VCALL_MEMBASE
;
2077 g_assert_not_reached ();
2083 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Materialize the IMT argument (the target method, or a caller-supplied
 * imt_arg) into a register and attach it to the call. On MONO_ARCH_IMT_REG
 * targets the value goes to the fixed IMT register (or imt_arg_reg under
 * LLVM); otherwise the arch hook mono_arch_emit_imt_argument is used.
 * NOTE(review): several interior lines (if header before 2091, #else/#endif
 * pairs) were dropped by extraction; comments describe visible code only.
 */
2085 emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoInst
*imt_arg
)
2087 #ifdef MONO_ARCH_IMT_REG
2088 int method_reg
= alloc_preg (cfg
);
2091 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
2092 } else if (cfg
->compile_aot
) {
2093 MONO_EMIT_NEW_AOTCONST (cfg
, method_reg
, call
->method
, MONO_PATCH_INFO_METHODCONST
);
2096 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2097 ins
->inst_p0
= call
->method
;
2098 ins
->dreg
= method_reg
;
2099 MONO_ADD_INS (cfg
->cbb
, ins
);
/* LLVM tracks the IMT value by vreg; native codegen pins it to the IMT register. */
2103 if (COMPILE_LLVM (cfg
))
2104 call
->imt_arg_reg
= method_reg
;
2106 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
2108 mono_arch_emit_imt_argument (cfg
, call
, imt_arg
);
/*
 * Allocate a MonoJumpInfo from the given mempool and fill in its target.
 * NOTE(review): the lines storing ip/type into the struct were dropped by
 * extraction; only the allocation and data.target assignment are visible.
 */
2113 static MonoJumpInfo
*
2114 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2116 MonoJumpInfo
*ji
= mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2120 ji
->data
.target
= target
;
2125 inline static MonoCallInst
*
2126 mono_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
2127 MonoInst
**args
, int calli
, int virtual, int tail
)
2130 #ifdef MONO_ARCH_SOFT_FLOAT
2135 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
2137 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (sig
->ret
, calli
, virtual, cfg
->generic_sharing_context
));
2140 call
->signature
= sig
;
2142 type_to_eval_stack_type ((cfg
), sig
->ret
, &call
->inst
);
2145 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2146 call
->vret_var
= cfg
->vret_addr
;
2147 //g_assert_not_reached ();
2149 } else if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2150 MonoInst
*temp
= mono_compile_create_var (cfg
, sig
->ret
, OP_LOCAL
);
2153 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
2156 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2157 * address of return value to increase optimization opportunities.
2158 * Before vtype decomposition, the dreg of the call ins itself represents the
2159 * fact the call modifies the return value. After decomposition, the call will
2160 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2161 * will be transformed into an LDADDR.
2163 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
2164 loada
->dreg
= alloc_preg (cfg
);
2165 loada
->inst_p0
= temp
;
2166 /* We reference the call too since call->dreg could change during optimization */
2167 loada
->inst_p1
= call
;
2168 MONO_ADD_INS (cfg
->cbb
, loada
);
2170 call
->inst
.dreg
= temp
->dreg
;
2172 call
->vret_var
= loada
;
2173 } else if (!MONO_TYPE_IS_VOID (sig
->ret
))
2174 call
->inst
.dreg
= alloc_dreg (cfg
, call
->inst
.type
);
2176 #ifdef MONO_ARCH_SOFT_FLOAT
2177 if (COMPILE_SOFT_FLOAT (cfg
)) {
2179 * If the call has a float argument, we would need to do an r8->r4 conversion using
2180 * an icall, but that cannot be done during the call sequence since it would clobber
2181 * the call registers + the stack. So we do it before emitting the call.
2183 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2185 MonoInst
*in
= call
->args
[i
];
2187 if (i
>= sig
->hasthis
)
2188 t
= sig
->params
[i
- sig
->hasthis
];
2190 t
= &mono_defaults
.int_class
->byval_arg
;
2191 t
= mono_type_get_underlying_type (t
);
2193 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
2194 MonoInst
*iargs
[1];
2198 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
2200 /* The result will be in an int vreg */
2201 call
->args
[i
] = conv
;
2208 if (COMPILE_LLVM (cfg
))
2209 mono_llvm_emit_call (cfg
, call
);
2211 mono_arch_emit_call (cfg
, call
);
2213 mono_arch_emit_call (cfg
, call
);
2216 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
2217 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
/*
 * Emit an indirect call (calli) through the address in addr->dreg, with
 * the given signature and arguments; returns the call instruction.
 */
2222 inline static MonoInst
*
2223 mono_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
)
2225 MonoCallInst
*call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, FALSE
);
/* The call target address lives in sreg1 for OP_*CALL_REG opcodes. */
2227 call
->inst
.sreg1
= addr
->dreg
;
2229 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2231 return (MonoInst
*)call
;
/*
 * Attach the runtime-generic-context argument to a call: on targets with a
 * dedicated RGCTX register, pin rgctx_reg to it and mark the cfg/call;
 * otherwise record the vreg in rgctx_arg_reg (LLVM-style lowering).
 */
2235 set_rgctx_arg (MonoCompile
*cfg
, MonoCallInst
*call
, int rgctx_reg
, MonoInst
*rgctx_arg
)
2237 #ifdef MONO_ARCH_RGCTX_REG
2238 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2239 cfg
->uses_rgctx_reg
= TRUE
;
2240 call
->rgctx_reg
= TRUE
;
2242 call
->rgctx_arg_reg
= rgctx_reg
;
/*
 * Like mono_emit_calli, but additionally passes an RGCTX argument: copy it
 * into a fresh vreg (the copy must happen before argument marshalling),
 * emit the indirect call, then bind the vreg via set_rgctx_arg.
 */
2249 inline static MonoInst
*
2250 mono_emit_rgctx_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*rgctx_arg
)
2256 rgctx_reg
= mono_alloc_preg (cfg
);
2257 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2259 call
= (MonoCallInst
*)mono_emit_calli (cfg
, sig
, args
, addr
);
2261 set_rgctx_arg (cfg
, call
, rgctx_reg
, rgctx_arg
);
2262 return (MonoInst
*)call
;
2266 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
, MonoMethod
*cmethod
, int rgctx_type
);
2268 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
, MonoClass
*klass
, int rgctx_type
);
2271 mono_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2272 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
)
2274 gboolean might_be_remote
;
2275 gboolean
virtual = this != NULL
;
2276 gboolean enable_for_aot
= TRUE
;
2280 if (method
->string_ctor
) {
2281 /* Create the real signature */
2282 /* FIXME: Cache these */
2283 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
2284 ctor_sig
->ret
= &mono_defaults
.string_class
->byval_arg
;
2289 might_be_remote
= this && sig
->hasthis
&&
2290 (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) &&
2291 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && !MONO_CHECK_THIS (this);
2293 context_used
= mono_method_check_context_used (method
);
2294 if (might_be_remote
&& context_used
) {
2297 g_assert (cfg
->generic_sharing_context
);
2299 addr
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
2301 return mono_emit_calli (cfg
, sig
, args
, addr
);
2304 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, virtual, FALSE
);
2306 if (might_be_remote
)
2307 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2309 call
->method
= method
;
2310 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
2311 call
->inst
.inst_left
= this;
2314 int vtable_reg
, slot_reg
, this_reg
;
2316 this_reg
= this->dreg
;
2318 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2319 if ((method
->klass
->parent
== mono_defaults
.multicastdelegate_class
) && (!strcmp (method
->name
, "Invoke"))) {
2320 MONO_EMIT_NULL_CHECK (cfg
, this_reg
);
2322 /* Make a call to delegate->invoke_impl */
2323 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2324 call
->inst
.inst_basereg
= this_reg
;
2325 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
2326 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2328 return (MonoInst
*)call
;
2332 if ((!cfg
->compile_aot
|| enable_for_aot
) &&
2333 (!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
2334 (MONO_METHOD_IS_FINAL (method
) &&
2335 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)) &&
2336 !(method
->klass
->marshalbyref
&& context_used
)) {
2338 * the method is not virtual, we just need to ensure this is not null
2339 * and then we can call the method directly.
2341 if (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) {
2343 * The check above ensures method is not gshared, this is needed since
2344 * gshared methods can't have wrappers.
2346 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2349 if (!method
->string_ctor
)
2350 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2352 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2354 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2356 return (MonoInst
*)call
;
2359 if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
2361 * the method is virtual, but we can statically dispatch since either
2362 * it's class or the method itself are sealed.
2363 * But first we need to ensure it's not a null reference.
2365 MONO_EMIT_NEW_CHECK_THIS (cfg
, this_reg
);
2367 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2368 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2370 return (MonoInst
*)call
;
2373 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2375 vtable_reg
= alloc_preg (cfg
);
2376 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, this_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2377 if (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2379 #ifdef MONO_ARCH_HAVE_IMT
2381 guint32 imt_slot
= mono_method_get_imt_slot (method
);
2382 emit_imt_argument (cfg
, call
, imt_arg
);
2383 slot_reg
= vtable_reg
;
2384 call
->inst
.inst_offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * SIZEOF_VOID_P
;
2387 if (slot_reg
== -1) {
2388 slot_reg
= alloc_preg (cfg
);
2389 mini_emit_load_intf_reg_vtable (cfg
, slot_reg
, vtable_reg
, method
->klass
);
2390 call
->inst
.inst_offset
= mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
;
2393 slot_reg
= vtable_reg
;
2394 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoVTable
, vtable
) +
2395 ((mono_method_get_vtable_index (method
)) * (SIZEOF_VOID_P
));
2396 #ifdef MONO_ARCH_HAVE_IMT
2398 g_assert (mono_method_signature (method
)->generic_param_count
);
2399 emit_imt_argument (cfg
, call
, imt_arg
);
2404 call
->inst
.sreg1
= slot_reg
;
2405 call
->virtual = TRUE
;
2408 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2410 return (MonoInst
*)call
;
2414 mono_emit_rgctx_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2415 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
, MonoInst
*vtable_arg
)
2422 rgctx_reg
= mono_alloc_preg (cfg
);
2423 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
2425 ins
= mono_emit_method_call_full (cfg
, method
, sig
, args
, this, imt_arg
);
2427 call
= (MonoCallInst
*)ins
;
2429 set_rgctx_arg (cfg
, call
, rgctx_reg
, vtable_arg
);
2435 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this)
2437 return mono_emit_method_call_full (cfg
, method
, mono_method_signature (method
), args
, this, NULL
);
2441 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
2448 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
);
2451 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2453 return (MonoInst
*)call
;
2457 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2459 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2463 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2467 * mono_emit_abs_call:
2469 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2471 inline static MonoInst
*
2472 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2473 MonoMethodSignature
*sig
, MonoInst
**args
)
2475 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2479 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2482 if (cfg
->abs_patches
== NULL
)
2483 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2484 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2485 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2486 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2491 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2493 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2494 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2498 * Native code might return non register sized integers
2499 * without initializing the upper bits.
2501 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2502 case OP_LOADI1_MEMBASE
:
2503 widen_op
= OP_ICONV_TO_I1
;
2505 case OP_LOADU1_MEMBASE
:
2506 widen_op
= OP_ICONV_TO_U1
;
2508 case OP_LOADI2_MEMBASE
:
2509 widen_op
= OP_ICONV_TO_I2
;
2511 case OP_LOADU2_MEMBASE
:
2512 widen_op
= OP_ICONV_TO_U2
;
2518 if (widen_op
!= -1) {
2519 int dreg
= alloc_preg (cfg
);
2522 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2523 widen
->type
= ins
->type
;
2533 get_memcpy_method (void)
2535 static MonoMethod
*memcpy_method
= NULL
;
2536 if (!memcpy_method
) {
2537 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2539 g_error ("Old corlib found. Install a new one");
2541 return memcpy_method
;
2544 #if HAVE_WRITE_BARRIERS
2547 create_write_barrier_bitmap (MonoClass
*klass
, unsigned *wb_bitmap
, int offset
)
2549 MonoClassField
*field
;
2550 gpointer iter
= NULL
;
2552 while ((field
= mono_class_get_fields (klass
, &iter
))) {
2555 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
2557 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
2558 if (mono_type_is_reference (field
->type
)) {
2559 g_assert ((foffset
% SIZEOF_VOID_P
) == 0);
2560 *wb_bitmap
|= 1 << ((offset
+ foffset
) / SIZEOF_VOID_P
);
2562 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2563 MonoClass
*field_class
= mono_class_from_mono_type (field
->type
);
2564 if (field_class
->has_references
)
2565 create_write_barrier_bitmap (field_class
, wb_bitmap
, offset
+ foffset
);
2571 mono_emit_wb_aware_memcpy (MonoCompile
*cfg
, MonoClass
*klass
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
2574 int dest_ptr_reg
, tmp_reg
;
2575 unsigned need_wb
= 0;
2580 /*types with references can't have alignment smaller than sizeof(void*) */
2581 if (align
< SIZEOF_VOID_P
)
2585 * This value cannot be biger than 32 due to the way we calculate the required wb bitmap.
2586 * FIXME tune this value.
2588 if (size
> 5 * SIZEOF_VOID_P
)
2591 create_write_barrier_bitmap (klass
, &need_wb
, 0);
2593 dest_ptr_reg
= alloc_preg (cfg
);
2594 tmp_reg
= alloc_preg (cfg
);
2596 /*tmp = dreg + doffset*/
2598 NEW_BIALU_IMM (cfg
, args
[0], OP_PADD_IMM
, dest_ptr_reg
, destreg
, doffset
);
2599 MONO_ADD_INS (cfg
->cbb
, args
[0]);
2601 EMIT_NEW_UNALU (cfg
, args
[0], OP_MOVE
, dest_ptr_reg
, destreg
);
2604 while (size
>= SIZEOF_VOID_P
) {
2605 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, tmp_reg
, srcreg
, soffset
);
2606 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, dest_ptr_reg
, 0, tmp_reg
);
2608 if (need_wb
& 0x1) {
2609 MonoInst
*dummy_use
;
2611 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
2612 mono_emit_method_call (cfg
, write_barrier
, &args
[0], NULL
);
2614 MONO_INST_NEW (cfg
, dummy_use
, OP_DUMMY_USE
);
2615 dummy_use
->sreg1
= dest_ptr_reg
;
2616 MONO_ADD_INS (cfg
->cbb
, dummy_use
);
2620 doffset
+= SIZEOF_VOID_P
;
2621 soffset
+= SIZEOF_VOID_P
;
2622 size
-= SIZEOF_VOID_P
;
2625 //tmp += sizeof (void*)
2626 if (size
>= SIZEOF_VOID_P
) {
2627 NEW_BIALU_IMM (cfg
, args
[0], OP_PADD_IMM
, dest_ptr_reg
, dest_ptr_reg
, SIZEOF_VOID_P
);
2628 MONO_ADD_INS (cfg
->cbb
, args
[0]);
2632 /* Those cannot be references since size < sizeof (void*) */
2634 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, tmp_reg
, srcreg
, soffset
);
2635 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, tmp_reg
);
2642 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, tmp_reg
, srcreg
, soffset
);
2643 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, tmp_reg
);
2650 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, tmp_reg
, srcreg
, soffset
);
2651 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, tmp_reg
);
2662 * Emit code to copy a valuetype of type @klass whose address is stored in
2663 * @src->dreg to memory whose address is stored at @dest->dreg.
2666 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2668 MonoInst
*iargs
[3];
2671 MonoMethod
*memcpy_method
;
2675 * This check breaks with spilled vars... need to handle it during verification anyway.
2676 * g_assert (klass && klass == src->klass && klass == dest->klass);
2680 n
= mono_class_native_size (klass
, &align
);
2682 n
= mono_class_value_size (klass
, &align
);
2684 #if HAVE_WRITE_BARRIERS
2685 /* if native is true there should be no references in the struct */
2686 if (klass
->has_references
&& !native
) {
2687 /* Avoid barriers when storing to the stack */
2688 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2689 (dest
->opcode
== OP_LDADDR
))) {
2690 int context_used
= 0;
2695 if (cfg
->generic_sharing_context
)
2696 context_used
= mono_class_check_context_used (klass
);
2697 /*FIXME can we use the intrinsics version when context_used == TRUE? */
2699 iargs
[2] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
2700 } else if ((cfg
->opt
& MONO_OPT_INTRINS
) && mono_emit_wb_aware_memcpy (cfg
, klass
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
)) {
2703 if (cfg
->compile_aot
) {
2704 EMIT_NEW_CLASSCONST (cfg
, iargs
[2], klass
);
2706 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2707 mono_class_compute_gc_descriptor (klass
);
2711 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2717 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2718 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2719 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2723 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2725 memcpy_method
= get_memcpy_method ();
2726 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2731 get_memset_method (void)
2733 static MonoMethod
*memset_method
= NULL
;
2734 if (!memset_method
) {
2735 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2737 g_error ("Old corlib found. Install a new one");
2739 return memset_method
;
2743 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2745 MonoInst
*iargs
[3];
2748 MonoMethod
*memset_method
;
2750 /* FIXME: Optimize this for the case when dest is an LDADDR */
2752 mono_class_init (klass
);
2753 n
= mono_class_value_size (klass
, &align
);
2755 if (n
<= sizeof (gpointer
) * 5) {
2756 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2759 memset_method
= get_memset_method ();
2761 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2762 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2763 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2768 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2770 MonoInst
*this = NULL
;
2772 g_assert (cfg
->generic_sharing_context
);
2774 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2775 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2776 !method
->klass
->valuetype
)
2777 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2779 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2780 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2783 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2785 mrgctx_loc
= mono_get_vtable_var (cfg
);
2786 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2789 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2790 MonoInst
*vtable_loc
, *vtable_var
;
2794 vtable_loc
= mono_get_vtable_var (cfg
);
2795 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2797 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2798 MonoInst
*mrgctx_var
= vtable_var
;
2801 vtable_reg
= alloc_preg (cfg
);
2802 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2803 vtable_var
->type
= STACK_PTR
;
2809 int vtable_reg
, res_reg
;
2811 vtable_reg
= alloc_preg (cfg
);
2812 res_reg
= alloc_preg (cfg
);
2813 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2818 static MonoJumpInfoRgctxEntry
*
2819 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2821 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2822 res
->method
= method
;
2823 res
->in_mrgctx
= in_mrgctx
;
2824 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2825 res
->data
->type
= patch_type
;
2826 res
->data
->data
.target
= patch_data
;
2827 res
->info_type
= info_type
;
2832 static inline MonoInst
*
2833 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2835 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2839 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2840 MonoClass
*klass
, int rgctx_type
)
2842 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2843 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2845 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2849 * emit_get_rgctx_method:
2851 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2852 * normal constants, else emit a load from the rgctx.
2855 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2856 MonoMethod
*cmethod
, int rgctx_type
)
2858 if (!context_used
) {
2861 switch (rgctx_type
) {
2862 case MONO_RGCTX_INFO_METHOD
:
2863 EMIT_NEW_METHODCONST (cfg
, ins
, cmethod
);
2865 case MONO_RGCTX_INFO_METHOD_RGCTX
:
2866 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, ins
, cmethod
);
2869 g_assert_not_reached ();
2872 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2873 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2875 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2880 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2881 MonoClassField
*field
, int rgctx_type
)
2883 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2884 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2886 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2890 * On return the caller must check @klass for load errors.
2893 emit_generic_class_init (MonoCompile
*cfg
, MonoClass
*klass
)
2895 MonoInst
*vtable_arg
;
2897 int context_used
= 0;
2899 if (cfg
->generic_sharing_context
)
2900 context_used
= mono_class_check_context_used (klass
);
2903 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
2904 klass
, MONO_RGCTX_INFO_VTABLE
);
2906 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2910 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
2913 if (COMPILE_LLVM (cfg
))
2914 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline_llvm
, &vtable_arg
);
2916 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable_arg
);
2917 #ifdef MONO_ARCH_VTABLE_REG
2918 mono_call_inst_add_outarg_reg (cfg
, call
, vtable_arg
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
2919 cfg
->uses_vtable_reg
= TRUE
;
2926 * On return the caller must check @array_class for load errors
2929 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2931 int vtable_reg
= alloc_preg (cfg
);
2932 int context_used
= 0;
2934 if (cfg
->generic_sharing_context
)
2935 context_used
= mono_class_check_context_used (array_class
);
2937 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2939 if (cfg
->opt
& MONO_OPT_SHARED
) {
2940 int class_reg
= alloc_preg (cfg
);
2941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2942 if (cfg
->compile_aot
) {
2943 int klass_reg
= alloc_preg (cfg
);
2944 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
2945 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
2947 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
2949 } else if (context_used
) {
2950 MonoInst
*vtable_ins
;
2952 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2953 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2955 if (cfg
->compile_aot
) {
2959 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
2961 vt_reg
= alloc_preg (cfg
);
2962 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, vtable
);
2963 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2966 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
2968 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vtable
);
2972 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2976 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
2978 if (mini_get_debug_options ()->better_cast_details
) {
2979 int to_klass_reg
= alloc_preg (cfg
);
2980 int vtable_reg
= alloc_preg (cfg
);
2981 int klass_reg
= alloc_preg (cfg
);
2982 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2985 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
2989 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2991 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2993 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
2994 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
2995 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
3000 reset_cast_details (MonoCompile
*cfg
)
3002 /* Reset the variables holding the cast details */
3003 if (mini_get_debug_options ()->better_cast_details
) {
3004 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
3006 MONO_ADD_INS (cfg
->cbb
, tls_get
);
3007 /* It is enough to reset the from field */
3008 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
3013 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3014 * generic code is generated.
3017 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
3019 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
3022 MonoInst
*rgctx
, *addr
;
3024 /* FIXME: What if the class is shared? We might not
3025 have to get the address of the method from the
3027 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3028 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3030 rgctx
= emit_get_rgctx (cfg
, method
, context_used
);
3032 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3034 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3039 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
3043 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
3044 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3045 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3046 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
3048 obj_reg
= sp
[0]->dreg
;
3049 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3052 /* FIXME: generics */
3053 g_assert (klass
->rank
== 0);
3056 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
3057 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
3063 MonoInst
*element_class
;
3065 /* This assertion is from the unboxcast insn */
3066 g_assert (klass
->rank
== 0);
3068 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
3069 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
3071 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
3072 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3074 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
3075 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
3076 reset_cast_details (cfg
);
3079 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_PTR
), obj_reg
, sizeof (MonoObject
));
3080 MONO_ADD_INS (cfg
->cbb
, add
);
3081 add
->type
= STACK_MP
;
3088 * Returns NULL and set the cfg exception on error.
3091 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
, int context_used
)
3093 MonoInst
*iargs
[2];
3099 MonoInst
*iargs
[2];
3102 FIXME: we cannot get managed_alloc here because we can't get
3103 the class's vtable (because it's not a closed class)
3105 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3106 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3109 if (cfg
->opt
& MONO_OPT_SHARED
)
3110 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
3112 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
3113 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
3115 if (cfg
->opt
& MONO_OPT_SHARED
) {
3116 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3118 alloc_ftn
= mono_object_new
;
3121 alloc_ftn
= mono_object_new_specific
;
3124 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3127 if (cfg
->opt
& MONO_OPT_SHARED
) {
3128 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3129 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
3131 alloc_ftn
= mono_object_new
;
3132 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
3133 /* This happens often in argument checking code, eg. throw new FooException... */
3134 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3135 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
3136 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
3138 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
3139 MonoMethod
*managed_alloc
= NULL
;
3143 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3144 cfg
->exception_ptr
= klass
;
3148 #ifndef MONO_CROSS_COMPILE
3149 managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
3152 if (managed_alloc
) {
3153 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3154 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
3156 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
3158 guint32 lw
= vtable
->klass
->instance_size
;
3159 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
3160 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
3161 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
3164 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3168 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3172 * Returns NULL and set the cfg exception on error.
3175 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
)
3177 MonoInst
*alloc
, *ins
;
3179 if (mono_class_is_nullable (klass
)) {
3180 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
3183 /* FIXME: What if the class is shared? We might not
3184 have to get the method address from the RGCTX. */
3185 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3186 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3187 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3189 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3191 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3195 alloc
= handle_alloc (cfg
, klass
, TRUE
, context_used
);
3199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
// FIXME: This doesn't work yet (class libs tests fail?)
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3208 * Returns NULL and set the cfg exception on error.
3211 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3213 MonoBasicBlock
*is_null_bb
;
3214 int obj_reg
= src
->dreg
;
3215 int vtable_reg
= alloc_preg (cfg
);
3216 MonoInst
*klass_inst
= NULL
;
3221 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
,
3222 klass
, MONO_RGCTX_INFO_KLASS
);
3224 if (is_complex_isinst (klass
)) {
3225 /* Complex case, handle by an icall */
3231 args
[1] = klass_inst
;
3233 return mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
3235 /* Simple case, handled by the code below */
3239 NEW_BBLOCK (cfg
, is_null_bb
);
3241 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
3244 save_cast_details (cfg
, klass
, obj_reg
);
3246 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3248 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
3250 int klass_reg
= alloc_preg (cfg
);
3252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3254 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3255 /* the remoting code is broken, access the class for now */
3256 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3257 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3259 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3260 cfg
->exception_ptr
= klass
;
3263 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3265 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3266 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3268 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3271 mini_emit_castclass_inst (cfg
, obj_reg
, klass_reg
, klass
, klass_inst
, is_null_bb
);
3275 MONO_START_BB (cfg
, is_null_bb
);
3277 reset_cast_details (cfg
);
3283 * Returns NULL and set the cfg exception on error.
3286 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
, int context_used
)
3289 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
3290 int obj_reg
= src
->dreg
;
3291 int vtable_reg
= alloc_preg (cfg
);
3292 int res_reg
= alloc_preg (cfg
);
3293 MonoInst
*klass_inst
= NULL
;
3296 klass_inst
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
3298 if (is_complex_isinst (klass
)) {
3301 /* Complex case, handle by an icall */
3307 args
[1] = klass_inst
;
3309 return mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
3311 /* Simple case, the code below can handle it */
3315 NEW_BBLOCK (cfg
, is_null_bb
);
3316 NEW_BBLOCK (cfg
, false_bb
);
3317 NEW_BBLOCK (cfg
, end_bb
);
3319 /* Do the assignment at the beginning, so the other assignment can be if converted */
3320 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
3321 ins
->type
= STACK_OBJ
;
3324 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
3327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3329 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3330 g_assert (!context_used
);
3331 /* the is_null_bb target simply copies the input register to the output */
3332 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
3334 int klass_reg
= alloc_preg (cfg
);
3337 int rank_reg
= alloc_preg (cfg
);
3338 int eclass_reg
= alloc_preg (cfg
);
3340 g_assert (!context_used
);
3341 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3342 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
3343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
3346 if (klass
->cast_class
== mono_defaults
.object_class
) {
3347 int parent_reg
= alloc_preg (cfg
);
3348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
3349 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
3350 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3352 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
3353 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
3354 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3355 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3356 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
3357 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3359 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3360 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3362 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
3363 /* Check that the object is a vector too */
3364 int bounds_reg
= alloc_preg (cfg
);
3365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3366 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
3367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3370 /* the is_null_bb target simply copies the input register to the output */
3371 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3373 } else if (mono_class_is_nullable (klass
)) {
3374 g_assert (!context_used
);
3375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3376 /* the is_null_bb target simply copies the input register to the output */
3377 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3379 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3380 g_assert (!context_used
);
3381 /* the remoting code is broken, access the class for now */
3382 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3383 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3385 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3386 cfg
->exception_ptr
= klass
;
3389 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3392 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3394 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3395 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3398 /* the is_null_bb target simply copies the input register to the output */
3399 mini_emit_isninst_cast_inst (cfg
, klass_reg
, klass
, klass_inst
, false_bb
, is_null_bb
);
3404 MONO_START_BB (cfg
, false_bb
);
3406 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3407 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3409 MONO_START_BB (cfg
, is_null_bb
);
3411 MONO_START_BB (cfg
, end_bb
);
3417 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3419 /* This opcode takes as input an object reference and a class, and returns:
3420 0) if the object is an instance of the class,
3421 1) if the object is not instance of the class,
3422 2) if the object is a proxy whose type cannot be determined */
3425 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3426 int obj_reg
= src
->dreg
;
3427 int dreg
= alloc_ireg (cfg
);
3429 int klass_reg
= alloc_preg (cfg
);
3431 NEW_BBLOCK (cfg
, true_bb
);
3432 NEW_BBLOCK (cfg
, false_bb
);
3433 NEW_BBLOCK (cfg
, false2_bb
);
3434 NEW_BBLOCK (cfg
, end_bb
);
3435 NEW_BBLOCK (cfg
, no_proxy_bb
);
3437 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3440 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3441 NEW_BBLOCK (cfg
, interface_fail_bb
);
3443 tmp_reg
= alloc_preg (cfg
);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3445 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3446 MONO_START_BB (cfg
, interface_fail_bb
);
3447 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3449 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3451 tmp_reg
= alloc_preg (cfg
);
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3453 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3456 tmp_reg
= alloc_preg (cfg
);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3460 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3461 tmp_reg
= alloc_preg (cfg
);
3462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3465 tmp_reg
= alloc_preg (cfg
);
3466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3467 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3468 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3470 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3473 MONO_START_BB (cfg
, no_proxy_bb
);
3475 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3478 MONO_START_BB (cfg
, false_bb
);
3480 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3481 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3483 MONO_START_BB (cfg
, false2_bb
);
3485 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3488 MONO_START_BB (cfg
, true_bb
);
3490 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3492 MONO_START_BB (cfg
, end_bb
);
3495 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3497 ins
->type
= STACK_I4
;
3503 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3505 /* This opcode takes as input an object reference and a class, and returns:
3506 0) if the object is an instance of the class,
3507 1) if the object is a proxy whose type cannot be determined
3508 an InvalidCastException exception is thrown otherwhise*/
3511 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3512 int obj_reg
= src
->dreg
;
3513 int dreg
= alloc_ireg (cfg
);
3514 int tmp_reg
= alloc_preg (cfg
);
3515 int klass_reg
= alloc_preg (cfg
);
3517 NEW_BBLOCK (cfg
, end_bb
);
3518 NEW_BBLOCK (cfg
, ok_result_bb
);
3520 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3523 save_cast_details (cfg
, klass
, obj_reg
);
3525 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3526 NEW_BBLOCK (cfg
, interface_fail_bb
);
3528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3529 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3530 MONO_START_BB (cfg
, interface_fail_bb
);
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3533 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3535 tmp_reg
= alloc_preg (cfg
);
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3537 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3538 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3540 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3541 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3544 NEW_BBLOCK (cfg
, no_proxy_bb
);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3548 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3550 tmp_reg
= alloc_preg (cfg
);
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3554 tmp_reg
= alloc_preg (cfg
);
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3556 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3559 NEW_BBLOCK (cfg
, fail_1_bb
);
3561 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3563 MONO_START_BB (cfg
, fail_1_bb
);
3565 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3568 MONO_START_BB (cfg
, no_proxy_bb
);
3570 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3573 MONO_START_BB (cfg
, ok_result_bb
);
3575 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3577 MONO_START_BB (cfg
, end_bb
);
3580 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3582 ins
->type
= STACK_I4
;
3588 * Returns NULL and set the cfg exception on error.
3590 static G_GNUC_UNUSED MonoInst
*
3591 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
, int context_used
)
3593 gpointer
*trampoline
;
3594 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3598 obj
= handle_alloc (cfg
, klass
, FALSE
, 0);
3602 /* Inline the contents of mono_delegate_ctor */
3604 /* Set target field */
3605 /* Optimize away setting of NULL target */
3606 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0))
3607 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3609 /* Set method field */
3610 method_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD
);
3611 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3614 * To avoid looking up the compiled code belonging to the target method
3615 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3616 * store it, and we fill it after the method has been compiled.
3618 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3619 MonoInst
*code_slot_ins
;
3622 code_slot_ins
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE
);
3624 domain
= mono_domain_get ();
3625 mono_domain_lock (domain
);
3626 if (!domain_jit_info (domain
)->method_code_hash
)
3627 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3628 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3630 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3631 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3633 mono_domain_unlock (domain
);
3635 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3637 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3640 /* Set invoke_impl field */
3641 if (cfg
->compile_aot
) {
3642 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3644 trampoline
= mono_create_delegate_trampoline (klass
);
3645 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3647 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3649 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3655 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3657 MonoJitICallInfo
*info
;
3659 /* Need to register the icall so it gets an icall wrapper */
3660 info
= mono_get_array_new_va_icall (rank
);
3662 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3664 /* mono_array_new_va () needs a vararg calling convention */
3665 cfg
->disable_llvm
= TRUE
;
3667 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3668 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
3672 mono_emit_load_got_addr (MonoCompile
*cfg
)
3674 MonoInst
*getaddr
, *dummy_use
;
3676 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3679 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3680 getaddr
->dreg
= cfg
->got_var
->dreg
;
3682 /* Add it to the start of the first bblock */
3683 if (cfg
->bb_entry
->code
) {
3684 getaddr
->next
= cfg
->bb_entry
->code
;
3685 cfg
->bb_entry
->code
= getaddr
;
3688 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3690 cfg
->got_var_allocated
= TRUE
;
3693 * Add a dummy use to keep the got_var alive, since real uses might
3694 * only be generated by the back ends.
3695 * Add it to end_bblock, so the variable's lifetime covers the whole
3697 * It would be better to make the usage of the got var explicit in all
3698 * cases when the backend needs it (i.e. calls, throw etc.), so this
3699 * wouldn't be needed.
3701 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3702 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3705 static int inline_limit
;
3706 static gboolean inline_limit_inited
;
3709 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3711 MonoMethodHeaderSummary header
;
3713 #ifdef MONO_ARCH_SOFT_FLOAT
3714 MonoMethodSignature
*sig
= mono_method_signature (method
);
3718 if (cfg
->generic_sharing_context
)
3721 if (cfg
->inline_depth
> 10)
3724 #ifdef MONO_ARCH_HAVE_LMF_OPS
3725 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3726 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3727 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3732 if (!mono_method_get_header_summary (method
, &header
))
3735 /*runtime, icall and pinvoke are checked by summary call*/
3736 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3737 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3738 (method
->klass
->marshalbyref
) ||
3742 /* also consider num_locals? */
3743 /* Do the size check early to avoid creating vtables */
3744 if (!inline_limit_inited
) {
3745 if (getenv ("MONO_INLINELIMIT"))
3746 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3748 inline_limit
= INLINE_LENGTH_LIMIT
;
3749 inline_limit_inited
= TRUE
;
3751 if (header
.code_size
>= inline_limit
)
3755 * if we can initialize the class of the method right away, we do,
3756 * otherwise we don't allow inlining if the class needs initialization,
3757 * since it would mean inserting a call to mono_runtime_class_init()
3758 * inside the inlined code
3760 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3761 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3762 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3763 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3764 if (!method
->klass
->runtime_info
)
3765 /* No vtable created yet */
3767 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3770 /* This makes so that inline cannot trigger */
3771 /* .cctors: too many apps depend on them */
3772 /* running with a specific order... */
3773 if (! vtable
->initialized
)
3775 mono_runtime_class_init (vtable
);
3777 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3778 if (!method
->klass
->runtime_info
)
3779 /* No vtable created yet */
3781 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3784 if (!vtable
->initialized
)
3789 * If we're compiling for shared code
3790 * the cctor will need to be run at aot method load time, for example,
3791 * or at the end of the compilation of the inlining method.
3793 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
3798 * CAS - do not inline methods with declarative security
3799 * Note: this has to be before any possible return TRUE;
3801 if (mono_method_has_declsec (method
))
3804 #ifdef MONO_ARCH_SOFT_FLOAT
3806 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3808 for (i
= 0; i
< sig
->param_count
; ++i
)
3809 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3817 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
3819 if (vtable
->initialized
&& !cfg
->compile_aot
)
3822 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
3825 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
3828 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
3829 /* The initialization is already done before the method is called */
3836 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
, gboolean bcheck
)
3840 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
3842 mono_class_init (klass
);
3843 size
= mono_class_array_element_size (klass
);
3845 mult_reg
= alloc_preg (cfg
);
3846 array_reg
= arr
->dreg
;
3847 index_reg
= index
->dreg
;
3849 #if SIZEOF_REGISTER == 8
3850 /* The array reg is 64 bits but the index reg is only 32 */
3851 if (COMPILE_LLVM (cfg
)) {
3853 index2_reg
= index_reg
;
3855 index2_reg
= alloc_preg (cfg
);
3856 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3859 if (index
->type
== STACK_I8
) {
3860 index2_reg
= alloc_preg (cfg
);
3861 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3863 index2_reg
= index_reg
;
3868 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
3870 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3871 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
3872 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3874 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
3875 ins
->type
= STACK_PTR
;
3881 add_reg
= alloc_preg (cfg
);
3883 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
3884 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
3885 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3886 ins
->type
= STACK_PTR
;
3887 MONO_ADD_INS (cfg
->cbb
, ins
);
3892 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3894 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
3896 int bounds_reg
= alloc_preg (cfg
);
3897 int add_reg
= alloc_preg (cfg
);
3898 int mult_reg
= alloc_preg (cfg
);
3899 int mult2_reg
= alloc_preg (cfg
);
3900 int low1_reg
= alloc_preg (cfg
);
3901 int low2_reg
= alloc_preg (cfg
);
3902 int high1_reg
= alloc_preg (cfg
);
3903 int high2_reg
= alloc_preg (cfg
);
3904 int realidx1_reg
= alloc_preg (cfg
);
3905 int realidx2_reg
= alloc_preg (cfg
);
3906 int sum_reg
= alloc_preg (cfg
);
3911 mono_class_init (klass
);
3912 size
= mono_class_array_element_size (klass
);
3914 index1
= index_ins1
->dreg
;
3915 index2
= index_ins2
->dreg
;
3917 /* range checking */
3918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
3919 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3921 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
3922 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3923 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
3924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
3925 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3926 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
3927 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3929 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
3930 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3931 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
3932 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
3933 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3934 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
3935 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3937 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
3938 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
3939 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
3940 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
3941 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3943 ins
->type
= STACK_MP
;
3945 MONO_ADD_INS (cfg
->cbb
, ins
);
3952 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
3956 MonoMethod
*addr_method
;
3959 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
3962 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], TRUE
);
3964 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3965 /* emit_ldelema_2 depends on OP_LMUL */
3966 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
3967 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
3971 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
3972 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
3973 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
3978 static MonoBreakPolicy
3979 always_insert_breakpoint (MonoMethod
*method
)
3981 return MONO_BREAK_POLICY_ALWAYS
;
3984 static MonoBreakPolicyFunc break_policy_func
= always_insert_breakpoint
;
3987 * mono_set_break_policy:
3988 * policy_callback: the new callback function
3990 * Allow embedders to decide wherther to actually obey breakpoint instructions
3991 * (both break IL instructions and Debugger.Break () method calls), for example
3992 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3993 * untrusted or semi-trusted code.
3995 * @policy_callback will be called every time a break point instruction needs to
3996 * be inserted with the method argument being the method that calls Debugger.Break()
3997 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3998 * if it wants the breakpoint to not be effective in the given method.
3999 * #MONO_BREAK_POLICY_ALWAYS is the default.
4002 mono_set_break_policy (MonoBreakPolicyFunc policy_callback
)
4004 if (policy_callback
)
4005 break_policy_func
= policy_callback
;
4007 break_policy_func
= always_insert_breakpoint
;
4011 should_insert_brekpoint (MonoMethod
*method
) {
4012 switch (break_policy_func (method
)) {
4013 case MONO_BREAK_POLICY_ALWAYS
:
4015 case MONO_BREAK_POLICY_NEVER
:
4017 case MONO_BREAK_POLICY_ON_DBG
:
4018 return mono_debug_using_mono_debugger ();
4020 g_warning ("Incorrect value returned from break policy callback");
4025 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
4027 emit_array_generic_access (MonoCompile
*cfg
, MonoMethodSignature
*fsig
, MonoInst
**args
, int is_set
)
4029 MonoInst
*addr
, *store
, *load
;
4030 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[2]);
4032 /* the bounds check is already done by the callers */
4033 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1], FALSE
);
4035 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, args
[2]->dreg
, 0);
4036 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, addr
->dreg
, 0, load
->dreg
);
4038 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
4039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
4045 mini_emit_inst_for_ctor (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4047 MonoInst
*ins
= NULL
;
4048 #ifdef MONO_ARCH_SIMD_INTRINSICS
4049 if (cfg
->opt
& MONO_OPT_SIMD
) {
4050 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4060 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
4062 MonoInst
*ins
= NULL
;
4064 static MonoClass
*runtime_helpers_class
= NULL
;
4065 if (! runtime_helpers_class
)
4066 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
4067 "System.Runtime.CompilerServices", "RuntimeHelpers");
4069 if (cmethod
->klass
== mono_defaults
.string_class
) {
4070 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
4071 int dreg
= alloc_ireg (cfg
);
4072 int index_reg
= alloc_preg (cfg
);
4073 int mult_reg
= alloc_preg (cfg
);
4074 int add_reg
= alloc_preg (cfg
);
4076 #if SIZEOF_REGISTER == 8
4077 /* The array reg is 64 bits but the index reg is only 32 */
4078 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
4080 index_reg
= args
[1]->dreg
;
4082 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
4084 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4085 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
4086 add_reg
= ins
->dreg
;
4087 /* Avoid a warning */
4089 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4092 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
4093 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4094 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
4095 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
4097 type_from_op (ins
, NULL
, NULL
);
4099 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4100 int dreg
= alloc_ireg (cfg
);
4101 /* Decompose later to allow more optimizations */
4102 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
4103 ins
->type
= STACK_I4
;
4104 cfg
->cbb
->has_array_access
= TRUE
;
4105 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
4108 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
4109 int mult_reg
= alloc_preg (cfg
);
4110 int add_reg
= alloc_preg (cfg
);
4112 /* The corlib functions check for oob already. */
4113 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
4114 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
4115 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
4116 return cfg
->cbb
->last_ins
;
4119 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
4121 if (strcmp (cmethod
->name
, "GetType") == 0) {
4122 int dreg
= alloc_preg (cfg
);
4123 int vt_reg
= alloc_preg (cfg
);
4124 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4125 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
4126 type_from_op (ins
, NULL
, NULL
);
4129 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
4130 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0) {
4131 int dreg
= alloc_ireg (cfg
);
4132 int t1
= alloc_ireg (cfg
);
4134 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
4135 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
4136 ins
->type
= STACK_I4
;
4140 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
4141 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4142 MONO_ADD_INS (cfg
->cbb
, ins
);
4146 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
4147 if (strcmp (cmethod
->name
+ 1, "etGenericValueImpl") == 0)
4148 return emit_array_generic_access (cfg
, fsig
, args
, *cmethod
->name
== 'S');
4149 if (cmethod
->name
[0] != 'g')
4152 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
4153 int dreg
= alloc_ireg (cfg
);
4154 int vtable_reg
= alloc_preg (cfg
);
4155 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
4156 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
4157 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
4158 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
4159 type_from_op (ins
, NULL
, NULL
);
4162 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
4163 int dreg
= alloc_ireg (cfg
);
4165 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
4166 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
4167 type_from_op (ins
, NULL
, NULL
);
4172 } else if (cmethod
->klass
== runtime_helpers_class
) {
4174 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
4175 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
4179 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
4180 if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
4181 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
4182 MONO_ADD_INS (cfg
->cbb
, ins
);
4184 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
4185 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
4186 MONO_ADD_INS (cfg
->cbb
, ins
);
4189 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
4190 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4191 if (strcmp (cmethod
->name
, "Enter") == 0) {
4194 if (COMPILE_LLVM (cfg
)) {
4196 * Pass the argument normally, the LLVM backend will handle the
4197 * calling convention problems.
4199 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4201 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
4202 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4203 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4204 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4207 return (MonoInst
*)call
;
4208 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
4211 if (COMPILE_LLVM (cfg
)) {
4212 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
, NULL
, helper_sig_monitor_enter_exit_trampoline_llvm
, args
);
4214 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
4215 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
4216 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
4217 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
4220 return (MonoInst
*)call
;
4222 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4223 MonoMethod
*fast_method
= NULL
;
4225 /* Avoid infinite recursion */
4226 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
4227 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
4228 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
4231 if (strcmp (cmethod
->name
, "Enter") == 0 ||
4232 strcmp (cmethod
->name
, "Exit") == 0)
4233 fast_method
= mono_monitor_get_fast_path (cmethod
);
4237 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
4239 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
4240 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
4241 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
4244 #if SIZEOF_REGISTER == 8
4245 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
4246 /* 64 bit reads are already atomic */
4247 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
4248 ins
->dreg
= mono_alloc_preg (cfg
);
4249 ins
->inst_basereg
= args
[0]->dreg
;
4250 ins
->inst_offset
= 0;
4251 MONO_ADD_INS (cfg
->cbb
, ins
);
4255 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4256 if (strcmp (cmethod
->name
, "Increment") == 0) {
4257 MonoInst
*ins_iconst
;
4260 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4261 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4262 #if SIZEOF_REGISTER == 8
4263 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4264 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4267 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4268 ins_iconst
->inst_c0
= 1;
4269 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4270 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4272 MONO_INST_NEW (cfg
, ins
, opcode
);
4273 ins
->dreg
= mono_alloc_ireg (cfg
);
4274 ins
->inst_basereg
= args
[0]->dreg
;
4275 ins
->inst_offset
= 0;
4276 ins
->sreg2
= ins_iconst
->dreg
;
4277 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4278 MONO_ADD_INS (cfg
->cbb
, ins
);
4280 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
4281 MonoInst
*ins_iconst
;
4284 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4285 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4286 #if SIZEOF_REGISTER == 8
4287 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4288 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4291 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
4292 ins_iconst
->inst_c0
= -1;
4293 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4294 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4296 MONO_INST_NEW (cfg
, ins
, opcode
);
4297 ins
->dreg
= mono_alloc_ireg (cfg
);
4298 ins
->inst_basereg
= args
[0]->dreg
;
4299 ins
->inst_offset
= 0;
4300 ins
->sreg2
= ins_iconst
->dreg
;
4301 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4302 MONO_ADD_INS (cfg
->cbb
, ins
);
4304 } else if (strcmp (cmethod
->name
, "Add") == 0) {
4307 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4308 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4309 #if SIZEOF_REGISTER == 8
4310 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4311 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4315 MONO_INST_NEW (cfg
, ins
, opcode
);
4316 ins
->dreg
= mono_alloc_ireg (cfg
);
4317 ins
->inst_basereg
= args
[0]->dreg
;
4318 ins
->inst_offset
= 0;
4319 ins
->sreg2
= args
[1]->dreg
;
4320 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4321 MONO_ADD_INS (cfg
->cbb
, ins
);
4324 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4326 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4327 if (strcmp (cmethod
->name
, "Exchange") == 0) {
4329 gboolean is_ref
= fsig
->params
[0]->type
== MONO_TYPE_OBJECT
;
4331 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4332 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4333 #if SIZEOF_REGISTER == 8
4334 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
4335 (fsig
->params
[0]->type
== MONO_TYPE_I
))
4336 opcode
= OP_ATOMIC_EXCHANGE_I8
;
4338 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I
))
4339 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4344 MONO_INST_NEW (cfg
, ins
, opcode
);
4345 ins
->dreg
= mono_alloc_ireg (cfg
);
4346 ins
->inst_basereg
= args
[0]->dreg
;
4347 ins
->inst_offset
= 0;
4348 ins
->sreg2
= args
[1]->dreg
;
4349 MONO_ADD_INS (cfg
->cbb
, ins
);
4351 switch (fsig
->params
[0]->type
) {
4353 ins
->type
= STACK_I4
;
4357 ins
->type
= STACK_I8
;
4359 case MONO_TYPE_OBJECT
:
4360 ins
->type
= STACK_OBJ
;
4363 g_assert_not_reached ();
4366 #if HAVE_WRITE_BARRIERS
4368 MonoInst
*dummy_use
;
4369 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
4370 mono_emit_method_call (cfg
, write_barrier
, &args
[0], NULL
);
4371 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[1]);
4375 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4377 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4378 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
4380 gboolean is_ref
= MONO_TYPE_IS_REFERENCE (fsig
->params
[1]);
4381 if (fsig
->params
[1]->type
== MONO_TYPE_I4
)
4383 else if (is_ref
|| fsig
->params
[1]->type
== MONO_TYPE_I
)
4384 size
= sizeof (gpointer
);
4385 else if (sizeof (gpointer
) == 8 && fsig
->params
[1]->type
== MONO_TYPE_I8
)
4388 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I4
);
4389 ins
->dreg
= alloc_ireg (cfg
);
4390 ins
->sreg1
= args
[0]->dreg
;
4391 ins
->sreg2
= args
[1]->dreg
;
4392 ins
->sreg3
= args
[2]->dreg
;
4393 ins
->type
= STACK_I4
;
4394 MONO_ADD_INS (cfg
->cbb
, ins
);
4395 } else if (size
== 8) {
4396 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I8
);
4397 ins
->dreg
= alloc_ireg (cfg
);
4398 ins
->sreg1
= args
[0]->dreg
;
4399 ins
->sreg2
= args
[1]->dreg
;
4400 ins
->sreg3
= args
[2]->dreg
;
4401 ins
->type
= STACK_I8
;
4402 MONO_ADD_INS (cfg
->cbb
, ins
);
4404 /* g_assert_not_reached (); */
4406 #if HAVE_WRITE_BARRIERS
4408 MonoInst
*dummy_use
;
4409 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
4410 mono_emit_method_call (cfg
, write_barrier
, &args
[0], NULL
);
4411 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, args
[1]);
4415 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4419 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
4420 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
4421 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
4422 if (should_insert_brekpoint (cfg
->method
))
4423 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
4425 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4426 MONO_ADD_INS (cfg
->cbb
, ins
);
4429 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
4430 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
4432 EMIT_NEW_ICONST (cfg
, ins
, 1);
4434 EMIT_NEW_ICONST (cfg
, ins
, 0);
4438 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
4440 * There is general branches code for Min/Max, but it does not work for
4442 * http://everything2.com/?node_id=1051618
4446 #ifdef MONO_ARCH_SIMD_INTRINSICS
4447 if (cfg
->opt
& MONO_OPT_SIMD
) {
4448 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4454 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
4458 * This entry point could be used later for arbitrary method
4461 inline static MonoInst
*
4462 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
4463 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
4465 if (method
->klass
== mono_defaults
.string_class
) {
4466 /* managed string allocation support */
4467 if (strcmp (method
->name
, "InternalAllocateStr") == 0 && !(mono_profiler_events
& MONO_PROFILE_ALLOCATIONS
)) {
4468 MonoInst
*iargs
[2];
4469 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4470 MonoMethod
*managed_alloc
= NULL
;
4472 g_assert (vtable
); /*Should not fail since it System.String*/
4473 #ifndef MONO_CROSS_COMPILE
4474 managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
4478 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4479 iargs
[1] = args
[0];
4480 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
4487 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
4489 MonoInst
*store
, *temp
;
4492 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4493 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
4496 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4497 * would be different than the MonoInst's used to represent arguments, and
4498 * the ldelema implementation can't deal with that.
4499 * Solution: When ldelema is used on an inline argument, create a var for
4500 * it, emit ldelema on that var, and emit the saving code below in
4501 * inline_method () if needed.
4503 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
4504 cfg
->args
[i
] = temp
;
4505 /* This uses cfg->args [i] which is set by the preceeding line */
4506 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
4507 store
->cil_code
= sp
[0]->cil_code
;
4512 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4513 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining of methods whose full name matches the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The limit is read once and cached; an unset variable means
 * "no limit" (always TRUE).
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
		else
			limit = (char *) "";
	}

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining while compiling methods whose full name
 * matches the prefix given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * The limit is read once and cached; unset means "no limit" (always TRUE).
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4576 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4577 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_allways
)
4579 MonoInst
*ins
, *rvar
= NULL
;
4580 MonoMethodHeader
*cheader
;
4581 MonoBasicBlock
*ebblock
, *sbblock
;
4583 MonoMethod
*prev_inlined_method
;
4584 MonoInst
**prev_locals
, **prev_args
;
4585 MonoType
**prev_arg_types
;
4586 guint prev_real_offset
;
4587 GHashTable
*prev_cbb_hash
;
4588 MonoBasicBlock
**prev_cil_offset_to_bb
;
4589 MonoBasicBlock
*prev_cbb
;
4590 unsigned char* prev_cil_start
;
4591 guint32 prev_cil_offset_to_bb_len
;
4592 MonoMethod
*prev_current_method
;
4593 MonoGenericContext
*prev_generic_context
;
4594 gboolean ret_var_set
, prev_ret_var_set
;
4596 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4598 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4599 if ((! inline_allways
) && ! check_inline_called_method_name_limit (cmethod
))
4602 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4603 if ((! inline_allways
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4607 if (cfg
->verbose_level
> 2)
4608 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4610 if (!cmethod
->inline_info
) {
4611 mono_jit_stats
.inlineable_methods
++;
4612 cmethod
->inline_info
= 1;
4615 /* allocate local variables */
4616 cheader
= mono_method_get_header (cmethod
);
4618 if (cheader
== NULL
|| mono_loader_get_last_error ()) {
4620 mono_metadata_free_mh (cheader
);
4621 mono_loader_clear_error ();
4625 /* allocate space to store the return value */
4626 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4627 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4631 prev_locals
= cfg
->locals
;
4632 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4633 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4634 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4636 /* allocate start and end blocks */
4637 /* This is needed so if the inline is aborted, we can clean up */
4638 NEW_BBLOCK (cfg
, sbblock
);
4639 sbblock
->real_offset
= real_offset
;
4641 NEW_BBLOCK (cfg
, ebblock
);
4642 ebblock
->block_num
= cfg
->num_bblocks
++;
4643 ebblock
->real_offset
= real_offset
;
4645 prev_args
= cfg
->args
;
4646 prev_arg_types
= cfg
->arg_types
;
4647 prev_inlined_method
= cfg
->inlined_method
;
4648 cfg
->inlined_method
= cmethod
;
4649 cfg
->ret_var_set
= FALSE
;
4650 cfg
->inline_depth
++;
4651 prev_real_offset
= cfg
->real_offset
;
4652 prev_cbb_hash
= cfg
->cbb_hash
;
4653 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4654 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4655 prev_cil_start
= cfg
->cil_start
;
4656 prev_cbb
= cfg
->cbb
;
4657 prev_current_method
= cfg
->current_method
;
4658 prev_generic_context
= cfg
->generic_context
;
4659 prev_ret_var_set
= cfg
->ret_var_set
;
4661 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, *ip
== CEE_CALLVIRT
);
4663 ret_var_set
= cfg
->ret_var_set
;
4665 cfg
->inlined_method
= prev_inlined_method
;
4666 cfg
->real_offset
= prev_real_offset
;
4667 cfg
->cbb_hash
= prev_cbb_hash
;
4668 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4669 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4670 cfg
->cil_start
= prev_cil_start
;
4671 cfg
->locals
= prev_locals
;
4672 cfg
->args
= prev_args
;
4673 cfg
->arg_types
= prev_arg_types
;
4674 cfg
->current_method
= prev_current_method
;
4675 cfg
->generic_context
= prev_generic_context
;
4676 cfg
->ret_var_set
= prev_ret_var_set
;
4677 cfg
->inline_depth
--;
4679 if ((costs
>= 0 && costs
< 60) || inline_allways
) {
4680 if (cfg
->verbose_level
> 2)
4681 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4683 mono_jit_stats
.inlined_methods
++;
4685 /* always add some code to avoid block split failures */
4686 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4687 MONO_ADD_INS (prev_cbb
, ins
);
4689 prev_cbb
->next_bb
= sbblock
;
4690 link_bblock (cfg
, prev_cbb
, sbblock
);
4693 * Get rid of the begin and end bblocks if possible to aid local
4696 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4698 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4699 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
4701 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4702 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4703 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4705 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4706 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4707 cfg
->cbb
= prev_cbb
;
4715 * If the inlined method contains only a throw, then the ret var is not
4716 * set, so set it to a dummy value.
4719 static double r8_0
= 0.0;
4721 switch (rvar
->type
) {
4723 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4726 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4731 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4734 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4735 ins
->type
= STACK_R8
;
4736 ins
->inst_p0
= (void*)&r8_0
;
4737 ins
->dreg
= rvar
->dreg
;
4738 MONO_ADD_INS (cfg
->cbb
, ins
);
4741 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (fsig
->ret
));
4744 g_assert_not_reached ();
4748 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
4751 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
4754 if (cfg
->verbose_level
> 2)
4755 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
4756 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4757 mono_loader_clear_error ();
4759 /* This gets rid of the newly added bblocks */
4760 cfg
->cbb
= prev_cbb
;
4762 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
4767 * Some of these comments may well be out-of-date.
4768 * Design decisions: we do a single pass over the IL code (and we do bblock
4769 * splitting/merging in the few cases when it's required: a back jump to an IL
4770 * address that was not already seen as bblock starting point).
4771 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4772 * Complex operations are decomposed in simpler ones right away. We need to let the
4773 * arch-specific code peek and poke inside this process somehow (except when the
4774 * optimizations can take advantage of the full semantic info of coarse opcodes).
4775 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4776 * MonoInst->opcode initially is the IL opcode or some simplification of that
4777 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4778 * opcode with value bigger than OP_LAST.
4779 * At this point the IR can be handed over to an interpreter, a dumb code generator
4780 * or to the optimizing code generator that will translate it to SSA form.
4782 * Profiling directed optimizations.
4783 * We may compile by default with few or no optimizations and instrument the code
4784 * or the user may indicate what methods to optimize the most either in a config file
4785 * or through repeated runs where the compiler applies offline the optimizations to
4786 * each method and then decides if it was worth it.
4789 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4790 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4791 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4792 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4793 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4794 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4795 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4796 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4798 /* offset from br.s -> br like opcodes */
4799 #define BIG_BRANCH_OFFSET 13
4802 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
4804 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
4806 return b
== NULL
|| b
== bb
;
4810 get_basic_blocks (MonoCompile
*cfg
, MonoMethodHeader
* header
, guint real_offset
, unsigned char *start
, unsigned char *end
, unsigned char **pos
)
4812 unsigned char *ip
= start
;
4813 unsigned char *target
;
4816 MonoBasicBlock
*bblock
;
4817 const MonoOpcode
*opcode
;
4820 cli_addr
= ip
- start
;
4821 i
= mono_opcode_value ((const guint8
**)&ip
, end
);
4824 opcode
= &mono_opcodes
[i
];
4825 switch (opcode
->argument
) {
4826 case MonoInlineNone
:
4829 case MonoInlineString
:
4830 case MonoInlineType
:
4831 case MonoInlineField
:
4832 case MonoInlineMethod
:
4835 case MonoShortInlineR
:
4842 case MonoShortInlineVar
:
4843 case MonoShortInlineI
:
4846 case MonoShortInlineBrTarget
:
4847 target
= start
+ cli_addr
+ 2 + (signed char)ip
[1];
4848 GET_BBLOCK (cfg
, bblock
, target
);
4851 GET_BBLOCK (cfg
, bblock
, ip
);
4853 case MonoInlineBrTarget
:
4854 target
= start
+ cli_addr
+ 5 + (gint32
)read32 (ip
+ 1);
4855 GET_BBLOCK (cfg
, bblock
, target
);
4858 GET_BBLOCK (cfg
, bblock
, ip
);
4860 case MonoInlineSwitch
: {
4861 guint32 n
= read32 (ip
+ 1);
4864 cli_addr
+= 5 + 4 * n
;
4865 target
= start
+ cli_addr
;
4866 GET_BBLOCK (cfg
, bblock
, target
);
4868 for (j
= 0; j
< n
; ++j
) {
4869 target
= start
+ cli_addr
+ (gint32
)read32 (ip
);
4870 GET_BBLOCK (cfg
, bblock
, target
);
4880 g_assert_not_reached ();
4883 if (i
== CEE_THROW
) {
4884 unsigned char *bb_start
= ip
- 1;
4886 /* Find the start of the bblock containing the throw */
4888 while ((bb_start
>= start
) && !bblock
) {
4889 bblock
= cfg
->cil_offset_to_bb
[(bb_start
) - start
];
4893 bblock
->out_of_line
= 1;
4902 static inline MonoMethod
*
4903 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4907 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
)
4908 return mono_method_get_wrapper_data (m
, token
);
4910 method
= mono_get_method_full (m
->klass
->image
, token
, klass
, context
);
4915 static inline MonoMethod
*
4916 mini_get_method (MonoCompile
*cfg
, MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4918 MonoMethod
*method
= mini_get_method_allow_open (m
, token
, klass
, context
);
4920 if (method
&& cfg
&& !cfg
->generic_sharing_context
&& mono_class_is_open_constructed_type (&method
->klass
->byval_arg
))
4926 static inline MonoClass
*
4927 mini_get_class (MonoMethod
*method
, guint32 token
, MonoGenericContext
*context
)
4931 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
4932 klass
= mono_method_get_wrapper_data (method
, token
);
4934 klass
= mono_class_get_full (method
->klass
->image
, token
, context
);
4936 mono_class_init (klass
);
4941 * Returns TRUE if the JIT should abort inlining because "callee"
4942 * is influenced by security attributes.
4945 gboolean
check_linkdemand (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
4949 if ((cfg
->method
!= caller
) && mono_method_has_declsec (callee
)) {
4953 result
= mono_declsec_linkdemand (cfg
->domain
, caller
, callee
);
4954 if (result
== MONO_JIT_SECURITY_OK
)
4957 if (result
== MONO_JIT_LINKDEMAND_ECMA
) {
4958 /* Generate code to throw a SecurityException before the actual call/link */
4959 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
4962 NEW_ICONST (cfg
, args
[0], 4);
4963 NEW_METHODCONST (cfg
, args
[1], caller
);
4964 mono_emit_method_call (cfg
, secman
->linkdemandsecurityexception
, args
, NULL
);
4965 } else if (cfg
->exception_type
== MONO_EXCEPTION_NONE
) {
4966 /* don't hide previous results */
4967 cfg
->exception_type
= MONO_EXCEPTION_SECURITY_LINKDEMAND
;
4968 cfg
->exception_data
= result
;
4976 throw_exception (void)
4978 static MonoMethod
*method
= NULL
;
4981 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
4982 method
= mono_class_get_method_from_name (secman
->securitymanager
, "ThrowException", 1);
4989 emit_throw_exception (MonoCompile
*cfg
, MonoException
*ex
)
4991 MonoMethod
*thrower
= throw_exception ();
4994 EMIT_NEW_PCONST (cfg
, args
[0], ex
);
4995 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
4999 * Return the original method is a wrapper is specified. We can only access
5000 * the custom attributes from the original method.
5003 get_original_method (MonoMethod
*method
)
5005 if (method
->wrapper_type
== MONO_WRAPPER_NONE
)
5008 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5009 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
)
5012 /* in other cases we need to find the original method */
5013 return mono_marshal_method_from_wrapper (method
);
5017 ensure_method_is_allowed_to_access_field (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
,
5018 MonoBasicBlock
*bblock
, unsigned char *ip
)
5020 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5021 MonoException
*ex
= mono_security_core_clr_is_field_access_allowed (get_original_method (caller
), field
);
5023 emit_throw_exception (cfg
, ex
);
5027 ensure_method_is_allowed_to_call_method (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
,
5028 MonoBasicBlock
*bblock
, unsigned char *ip
)
5030 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5031 MonoException
*ex
= mono_security_core_clr_is_call_allowed (get_original_method (caller
), callee
);
5033 emit_throw_exception (cfg
, ex
);
5037 * Check that the IL instructions at ip are the array initialization
5038 * sequence and return the pointer to the data and the size.
5041 initialize_array_data (MonoMethod
*method
, gboolean aot
, unsigned char *ip
, MonoClass
*klass
, guint32 len
, int *out_size
, guint32
*out_field_token
)
5044 * newarr[System.Int32]
5046 * ldtoken field valuetype ...
5047 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5049 if (ip
[0] == CEE_DUP
&& ip
[1] == CEE_LDTOKEN
&& ip
[5] == 0x4 && ip
[6] == CEE_CALL
) {
5050 guint32 token
= read32 (ip
+ 7);
5051 guint32 field_token
= read32 (ip
+ 2);
5052 guint32 field_index
= field_token
& 0xffffff;
5054 const char *data_ptr
;
5056 MonoMethod
*cmethod
;
5057 MonoClass
*dummy_class
;
5058 MonoClassField
*field
= mono_field_from_token (method
->klass
->image
, field_token
, &dummy_class
, NULL
);
5064 *out_field_token
= field_token
;
5066 cmethod
= mini_get_method (NULL
, method
, token
, NULL
, NULL
);
5069 if (strcmp (cmethod
->name
, "InitializeArray") || strcmp (cmethod
->klass
->name
, "RuntimeHelpers") || cmethod
->klass
->image
!= mono_defaults
.corlib
)
5071 switch (mono_type_get_underlying_type (&klass
->byval_arg
)->type
) {
5072 case MONO_TYPE_BOOLEAN
:
5076 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5077 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5078 case MONO_TYPE_CHAR
:
5088 return NULL
; /* stupid ARM FP swapped format */
5098 if (size
> mono_type_size (field
->type
, &dummy_align
))
5101 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5102 if (!method
->klass
->image
->dynamic
) {
5103 field_index
= read32 (ip
+ 2) & 0xffffff;
5104 mono_metadata_field_info (method
->klass
->image
, field_index
- 1, NULL
, &rva
, NULL
);
5105 data_ptr
= mono_image_rva_map (method
->klass
->image
, rva
);
5106 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5107 /* for aot code we do the lookup on load */
5108 if (aot
&& data_ptr
)
5109 return GUINT_TO_POINTER (rva
);
5111 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5113 data_ptr
= mono_field_get_data (field
);
5121 set_exception_type_from_invalid_il (MonoCompile
*cfg
, MonoMethod
*method
, unsigned char *ip
)
5123 char *method_fname
= mono_method_full_name (method
, TRUE
);
5125 MonoMethodHeader
*header
= mono_method_get_header (method
);
5127 if (header
->code_size
== 0)
5128 method_code
= g_strdup ("method body is empty.");
5130 method_code
= mono_disasm_code_one (NULL
, method
, ip
, NULL
);
5131 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
5132 cfg
->exception_message
= g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname
, method_code
);
5133 g_free (method_fname
);
5134 g_free (method_code
);
5135 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
5139 set_exception_object (MonoCompile
*cfg
, MonoException
*exception
)
5141 cfg
->exception_type
= MONO_EXCEPTION_OBJECT_SUPPLIED
;
5142 MONO_GC_REGISTER_ROOT (cfg
->exception_ptr
);
5143 cfg
->exception_ptr
= exception
;
5147 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
5151 if (cfg
->generic_sharing_context
)
5152 type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, &klass
->byval_arg
);
5154 type
= &klass
->byval_arg
;
5155 return MONO_TYPE_IS_REFERENCE (type
);
5159 emit_stloc_ir (MonoCompile
*cfg
, MonoInst
**sp
, MonoMethodHeader
*header
, int n
)
5162 guint32 opcode
= mono_type_to_regmove (cfg
, header
->locals
[n
]);
5163 if ((opcode
== OP_MOVE
) && cfg
->cbb
->last_ins
== sp
[0] &&
5164 ((sp
[0]->opcode
== OP_ICONST
) || (sp
[0]->opcode
== OP_I8CONST
))) {
5165 /* Optimize reg-reg moves away */
5167 * Can't optimize other opcodes, since sp[0] might point to
5168 * the last ins of a decomposed opcode.
5170 sp
[0]->dreg
= (cfg
)->locals
[n
]->dreg
;
5172 EMIT_NEW_LOCSTORE (cfg
, ins
, n
, *sp
);
5177 * ldloca inhibits many optimizations so try to get rid of it in common
5180 static inline unsigned char *
5181 emit_optimized_ldloca_ir (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *end
, int size
)
5190 local
= read16 (ip
+ 2);
5194 if (ip
+ 6 < end
&& (ip
[0] == CEE_PREFIX1
) && (ip
[1] == CEE_INITOBJ
) && ip_in_bb (cfg
, cfg
->cbb
, ip
+ 1)) {
5195 gboolean skip
= FALSE
;
5197 /* From the INITOBJ case */
5198 token
= read32 (ip
+ 2);
5199 klass
= mini_get_class (cfg
->current_method
, token
, cfg
->generic_context
);
5200 CHECK_TYPELOAD (klass
);
5201 if (generic_class_is_reference_type (cfg
, klass
)) {
5202 MONO_EMIT_NEW_PCONST (cfg
, cfg
->locals
[local
]->dreg
, NULL
);
5203 } else if (MONO_TYPE_IS_REFERENCE (&klass
->byval_arg
)) {
5204 MONO_EMIT_NEW_PCONST (cfg
, cfg
->locals
[local
]->dreg
, NULL
);
5205 } else if (MONO_TYPE_ISSTRUCT (&klass
->byval_arg
)) {
5206 MONO_EMIT_NEW_VZERO (cfg
, cfg
->locals
[local
]->dreg
, klass
);
5219 is_exception_class (MonoClass
*class)
5222 if (class == mono_defaults
.exception_class
)
5224 class = class->parent
;
5230 * mono_method_to_ir:
5232 * Translate the .net IL into linear IR.
5235 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5236 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5237 guint inline_offset
, gboolean is_virtual_call
)
5240 MonoInst
*ins
, **sp
, **stack_start
;
5241 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5242 MonoSimpleBasicBlock
*bb
= NULL
, *original_bb
= NULL
;
5243 MonoMethod
*cmethod
, *method_definition
;
5244 MonoInst
**arg_array
;
5245 MonoMethodHeader
*header
;
5247 guint32 token
, ins_flag
;
5249 MonoClass
*constrained_call
= NULL
;
5250 unsigned char *ip
, *end
, *target
, *err_pos
;
5251 static double r8_0
= 0.0;
5252 MonoMethodSignature
*sig
;
5253 MonoGenericContext
*generic_context
= NULL
;
5254 MonoGenericContainer
*generic_container
= NULL
;
5255 MonoType
**param_types
;
5256 int i
, n
, start_new_bblock
, dreg
;
5257 int num_calls
= 0, inline_costs
= 0;
5258 int breakpoint_id
= 0;
5260 MonoBoolean security
, pinvoke
;
5261 MonoSecurityManager
* secman
= NULL
;
5262 MonoDeclSecurityActions actions
;
5263 GSList
*class_inits
= NULL
;
5264 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5266 gboolean init_locals
, seq_points
, skip_dead_blocks
;
5268 /* serialization and xdomain stuff may need access to private fields and methods */
5269 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5270 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5271 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5272 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5273 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5274 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5276 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5278 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5279 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5280 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5281 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5283 image
= method
->klass
->image
;
5284 header
= mono_method_get_header (method
);
5285 generic_container
= mono_method_get_generic_container (method
);
5286 sig
= mono_method_signature (method
);
5287 num_args
= sig
->hasthis
+ sig
->param_count
;
5288 ip
= (unsigned char*)header
->code
;
5289 cfg
->cil_start
= ip
;
5290 end
= ip
+ header
->code_size
;
5291 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5292 init_locals
= header
->init_locals
;
5294 seq_points
= cfg
->gen_seq_points
&& cfg
->method
== method
;
5297 * Methods without init_locals set could cause asserts in various passes
5302 method_definition
= method
;
5303 while (method_definition
->is_inflated
) {
5304 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5305 method_definition
= imethod
->declaring
;
5308 /* SkipVerification is not allowed if core-clr is enabled */
5309 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5311 dont_verify_stloc
= TRUE
;
5314 if (!dont_verify
&& mini_method_verify (cfg
, method_definition
))
5315 goto exception_exit
;
5317 if (mono_debug_using_mono_debugger ())
5318 cfg
->keep_cil_nops
= TRUE
;
5320 if (sig
->is_inflated
)
5321 generic_context
= mono_method_get_context (method
);
5322 else if (generic_container
)
5323 generic_context
= &generic_container
->context
;
5324 cfg
->generic_context
= generic_context
;
5326 if (!cfg
->generic_sharing_context
)
5327 g_assert (!sig
->has_type_parameters
);
5329 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5330 g_assert (method
->is_inflated
);
5331 g_assert (mono_method_get_context (method
)->method_inst
);
5333 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5334 g_assert (sig
->generic_param_count
);
5336 if (cfg
->method
== method
) {
5337 cfg
->real_offset
= 0;
5339 cfg
->real_offset
= inline_offset
;
5342 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5343 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5345 cfg
->current_method
= method
;
5347 if (cfg
->verbose_level
> 2)
5348 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5350 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5352 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5353 for (n
= 0; n
< sig
->param_count
; ++n
)
5354 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5355 cfg
->arg_types
= param_types
;
5357 dont_inline
= g_list_prepend (dont_inline
, method
);
5358 if (cfg
->method
== method
) {
5360 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5361 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5364 NEW_BBLOCK (cfg
, start_bblock
);
5365 cfg
->bb_entry
= start_bblock
;
5366 start_bblock
->cil_code
= NULL
;
5367 start_bblock
->cil_length
= 0;
5370 NEW_BBLOCK (cfg
, end_bblock
);
5371 cfg
->bb_exit
= end_bblock
;
5372 end_bblock
->cil_code
= NULL
;
5373 end_bblock
->cil_length
= 0;
5374 g_assert (cfg
->num_bblocks
== 2);
5376 arg_array
= cfg
->args
;
5378 if (header
->num_clauses
) {
5379 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5380 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5382 /* handle exception clauses */
5383 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5384 MonoBasicBlock
*try_bb
;
5385 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5386 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5387 try_bb
->real_offset
= clause
->try_offset
;
5388 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5389 tblock
->real_offset
= clause
->handler_offset
;
5390 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5392 link_bblock (cfg
, try_bb
, tblock
);
5394 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5395 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5397 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5398 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5399 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5400 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5401 MONO_ADD_INS (tblock
, ins
);
5403 /* todo: is a fault block unsafe to optimize? */
5404 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5405 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5409 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5411 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5413 /* catch and filter blocks get the exception object on the stack */
5414 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5415 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5416 MonoInst
*dummy_use
;
5418 /* mostly like handle_stack_args (), but just sets the input args */
5419 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5420 tblock
->in_scount
= 1;
5421 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5422 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5425 * Add a dummy use for the exvar so its liveness info will be
5429 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5431 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5432 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5433 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5434 tblock
->real_offset
= clause
->data
.filter_offset
;
5435 tblock
->in_scount
= 1;
5436 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5437 /* The filter block shares the exvar with the handler block */
5438 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5439 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5440 MONO_ADD_INS (tblock
, ins
);
5444 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5445 clause
->data
.catch_class
&&
5446 cfg
->generic_sharing_context
&&
5447 mono_class_check_context_used (clause
->data
.catch_class
)) {
5449 * In shared generic code with catch
5450 * clauses containing type variables
5451 * the exception handling code has to
5452 * be able to get to the rgctx.
5453 * Therefore we have to make sure that
5454 * the vtable/mrgctx argument (for
5455 * static or generic methods) or the
5456 * "this" argument (for non-static
5457 * methods) are live.
5459 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5460 mini_method_get_context (method
)->method_inst
||
5461 method
->klass
->valuetype
) {
5462 mono_get_vtable_var (cfg
);
5464 MonoInst
*dummy_use
;
5466 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5471 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5472 cfg
->cbb
= start_bblock
;
5473 cfg
->args
= arg_array
;
5474 mono_save_args (cfg
, sig
, inline_args
);
5477 /* FIRST CODE BLOCK */
5478 NEW_BBLOCK (cfg
, bblock
);
5479 bblock
->cil_code
= ip
;
5483 ADD_BBLOCK (cfg
, bblock
);
5485 if (cfg
->method
== method
) {
5486 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5487 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5488 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5489 MONO_ADD_INS (bblock
, ins
);
5493 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5494 secman
= mono_security_manager_get_methods ();
5496 security
= (secman
&& mono_method_has_declsec (method
));
5497 /* at this point having security doesn't mean we have any code to generate */
5498 if (security
&& (cfg
->method
== method
)) {
5499 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5500 * And we do not want to enter the next section (with allocation) if we
5501 * have nothing to generate */
5502 security
= mono_declsec_get_demands (method
, &actions
);
5505 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5506 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5508 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5509 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5510 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5512 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5513 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5517 mono_custom_attrs_free (custom
);
5520 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5521 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5525 mono_custom_attrs_free (custom
);
5528 /* not a P/Invoke after all */
5533 if ((init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5534 /* we use a separate basic block for the initialization code */
5535 NEW_BBLOCK (cfg
, init_localsbb
);
5536 cfg
->bb_init
= init_localsbb
;
5537 init_localsbb
->real_offset
= cfg
->real_offset
;
5538 start_bblock
->next_bb
= init_localsbb
;
5539 init_localsbb
->next_bb
= bblock
;
5540 link_bblock (cfg
, start_bblock
, init_localsbb
);
5541 link_bblock (cfg
, init_localsbb
, bblock
);
5543 cfg
->cbb
= init_localsbb
;
5545 start_bblock
->next_bb
= bblock
;
5546 link_bblock (cfg
, start_bblock
, bblock
);
5549 /* at this point we know, if security is TRUE, that some code needs to be generated */
5550 if (security
&& (cfg
->method
== method
)) {
5553 mono_jit_stats
.cas_demand_generation
++;
5555 if (actions
.demand
.blob
) {
5556 /* Add code for SecurityAction.Demand */
5557 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5558 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5559 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5560 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5562 if (actions
.noncasdemand
.blob
) {
5563 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5564 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5565 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5566 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5567 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5568 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5570 if (actions
.demandchoice
.blob
) {
5571 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5572 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5573 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5574 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5575 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
5579 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5581 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
5584 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
5585 /* check if this is native code, e.g. an icall or a p/invoke */
5586 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
5587 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5589 gboolean pinvk
= (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
);
5590 gboolean icall
= (wrapped
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
);
5592 /* if this ia a native call then it can only be JITted from platform code */
5593 if ((icall
|| pinvk
) && method
->klass
&& method
->klass
->image
) {
5594 if (!mono_security_core_clr_is_platform_image (method
->klass
->image
)) {
5595 MonoException
*ex
= icall
? mono_get_exception_security () :
5596 mono_get_exception_method_access ();
5597 emit_throw_exception (cfg
, ex
);
5604 if (header
->code_size
== 0)
5607 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
5612 if (cfg
->method
== method
)
5613 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
5615 for (n
= 0; n
< header
->num_locals
; ++n
) {
5616 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
5621 /* We force the vtable variable here for all shared methods
5622 for the possibility that they might show up in a stack
5623 trace where their exact instantiation is needed. */
5624 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
5625 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5626 mini_method_get_context (method
)->method_inst
||
5627 method
->klass
->valuetype
) {
5628 mono_get_vtable_var (cfg
);
5630 /* FIXME: Is there a better way to do this?
5631 We need the variable live for the duration
5632 of the whole method. */
5633 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
5637 /* add a check for this != NULL to inlined methods */
5638 if (is_virtual_call
) {
5641 NEW_ARGLOAD (cfg
, arg_ins
, 0);
5642 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
5643 MONO_EMIT_NEW_CHECK_THIS (cfg
, arg_ins
->dreg
);
5646 skip_dead_blocks
= !dont_verify
;
5647 if (skip_dead_blocks
) {
5648 original_bb
= bb
= mono_basic_block_split (method
, &error
);
5649 if (!mono_error_ok (&error
)) {
5650 mono_error_cleanup (&error
);
5656 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5657 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
5660 start_new_bblock
= 0;
5663 if (cfg
->method
== method
)
5664 cfg
->real_offset
= ip
- header
->code
;
5666 cfg
->real_offset
= inline_offset
;
5671 if (start_new_bblock
) {
5672 bblock
->cil_length
= ip
- bblock
->cil_code
;
5673 if (start_new_bblock
== 2) {
5674 g_assert (ip
== tblock
->cil_code
);
5676 GET_BBLOCK (cfg
, tblock
, ip
);
5678 bblock
->next_bb
= tblock
;
5681 start_new_bblock
= 0;
5682 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5683 if (cfg
->verbose_level
> 3)
5684 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5685 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5689 g_slist_free (class_inits
);
5692 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
5693 link_bblock (cfg
, bblock
, tblock
);
5694 if (sp
!= stack_start
) {
5695 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
5697 CHECK_UNVERIFIABLE (cfg
);
5699 bblock
->next_bb
= tblock
;
5702 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5703 if (cfg
->verbose_level
> 3)
5704 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5705 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5708 g_slist_free (class_inits
);
5713 if (skip_dead_blocks
) {
5714 int ip_offset
= ip
- header
->code
;
5716 if (ip_offset
== bb
->end
)
5720 int op_size
= mono_opcode_size (ip
, end
);
5721 g_assert (op_size
> 0); /*The BB formation pass must catch all bad ops*/
5723 if (cfg
->verbose_level
> 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset
);
5725 if (ip_offset
+ op_size
== bb
->end
) {
5726 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5727 MONO_ADD_INS (bblock
, ins
);
5728 start_new_bblock
= 1;
5736 * Sequence points are points where the debugger can place a breakpoint.
5737 * Currently, we generate these automatically at points where the IL
5740 if (seq_points
&& sp
== stack_start
) {
5741 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
5742 MONO_ADD_INS (cfg
->cbb
, ins
);
5745 bblock
->real_offset
= cfg
->real_offset
;
5747 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
5748 guint32 cil_offset
= ip
- header
->code
;
5749 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
5751 /* TODO: Use an increment here */
5752 #if defined(TARGET_X86)
5753 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
5754 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
5756 MONO_ADD_INS (cfg
->cbb
, ins
);
5758 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
5759 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
5763 if (cfg
->verbose_level
> 3)
5764 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
5768 if (cfg
->keep_cil_nops
)
5769 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
5771 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5773 MONO_ADD_INS (bblock
, ins
);
5776 if (should_insert_brekpoint (cfg
->method
))
5777 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5779 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5781 MONO_ADD_INS (bblock
, ins
);
5787 CHECK_STACK_OVF (1);
5788 n
= (*ip
)-CEE_LDARG_0
;
5790 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5798 CHECK_STACK_OVF (1);
5799 n
= (*ip
)-CEE_LDLOC_0
;
5801 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5810 n
= (*ip
)-CEE_STLOC_0
;
5813 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
5815 emit_stloc_ir (cfg
, sp
, header
, n
);
5822 CHECK_STACK_OVF (1);
5825 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5831 CHECK_STACK_OVF (1);
5834 NEW_ARGLOADA (cfg
, ins
, n
);
5835 MONO_ADD_INS (cfg
->cbb
, ins
);
5845 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
5847 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5852 CHECK_STACK_OVF (1);
5855 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5859 case CEE_LDLOCA_S
: {
5860 unsigned char *tmp_ip
;
5862 CHECK_STACK_OVF (1);
5863 CHECK_LOCAL (ip
[1]);
5865 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
5871 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
5880 CHECK_LOCAL (ip
[1]);
5881 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
5883 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
5888 CHECK_STACK_OVF (1);
5889 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
5890 ins
->type
= STACK_OBJ
;
5895 CHECK_STACK_OVF (1);
5896 EMIT_NEW_ICONST (cfg
, ins
, -1);
5909 CHECK_STACK_OVF (1);
5910 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
5916 CHECK_STACK_OVF (1);
5918 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
5924 CHECK_STACK_OVF (1);
5925 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
5931 CHECK_STACK_OVF (1);
5932 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
5933 ins
->type
= STACK_I8
;
5934 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
5936 ins
->inst_l
= (gint64
)read64 (ip
);
5937 MONO_ADD_INS (bblock
, ins
);
5943 gboolean use_aotconst
= FALSE
;
5945 #ifdef TARGET_POWERPC
5946 /* FIXME: Clean this up */
5947 if (cfg
->compile_aot
)
5948 use_aotconst
= TRUE
;
5951 /* FIXME: we should really allocate this only late in the compilation process */
5952 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
5954 CHECK_STACK_OVF (1);
5960 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
5962 dreg
= alloc_freg (cfg
);
5963 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
5964 ins
->type
= STACK_R8
;
5966 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
5967 ins
->type
= STACK_R8
;
5968 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5970 MONO_ADD_INS (bblock
, ins
);
5980 gboolean use_aotconst
= FALSE
;
5982 #ifdef TARGET_POWERPC
5983 /* FIXME: Clean this up */
5984 if (cfg
->compile_aot
)
5985 use_aotconst
= TRUE
;
5988 /* FIXME: we should really allocate this only late in the compilation process */
5989 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
5991 CHECK_STACK_OVF (1);
5997 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
5999 dreg
= alloc_freg (cfg
);
6000 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
6001 ins
->type
= STACK_R8
;
6003 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
6004 ins
->type
= STACK_R8
;
6005 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6007 MONO_ADD_INS (bblock
, ins
);
6016 MonoInst
*temp
, *store
;
6018 CHECK_STACK_OVF (1);
6022 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
6023 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
6025 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6028 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6041 if (sp
[0]->type
== STACK_R8
)
6042 /* we need to pop the value from the x86 FP stack */
6043 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
6052 if (stack_start
!= sp
)
6054 token
= read32 (ip
+ 1);
6055 /* FIXME: check the signature matches */
6056 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6061 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
6062 GENERIC_SHARING_FAILURE (CEE_JMP
);
6064 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6065 CHECK_CFG_EXCEPTION
;
6067 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6069 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
6072 /* Handle tail calls similarly to calls */
6073 n
= fsig
->param_count
+ fsig
->hasthis
;
6075 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6076 call
->method
= cmethod
;
6077 call
->tail_call
= TRUE
;
6078 call
->signature
= mono_method_signature (cmethod
);
6079 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6080 call
->inst
.inst_p0
= cmethod
;
6081 for (i
= 0; i
< n
; ++i
)
6082 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6084 mono_arch_emit_call (cfg
, call
);
6085 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
6088 for (i
= 0; i
< num_args
; ++i
)
6089 /* Prevent arguments from being optimized away */
6090 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6092 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6093 ins
= (MonoInst
*)call
;
6094 ins
->inst_p0
= cmethod
;
6095 MONO_ADD_INS (bblock
, ins
);
6099 start_new_bblock
= 1;
6104 case CEE_CALLVIRT
: {
6105 MonoInst
*addr
= NULL
;
6106 MonoMethodSignature
*fsig
= NULL
;
6108 int virtual = *ip
== CEE_CALLVIRT
;
6109 int calli
= *ip
== CEE_CALLI
;
6110 gboolean pass_imt_from_rgctx
= FALSE
;
6111 MonoInst
*imt_arg
= NULL
;
6112 gboolean pass_vtable
= FALSE
;
6113 gboolean pass_mrgctx
= FALSE
;
6114 MonoInst
*vtable_arg
= NULL
;
6115 gboolean check_this
= FALSE
;
6116 gboolean supported_tail_call
= FALSE
;
6119 token
= read32 (ip
+ 1);
6126 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
6127 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
6129 fsig
= mono_metadata_parse_signature (image
, token
);
6131 n
= fsig
->param_count
+ fsig
->hasthis
;
6133 if (method
->dynamic
&& fsig
->pinvoke
) {
6137 * This is a call through a function pointer using a pinvoke
6138 * signature. Have to create a wrapper and call that instead.
6139 * FIXME: This is very slow, need to create a wrapper at JIT time
6140 * instead based on the signature.
6142 EMIT_NEW_IMAGECONST (cfg
, args
[0], method
->klass
->image
);
6143 EMIT_NEW_PCONST (cfg
, args
[1], fsig
);
6145 addr
= mono_emit_jit_icall (cfg
, mono_get_native_calli_wrapper
, args
);
6148 MonoMethod
*cil_method
;
6150 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
6151 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
6152 cil_method
= cmethod
;
6153 } else if (constrained_call
) {
6154 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
6156 * This is needed since get_method_constrained can't find
6157 * the method in klass representing a type var.
6158 * The type var is guaranteed to be a reference type in this
6161 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6162 cil_method
= cmethod
;
6163 g_assert (!cmethod
->klass
->valuetype
);
6165 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
6168 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6169 cil_method
= cmethod
;
6174 if (!dont_verify
&& !cfg
->skip_visibility
) {
6175 MonoMethod
*target_method
= cil_method
;
6176 if (method
->is_inflated
) {
6177 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
6179 if (!mono_method_can_access_method (method_definition
, target_method
) &&
6180 !mono_method_can_access_method (method
, cil_method
))
6181 METHOD_ACCESS_FAILURE
;
6184 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
6185 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
6187 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
6188 /* MS.NET seems to silently convert this to a callvirt */
6193 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6194 * converts to a callvirt.
6196 * tests/bug-515884.il is an example of this behavior
6198 const int test_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
| METHOD_ATTRIBUTE_STATIC
;
6199 const int expected_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
;
6200 if (!virtual && cmethod
->klass
->marshalbyref
&& (cmethod
->flags
& test_flags
) == expected_flags
&& cfg
->method
->wrapper_type
== MONO_WRAPPER_NONE
)
6204 if (!cmethod
->klass
->inited
)
6205 if (!mono_class_init (cmethod
->klass
))
6208 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
6209 mini_class_is_system_array (cmethod
->klass
)) {
6210 array_rank
= cmethod
->klass
->rank
;
6211 fsig
= mono_method_signature (cmethod
);
6213 fsig
= mono_method_signature (cmethod
);
6218 if (fsig
->pinvoke
) {
6219 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
6220 check_for_pending_exc
, FALSE
);
6221 fsig
= mono_method_signature (wrapper
);
6222 } else if (constrained_call
) {
6223 fsig
= mono_method_signature (cmethod
);
6225 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
6229 mono_save_token_info (cfg
, image
, token
, cil_method
);
6231 n
= fsig
->param_count
+ fsig
->hasthis
;
6233 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
6234 if (check_linkdemand (cfg
, method
, cmethod
))
6236 CHECK_CFG_EXCEPTION
;
6239 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
6240 g_assert_not_reached ();
6243 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
6246 if (!cfg
->generic_sharing_context
&& cmethod
)
6247 g_assert (!mono_method_check_context_used (cmethod
));
6251 //g_assert (!virtual || fsig->hasthis);
6255 if (constrained_call
) {
6257 * We have the `constrained.' prefix opcode.
6259 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
6261 * The type parameter is instantiated as a valuetype,
6262 * but that type doesn't override the method we're
6263 * calling, so we need to box `this'.
6265 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &constrained_call
->byval_arg
, sp
[0]->dreg
, 0);
6266 ins
->klass
= constrained_call
;
6267 sp
[0] = handle_box (cfg
, ins
, constrained_call
, mono_class_check_context_used (constrained_call
));
6268 CHECK_CFG_EXCEPTION
;
6269 } else if (!constrained_call
->valuetype
) {
6270 int dreg
= alloc_preg (cfg
);
6273 * The type parameter is instantiated as a reference
6274 * type. We have a managed pointer on the stack, so
6275 * we need to dereference it here.
6277 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6278 ins
->type
= STACK_OBJ
;
6280 } else if (cmethod
->klass
->valuetype
)
6282 constrained_call
= NULL
;
6285 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6289 * If the callee is a shared method, then its static cctor
6290 * might not get called after the call was patched.
6292 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
6293 emit_generic_class_init (cfg
, cmethod
->klass
);
6294 CHECK_TYPELOAD (cmethod
->klass
);
6297 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6298 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6299 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6300 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6301 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6304 * Pass vtable iff target method might
6305 * be shared, which means that sharing
6306 * is enabled for its class and its
6307 * context is sharable (and it's not a
6310 if (sharing_enabled
&& context_sharable
&&
6311 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6315 if (cmethod
&& mini_method_get_context (cmethod
) &&
6316 mini_method_get_context (cmethod
)->method_inst
) {
6317 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6318 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6319 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6321 g_assert (!pass_vtable
);
6323 if (sharing_enabled
&& context_sharable
)
6327 if (cfg
->generic_sharing_context
&& cmethod
) {
6328 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6330 context_used
= mono_method_check_context_used (cmethod
);
6332 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6333 /* Generic method interface
6334 calls are resolved via a
6335 helper function and don't
6337 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6338 pass_imt_from_rgctx
= TRUE
;
6342 * If a shared method calls another
6343 * shared method then the caller must
6344 * have a generic sharing context
6345 * because the magic trampoline
6346 * requires it. FIXME: We shouldn't
6347 * have to force the vtable/mrgctx
6348 * variable here. Instead there
6349 * should be a flag in the cfg to
6350 * request a generic sharing context.
6353 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6354 mono_get_vtable_var (cfg
);
6359 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6361 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6363 CHECK_TYPELOAD (cmethod
->klass
);
6364 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6369 g_assert (!vtable_arg
);
6371 if (!cfg
->compile_aot
) {
6373 * emit_get_rgctx_method () calls mono_class_vtable () so check
6374 * for type load errors before.
6376 mono_class_setup_vtable (cmethod
->klass
);
6377 CHECK_TYPELOAD (cmethod
->klass
);
6380 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6382 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6383 MONO_METHOD_IS_FINAL (cmethod
)) {
6390 if (pass_imt_from_rgctx
) {
6391 g_assert (!pass_vtable
);
6394 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6395 cmethod
, MONO_RGCTX_INFO_METHOD
);
6399 MONO_EMIT_NEW_CHECK_THIS (cfg
, sp
[0]->dreg
);
6401 /* Calling virtual generic methods */
6402 if (cmethod
&& virtual &&
6403 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6404 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6405 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6406 mono_method_signature (cmethod
)->generic_param_count
) {
6407 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6408 MonoInst
*iargs
[4];
6410 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6412 /* Prevent inlining of methods that contain indirect calls */
6415 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6416 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
&& mono_use_imt
) {
6417 g_assert (!imt_arg
);
6419 g_assert (cmethod
->is_inflated
);
6420 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6421 cmethod
, MONO_RGCTX_INFO_METHOD
);
6422 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
);
6426 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6427 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6428 MONO_ADD_INS (bblock
, store
);
6430 /* FIXME: This should be a managed pointer */
6431 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6433 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6434 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6435 cmethod
, MONO_RGCTX_INFO_METHOD
);
6436 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6437 addr
= mono_emit_jit_icall (cfg
,
6438 mono_helper_compile_generic_method
, iargs
);
6440 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6442 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6445 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6446 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6448 CHECK_CFG_EXCEPTION
;
6455 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6456 supported_tail_call
= cmethod
&& MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method
), mono_method_signature (cmethod
));
6458 supported_tail_call
= cmethod
&& mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->ret
);
6462 /* FIXME: runtime generic context pointer for jumps? */
6463 /* FIXME: handle this for generic sharing eventually */
6464 if ((ins_flag
& MONO_INST_TAILCALL
) && !cfg
->generic_sharing_context
&& !vtable_arg
&& cmethod
&& (*ip
== CEE_CALL
) && supported_tail_call
) {
6467 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6470 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6471 /* Handle tail calls similarly to calls */
6472 call
= mono_emit_call_args (cfg
, mono_method_signature (cmethod
), sp
, FALSE
, FALSE
, TRUE
);
6474 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6475 call
->tail_call
= TRUE
;
6476 call
->method
= cmethod
;
6477 call
->signature
= mono_method_signature (cmethod
);
6480 * We implement tail calls by storing the actual arguments into the
6481 * argument variables, then emitting a CEE_JMP.
6483 for (i
= 0; i
< n
; ++i
) {
6484 /* Prevent argument from being register allocated */
6485 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6486 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6490 ins
= (MonoInst
*)call
;
6491 ins
->inst_p0
= cmethod
;
6492 ins
->inst_p1
= arg_array
[0];
6493 MONO_ADD_INS (bblock
, ins
);
6494 link_bblock (cfg
, bblock
, end_bblock
);
6495 start_new_bblock
= 1;
6497 CHECK_CFG_EXCEPTION
;
6499 /* skip CEE_RET as well */
6505 /* Conversion to a JIT intrinsic */
6506 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6507 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6508 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6513 CHECK_CFG_EXCEPTION
;
6521 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6522 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6523 mono_method_check_inlining (cfg
, cmethod
) &&
6524 !g_list_find (dont_inline
, cmethod
)) {
6526 gboolean allways
= FALSE
;
6528 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6529 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6530 /* Prevent inlining of methods that call wrappers */
6532 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6536 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, allways
))) {
6538 cfg
->real_offset
+= 5;
6541 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6542 /* *sp is already set by inline_method */
6545 inline_costs
+= costs
;
6551 inline_costs
+= 10 * num_calls
++;
6553 /* Tail recursion elimination */
6554 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6555 gboolean has_vtargs
= FALSE
;
6558 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6561 /* keep it simple */
6562 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6563 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6568 for (i
= 0; i
< n
; ++i
)
6569 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6570 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6571 MONO_ADD_INS (bblock
, ins
);
6572 tblock
= start_bblock
->out_bb
[0];
6573 link_bblock (cfg
, bblock
, tblock
);
6574 ins
->inst_target_bb
= tblock
;
6575 start_new_bblock
= 1;
6577 /* skip the CEE_RET, too */
6578 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6588 /* Generic sharing */
6589 /* FIXME: only do this for generic methods if
6590 they are not shared! */
6591 if (context_used
&& !imt_arg
&& !array_rank
&&
6592 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6593 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6594 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6595 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
6598 g_assert (cfg
->generic_sharing_context
&& cmethod
);
6602 * We are compiling a call to a
6603 * generic method from shared code,
6604 * which means that we have to look up
6605 * the method in the rgctx and do an
6608 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
6611 /* Indirect calls */
6613 g_assert (!imt_arg
);
6615 if (*ip
== CEE_CALL
)
6616 g_assert (context_used
);
6617 else if (*ip
== CEE_CALLI
)
6618 g_assert (!vtable_arg
);
6620 /* FIXME: what the hell is this??? */
6621 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
6622 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
6624 /* Prevent inlining of methods with indirect calls */
6629 int rgctx_reg
= mono_alloc_preg (cfg
);
6631 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
6632 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6633 call
= (MonoCallInst
*)ins
;
6634 set_rgctx_arg (cfg
, call
, rgctx_reg
, vtable_arg
);
6636 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6638 * Instead of emitting an indirect call, emit a direct call
6639 * with the contents of the aotconst as the patch info.
6641 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
6643 } else if (addr
->opcode
== OP_GOT_ENTRY
&& addr
->inst_right
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6644 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_right
->inst_left
, fsig
, sp
);
6647 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6650 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6651 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6653 CHECK_CFG_EXCEPTION
;
6664 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
6665 if (sp
[fsig
->param_count
]->type
== STACK_OBJ
) {
6666 MonoInst
*iargs
[2];
6669 iargs
[1] = sp
[fsig
->param_count
];
6671 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
6674 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
6675 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, sp
[fsig
->param_count
]->dreg
);
6676 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
6677 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6679 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
6682 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
6683 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
6684 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
6685 CHECK_TYPELOAD (cmethod
->klass
);
6688 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6691 g_assert_not_reached ();
6694 CHECK_CFG_EXCEPTION
;
6701 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
6703 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6704 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6706 CHECK_CFG_EXCEPTION
;
6716 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
6718 } else if (imt_arg
) {
6719 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, imt_arg
);
6721 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, NULL
);
6724 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6725 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6727 CHECK_CFG_EXCEPTION
;
6734 if (cfg
->method
!= method
) {
6735 /* return from inlined method */
6737 * If in_count == 0, that means the ret is unreachable due to
6738 * being preceeded by a throw. In that case, inline_method () will
6739 * handle setting the return value
6740 * (test case: test_0_inline_throw ()).
6742 if (return_var
&& cfg
->cbb
->in_count
) {
6746 //g_assert (returnvar != -1);
6747 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
6748 cfg
->ret_var_set
= TRUE
;
6752 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
6756 * Place a seq point here too even through the IL stack is not
6757 * empty, so a step over on
6760 * will work correctly.
6762 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
6763 MONO_ADD_INS (cfg
->cbb
, ins
);
6766 g_assert (!return_var
);
6769 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
6772 if (!cfg
->vret_addr
) {
6775 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
6777 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
6779 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
6780 ins
->klass
= mono_class_from_mono_type (ret_type
);
6783 #ifdef MONO_ARCH_SOFT_FLOAT
6784 if (COMPILE_SOFT_FLOAT (cfg
) && !ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
6785 MonoInst
*iargs
[1];
6789 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
6790 mono_arch_emit_setret (cfg
, method
, conv
);
6792 mono_arch_emit_setret (cfg
, method
, *sp
);
6795 mono_arch_emit_setret (cfg
, method
, *sp
);
6800 if (sp
!= stack_start
)
6802 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6804 ins
->inst_target_bb
= end_bblock
;
6805 MONO_ADD_INS (bblock
, ins
);
6806 link_bblock (cfg
, bblock
, end_bblock
);
6807 start_new_bblock
= 1;
6811 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6813 target
= ip
+ 1 + (signed char)(*ip
);
6815 GET_BBLOCK (cfg
, tblock
, target
);
6816 link_bblock (cfg
, bblock
, tblock
);
6817 ins
->inst_target_bb
= tblock
;
6818 if (sp
!= stack_start
) {
6819 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6821 CHECK_UNVERIFIABLE (cfg
);
6823 MONO_ADD_INS (bblock
, ins
);
6824 start_new_bblock
= 1;
6825 inline_costs
+= BRANCH_COST
;
6839 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
6841 target
= ip
+ 1 + *(signed char*)ip
;
6847 inline_costs
+= BRANCH_COST
;
6851 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6854 target
= ip
+ 4 + (gint32
)read32(ip
);
6856 GET_BBLOCK (cfg
, tblock
, target
);
6857 link_bblock (cfg
, bblock
, tblock
);
6858 ins
->inst_target_bb
= tblock
;
6859 if (sp
!= stack_start
) {
6860 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6862 CHECK_UNVERIFIABLE (cfg
);
6865 MONO_ADD_INS (bblock
, ins
);
6867 start_new_bblock
= 1;
6868 inline_costs
+= BRANCH_COST
;
6875 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
6876 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
6877 guint32 opsize
= is_short
? 1 : 4;
6879 CHECK_OPSIZE (opsize
);
6881 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
6884 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
6889 GET_BBLOCK (cfg
, tblock
, target
);
6890 link_bblock (cfg
, bblock
, tblock
);
6891 GET_BBLOCK (cfg
, tblock
, ip
);
6892 link_bblock (cfg
, bblock
, tblock
);
6894 if (sp
!= stack_start
) {
6895 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6896 CHECK_UNVERIFIABLE (cfg
);
6899 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
6900 cmp
->sreg1
= sp
[0]->dreg
;
6901 type_from_op (cmp
, sp
[0], NULL
);
6904 #if SIZEOF_REGISTER == 4
6905 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
6906 /* Convert it to OP_LCOMPARE */
6907 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6908 ins
->type
= STACK_I8
;
6909 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6911 MONO_ADD_INS (bblock
, ins
);
6912 cmp
->opcode
= OP_LCOMPARE
;
6913 cmp
->sreg2
= ins
->dreg
;
6916 MONO_ADD_INS (bblock
, cmp
);
6918 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
6919 type_from_op (ins
, sp
[0], NULL
);
6920 MONO_ADD_INS (bblock
, ins
);
6921 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
6922 GET_BBLOCK (cfg
, tblock
, target
);
6923 ins
->inst_true_bb
= tblock
;
6924 GET_BBLOCK (cfg
, tblock
, ip
);
6925 ins
->inst_false_bb
= tblock
;
6926 start_new_bblock
= 2;
6929 inline_costs
+= BRANCH_COST
;
6944 MONO_INST_NEW (cfg
, ins
, *ip
);
6946 target
= ip
+ 4 + (gint32
)read32(ip
);
6952 inline_costs
+= BRANCH_COST
;
6956 MonoBasicBlock
**targets
;
6957 MonoBasicBlock
*default_bblock
;
6958 MonoJumpInfoBBTable
*table
;
6959 int offset_reg
= alloc_preg (cfg
);
6960 int target_reg
= alloc_preg (cfg
);
6961 int table_reg
= alloc_preg (cfg
);
6962 int sum_reg
= alloc_preg (cfg
);
6963 gboolean use_op_switch
;
6967 n
= read32 (ip
+ 1);
6970 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
6974 CHECK_OPSIZE (n
* sizeof (guint32
));
6975 target
= ip
+ n
* sizeof (guint32
);
6977 GET_BBLOCK (cfg
, default_bblock
, target
);
6979 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
6980 for (i
= 0; i
< n
; ++i
) {
6981 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
6982 targets
[i
] = tblock
;
6986 if (sp
!= stack_start
) {
6988 * Link the current bb with the targets as well, so handle_stack_args
6989 * will set their in_stack correctly.
6991 link_bblock (cfg
, bblock
, default_bblock
);
6992 for (i
= 0; i
< n
; ++i
)
6993 link_bblock (cfg
, bblock
, targets
[i
]);
6995 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6997 CHECK_UNVERIFIABLE (cfg
);
7000 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
7001 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
7004 for (i
= 0; i
< n
; ++i
)
7005 link_bblock (cfg
, bblock
, targets
[i
]);
7007 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
7008 table
->table
= targets
;
7009 table
->table_size
= n
;
7011 use_op_switch
= FALSE
;
7013 /* ARM implements SWITCH statements differently */
7014 /* FIXME: Make it use the generic implementation */
7015 if (!cfg
->compile_aot
)
7016 use_op_switch
= TRUE
;
7019 if (COMPILE_LLVM (cfg
))
7020 use_op_switch
= TRUE
;
7022 cfg
->cbb
->has_jump_table
= 1;
7024 if (use_op_switch
) {
7025 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
7026 ins
->sreg1
= src1
->dreg
;
7027 ins
->inst_p0
= table
;
7028 ins
->inst_many_bb
= targets
;
7029 ins
->klass
= GUINT_TO_POINTER (n
);
7030 MONO_ADD_INS (cfg
->cbb
, ins
);
7032 if (sizeof (gpointer
) == 8)
7033 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
7035 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
7037 #if SIZEOF_REGISTER == 8
7038 /* The upper word might not be zero, and we add it to a 64 bit address later */
7039 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
7042 if (cfg
->compile_aot
) {
7043 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
7045 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
7046 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
7047 ins
->inst_p0
= table
;
7048 ins
->dreg
= table_reg
;
7049 MONO_ADD_INS (cfg
->cbb
, ins
);
7052 /* FIXME: Use load_memindex */
7053 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
7054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
7055 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
7057 start_new_bblock
= 1;
7058 inline_costs
+= (BRANCH_COST
* 2);
7078 dreg
= alloc_freg (cfg
);
7081 dreg
= alloc_lreg (cfg
);
7084 dreg
= alloc_preg (cfg
);
7087 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
7088 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
7089 ins
->flags
|= ins_flag
;
7091 MONO_ADD_INS (bblock
, ins
);
7106 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
7107 ins
->flags
|= ins_flag
;
7109 MONO_ADD_INS (bblock
, ins
);
7111 #if HAVE_WRITE_BARRIERS
7112 if (*ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0))) {
7113 MonoInst
*dummy_use
;
7114 /* insert call to write barrier */
7115 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7116 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
7117 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, sp
[1]);
7128 MONO_INST_NEW (cfg
, ins
, (*ip
));
7130 ins
->sreg1
= sp
[0]->dreg
;
7131 ins
->sreg2
= sp
[1]->dreg
;
7132 type_from_op (ins
, sp
[0], sp
[1]);
7134 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7136 /* Use the immediate opcodes if possible */
7137 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
7138 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
7139 if (imm_opcode
!= -1) {
7140 ins
->opcode
= imm_opcode
;
7141 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7144 sp
[1]->opcode
= OP_NOP
;
7148 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7150 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7167 MONO_INST_NEW (cfg
, ins
, (*ip
));
7169 ins
->sreg1
= sp
[0]->dreg
;
7170 ins
->sreg2
= sp
[1]->dreg
;
7171 type_from_op (ins
, sp
[0], sp
[1]);
7173 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
7174 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7176 /* FIXME: Pass opcode to is_inst_imm */
7178 /* Use the immediate opcodes if possible */
7179 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
7182 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
7183 if (imm_opcode
!= -1) {
7184 ins
->opcode
= imm_opcode
;
7185 if (sp
[1]->opcode
== OP_I8CONST
) {
7186 #if SIZEOF_REGISTER == 8
7187 ins
->inst_imm
= sp
[1]->inst_l
;
7189 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
7190 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
7194 ins
->inst_imm
= (gssize
)(sp
[1]->inst_c0
);
7197 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7198 if (sp
[1]->next
== NULL
)
7199 sp
[1]->opcode
= OP_NOP
;
7202 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7204 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7217 case CEE_CONV_OVF_I8
:
7218 case CEE_CONV_OVF_U8
:
7222 /* Special case this earlier so we have long constants in the IR */
7223 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
7224 int data
= sp
[-1]->inst_c0
;
7225 sp
[-1]->opcode
= OP_I8CONST
;
7226 sp
[-1]->type
= STACK_I8
;
7227 #if SIZEOF_REGISTER == 8
7228 if ((*ip
) == CEE_CONV_U8
)
7229 sp
[-1]->inst_c0
= (guint32
)data
;
7231 sp
[-1]->inst_c0
= data
;
7233 sp
[-1]->inst_ls_word
= data
;
7234 if ((*ip
) == CEE_CONV_U8
)
7235 sp
[-1]->inst_ms_word
= 0;
7237 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7239 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7246 case CEE_CONV_OVF_I4
:
7247 case CEE_CONV_OVF_I1
:
7248 case CEE_CONV_OVF_I2
:
7249 case CEE_CONV_OVF_I
:
7250 case CEE_CONV_OVF_U
:
7253 if (sp
[-1]->type
== STACK_R8
) {
7254 ADD_UNOP (CEE_CONV_OVF_I8
);
7261 case CEE_CONV_OVF_U1
:
7262 case CEE_CONV_OVF_U2
:
7263 case CEE_CONV_OVF_U4
:
7266 if (sp
[-1]->type
== STACK_R8
) {
7267 ADD_UNOP (CEE_CONV_OVF_U8
);
7274 case CEE_CONV_OVF_I1_UN
:
7275 case CEE_CONV_OVF_I2_UN
:
7276 case CEE_CONV_OVF_I4_UN
:
7277 case CEE_CONV_OVF_I8_UN
:
7278 case CEE_CONV_OVF_U1_UN
:
7279 case CEE_CONV_OVF_U2_UN
:
7280 case CEE_CONV_OVF_U4_UN
:
7281 case CEE_CONV_OVF_U8_UN
:
7282 case CEE_CONV_OVF_I_UN
:
7283 case CEE_CONV_OVF_U_UN
:
7290 CHECK_CFG_EXCEPTION
;
7294 case CEE_ADD_OVF_UN
:
7296 case CEE_MUL_OVF_UN
:
7298 case CEE_SUB_OVF_UN
:
7306 token
= read32 (ip
+ 1);
7307 klass
= mini_get_class (method
, token
, generic_context
);
7308 CHECK_TYPELOAD (klass
);
7310 if (generic_class_is_reference_type (cfg
, klass
)) {
7311 MonoInst
*store
, *load
;
7312 int dreg
= alloc_preg (cfg
);
7314 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7315 load
->flags
|= ins_flag
;
7316 MONO_ADD_INS (cfg
->cbb
, load
);
7318 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7319 store
->flags
|= ins_flag
;
7320 MONO_ADD_INS (cfg
->cbb
, store
);
7322 #if HAVE_WRITE_BARRIERS
7323 if (cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
) {
7324 MonoInst
*dummy_use
;
7325 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7326 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
7327 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, sp
[1]);
7331 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7343 token
= read32 (ip
+ 1);
7344 klass
= mini_get_class (method
, token
, generic_context
);
7345 CHECK_TYPELOAD (klass
);
7347 /* Optimize the common ldobj+stloc combination */
7357 loc_index
= ip
[5] - CEE_STLOC_0
;
7364 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7365 CHECK_LOCAL (loc_index
);
7367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7368 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7374 /* Optimize the ldobj+stobj combination */
7375 /* The reference case ends up being a load+store anyway */
7376 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7381 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7388 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7397 CHECK_STACK_OVF (1);
7399 n
= read32 (ip
+ 1);
7401 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7402 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7403 ins
->type
= STACK_OBJ
;
7406 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7407 MonoInst
*iargs
[1];
7409 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7410 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7412 if (cfg
->opt
& MONO_OPT_SHARED
) {
7413 MonoInst
*iargs
[3];
7415 if (cfg
->compile_aot
) {
7416 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7418 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7419 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7420 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7421 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7422 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7424 if (bblock
->out_of_line
) {
7425 MonoInst
*iargs
[2];
7427 if (image
== mono_defaults
.corlib
) {
7429 * Avoid relocations in AOT and save some space by using a
7430 * version of helper_ldstr specialized to mscorlib.
7432 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7433 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7435 /* Avoid creating the string object */
7436 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7437 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7438 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7442 if (cfg
->compile_aot
) {
7443 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7445 MONO_ADD_INS (bblock
, ins
);
7448 NEW_PCONST (cfg
, ins
, NULL
);
7449 ins
->type
= STACK_OBJ
;
7450 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7452 MONO_ADD_INS (bblock
, ins
);
7461 MonoInst
*iargs
[2];
7462 MonoMethodSignature
*fsig
;
7465 MonoInst
*vtable_arg
= NULL
;
7468 token
= read32 (ip
+ 1);
7469 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7472 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7476 mono_save_token_info (cfg
, image
, token
, cmethod
);
7478 if (!mono_class_init (cmethod
->klass
))
7481 if (cfg
->generic_sharing_context
)
7482 context_used
= mono_method_check_context_used (cmethod
);
7484 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7485 if (check_linkdemand (cfg
, method
, cmethod
))
7487 CHECK_CFG_EXCEPTION
;
7488 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7489 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7492 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7493 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7494 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7495 mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7496 CHECK_TYPELOAD (cmethod
->klass
);
7498 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7499 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7502 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7503 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7505 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7507 CHECK_TYPELOAD (cmethod
->klass
);
7508 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7513 n
= fsig
->param_count
;
7517 * Generate smaller code for the common newobj <exception> instruction in
7518 * argument checking code.
7520 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7521 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7522 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7523 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7524 MonoInst
*iargs
[3];
7526 g_assert (!vtable_arg
);
7530 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
7533 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
7537 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
7542 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
7545 g_assert_not_reached ();
7553 /* move the args to allow room for 'this' in the first position */
7559 /* check_call_signature () requires sp[0] to be set */
7560 this_ins
.type
= STACK_OBJ
;
7562 if (check_call_signature (cfg
, fsig
, sp
))
7567 if (mini_class_is_system_array (cmethod
->klass
)) {
7568 g_assert (!vtable_arg
);
7570 *sp
= emit_get_rgctx_method (cfg
, context_used
,
7571 cmethod
, MONO_RGCTX_INFO_METHOD
);
7573 /* Avoid varargs in the common case */
7574 if (fsig
->param_count
== 1)
7575 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
7576 else if (fsig
->param_count
== 2)
7577 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
7578 else if (fsig
->param_count
== 3)
7579 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_3
, sp
);
7581 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
7582 } else if (cmethod
->string_ctor
) {
7583 g_assert (!context_used
);
7584 g_assert (!vtable_arg
);
7585 /* we simply pass a null pointer */
7586 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
7587 /* now call the string ctor */
7588 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
);
7590 MonoInst
* callvirt_this_arg
= NULL
;
7592 if (cmethod
->klass
->valuetype
) {
7593 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
7594 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
7595 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
7600 * The code generated by mini_emit_virtual_call () expects
7601 * iargs [0] to be a boxed instance, but luckily the vcall
7602 * will be transformed into a normal call there.
7604 } else if (context_used
) {
7605 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, context_used
);
7608 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7610 CHECK_TYPELOAD (cmethod
->klass
);
7613 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7614 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7615 * As a workaround, we call class cctors before allocating objects.
7617 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7618 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7619 if (cfg
->verbose_level
> 2)
7620 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
7621 class_inits
= g_slist_prepend (class_inits
, vtable
);
7624 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, 0);
7627 CHECK_CFG_EXCEPTION
; /*for handle_alloc*/
7630 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
7632 /* Now call the actual ctor */
7633 /* Avoid virtual calls to ctors if possible */
7634 if (cmethod
->klass
->marshalbyref
)
7635 callvirt_this_arg
= sp
[0];
7638 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_ctor (cfg
, cmethod
, fsig
, sp
))) {
7639 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7640 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
7645 CHECK_CFG_EXCEPTION
;
7650 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
7651 mono_method_check_inlining (cfg
, cmethod
) &&
7652 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
7653 !g_list_find (dont_inline
, cmethod
)) {
7656 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
7657 cfg
->real_offset
+= 5;
7660 inline_costs
+= costs
- 5;
7663 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
);
7665 } else if (context_used
&&
7666 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7667 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
7668 MonoInst
*cmethod_addr
;
7670 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
7671 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7673 mono_emit_rgctx_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
7676 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
,
7677 callvirt_this_arg
, NULL
, vtable_arg
);
7681 if (alloc
== NULL
) {
7683 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
7684 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
7698 token
= read32 (ip
+ 1);
7699 klass
= mini_get_class (method
, token
, generic_context
);
7700 CHECK_TYPELOAD (klass
);
7701 if (sp
[0]->type
!= STACK_OBJ
)
7704 if (cfg
->generic_sharing_context
)
7705 context_used
= mono_class_check_context_used (klass
);
7707 if (!context_used
&& mono_class_has_variant_generic_params (klass
)) {
7714 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
7716 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
7720 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
7721 MonoMethod
*mono_castclass
;
7722 MonoInst
*iargs
[1];
7725 mono_castclass
= mono_marshal_get_castclass (klass
);
7728 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7729 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7730 g_assert (costs
> 0);
7733 cfg
->real_offset
+= 5;
7738 inline_costs
+= costs
;
7741 ins
= handle_castclass (cfg
, klass
, *sp
, context_used
);
7742 CHECK_CFG_EXCEPTION
;
7752 token
= read32 (ip
+ 1);
7753 klass
= mini_get_class (method
, token
, generic_context
);
7754 CHECK_TYPELOAD (klass
);
7755 if (sp
[0]->type
!= STACK_OBJ
)
7758 if (cfg
->generic_sharing_context
)
7759 context_used
= mono_class_check_context_used (klass
);
7761 if (!context_used
&& mono_class_has_variant_generic_params (klass
)) {
7768 EMIT_NEW_CLASSCONST (cfg
, args
[1], klass
);
7770 *sp
= mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
7774 } else if (!context_used
&& (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
7775 MonoMethod
*mono_isinst
;
7776 MonoInst
*iargs
[1];
7779 mono_isinst
= mono_marshal_get_isinst (klass
);
7782 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
7783 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7784 g_assert (costs
> 0);
7787 cfg
->real_offset
+= 5;
7792 inline_costs
+= costs
;
7795 ins
= handle_isinst (cfg
, klass
, *sp
, context_used
);
7796 CHECK_CFG_EXCEPTION
;
7803 case CEE_UNBOX_ANY
: {
7807 token
= read32 (ip
+ 1);
7808 klass
= mini_get_class (method
, token
, generic_context
);
7809 CHECK_TYPELOAD (klass
);
7811 mono_save_token_info (cfg
, image
, token
, klass
);
7813 if (cfg
->generic_sharing_context
)
7814 context_used
= mono_class_check_context_used (klass
);
7816 if (generic_class_is_reference_type (cfg
, klass
)) {
7817 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7819 MonoInst
*iargs
[2];
7824 iargs
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7825 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, iargs
);
7829 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7830 MonoMethod
*mono_castclass
;
7831 MonoInst
*iargs
[1];
7834 mono_castclass
= mono_marshal_get_castclass (klass
);
7837 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7838 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7840 g_assert (costs
> 0);
7843 cfg
->real_offset
+= 5;
7847 inline_costs
+= costs
;
7849 ins
= handle_castclass (cfg
, klass
, *sp
, 0);
7850 CHECK_CFG_EXCEPTION
;
7858 if (mono_class_is_nullable (klass
)) {
7859 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7866 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7872 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7885 token
= read32 (ip
+ 1);
7886 klass
= mini_get_class (method
, token
, generic_context
);
7887 CHECK_TYPELOAD (klass
);
7889 mono_save_token_info (cfg
, image
, token
, klass
);
7891 if (cfg
->generic_sharing_context
)
7892 context_used
= mono_class_check_context_used (klass
);
7894 if (generic_class_is_reference_type (cfg
, klass
)) {
7900 if (klass
== mono_defaults
.void_class
)
7902 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
7904 /* frequent check in generic code: box (struct), brtrue */
7905 if (!mono_class_is_nullable (klass
) &&
7906 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) && (ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
)) {
7907 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7909 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7910 if (*ip
== CEE_BRTRUE_S
) {
7913 target
= ip
+ 1 + (signed char)(*ip
);
7918 target
= ip
+ 4 + (gint
)(read32 (ip
));
7921 GET_BBLOCK (cfg
, tblock
, target
);
7922 link_bblock (cfg
, bblock
, tblock
);
7923 ins
->inst_target_bb
= tblock
;
7924 GET_BBLOCK (cfg
, tblock
, ip
);
7926 * This leads to some inconsistency, since the two bblocks are
7927 * not really connected, but it is needed for handling stack
7928 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7929 * FIXME: This should only be needed if sp != stack_start, but that
7930 * doesn't work for some reason (test failure in mcs/tests on x86).
7932 link_bblock (cfg
, bblock
, tblock
);
7933 if (sp
!= stack_start
) {
7934 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7936 CHECK_UNVERIFIABLE (cfg
);
7938 MONO_ADD_INS (bblock
, ins
);
7939 start_new_bblock
= 1;
7943 *sp
++ = handle_box (cfg
, val
, klass
, context_used
);
7945 CHECK_CFG_EXCEPTION
;
7954 token
= read32 (ip
+ 1);
7955 klass
= mini_get_class (method
, token
, generic_context
);
7956 CHECK_TYPELOAD (klass
);
7958 mono_save_token_info (cfg
, image
, token
, klass
);
7960 if (cfg
->generic_sharing_context
)
7961 context_used
= mono_class_check_context_used (klass
);
7963 if (mono_class_is_nullable (klass
)) {
7966 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7967 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
7971 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7981 MonoClassField
*field
;
7985 if (*ip
== CEE_STFLD
) {
7992 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
7994 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
7997 token
= read32 (ip
+ 1);
7998 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7999 field
= mono_method_get_wrapper_data (method
, token
);
8000 klass
= field
->parent
;
8003 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8007 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8008 FIELD_ACCESS_FAILURE
;
8009 mono_class_init (klass
);
8011 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8012 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8013 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8014 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8017 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
8018 if (*ip
== CEE_STFLD
) {
8019 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
8021 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8022 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
8023 MonoInst
*iargs
[5];
8026 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8027 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8028 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
8032 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8033 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
8034 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8035 g_assert (costs
> 0);
8037 cfg
->real_offset
+= 5;
8040 inline_costs
+= costs
;
8042 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
8047 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
8049 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
8050 store
->flags
|= MONO_INST_FAULT
;
8052 #if HAVE_WRITE_BARRIERS
8053 if (mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
8054 /* insert call to write barrier */
8055 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
8056 MonoInst
*iargs
[2], *dummy_use
;
8059 dreg
= alloc_preg (cfg
);
8060 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8062 mono_emit_method_call (cfg
, write_barrier
, iargs
, NULL
);
8064 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, sp
[1]);
8068 store
->flags
|= ins_flag
;
8075 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8076 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
8077 MonoInst
*iargs
[4];
8080 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8081 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8082 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
8083 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8084 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
8085 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8087 g_assert (costs
> 0);
8089 cfg
->real_offset
+= 5;
8093 inline_costs
+= costs
;
8095 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
8099 if (sp
[0]->type
== STACK_VTYPE
) {
8102 /* Have to compute the address of the variable */
8104 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8106 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8108 g_assert (var
->klass
== klass
);
8110 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
8114 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
);
8116 if (*ip
== CEE_LDFLDA
) {
8117 dreg
= alloc_preg (cfg
);
8119 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8120 ins
->klass
= mono_class_from_mono_type (field
->type
);
8121 ins
->type
= STACK_MP
;
8126 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
8127 load
->flags
|= ins_flag
;
8128 load
->flags
|= MONO_INST_FAULT
;
8139 MonoClassField
*field
;
8140 gpointer addr
= NULL
;
8141 gboolean is_special_static
;
8144 token
= read32 (ip
+ 1);
8146 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8147 field
= mono_method_get_wrapper_data (method
, token
);
8148 klass
= field
->parent
;
8151 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8154 mono_class_init (klass
);
8155 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8156 FIELD_ACCESS_FAILURE
;
8158 /* if the class is Critical then transparent code cannot access it's fields */
8159 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
8160 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
8163 * We can only support shared generic static
8164 * field access on architectures where the
8165 * trampoline code has been extended to handle
8166 * the generic class init.
8168 #ifndef MONO_ARCH_VTABLE_REG
8169 GENERIC_SHARING_FAILURE (*ip
);
8172 if (cfg
->generic_sharing_context
)
8173 context_used
= mono_class_check_context_used (klass
);
8175 g_assert (!(field
->type
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
8177 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8178 * to be called here.
8180 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
8181 mono_class_vtable (cfg
->domain
, klass
);
8182 CHECK_TYPELOAD (klass
);
8184 mono_domain_lock (cfg
->domain
);
8185 if (cfg
->domain
->special_static_fields
)
8186 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
8187 mono_domain_unlock (cfg
->domain
);
8189 is_special_static
= mono_class_field_is_special_static (field
);
8191 /* Generate IR to compute the field address */
8192 if (is_special_static
&& ((gsize
)addr
& 0x80000000) == 0 && mono_get_thread_intrinsic (cfg
) && !(cfg
->opt
& MONO_OPT_SHARED
) && !context_used
) {
8194 * Fast access to TLS data
8195 * Inline version of get_thread_static_data () in
8199 int idx
, static_data_reg
, array_reg
, dreg
;
8200 MonoInst
*thread_ins
;
8202 // offset &= 0x7fffffff;
8203 // idx = (offset >> 24) - 1;
8204 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8206 thread_ins
= mono_get_thread_intrinsic (cfg
);
8207 MONO_ADD_INS (cfg
->cbb
, thread_ins
);
8208 static_data_reg
= alloc_ireg (cfg
);
8209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, static_data_reg
, thread_ins
->dreg
, G_STRUCT_OFFSET (MonoInternalThread
, static_data
));
8211 if (cfg
->compile_aot
) {
8212 int offset_reg
, offset2_reg
, idx_reg
;
8214 /* For TLS variables, this will return the TLS offset */
8215 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8216 offset_reg
= ins
->dreg
;
8217 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset_reg
, offset_reg
, 0x7fffffff);
8218 idx_reg
= alloc_ireg (cfg
);
8219 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_IMM
, idx_reg
, offset_reg
, 24);
8220 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISUB_IMM
, idx_reg
, idx_reg
, 1);
8221 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHL_IMM
, idx_reg
, idx_reg
, sizeof (gpointer
) == 8 ? 3 : 2);
8222 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, static_data_reg
, static_data_reg
, idx_reg
);
8223 array_reg
= alloc_ireg (cfg
);
8224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, 0);
8225 offset2_reg
= alloc_ireg (cfg
);
8226 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset2_reg
, offset_reg
, 0xffffff);
8227 dreg
= alloc_ireg (cfg
);
8228 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, array_reg
, offset2_reg
);
8230 offset
= (gsize
)addr
& 0x7fffffff;
8231 idx
= (offset
>> 24) - 1;
8233 array_reg
= alloc_ireg (cfg
);
8234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, idx
* sizeof (gpointer
));
8235 dreg
= alloc_ireg (cfg
);
8236 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_ADD_IMM
, dreg
, array_reg
, (offset
& 0xffffff));
8238 } else if ((cfg
->opt
& MONO_OPT_SHARED
) ||
8239 (cfg
->compile_aot
&& is_special_static
) ||
8240 (context_used
&& is_special_static
)) {
8241 MonoInst
*iargs
[2];
8243 g_assert (field
->parent
);
8244 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8246 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
8247 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
8249 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8251 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8252 } else if (context_used
) {
8253 MonoInst
*static_data
;
8256 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8257 method->klass->name_space, method->klass->name, method->name,
8258 depth, field->offset);
8261 if (mono_class_needs_cctor_run (klass
, method
))
8262 emit_generic_class_init (cfg
, klass
);
8265 * The pointer we're computing here is
8267 * super_info.static_data + field->offset
8269 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
8270 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
8272 if (field
->offset
== 0) {
8275 int addr_reg
= mono_alloc_preg (cfg
);
8276 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
8278 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
8279 MonoInst
*iargs
[2];
8281 g_assert (field
->parent
);
8282 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8283 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8284 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8286 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
8288 CHECK_TYPELOAD (klass
);
8290 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8291 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8292 if (cfg
->verbose_level
> 2)
8293 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
8294 class_inits
= g_slist_prepend (class_inits
, vtable
);
8296 if (cfg
->run_cctors
) {
8298 /* This makes so that inline cannot trigger */
8299 /* .cctors: too many apps depend on them */
8300 /* running with a specific order... */
8301 if (! vtable
->initialized
)
8303 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
8305 set_exception_object (cfg
, ex
);
8306 goto exception_exit
;
8310 addr
= (char*)vtable
->data
+ field
->offset
;
8312 if (cfg
->compile_aot
)
8313 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8315 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8317 MonoInst
*iargs
[1];
8318 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8319 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8323 /* Generate IR to do the actual load/store operation */
8325 if (*ip
== CEE_LDSFLDA
) {
8326 ins
->klass
= mono_class_from_mono_type (field
->type
);
8327 ins
->type
= STACK_PTR
;
8329 } else if (*ip
== CEE_STSFLD
) {
8334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, ins
->dreg
, 0, sp
[0]->dreg
);
8335 store
->flags
|= ins_flag
;
8337 gboolean is_const
= FALSE
;
8338 MonoVTable
*vtable
= NULL
;
8340 if (!context_used
) {
8341 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8342 CHECK_TYPELOAD (klass
);
8344 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8345 vtable
->initialized
&& (field
->type
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8346 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8347 int ro_type
= field
->type
->type
;
8348 if (ro_type
== MONO_TYPE_VALUETYPE
&& field
->type
->data
.klass
->enumtype
) {
8349 ro_type
= mono_class_enum_basetype (field
->type
->data
.klass
)->type
;
8351 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8354 case MONO_TYPE_BOOLEAN
:
8356 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8360 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8363 case MONO_TYPE_CHAR
:
8365 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8369 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8374 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8378 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8384 case MONO_TYPE_FNPTR
:
8385 #ifndef HAVE_MOVING_COLLECTOR
8386 case MONO_TYPE_STRING
:
8387 case MONO_TYPE_OBJECT
:
8388 case MONO_TYPE_CLASS
:
8389 case MONO_TYPE_SZARRAY
:
8390 case MONO_TYPE_ARRAY
:
8392 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8393 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8398 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8403 case MONO_TYPE_VALUETYPE
:
8413 CHECK_STACK_OVF (1);
8415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8416 load
->flags
|= ins_flag
;
8429 token
= read32 (ip
+ 1);
8430 klass
= mini_get_class (method
, token
, generic_context
);
8431 CHECK_TYPELOAD (klass
);
8432 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8433 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8434 #if HAVE_WRITE_BARRIERS
8435 if (cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&&
8436 generic_class_is_reference_type (cfg
, klass
)) {
8437 MonoInst
*dummy_use
;
8438 /* insert call to write barrier */
8439 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
8440 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
8441 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, sp
[1]);
8454 const char *data_ptr
;
8456 guint32 field_token
;
8462 token
= read32 (ip
+ 1);
8464 klass
= mini_get_class (method
, token
, generic_context
);
8465 CHECK_TYPELOAD (klass
);
8467 if (cfg
->generic_sharing_context
)
8468 context_used
= mono_class_check_context_used (klass
);
8470 if (sp
[0]->type
== STACK_I8
|| (SIZEOF_VOID_P
== 8 && sp
[0]->type
== STACK_PTR
)) {
8471 MONO_INST_NEW (cfg
, ins
, OP_LCONV_TO_I4
);
8472 ins
->sreg1
= sp
[0]->dreg
;
8473 ins
->type
= STACK_I4
;
8474 ins
->dreg
= alloc_ireg (cfg
);
8475 MONO_ADD_INS (cfg
->cbb
, ins
);
8476 *sp
= mono_decompose_opcode (cfg
, ins
);
8481 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
8482 /* FIXME: we cannot get a managed
8483 allocator because we can't get the
8484 open generic class's vtable. We
8485 have the same problem in
8486 handle_alloc(). This
8487 needs to be solved so that we can
8488 have managed allocs of shared
8491 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8492 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8494 MonoMethod
*managed_alloc
= NULL
;
8496 /* FIXME: Decompose later to help abcrem */
8499 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
8500 array_class
, MONO_RGCTX_INFO_VTABLE
);
8505 ins
= mono_emit_method_call (cfg
, managed_alloc
, args
, NULL
);
8507 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
8509 if (cfg
->opt
& MONO_OPT_SHARED
) {
8510 /* Decompose now to avoid problems with references to the domainvar */
8511 MonoInst
*iargs
[3];
8513 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8514 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8517 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
8519 /* Decompose later since it is needed by abcrem */
8520 MonoClass
*array_type
= mono_array_class_get (klass
, 1);
8521 mono_class_vtable (cfg
->domain
, array_type
);
8522 CHECK_TYPELOAD (array_type
);
8524 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
8525 ins
->dreg
= alloc_preg (cfg
);
8526 ins
->sreg1
= sp
[0]->dreg
;
8527 ins
->inst_newa_class
= klass
;
8528 ins
->type
= STACK_OBJ
;
8530 MONO_ADD_INS (cfg
->cbb
, ins
);
8531 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8532 cfg
->cbb
->has_array_access
= TRUE
;
8534 /* Needed so mono_emit_load_get_addr () gets called */
8535 mono_get_got_var (cfg
);
8545 * we inline/optimize the initialization sequence if possible.
8546 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8547 * for small sizes open code the memcpy
8548 * ensure the rva field is big enough
8550 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
8551 MonoMethod
*memcpy_method
= get_memcpy_method ();
8552 MonoInst
*iargs
[3];
8553 int add_reg
= alloc_preg (cfg
);
8555 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
8556 if (cfg
->compile_aot
) {
8557 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
8559 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
8561 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
8562 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
8571 if (sp
[0]->type
!= STACK_OBJ
)
8574 dreg
= alloc_preg (cfg
);
8575 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
8576 ins
->dreg
= alloc_preg (cfg
);
8577 ins
->sreg1
= sp
[0]->dreg
;
8578 ins
->type
= STACK_I4
;
8579 MONO_ADD_INS (cfg
->cbb
, ins
);
8580 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8581 cfg
->cbb
->has_array_access
= TRUE
;
8589 if (sp
[0]->type
!= STACK_OBJ
)
8592 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8594 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
8595 CHECK_TYPELOAD (klass
);
8596 /* we need to make sure that this array is exactly the type it needs
8597 * to be for correctness. the wrappers are lax with their usage
8598 * so we need to ignore them here
8600 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
) {
8601 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
8602 mini_emit_check_array_type (cfg
, sp
[0], array_class
);
8603 CHECK_TYPELOAD (array_class
);
8607 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
8622 case CEE_LDELEM_REF
: {
8628 if (*ip
== CEE_LDELEM
) {
8630 token
= read32 (ip
+ 1);
8631 klass
= mini_get_class (method
, token
, generic_context
);
8632 CHECK_TYPELOAD (klass
);
8633 mono_class_init (klass
);
8636 klass
= array_access_to_klass (*ip
);
8638 if (sp
[0]->type
!= STACK_OBJ
)
8641 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8643 if (sp
[1]->opcode
== OP_ICONST
) {
8644 int array_reg
= sp
[0]->dreg
;
8645 int index_reg
= sp
[1]->dreg
;
8646 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8648 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8649 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
8651 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
8652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
8655 if (*ip
== CEE_LDELEM
)
8668 case CEE_STELEM_REF
:
8675 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8677 if (*ip
== CEE_STELEM
) {
8679 token
= read32 (ip
+ 1);
8680 klass
= mini_get_class (method
, token
, generic_context
);
8681 CHECK_TYPELOAD (klass
);
8682 mono_class_init (klass
);
8685 klass
= array_access_to_klass (*ip
);
8687 if (sp
[0]->type
!= STACK_OBJ
)
8690 /* storing a NULL doesn't need any of the complex checks in stelemref */
8691 if (generic_class_is_reference_type (cfg
, klass
) &&
8692 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
8693 MonoMethod
* helper
= mono_marshal_get_stelemref ();
8694 MonoInst
*iargs
[3];
8696 if (sp
[0]->type
!= STACK_OBJ
)
8698 if (sp
[2]->type
!= STACK_OBJ
)
8705 mono_emit_method_call (cfg
, helper
, iargs
, NULL
);
8707 if (sp
[1]->opcode
== OP_ICONST
) {
8708 int array_reg
= sp
[0]->dreg
;
8709 int index_reg
= sp
[1]->dreg
;
8710 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8712 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
8715 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
8716 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
8720 if (*ip
== CEE_STELEM
)
8727 case CEE_CKFINITE
: {
8731 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
8732 ins
->sreg1
= sp
[0]->dreg
;
8733 ins
->dreg
= alloc_freg (cfg
);
8734 ins
->type
= STACK_R8
;
8735 MONO_ADD_INS (bblock
, ins
);
8737 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8742 case CEE_REFANYVAL
: {
8743 MonoInst
*src_var
, *src
;
8745 int klass_reg
= alloc_preg (cfg
);
8746 int dreg
= alloc_preg (cfg
);
8749 MONO_INST_NEW (cfg
, ins
, *ip
);
8752 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8753 CHECK_TYPELOAD (klass
);
8754 mono_class_init (klass
);
8756 if (cfg
->generic_sharing_context
)
8757 context_used
= mono_class_check_context_used (klass
);
8760 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8762 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8763 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
8764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
8767 MonoInst
*klass_ins
;
8769 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
8770 klass
, MONO_RGCTX_INFO_KLASS
);
8773 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
8774 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
8776 mini_emit_class_check (cfg
, klass_reg
, klass
);
8778 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
8779 ins
->type
= STACK_MP
;
8784 case CEE_MKREFANY
: {
8785 MonoInst
*loc
, *addr
;
8788 MONO_INST_NEW (cfg
, ins
, *ip
);
8791 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8792 CHECK_TYPELOAD (klass
);
8793 mono_class_init (klass
);
8795 if (cfg
->generic_sharing_context
)
8796 context_used
= mono_class_check_context_used (klass
);
8798 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
8799 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
8802 MonoInst
*const_ins
;
8803 int type_reg
= alloc_preg (cfg
);
8805 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
8806 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
8807 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8808 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8809 } else if (cfg
->compile_aot
) {
8810 int const_reg
= alloc_preg (cfg
);
8811 int type_reg
= alloc_preg (cfg
);
8813 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
8814 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
8815 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8816 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8818 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
8819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
8821 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
8823 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
8824 ins
->type
= STACK_VTYPE
;
8825 ins
->klass
= mono_defaults
.typed_reference_class
;
8832 MonoClass
*handle_class
;
8834 CHECK_STACK_OVF (1);
8837 n
= read32 (ip
+ 1);
8839 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
8840 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
8841 handle
= mono_method_get_wrapper_data (method
, n
);
8842 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
8843 if (handle_class
== mono_defaults
.typehandle_class
)
8844 handle
= &((MonoClass
*)handle
)->byval_arg
;
8847 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
8851 mono_class_init (handle_class
);
8852 if (cfg
->generic_sharing_context
) {
8853 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
8854 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
8855 /* This case handles ldtoken
8856 of an open type, like for
8859 } else if (handle_class
== mono_defaults
.typehandle_class
) {
8860 /* If we get a MONO_TYPE_CLASS
8861 then we need to provide the
8863 instantiation of it. */
8864 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
8867 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
8868 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
8869 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
8870 else if (handle_class
== mono_defaults
.methodhandle_class
)
8871 context_used
= mono_method_check_context_used (handle
);
8873 g_assert_not_reached ();
8876 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
8877 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
8878 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
8879 MonoInst
*addr
, *vtvar
, *iargs
[3];
8880 int method_context_used
;
8882 if (cfg
->generic_sharing_context
)
8883 method_context_used
= mono_method_check_context_used (method
);
8885 method_context_used
= 0;
8887 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8889 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8890 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
8891 if (method_context_used
) {
8892 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
8893 method
, MONO_RGCTX_INFO_METHOD
);
8894 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
8896 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
8897 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
8899 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8901 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8903 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8905 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8906 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
8907 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
8908 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
8909 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
8910 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
8912 mono_class_init (tclass
);
8914 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8915 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
8916 } else if (cfg
->compile_aot
) {
8917 if (method
->wrapper_type
) {
8918 if (mono_class_get (tclass
->image
, tclass
->type_token
) == tclass
&& !generic_context
) {
8919 /* Special case for static synchronized wrappers */
8920 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, tclass
->image
, tclass
->type_token
, generic_context
);
8922 /* FIXME: n is not a normal token */
8923 cfg
->disable_aot
= TRUE
;
8924 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
8927 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
8930 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
8932 ins
->type
= STACK_OBJ
;
8933 ins
->klass
= cmethod
->klass
;
8936 MonoInst
*addr
, *vtvar
;
8938 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8941 if (handle_class
== mono_defaults
.typehandle_class
) {
8942 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8943 mono_class_from_mono_type (handle
),
8944 MONO_RGCTX_INFO_TYPE
);
8945 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
8946 ins
= emit_get_rgctx_method (cfg
, context_used
,
8947 handle
, MONO_RGCTX_INFO_METHOD
);
8948 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
8949 ins
= emit_get_rgctx_field (cfg
, context_used
,
8950 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
8952 g_assert_not_reached ();
8954 } else if (cfg
->compile_aot
) {
8955 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
8957 EMIT_NEW_PCONST (cfg
, ins
, handle
);
8959 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8961 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8971 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
8973 ins
->sreg1
= sp
[0]->dreg
;
8975 bblock
->out_of_line
= TRUE
;
8976 MONO_ADD_INS (bblock
, ins
);
8977 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
8978 MONO_ADD_INS (bblock
, ins
);
8981 link_bblock (cfg
, bblock
, end_bblock
);
8982 start_new_bblock
= 1;
8984 case CEE_ENDFINALLY
:
8985 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
8986 MONO_ADD_INS (bblock
, ins
);
8988 start_new_bblock
= 1;
8991 * Control will leave the method so empty the stack, otherwise
8992 * the next basic block will start with a nonempty stack.
8994 while (sp
!= stack_start
) {
9002 if (*ip
== CEE_LEAVE
) {
9004 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
9007 target
= ip
+ 2 + (signed char)(ip
[1]);
9010 /* empty the stack */
9011 while (sp
!= stack_start
) {
9016 * If this leave statement is in a catch block, check for a
9017 * pending exception, and rethrow it if necessary.
9018 * We avoid doing this in runtime invoke wrappers, since those are called
9019 * by native code which excepts the wrapper to catch all exceptions.
9021 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9022 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9025 * Use <= in the final comparison to handle clauses with multiple
9026 * leave statements, like in bug #78024.
9027 * The ordering of the exception clauses guarantees that we find the
9030 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
) && method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
) {
9032 MonoBasicBlock
*dont_throw
;
9037 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9040 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
9042 NEW_BBLOCK (cfg
, dont_throw
);
9045 * Currently, we allways rethrow the abort exception, despite the
9046 * fact that this is not correct. See thread6.cs for an example.
9047 * But propagating the abort exception is more important than
9048 * getting the sematics right.
9050 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
9051 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
9052 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
9054 MONO_START_BB (cfg
, dont_throw
);
9059 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
9061 MonoExceptionClause
*clause
;
9063 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
9065 tblock
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
9067 link_bblock (cfg
, bblock
, tblock
);
9068 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
9069 ins
->inst_target_bb
= tblock
;
9070 ins
->inst_eh_block
= clause
;
9071 MONO_ADD_INS (bblock
, ins
);
9072 bblock
->has_call_handler
= 1;
9073 if (COMPILE_LLVM (cfg
)) {
9074 MonoBasicBlock
*target_bb
;
9077 * Link the finally bblock with the target, since it will
9078 * conceptually branch there.
9079 * FIXME: Have to link the bblock containing the endfinally.
9081 GET_BBLOCK (cfg
, target_bb
, target
);
9082 link_bblock (cfg
, tblock
, target_bb
);
9085 g_list_free (handlers
);
9088 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9089 MONO_ADD_INS (bblock
, ins
);
9090 GET_BBLOCK (cfg
, tblock
, target
);
9091 link_bblock (cfg
, bblock
, tblock
);
9092 ins
->inst_target_bb
= tblock
;
9093 start_new_bblock
= 1;
9095 if (*ip
== CEE_LEAVE
)
9104 * Mono specific opcodes
9106 case MONO_CUSTOM_PREFIX
: {
9108 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
9112 case CEE_MONO_ICALL
: {
9114 MonoJitICallInfo
*info
;
9116 token
= read32 (ip
+ 2);
9117 func
= mono_method_get_wrapper_data (method
, token
);
9118 info
= mono_find_jit_icall_by_addr (func
);
9121 CHECK_STACK (info
->sig
->param_count
);
9122 sp
-= info
->sig
->param_count
;
9124 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
9125 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
9129 inline_costs
+= 10 * num_calls
++;
9133 case CEE_MONO_LDPTR
: {
9136 CHECK_STACK_OVF (1);
9138 token
= read32 (ip
+ 2);
9140 ptr
= mono_method_get_wrapper_data (method
, token
);
9141 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
9142 MonoJitICallInfo
*callinfo
;
9143 const char *icall_name
;
9145 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
9146 g_assert (icall_name
);
9147 callinfo
= mono_find_jit_icall_by_name (icall_name
);
9148 g_assert (callinfo
);
9150 if (ptr
== callinfo
->func
) {
9151 /* Will be transformed into an AOTCONST later */
9152 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9158 /* FIXME: Generalize this */
9159 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
9160 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
9165 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9168 inline_costs
+= 10 * num_calls
++;
9169 /* Can't embed random pointers into AOT code */
9170 cfg
->disable_aot
= 1;
9173 case CEE_MONO_ICALL_ADDR
: {
9174 MonoMethod
*cmethod
;
9177 CHECK_STACK_OVF (1);
9179 token
= read32 (ip
+ 2);
9181 cmethod
= mono_method_get_wrapper_data (method
, token
);
9183 if (cfg
->compile_aot
) {
9184 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
9186 ptr
= mono_lookup_internal_call (cmethod
);
9188 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9194 case CEE_MONO_VTADDR
: {
9195 MonoInst
*src_var
, *src
;
9201 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9202 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
9207 case CEE_MONO_NEWOBJ
: {
9208 MonoInst
*iargs
[2];
9210 CHECK_STACK_OVF (1);
9212 token
= read32 (ip
+ 2);
9213 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9214 mono_class_init (klass
);
9215 NEW_DOMAINCONST (cfg
, iargs
[0]);
9216 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
9217 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9218 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
9219 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
9221 inline_costs
+= 10 * num_calls
++;
9224 case CEE_MONO_OBJADDR
:
9227 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
9228 ins
->dreg
= alloc_preg (cfg
);
9229 ins
->sreg1
= sp
[0]->dreg
;
9230 ins
->type
= STACK_MP
;
9231 MONO_ADD_INS (cfg
->cbb
, ins
);
9235 case CEE_MONO_LDNATIVEOBJ
:
9237 * Similar to LDOBJ, but instead load the unmanaged
9238 * representation of the vtype to the stack.
9243 token
= read32 (ip
+ 2);
9244 klass
= mono_method_get_wrapper_data (method
, token
);
9245 g_assert (klass
->valuetype
);
9246 mono_class_init (klass
);
9249 MonoInst
*src
, *dest
, *temp
;
9252 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
9253 temp
->backend
.is_pinvoke
= 1;
9254 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
9255 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
9257 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
9258 dest
->type
= STACK_VTYPE
;
9259 dest
->klass
= klass
;
9265 case CEE_MONO_RETOBJ
: {
9267 * Same as RET, but return the native representation of a vtype
9270 g_assert (cfg
->ret
);
9271 g_assert (mono_method_signature (method
)->pinvoke
);
9276 token
= read32 (ip
+ 2);
9277 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9279 if (!cfg
->vret_addr
) {
9280 g_assert (cfg
->ret_var_is_local
);
9282 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
9284 EMIT_NEW_RETLOADA (cfg
, ins
);
9286 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
9288 if (sp
!= stack_start
)
9291 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9292 ins
->inst_target_bb
= end_bblock
;
9293 MONO_ADD_INS (bblock
, ins
);
9294 link_bblock (cfg
, bblock
, end_bblock
);
9295 start_new_bblock
= 1;
9299 case CEE_MONO_CISINST
:
9300 case CEE_MONO_CCASTCLASS
: {
9305 token
= read32 (ip
+ 2);
9306 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9307 if (ip
[1] == CEE_MONO_CISINST
)
9308 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
9310 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
9316 case CEE_MONO_SAVE_LMF
:
9317 case CEE_MONO_RESTORE_LMF
:
9318 #ifdef MONO_ARCH_HAVE_LMF_OPS
9319 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
9320 MONO_ADD_INS (bblock
, ins
);
9321 cfg
->need_lmf_area
= TRUE
;
9325 case CEE_MONO_CLASSCONST
:
9326 CHECK_STACK_OVF (1);
9328 token
= read32 (ip
+ 2);
9329 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
9332 inline_costs
+= 10 * num_calls
++;
9334 case CEE_MONO_NOT_TAKEN
:
9335 bblock
->out_of_line
= TRUE
;
9339 CHECK_STACK_OVF (1);
9341 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
9342 ins
->dreg
= alloc_preg (cfg
);
9343 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
9344 ins
->type
= STACK_PTR
;
9345 MONO_ADD_INS (bblock
, ins
);
9349 case CEE_MONO_DYN_CALL
: {
9352 /* It would be easier to call a trampoline, but that would put an
9353 * extra frame on the stack, confusing exception handling. So
9354 * implement it inline using an opcode for now.
9357 if (!cfg
->dyn_call_var
) {
9358 cfg
->dyn_call_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
9359 /* prevent it from being register allocated */
9360 cfg
->dyn_call_var
->flags
|= MONO_INST_INDIRECT
;
9363 /* Has to use a call inst since it local regalloc expects it */
9364 MONO_INST_NEW_CALL (cfg
, call
, OP_DYN_CALL
);
9365 ins
= (MonoInst
*)call
;
9367 ins
->sreg1
= sp
[0]->dreg
;
9368 ins
->sreg2
= sp
[1]->dreg
;
9369 MONO_ADD_INS (bblock
, ins
);
9371 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9372 cfg
->param_area
= MAX (cfg
->param_area
, MONO_ARCH_DYN_CALL_PARAM_AREA
);
9376 inline_costs
+= 10 * num_calls
++;
9381 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
9391 /* somewhat similar to LDTOKEN */
9392 MonoInst
*addr
, *vtvar
;
9393 CHECK_STACK_OVF (1);
9394 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
9396 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9397 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
9399 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9400 ins
->type
= STACK_VTYPE
;
9401 ins
->klass
= mono_defaults
.argumenthandle_class
;
9414 * The following transforms:
9415 * CEE_CEQ into OP_CEQ
9416 * CEE_CGT into OP_CGT
9417 * CEE_CGT_UN into OP_CGT_UN
9418 * CEE_CLT into OP_CLT
9419 * CEE_CLT_UN into OP_CLT_UN
9421 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9423 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9425 cmp
->sreg1
= sp
[0]->dreg
;
9426 cmp
->sreg2
= sp
[1]->dreg
;
9427 type_from_op (cmp
, sp
[0], sp
[1]);
9429 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9430 cmp
->opcode
= OP_LCOMPARE
;
9431 else if (sp
[0]->type
== STACK_R8
)
9432 cmp
->opcode
= OP_FCOMPARE
;
9434 cmp
->opcode
= OP_ICOMPARE
;
9435 MONO_ADD_INS (bblock
, cmp
);
9436 ins
->type
= STACK_I4
;
9437 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9438 type_from_op (ins
, sp
[0], sp
[1]);
9440 if (cmp
->opcode
== OP_FCOMPARE
) {
9442 * The backends expect the fceq opcodes to do the
9445 cmp
->opcode
= OP_NOP
;
9446 ins
->sreg1
= cmp
->sreg1
;
9447 ins
->sreg2
= cmp
->sreg2
;
9449 MONO_ADD_INS (bblock
, ins
);
9456 MonoMethod
*cil_method
;
9457 gboolean needs_static_rgctx_invoke
;
9459 CHECK_STACK_OVF (1);
9461 n
= read32 (ip
+ 2);
9462 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9465 mono_class_init (cmethod
->klass
);
9467 mono_save_token_info (cfg
, image
, n
, cmethod
);
9469 if (cfg
->generic_sharing_context
)
9470 context_used
= mono_method_check_context_used (cmethod
);
9472 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
9474 cil_method
= cmethod
;
9475 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
9476 METHOD_ACCESS_FAILURE
;
9478 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9479 if (check_linkdemand (cfg
, method
, cmethod
))
9481 CHECK_CFG_EXCEPTION
;
9482 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9483 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9487 * Optimize the common case of ldftn+delegate creation
9489 if ((sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
9490 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
9491 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
9493 int invoke_context_used
= 0;
9495 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
9496 if (!invoke
|| !mono_method_signature (invoke
))
9499 if (cfg
->generic_sharing_context
)
9500 invoke_context_used
= mono_method_check_context_used (invoke
);
9502 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9503 /* FIXME: SGEN support */
9504 if (invoke_context_used
== 0) {
9505 MonoInst
*target_ins
;
9508 if (cfg
->verbose_level
> 3)
9509 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9510 target_ins
= sp
[-1];
9512 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
, context_used
);
9513 CHECK_CFG_EXCEPTION
;
9522 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
9523 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
9527 inline_costs
+= 10 * num_calls
++;
9530 case CEE_LDVIRTFTN
: {
9535 n
= read32 (ip
+ 2);
9536 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9539 mono_class_init (cmethod
->klass
);
9541 if (cfg
->generic_sharing_context
)
9542 context_used
= mono_method_check_context_used (cmethod
);
9544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9545 if (check_linkdemand (cfg
, method
, cmethod
))
9547 CHECK_CFG_EXCEPTION
;
9548 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9549 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9555 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
9556 cmethod
, MONO_RGCTX_INFO_METHOD
);
9559 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
9561 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
9564 inline_costs
+= 10 * num_calls
++;
9568 CHECK_STACK_OVF (1);
9570 n
= read16 (ip
+ 2);
9572 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
9577 CHECK_STACK_OVF (1);
9579 n
= read16 (ip
+ 2);
9581 NEW_ARGLOADA (cfg
, ins
, n
);
9582 MONO_ADD_INS (cfg
->cbb
, ins
);
9590 n
= read16 (ip
+ 2);
9592 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
9594 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
9598 CHECK_STACK_OVF (1);
9600 n
= read16 (ip
+ 2);
9602 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
9607 unsigned char *tmp_ip
;
9608 CHECK_STACK_OVF (1);
9610 n
= read16 (ip
+ 2);
9613 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
9619 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
9628 n
= read16 (ip
+ 2);
9630 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
9632 emit_stloc_ir (cfg
, sp
, header
, n
);
9639 if (sp
!= stack_start
)
9641 if (cfg
->method
!= method
)
9643 * Inlining this into a loop in a parent could lead to
9644 * stack overflows which is different behavior than the
9645 * non-inlined case, thus disable inlining in this case.
9647 goto inline_failure
;
9649 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
9650 ins
->dreg
= alloc_preg (cfg
);
9651 ins
->sreg1
= sp
[0]->dreg
;
9652 ins
->type
= STACK_PTR
;
9653 MONO_ADD_INS (cfg
->cbb
, ins
);
9655 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
9657 ins
->flags
|= MONO_INST_INIT
;
9662 case CEE_ENDFILTER
: {
9663 MonoExceptionClause
*clause
, *nearest
;
9664 int cc
, nearest_num
;
9668 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
9670 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
9671 ins
->sreg1
= (*sp
)->dreg
;
9672 MONO_ADD_INS (bblock
, ins
);
9673 start_new_bblock
= 1;
9678 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
9679 clause
= &header
->clauses
[cc
];
9680 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
9681 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
9682 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
9688 if ((ip
- header
->code
) != nearest
->handler_offset
)
9693 case CEE_UNALIGNED_
:
9694 ins_flag
|= MONO_INST_UNALIGNED
;
9695 /* FIXME: record alignment? we can assume 1 for now */
9700 ins_flag
|= MONO_INST_VOLATILE
;
9704 ins_flag
|= MONO_INST_TAILCALL
;
9705 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
9706 /* Can't inline tail calls at this time */
9707 inline_costs
+= 100000;
9714 token
= read32 (ip
+ 2);
9715 klass
= mini_get_class (method
, token
, generic_context
);
9716 CHECK_TYPELOAD (klass
);
9717 if (generic_class_is_reference_type (cfg
, klass
))
9718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
9720 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
9724 case CEE_CONSTRAINED_
:
9726 token
= read32 (ip
+ 2);
9727 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
9728 constrained_call
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9730 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
9731 CHECK_TYPELOAD (constrained_call
);
9736 MonoInst
*iargs
[3];
9740 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
9741 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
9742 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
9743 /* emit_memset only works when val == 0 */
9744 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
9749 if (ip
[1] == CEE_CPBLK
) {
9750 MonoMethod
*memcpy_method
= get_memcpy_method ();
9751 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9753 MonoMethod
*memset_method
= get_memset_method ();
9754 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
9764 ins_flag
|= MONO_INST_NOTYPECHECK
;
9766 ins_flag
|= MONO_INST_NORANGECHECK
;
9767 /* we ignore the no-nullcheck for now since we
9768 * really do it explicitly only when doing callvirt->call
9774 int handler_offset
= -1;
9776 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9777 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9778 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
9779 handler_offset
= clause
->handler_offset
;
9784 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
9786 g_assert (handler_offset
!= -1);
9788 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
9789 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
9790 ins
->sreg1
= load
->dreg
;
9791 MONO_ADD_INS (bblock
, ins
);
9793 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
9794 MONO_ADD_INS (bblock
, ins
);
9797 link_bblock (cfg
, bblock
, end_bblock
);
9798 start_new_bblock
= 1;
9806 CHECK_STACK_OVF (1);
9808 token
= read32 (ip
+ 2);
9809 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
&& !method
->klass
->image
->dynamic
) {
9810 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
9811 token
= mono_type_size (type
, &ialign
);
9813 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
9814 CHECK_TYPELOAD (klass
);
9815 mono_class_init (klass
);
9816 token
= mono_class_value_size (klass
, &align
);
9818 EMIT_NEW_ICONST (cfg
, ins
, token
);
9823 case CEE_REFANYTYPE
: {
9824 MonoInst
*src_var
, *src
;
9830 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9832 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9833 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9834 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
9852 g_warning ("opcode 0xfe 0x%02x not handled", ip
[1]);
9862 g_warning ("opcode 0x%02x not handled", *ip
);
9866 if (start_new_bblock
!= 1)
9869 bblock
->cil_length
= ip
- bblock
->cil_code
;
9870 bblock
->next_bb
= end_bblock
;
9872 if (cfg
->method
== method
&& cfg
->domainvar
) {
9874 MonoInst
*get_domain
;
9876 cfg
->cbb
= init_localsbb
;
9878 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
9879 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
9882 get_domain
->dreg
= alloc_preg (cfg
);
9883 MONO_ADD_INS (cfg
->cbb
, get_domain
);
9885 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
9886 MONO_ADD_INS (cfg
->cbb
, store
);
9889 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9890 if (cfg
->compile_aot
)
9891 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9892 mono_get_got_var (cfg
);
9895 if (cfg
->method
== method
&& cfg
->got_var
)
9896 mono_emit_load_got_addr (cfg
);
9901 cfg
->cbb
= init_localsbb
;
9903 for (i
= 0; i
< header
->num_locals
; ++i
) {
9904 MonoType
*ptype
= header
->locals
[i
];
9905 int t
= ptype
->type
;
9906 dreg
= cfg
->locals
[i
]->dreg
;
9908 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
9909 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
9911 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9912 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
9913 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9914 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
9915 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9916 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
9917 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
9918 ins
->type
= STACK_R8
;
9919 ins
->inst_p0
= (void*)&r8_0
;
9920 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
9921 MONO_ADD_INS (init_localsbb
, ins
);
9922 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
9923 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
9924 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
9925 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
9927 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9932 if (cfg
->init_ref_vars
&& cfg
->method
== method
) {
9933 /* Emit initialization for ref vars */
9934 // FIXME: Avoid duplication initialization for IL locals.
9935 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
9936 MonoInst
*ins
= cfg
->varinfo
[i
];
9938 if (ins
->opcode
== OP_LOCAL
&& ins
->type
== STACK_OBJ
)
9939 MONO_EMIT_NEW_PCONST (cfg
, ins
->dreg
, NULL
);
9943 /* Add a sequence point for method entry/exit events */
9945 NEW_SEQ_POINT (cfg
, ins
, METHOD_ENTRY_IL_OFFSET
, FALSE
);
9946 MONO_ADD_INS (init_localsbb
, ins
);
9947 NEW_SEQ_POINT (cfg
, ins
, METHOD_EXIT_IL_OFFSET
, FALSE
);
9948 MONO_ADD_INS (cfg
->bb_exit
, ins
);
9953 if (cfg
->method
== method
) {
9955 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
9956 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
9958 mono_create_spvar_for_region (cfg
, bb
->region
);
9959 if (cfg
->verbose_level
> 2)
9960 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
9964 g_slist_free (class_inits
);
9965 dont_inline
= g_list_remove (dont_inline
, method
);
9967 if (inline_costs
< 0) {
9970 /* Method is too large */
9971 mname
= mono_method_full_name (method
, TRUE
);
9972 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
9973 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
9975 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
9976 mono_basic_block_free (original_bb
);
9980 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
9981 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
9983 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
9984 mono_basic_block_free (original_bb
);
9985 return inline_costs
;
9988 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
9995 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
9999 set_exception_type_from_invalid_il (cfg
, method
, ip
);
10003 g_slist_free (class_inits
);
10004 mono_basic_block_free (original_bb
);
10005 dont_inline
= g_list_remove (dont_inline
, method
);
10006 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
/*
 * store_membase_reg_to_store_membase_imm:
 * Map a register-source memory-store opcode (*_MEMBASE_REG) to its
 * immediate-source counterpart (*_MEMBASE_IMM) of the same width,
 * as spelled out by the case/return pairs below.  Any opcode without
 * a mapping hits g_assert_not_reached ().
 * NOTE(review): the extraction that produced this text dropped the
 * function's return-type line, braces and the `switch (opcode)` header;
 * only the case/return pairs survive.  Fragments below keep their
 * original file line numbers fused into the text.
 */
10011 store_membase_reg_to_store_membase_imm (int opcode
)
/* pointer-sized store -> immediate form */
10014 case OP_STORE_MEMBASE_REG
:
10015 return OP_STORE_MEMBASE_IMM
;
/* 1-, 2-, 4- and 8-byte stores -> immediate forms */
10016 case OP_STOREI1_MEMBASE_REG
:
10017 return OP_STOREI1_MEMBASE_IMM
;
10018 case OP_STOREI2_MEMBASE_REG
:
10019 return OP_STOREI2_MEMBASE_IMM
;
10020 case OP_STOREI4_MEMBASE_REG
:
10021 return OP_STOREI4_MEMBASE_IMM
;
10022 case OP_STOREI8_MEMBASE_REG
:
10023 return OP_STOREI8_MEMBASE_IMM
;
/* unmapped opcode: caller bug */
10025 g_assert_not_reached ();
10031 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 * Map a two-register IR opcode to its immediate-operand variant
 * (e.g. an integer add becomes OP_IADD_IMM).  Covers 32-bit (OP_I*)
 * and 64-bit (OP_L*) ALU ops, compares, membase stores, and some
 * x86/amd64-specific opcodes, per the returns visible below.
 * NOTE(review): the `switch (opcode)` header and nearly all `case`
 * labels were lost in extraction — only the return statements (and a
 * few case labels) survive, with original line numbers fused in.
 * The source-opcode for most returns must be confirmed against the
 * original file.
 */
10034 mono_op_to_op_imm (int opcode
)
/* 32-bit integer ALU ops -> *_IMM forms */
10038 return OP_IADD_IMM
;
10040 return OP_ISUB_IMM
;
10042 return OP_IDIV_IMM
;
10044 return OP_IDIV_UN_IMM
;
10046 return OP_IREM_IMM
;
10048 return OP_IREM_UN_IMM
;
10050 return OP_IMUL_IMM
;
10052 return OP_IAND_IMM
;
/* NOTE(review): a gap in line numbers here (10053-10055) suggests a
 * dropped OR mapping — verify against the original file. */
10056 return OP_IXOR_IMM
;
10058 return OP_ISHL_IMM
;
10060 return OP_ISHR_IMM
;
10062 return OP_ISHR_UN_IMM
;
/* 64-bit (long) ALU ops -> *_IMM forms */
10065 return OP_LADD_IMM
;
10067 return OP_LSUB_IMM
;
10069 return OP_LAND_IMM
;
10073 return OP_LXOR_IMM
;
10075 return OP_LSHL_IMM
;
10077 return OP_LSHR_IMM
;
10079 return OP_LSHR_UN_IMM
;
/* compares -> immediate-compare forms */
10082 return OP_COMPARE_IMM
;
10084 return OP_ICOMPARE_IMM
;
10086 return OP_LCOMPARE_IMM
;
/* membase stores: register source -> immediate source */
10088 case OP_STORE_MEMBASE_REG
:
10089 return OP_STORE_MEMBASE_IMM
;
10090 case OP_STOREI1_MEMBASE_REG
:
10091 return OP_STOREI1_MEMBASE_IMM
;
10092 case OP_STOREI2_MEMBASE_REG
:
10093 return OP_STOREI2_MEMBASE_IMM
;
10094 case OP_STOREI4_MEMBASE_REG
:
10095 return OP_STOREI4_MEMBASE_IMM
;
/* x86/amd64-only immediate forms */
10097 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10099 return OP_X86_PUSH_IMM
;
10100 case OP_X86_COMPARE_MEMBASE_REG
:
10101 return OP_X86_COMPARE_MEMBASE_IMM
;
10103 #if defined(TARGET_AMD64)
10104 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
10105 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* indirect calls through a register -> direct-call forms */
10107 case OP_VOIDCALL_REG
:
10108 return OP_VOIDCALL
;
10116 return OP_LOCALLOC_IMM
;
/*
 * ldind_to_load_membase:
 * Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * typed membase load (OP_LOAD*_MEMBASE).  Object references and
 * native-int loads both use the pointer-sized OP_LOAD_MEMBASE
 * (see the CEE_LDIND_REF case below).  Unknown opcodes assert.
 * NOTE(review): most `case CEE_LDIND_*` labels were dropped by the
 * extraction; only the return statements (and the LDIND_REF label)
 * survive.  Confirm each return's source opcode against the original.
 */
10123 ldind_to_load_membase (int opcode
)
/* signed/unsigned 1- and 2-byte, then 4-byte loads */
10127 return OP_LOADI1_MEMBASE
;
10129 return OP_LOADU1_MEMBASE
;
10131 return OP_LOADI2_MEMBASE
;
10133 return OP_LOADU2_MEMBASE
;
10135 return OP_LOADI4_MEMBASE
;
10137 return OP_LOADU4_MEMBASE
;
/* pointer-sized load (native int) */
10139 return OP_LOAD_MEMBASE
;
/* object references also use the pointer-sized load */
10140 case CEE_LDIND_REF
:
10141 return OP_LOAD_MEMBASE
;
10143 return OP_LOADI8_MEMBASE
;
/* float loads */
10145 return OP_LOADR4_MEMBASE
;
10147 return OP_LOADR8_MEMBASE
;
/* unmapped opcode: caller bug */
10149 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 * Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * typed membase store (OP_STORE*_MEMBASE_REG).  Object references use
 * the pointer-sized OP_STORE_MEMBASE_REG (see CEE_STIND_REF below).
 * Unknown opcodes assert.
 * NOTE(review): most `case CEE_STIND_*` labels were dropped by the
 * extraction; only the return statements (and the STIND_REF label)
 * survive.  Confirm each return's source opcode against the original.
 */
10156 stind_to_store_membase (int opcode
)
/* 1-, 2- and 4-byte integer stores */
10160 return OP_STOREI1_MEMBASE_REG
;
10162 return OP_STOREI2_MEMBASE_REG
;
10164 return OP_STOREI4_MEMBASE_REG
;
/* object references: pointer-sized store */
10166 case CEE_STIND_REF
:
10167 return OP_STORE_MEMBASE_REG
;
10169 return OP_STOREI8_MEMBASE_REG
;
/* float stores */
10171 return OP_STORER4_MEMBASE_REG
;
10173 return OP_STORER8_MEMBASE_REG
;
/* unmapped opcode: caller bug */
10175 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 * Map a base+offset load (OP_LOAD*_MEMBASE) to an absolute-address
 * load (OP_LOAD*_MEM).  Only emitted on x86/amd64 (guarded by the
 * TARGET_X86/TARGET_AMD64 #if below); the 8-byte variant is further
 * restricted to 64-bit registers (SIZEOF_REGISTER == 8).
 * NOTE(review): the `switch (opcode)` header, closing braces and the
 * fallback return were dropped by the extraction; only the case/return
 * pairs and the preprocessor guards survive.
 */
10182 mono_load_membase_to_load_mem (int opcode
)
10184 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10185 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* pointer-sized load */
10187 case OP_LOAD_MEMBASE
:
10188 return OP_LOAD_MEM
;
/* unsigned 1- and 2-byte loads */
10189 case OP_LOADU1_MEMBASE
:
10190 return OP_LOADU1_MEM
;
10191 case OP_LOADU2_MEMBASE
:
10192 return OP_LOADU2_MEM
;
/* 4-byte loads, signed and unsigned */
10193 case OP_LOADI4_MEMBASE
:
10194 return OP_LOADI4_MEM
;
10195 case OP_LOADU4_MEMBASE
:
10196 return OP_LOADU4_MEM
;
/* 8-byte loads only on 64-bit targets */
10197 #if SIZEOF_REGISTER == 8
10198 case OP_LOADI8_MEMBASE
:
10199 return OP_LOADI8_MEM
;
/*
 * op_to_op_dest_membase:
 * Given a following store (store_opcode) and an ALU opcode, return an
 * x86/amd64 read-modify-write opcode that operates directly on the
 * memory destination (e.g. add-to-memory), fusing the ALU op with the
 * store.  Per the guards below, x86 requires a pointer-sized or 4-byte
 * register store; amd64 additionally accepts an 8-byte register store.
 * NOTE(review): the `switch (opcode)` headers and `case` labels were
 * dropped by the extraction — only the store_opcode guards and the
 * return statements survive.  Confirm which ALU opcode selects each
 * return against the original file.
 */
10208 op_to_op_dest_membase (int store_opcode
, int opcode
)
10210 #if defined(TARGET_X86)
/* x86: only fuse with pointer-sized or 4-byte register stores */
10211 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
/* register-source RMW forms */
10216 return OP_X86_ADD_MEMBASE_REG
;
10218 return OP_X86_SUB_MEMBASE_REG
;
10220 return OP_X86_AND_MEMBASE_REG
;
10222 return OP_X86_OR_MEMBASE_REG
;
10224 return OP_X86_XOR_MEMBASE_REG
;
/* immediate-source RMW forms */
10227 return OP_X86_ADD_MEMBASE_IMM
;
10230 return OP_X86_SUB_MEMBASE_IMM
;
10233 return OP_X86_AND_MEMBASE_IMM
;
10236 return OP_X86_OR_MEMBASE_IMM
;
10239 return OP_X86_XOR_MEMBASE_IMM
;
10245 #if defined(TARGET_AMD64)
/* amd64: 8-byte register stores can be fused too */
10246 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
/* 32-bit register-source RMW forms */
10251 return OP_X86_ADD_MEMBASE_REG
;
10253 return OP_X86_SUB_MEMBASE_REG
;
10255 return OP_X86_AND_MEMBASE_REG
;
10257 return OP_X86_OR_MEMBASE_REG
;
10259 return OP_X86_XOR_MEMBASE_REG
;
/* 32-bit immediate-source RMW forms */
10261 return OP_X86_ADD_MEMBASE_IMM
;
10263 return OP_X86_SUB_MEMBASE_IMM
;
10265 return OP_X86_AND_MEMBASE_IMM
;
10267 return OP_X86_OR_MEMBASE_IMM
;
10269 return OP_X86_XOR_MEMBASE_IMM
;
/* 64-bit register-source RMW forms */
10271 return OP_AMD64_ADD_MEMBASE_REG
;
10273 return OP_AMD64_SUB_MEMBASE_REG
;
10275 return OP_AMD64_AND_MEMBASE_REG
;
10277 return OP_AMD64_OR_MEMBASE_REG
;
10279 return OP_AMD64_XOR_MEMBASE_REG
;
/* 64-bit immediate-source RMW forms */
10282 return OP_AMD64_ADD_MEMBASE_IMM
;
10285 return OP_AMD64_SUB_MEMBASE_IMM
;
10288 return OP_AMD64_AND_MEMBASE_IMM
;
10291 return OP_AMD64_OR_MEMBASE_IMM
;
10294 return OP_AMD64_XOR_MEMBASE_IMM
;
/*
 * op_to_op_store_membase:
 * x86/amd64 only: when a compare-result opcode feeds a 1-byte register
 * store (OP_STOREI1_MEMBASE_REG), fuse them into a setcc-to-memory
 * opcode (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 * NOTE(review): the `switch (opcode)` header and the `case` labels
 * selecting between the SETEQ and SETNE branches were dropped by the
 * extraction, as was the fallback return — confirm against the
 * original file.
 */
10304 op_to_op_store_membase (int store_opcode
, int opcode
)
10306 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* equality result stored as a byte */
10309 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10310 return OP_X86_SETEQ_MEMBASE
;
/* inequality result stored as a byte */
10312 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10313 return OP_X86_SETNE_MEMBASE
;
/*
 * op_to_op_src1_membase:
 * Fuse a preceding memory load (load_opcode) feeding the FIRST source
 * operand of `opcode` into a single memory-operand instruction
 * (push-from-memory, compare-with-memory) on x86/amd64.
 * x86: only pointer-sized / 4-byte loads qualify (see the guard below).
 * amd64: 4-byte loads fuse into ICOMPARE forms, 8-byte/pointer loads
 * into the 64-bit COMPARE/PUSH forms.
 * NOTE(review): the `switch (opcode)` headers and several `case`
 * labels were dropped by the extraction; original line numbers are
 * fused into the surviving fragments.
 */
10321 op_to_op_src1_membase (int load_opcode
, int opcode
)
10324 /* FIXME: This has sign extension issues */
/* special case: byte-load + 32-bit immediate compare fuse into an
 * 8-bit memory compare */
10326 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10327 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 4-byte loads can be fused */
10330 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10335 return OP_X86_PUSH_MEMBASE
;
10336 case OP_COMPARE_IMM
:
10337 case OP_ICOMPARE_IMM
:
10338 return OP_X86_COMPARE_MEMBASE_IMM
;
10341 return OP_X86_COMPARE_MEMBASE_REG
;
10345 #ifdef TARGET_AMD64
10346 /* FIXME: This has sign extension issues */
10348 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10349 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64 push fuses with pointer-sized / 8-byte loads */
10354 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10355 return OP_X86_PUSH_MEMBASE
;
10357 /* FIXME: This only works for 32 bit immediates
10358 case OP_COMPARE_IMM:
10359 case OP_LCOMPARE_IMM:
10360 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10361 return OP_AMD64_COMPARE_MEMBASE_IMM;
/* 32-bit immediate compare fuses with 4-byte loads */
10363 case OP_ICOMPARE_IMM
:
10364 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10365 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* 64-bit register compare fuses with pointer-sized / 8-byte loads */
10369 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10370 return OP_AMD64_COMPARE_MEMBASE_REG
;
/* 32-bit register compare fuses with 4-byte loads */
10373 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10374 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
/*
 * op_to_op_src2_membase:
 * Fuse a preceding memory load (load_opcode) feeding the SECOND source
 * operand of `opcode` into a reg-op-memory instruction on x86/amd64
 * (compare/add/sub/and/or/xor with a memory right-hand side).
 * x86: only pointer-sized / 4-byte loads qualify (see the guard below).
 * amd64: 4-byte loads select the 32-bit (OP_X86_* / ICOMPARE) forms,
 * 8-byte/pointer loads select the 64-bit OP_AMD64_* forms.
 * NOTE(review): the `switch (opcode)` headers and `case` labels were
 * dropped by the extraction — only guards and returns survive; confirm
 * which ALU opcode selects each pair against the original file.
 */
10383 op_to_op_src2_membase (int load_opcode
, int opcode
)
/* x86: only pointer-sized / 4-byte loads can be fused */
10386 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10392 return OP_X86_COMPARE_REG_MEMBASE
;
10394 return OP_X86_ADD_REG_MEMBASE
;
10396 return OP_X86_SUB_REG_MEMBASE
;
10398 return OP_X86_AND_REG_MEMBASE
;
10400 return OP_X86_OR_REG_MEMBASE
;
10402 return OP_X86_XOR_REG_MEMBASE
;
10406 #ifdef TARGET_AMD64
/* compares: width of the fused form follows the load width */
10409 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10410 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
10414 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10415 return OP_AMD64_COMPARE_REG_MEMBASE
;
/* 32-bit ALU ops fuse with 4-byte loads */
10418 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10419 return OP_X86_ADD_REG_MEMBASE
;
10421 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10422 return OP_X86_SUB_REG_MEMBASE
;
10424 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10425 return OP_X86_AND_REG_MEMBASE
;
10427 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10428 return OP_X86_OR_REG_MEMBASE
;
10430 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10431 return OP_X86_XOR_REG_MEMBASE
;
/* 64-bit ALU ops fuse with pointer-sized / 8-byte loads */
10433 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10434 return OP_AMD64_ADD_REG_MEMBASE
;
10436 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10437 return OP_AMD64_SUB_REG_MEMBASE
;
10439 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10440 return OP_AMD64_AND_REG_MEMBASE
;
10442 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10443 return OP_AMD64_OR_REG_MEMBASE
;
10445 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10446 return OP_AMD64_XOR_REG_MEMBASE
;
/*
 * mono_op_to_op_imm_noemul:
 * Like mono_op_to_op_imm (which it delegates to at the end), but —
 * per the preprocessor guards below — declines to convert opcodes
 * whose immediate forms would be software-emulated on this target:
 * long shifts on 32-bit registers without native long-shift support,
 * and mul/div when MONO_ARCH_EMULATE_MUL_DIV / _EMULATE_DIV is set.
 * NOTE(review): the switch/case body between the two #if guards and
 * the delegation was dropped by the extraction; the exact opcodes
 * excluded must be confirmed against the original file.
 */
10454 mono_op_to_op_imm_noemul (int opcode
)
10457 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10463 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* everything else: plain immediate conversion */
10471 return mono_op_to_op_imm (opcode
);
10475 #ifndef DISABLE_JIT
10478 * mono_handle_global_vregs:
10480 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10484 mono_handle_global_vregs (MonoCompile
*cfg
)
10486 gint32
*vreg_to_bb
;
10487 MonoBasicBlock
*bb
;
10490 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
10492 #ifdef MONO_ARCH_SIMD_INTRINSICS
10493 if (cfg
->uses_simd_intrinsics
)
10494 mono_simd_simplify_indirection (cfg
);
10497 /* Find local vregs used in more than one bb */
10498 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10499 MonoInst
*ins
= bb
->code
;
10500 int block_num
= bb
->block_num
;
10502 if (cfg
->verbose_level
> 2)
10503 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
10506 for (; ins
; ins
= ins
->next
) {
10507 const char *spec
= INS_INFO (ins
->opcode
);
10508 int regtype
= 0, regindex
;
10511 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10512 mono_print_ins (ins
);
10514 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
10516 for (regindex
= 0; regindex
< 4; regindex
++) {
10519 if (regindex
== 0) {
10520 regtype
= spec
[MONO_INST_DEST
];
10521 if (regtype
== ' ')
10524 } else if (regindex
== 1) {
10525 regtype
= spec
[MONO_INST_SRC1
];
10526 if (regtype
== ' ')
10529 } else if (regindex
== 2) {
10530 regtype
= spec
[MONO_INST_SRC2
];
10531 if (regtype
== ' ')
10534 } else if (regindex
== 3) {
10535 regtype
= spec
[MONO_INST_SRC3
];
10536 if (regtype
== ' ')
10541 #if SIZEOF_REGISTER == 4
10542 /* In the LLVM case, the long opcodes are not decomposed */
10543 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
10545 * Since some instructions reference the original long vreg,
10546 * and some reference the two component vregs, it is quite hard
10547 * to determine when it needs to be global. So be conservative.
10549 if (!get_vreg_to_inst (cfg
, vreg
)) {
10550 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10552 if (cfg
->verbose_level
> 2)
10553 printf ("LONG VREG R%d made global.\n", vreg
);
10557 * Make the component vregs volatile since the optimizations can
10558 * get confused otherwise.
10560 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
10561 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
10565 g_assert (vreg
!= -1);
10567 prev_bb
= vreg_to_bb
[vreg
];
10568 if (prev_bb
== 0) {
10569 /* 0 is a valid block num */
10570 vreg_to_bb
[vreg
] = block_num
+ 1;
10571 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
10572 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
10575 if (!get_vreg_to_inst (cfg
, vreg
)) {
10576 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10577 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
10581 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
10584 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10587 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
10590 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
10593 g_assert_not_reached ();
10597 /* Flag as having been used in more than one bb */
10598 vreg_to_bb
[vreg
] = -1;
10604 /* If a variable is used in only one bblock, convert it into a local vreg */
10605 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10606 MonoInst
*var
= cfg
->varinfo
[i
];
10607 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
10609 switch (var
->type
) {
10615 #if SIZEOF_REGISTER == 8
10618 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10619 /* Enabling this screws up the fp stack on x86 */
10622 /* Arguments are implicitly global */
10623 /* Putting R4 vars into registers doesn't work currently */
10624 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
10626 * Make sure that the variable's liveness interval doesn't contain a call, since
10627 * that would cause the lvreg to be spilled, making the whole optimization
10630 /* This is too slow for JIT compilation */
10632 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
10634 int def_index
, call_index
, ins_index
;
10635 gboolean spilled
= FALSE
;
10640 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
10641 const char *spec
= INS_INFO (ins
->opcode
);
10643 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
10644 def_index
= ins_index
;
10646 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
10647 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
10648 if (call_index
> def_index
) {
10654 if (MONO_IS_CALL (ins
))
10655 call_index
= ins_index
;
10665 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10666 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
10667 var
->flags
|= MONO_INST_IS_DEAD
;
10668 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
10675 * Compress the varinfo and vars tables so the liveness computation is faster and
10676 * takes up less space.
10679 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10680 MonoInst
*var
= cfg
->varinfo
[i
];
10681 if (pos
< i
&& cfg
->locals_start
== i
)
10682 cfg
->locals_start
= pos
;
10683 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
10685 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
10686 cfg
->varinfo
[pos
]->inst_c0
= pos
;
10687 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
10688 cfg
->vars
[pos
].idx
= pos
;
10689 #if SIZEOF_REGISTER == 4
10690 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
10691 /* Modify the two component vars too */
10694 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
10695 var1
->inst_c0
= pos
;
10696 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
10697 var1
->inst_c0
= pos
;
10704 cfg
->num_varinfo
= pos
;
10705 if (cfg
->locals_start
> cfg
->num_varinfo
)
10706 cfg
->locals_start
= cfg
->num_varinfo
;
10710 * mono_spill_global_vars:
10712 * Generate spill code for variables which are not allocated to registers,
10713 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10714 * code is generated which could be optimized by the local optimization passes.
10717 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
10719 MonoBasicBlock
*bb
;
10721 int orig_next_vreg
;
10722 guint32
*vreg_to_lvreg
;
10724 guint32 i
, lvregs_len
;
10725 gboolean dest_has_lvreg
= FALSE
;
10726 guint32 stacktypes
[128];
10727 MonoInst
**live_range_start
, **live_range_end
;
10728 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
10730 *need_local_opts
= FALSE
;
10732 memset (spec2
, 0, sizeof (spec2
));
10734 /* FIXME: Move this function to mini.c */
10735 stacktypes
['i'] = STACK_PTR
;
10736 stacktypes
['l'] = STACK_I8
;
10737 stacktypes
['f'] = STACK_R8
;
10738 #ifdef MONO_ARCH_SIMD_INTRINSICS
10739 stacktypes
['x'] = STACK_VTYPE
;
10742 #if SIZEOF_REGISTER == 4
10743 /* Create MonoInsts for longs */
10744 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10745 MonoInst
*ins
= cfg
->varinfo
[i
];
10747 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
10748 switch (ins
->type
) {
10753 if (ins
->type
== STACK_R8
&& !COMPILE_SOFT_FLOAT (cfg
))
10756 g_assert (ins
->opcode
== OP_REGOFFSET
);
10758 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
10760 tree
->opcode
= OP_REGOFFSET
;
10761 tree
->inst_basereg
= ins
->inst_basereg
;
10762 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
10764 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
10766 tree
->opcode
= OP_REGOFFSET
;
10767 tree
->inst_basereg
= ins
->inst_basereg
;
10768 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
10778 /* FIXME: widening and truncation */
10781 * As an optimization, when a variable allocated to the stack is first loaded into
10782 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10783 * the variable again.
10785 orig_next_vreg
= cfg
->next_vreg
;
10786 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
10787 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
10791 * These arrays contain the first and last instructions accessing a given
10793 * Since we emit bblocks in the same order we process them here, and we
10794 * don't split live ranges, these will precisely describe the live range of
10795 * the variable, i.e. the instruction range where a valid value can be found
10796 * in the variables location.
10797 * The live range is computed using the liveness info computed by the liveness pass.
10798 * We can't use vmv->range, since that is an abstract live range, and we need
10799 * one which is instruction precise.
10800 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10802 /* FIXME: Only do this if debugging info is requested */
10803 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10804 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10805 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10806 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10808 /* Add spill loads/stores */
10809 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10812 if (cfg
->verbose_level
> 2)
10813 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
10815 /* Clear vreg_to_lvreg array */
10816 for (i
= 0; i
< lvregs_len
; i
++)
10817 vreg_to_lvreg
[lvregs
[i
]] = 0;
10821 MONO_BB_FOR_EACH_INS (bb
, ins
) {
10822 const char *spec
= INS_INFO (ins
->opcode
);
10823 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
10824 gboolean store
, no_lvreg
;
10825 int sregs
[MONO_MAX_SRC_REGS
];
10827 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10828 mono_print_ins (ins
);
10830 if (ins
->opcode
== OP_NOP
)
10834 * We handle LDADDR here as well, since it can only be decomposed
10835 * when variable addresses are known.
10837 if (ins
->opcode
== OP_LDADDR
) {
10838 MonoInst
*var
= ins
->inst_p0
;
10840 if (var
->opcode
== OP_VTARG_ADDR
) {
10841 /* Happens on SPARC/S390 where vtypes are passed by reference */
10842 MonoInst
*vtaddr
= var
->inst_left
;
10843 if (vtaddr
->opcode
== OP_REGVAR
) {
10844 ins
->opcode
= OP_MOVE
;
10845 ins
->sreg1
= vtaddr
->dreg
;
10847 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
10848 ins
->opcode
= OP_LOAD_MEMBASE
;
10849 ins
->inst_basereg
= vtaddr
->inst_basereg
;
10850 ins
->inst_offset
= vtaddr
->inst_offset
;
10854 g_assert (var
->opcode
== OP_REGOFFSET
);
10856 ins
->opcode
= OP_ADD_IMM
;
10857 ins
->sreg1
= var
->inst_basereg
;
10858 ins
->inst_imm
= var
->inst_offset
;
10861 *need_local_opts
= TRUE
;
10862 spec
= INS_INFO (ins
->opcode
);
10865 if (ins
->opcode
< MONO_CEE_LAST
) {
10866 mono_print_ins (ins
);
10867 g_assert_not_reached ();
10871 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10875 if (MONO_IS_STORE_MEMBASE (ins
)) {
10876 tmp_reg
= ins
->dreg
;
10877 ins
->dreg
= ins
->sreg2
;
10878 ins
->sreg2
= tmp_reg
;
10881 spec2
[MONO_INST_DEST
] = ' ';
10882 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10883 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10884 spec2
[MONO_INST_SRC3
] = ' ';
10886 } else if (MONO_IS_STORE_MEMINDEX (ins
))
10887 g_assert_not_reached ();
10892 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
10893 printf ("\t %.3s %d", spec
, ins
->dreg
);
10894 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
10895 for (srcindex
= 0; srcindex
< 3; ++srcindex
)
10896 printf (" %d", sregs
[srcindex
]);
10903 regtype
= spec
[MONO_INST_DEST
];
10904 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
10907 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
10908 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
10909 MonoInst
*store_ins
;
10911 MonoInst
*def_ins
= ins
;
10912 int dreg
= ins
->dreg
; /* The original vreg */
10914 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
10916 if (var
->opcode
== OP_REGVAR
) {
10917 ins
->dreg
= var
->dreg
;
10918 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
10920 * Instead of emitting a load+store, use a _membase opcode.
10922 g_assert (var
->opcode
== OP_REGOFFSET
);
10923 if (ins
->opcode
== OP_MOVE
) {
10927 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
10928 ins
->inst_basereg
= var
->inst_basereg
;
10929 ins
->inst_offset
= var
->inst_offset
;
10932 spec
= INS_INFO (ins
->opcode
);
10936 g_assert (var
->opcode
== OP_REGOFFSET
);
10938 prev_dreg
= ins
->dreg
;
10940 /* Invalidate any previous lvreg for this vreg */
10941 vreg_to_lvreg
[ins
->dreg
] = 0;
10945 if (COMPILE_SOFT_FLOAT (cfg
) && store_opcode
== OP_STORER8_MEMBASE_REG
) {
10947 store_opcode
= OP_STOREI8_MEMBASE_REG
;
10950 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10952 if (regtype
== 'l') {
10953 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
10954 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10955 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
10956 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10957 def_ins
= store_ins
;
10960 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
10962 /* Try to fuse the store into the instruction itself */
10963 /* FIXME: Add more instructions */
10964 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
10965 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
10966 ins
->inst_imm
= ins
->inst_c0
;
10967 ins
->inst_destbasereg
= var
->inst_basereg
;
10968 ins
->inst_offset
= var
->inst_offset
;
10969 spec
= INS_INFO (ins
->opcode
);
10970 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
10971 ins
->opcode
= store_opcode
;
10972 ins
->inst_destbasereg
= var
->inst_basereg
;
10973 ins
->inst_offset
= var
->inst_offset
;
10977 tmp_reg
= ins
->dreg
;
10978 ins
->dreg
= ins
->sreg2
;
10979 ins
->sreg2
= tmp_reg
;
10982 spec2
[MONO_INST_DEST
] = ' ';
10983 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10984 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10985 spec2
[MONO_INST_SRC3
] = ' ';
10987 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
10988 // FIXME: The backends expect the base reg to be in inst_basereg
10989 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
10991 ins
->inst_basereg
= var
->inst_basereg
;
10992 ins
->inst_offset
= var
->inst_offset
;
10993 spec
= INS_INFO (ins
->opcode
);
10995 /* printf ("INS: "); mono_print_ins (ins); */
10996 /* Create a store instruction */
10997 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
10999 /* Insert it after the instruction */
11000 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
11002 def_ins
= store_ins
;
11005 * We can't assign ins->dreg to var->dreg here, since the
11006 * sregs could use it. So set a flag, and do it after
11009 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
11010 dest_has_lvreg
= TRUE
;
11015 if (def_ins
&& !live_range_start
[dreg
]) {
11016 live_range_start
[dreg
] = def_ins
;
11017 live_range_start_bb
[dreg
] = bb
;
11024 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
11025 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
11026 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
11027 sreg
= sregs
[srcindex
];
11029 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
11030 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
11031 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
11032 MonoInst
*use_ins
= ins
;
11033 MonoInst
*load_ins
;
11034 guint32 load_opcode
;
11036 if (var
->opcode
== OP_REGVAR
) {
11037 sregs
[srcindex
] = var
->dreg
;
11038 //mono_inst_set_src_registers (ins, sregs);
11039 live_range_end
[sreg
] = use_ins
;
11040 live_range_end_bb
[sreg
] = bb
;
11044 g_assert (var
->opcode
== OP_REGOFFSET
);
11046 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
11048 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
11050 if (vreg_to_lvreg
[sreg
]) {
11051 g_assert (vreg_to_lvreg
[sreg
] != -1);
11053 /* The variable is already loaded to an lvreg */
11054 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11055 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
11056 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
11057 //mono_inst_set_src_registers (ins, sregs);
11061 /* Try to fuse the load into the instruction */
11062 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
11063 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
11064 sregs
[0] = var
->inst_basereg
;
11065 //mono_inst_set_src_registers (ins, sregs);
11066 ins
->inst_offset
= var
->inst_offset
;
11067 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
11068 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
11069 sregs
[1] = var
->inst_basereg
;
11070 //mono_inst_set_src_registers (ins, sregs);
11071 ins
->inst_offset
= var
->inst_offset
;
11073 if (MONO_IS_REAL_MOVE (ins
)) {
11074 ins
->opcode
= OP_NOP
;
11077 //printf ("%d ", srcindex); mono_print_ins (ins);
11079 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
11081 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
11082 if (var
->dreg
== prev_dreg
) {
11084 * sreg refers to the value loaded by the load
11085 * emitted below, but we need to use ins->dreg
11086 * since it refers to the store emitted earlier.
11090 g_assert (sreg
!= -1);
11091 vreg_to_lvreg
[var
->dreg
] = sreg
;
11092 g_assert (lvregs_len
< 1024);
11093 lvregs
[lvregs_len
++] = var
->dreg
;
11097 sregs
[srcindex
] = sreg
;
11098 //mono_inst_set_src_registers (ins, sregs);
11100 if (regtype
== 'l') {
11101 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
11102 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11103 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
11104 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11105 use_ins
= load_ins
;
11108 #if SIZEOF_REGISTER == 4
11109 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
11111 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
11112 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11113 use_ins
= load_ins
;
11117 if (var
->dreg
< orig_next_vreg
) {
11118 live_range_end
[var
->dreg
] = use_ins
;
11119 live_range_end_bb
[var
->dreg
] = bb
;
11123 mono_inst_set_src_registers (ins
, sregs
);
11125 if (dest_has_lvreg
) {
11126 g_assert (ins
->dreg
!= -1);
11127 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
11128 g_assert (lvregs_len
< 1024);
11129 lvregs
[lvregs_len
++] = prev_dreg
;
11130 dest_has_lvreg
= FALSE
;
11134 tmp_reg
= ins
->dreg
;
11135 ins
->dreg
= ins
->sreg2
;
11136 ins
->sreg2
= tmp_reg
;
11139 if (MONO_IS_CALL (ins
)) {
11140 /* Clear vreg_to_lvreg array */
11141 for (i
= 0; i
< lvregs_len
; i
++)
11142 vreg_to_lvreg
[lvregs
[i
]] = 0;
11144 } else if (ins
->opcode
== OP_NOP
) {
11146 MONO_INST_NULLIFY_SREGS (ins
);
11149 if (cfg
->verbose_level
> 2)
11150 mono_print_ins_index (1, ins
);
11153 /* Extend the live range based on the liveness info */
11154 if (cfg
->compute_precise_live_ranges
&& bb
->live_out_set
&& bb
->code
) {
11155 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11156 MonoMethodVar
*vi
= MONO_VARINFO (cfg
, i
);
11158 if (vreg_is_volatile (cfg
, vi
->vreg
))
11159 /* The liveness info is incomplete */
11162 if (mono_bitset_test_fast (bb
->live_in_set
, i
) && !live_range_start
[vi
->vreg
]) {
11163 /* Live from at least the first ins of this bb */
11164 live_range_start
[vi
->vreg
] = bb
->code
;
11165 live_range_start_bb
[vi
->vreg
] = bb
;
11168 if (mono_bitset_test_fast (bb
->live_out_set
, i
)) {
11169 /* Live at least until the last ins of this bb */
11170 live_range_end
[vi
->vreg
] = bb
->last_ins
;
11171 live_range_end_bb
[vi
->vreg
] = bb
;
11177 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11179 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11180 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11182 if (cfg
->compute_precise_live_ranges
&& cfg
->comp_done
& MONO_COMP_LIVENESS
) {
11183 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11184 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
11187 if (live_range_start
[vreg
]) {
11188 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
11190 ins
->inst_c1
= vreg
;
11191 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
11193 if (live_range_end
[vreg
]) {
11194 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
11196 ins
->inst_c1
= vreg
;
11197 if (live_range_end
[vreg
] == live_range_end_bb
[vreg
]->last_ins
)
11198 mono_add_ins_to_end (live_range_end_bb
[vreg
], ins
);
11200 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
11206 g_free (live_range_start
);
11207 g_free (live_range_end
);
11208 g_free (live_range_start_bb
);
11209 g_free (live_range_end_bb
);
11214 * - use 'iadd' instead of 'int_add'
11215 * - handling ovf opcodes: decompose in method_to_ir.
11216 * - unify iregs/fregs
11217 * -> partly done, the missing parts are:
11218 * - a more complete unification would involve unifying the hregs as well, so
11219 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11220 * would no longer map to the machine hregs, so the code generators would need to
11221 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11222 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11223 * fp/non-fp branches speeds it up by about 15%.
11224 * - use sext/zext opcodes instead of shifts
11226 * - get rid of TEMPLOADs if possible and use vregs instead
11227 * - clean up usage of OP_P/OP_ opcodes
11228 * - cleanup usage of DUMMY_USE
11229 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11231 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11232 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11233 * - make sure handle_stack_args () is called before the branch is emitted
11234 * - when the new IR is done, get rid of all unused stuff
11235 * - COMPARE/BEQ as separate instructions or unify them ?
11236 * - keeping them separate allows specialized compare instructions like
11237 * compare_imm, compare_membase
11238 * - most back ends unify fp compare+branch, fp compare+ceq
11239 * - integrate mono_save_args into inline_method
11240 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11241 * - handle long shift opts on 32 bit platforms somehow: they require
11242 * 3 sregs (2 for arg1 and 1 for arg2)
11243 * - make byref a 'normal' type.
11244 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11245 * variable if needed.
11246 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11247 * like inline_method.
11248 * - remove inlining restrictions
11249 * - fix LNEG and enable cfold of INEG
11250 * - generalize x86 optimizations like ldelema as a peephole optimization
11251 * - add store_mem_imm for amd64
11252 * - optimize the loading of the interruption flag in the managed->native wrappers
11253 * - avoid special handling of OP_NOP in passes
11254 * - move code inserting instructions into one function/macro.
11255 * - try a coalescing phase after liveness analysis
11256 * - add float -> vreg conversion + local optimizations on !x86
11257 * - figure out how to handle decomposed branches during optimizations, ie.
11258 * compare+branch, op_jump_table+op_br etc.
11259 * - promote RuntimeXHandles to vregs
11260 * - vtype cleanups:
11261 * - add a NEW_VARLOADA_VREG macro
11262 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11263 * accessing vtype fields.
11264 * - get rid of I8CONST on 64 bit platforms
11265 * - dealing with the increase in code size due to branches created during opcode
11267 * - use extended basic blocks
11268 * - all parts of the JIT
11269 * - handle_global_vregs () && local regalloc
11270 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11271 * - sources of increase in code size:
11274 * - isinst and castclass
11275 * - lvregs not allocated to global registers even if used multiple times
11276 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11278 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11279 * - add all micro optimizations from the old JIT
11280 * - put tree optimizations into the deadce pass
11281 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11282 * specific function.
11283 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11284 * fcompare + branchCC.
11285 * - create a helper function for allocating a stack slot, taking into account
11286 * MONO_CFG_HAS_SPILLUP.
11288 * - merge the ia64 switch changes.
11289 * - optimize mono_regstate2_alloc_int/float.
11290 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11291 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11292 * parts of the tree could be separated by other instructions, killing the tree
11293 * arguments, or stores killing loads etc. Also, should we fold loads into other
11294 * instructions if the result of the load is used multiple times ?
11295 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11296 * - LAST MERGE: 108395.
11297 * - when returning vtypes in registers, generate IR and append it to the end of the
11298 * last bb instead of doing it in the epilog.
11299 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11307 - When to decompose opcodes:
11308 - earlier: this makes some optimizations hard to implement, since the low level IR
11309 no longer contains the necessary information. But it is easier to do.
11310 - later: harder to implement, enables more optimizations.
11311 - Branches inside bblocks:
11312 - created when decomposing complex opcodes.
11313 - branches to another bblock: harmless, but not tracked by the branch
11314 optimizations, so need to branch to a label at the start of the bblock.
11315 - branches to inside the same bblock: very problematic, trips up the local
11316 reg allocator. Can be fixed by splitting the current bblock, but that is a
11317 complex operation, since some local vregs can become global vregs etc.
11318 - Local/global vregs:
11319 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11320 local register allocator.
11321 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11322 structure, created by mono_create_var (). Assigned to hregs or the stack by
11323 the global register allocator.
11324 - When to do optimizations like alu->alu_imm:
11325 - earlier -> saves work later on since the IR will be smaller/simpler
11326 - later -> can work on more instructions
11327 - Handling of valuetypes:
11328 - When a vtype is pushed on the stack, a new temporary is created, an
11329 instruction computing its address (LDADDR) is emitted and pushed on
11330 the stack. Need to optimize cases when the vtype is used immediately as in
11331 argument passing, stloc etc.
11332 - Instead of the to_end stuff in the old JIT, simply call the function handling
11333 the values on the stack before emitting the last instruction of the bb.
11336 #endif /* DISABLE_JIT */