2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
60 #include "debugger-agent.h"
62 #define BRANCH_COST 100
63 #define INLINE_LENGTH_LIMIT 20
64 #define INLINE_FAILURE do {\
65 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
68 #define CHECK_CFG_EXCEPTION do {\
69 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
72 #define METHOD_ACCESS_FAILURE do { \
73 char *method_fname = mono_method_full_name (method, TRUE); \
74 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
75 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
76 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
77 g_free (method_fname); \
78 g_free (cil_method_fname); \
79 goto exception_exit; \
81 #define FIELD_ACCESS_FAILURE do { \
82 char *method_fname = mono_method_full_name (method, TRUE); \
83 char *field_fname = mono_field_full_name (field); \
84 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
85 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
86 g_free (method_fname); \
87 g_free (field_fname); \
88 goto exception_exit; \
90 #define GENERIC_SHARING_FAILURE(opcode) do { \
91 if (cfg->generic_sharing_context) { \
92 if (cfg->verbose_level > 2) \
93 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
94 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
95 goto exception_exit; \
99 /* Determine whenever 'ins' represents a load of the 'this' argument */
100 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
102 static int ldind_to_load_membase (int opcode
);
103 static int stind_to_store_membase (int opcode
);
105 int mono_op_to_op_imm (int opcode
);
106 int mono_op_to_op_imm_noemul (int opcode
);
108 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
109 void mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
);
110 void mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
);
112 /* helper methods signature */
113 extern MonoMethodSignature
*helper_sig_class_init_trampoline
;
114 extern MonoMethodSignature
*helper_sig_domain_get
;
115 extern MonoMethodSignature
*helper_sig_generic_class_init_trampoline
;
116 extern MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
;
117 extern MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
;
120 * Instruction metadata
128 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
129 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
135 #if SIZEOF_REGISTER == 8
140 /* keep in sync with the enum in mini.h */
143 #include "mini-ops.h"
148 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
149 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
151 * This should contain the index of the last sreg + 1. This is not the same
152 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
154 const gint8 ins_sreg_counts
[] = {
155 #include "mini-ops.h"
160 extern GHashTable
*jit_icall_name_hash
;
162 #define MONO_INIT_VARINFO(vi,id) do { \
163 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *
 *   Copy the first three entries of REGS into the source register
 * fields of INS. All three sregs are overwritten unconditionally, so
 * REGS must have at least three valid entries (entries for unused
 * sregs are simply ignored by later passes).
 */
static void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
	ins->sreg1 = regs [0];
	ins->sreg2 = regs [1];
	ins->sreg3 = regs [2];
}
/*
 * mono_alloc_ireg:
 *
 *   Allocate a fresh integer virtual register in CFG.
 * Exported wrapper around the inline alloc_ireg () helper.
 */
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	return alloc_ireg (cfg);
}
/*
 * mono_alloc_freg:
 *
 *   Allocate a fresh floating point virtual register in CFG.
 * Exported wrapper around the inline alloc_freg () helper.
 */
guint32
mono_alloc_freg (MonoCompile *cfg)
{
	return alloc_freg (cfg);
}
/*
 * mono_alloc_preg:
 *
 *   Allocate a fresh pointer-sized virtual register in CFG.
 * Exported wrapper around the inline alloc_preg () helper.
 */
guint32
mono_alloc_preg (MonoCompile *cfg)
{
	return alloc_preg (cfg);
}
/*
 * mono_alloc_dreg:
 *
 *   Allocate a fresh destination virtual register in CFG whose register
 * class matches STACK_TYPE (e.g. an freg for STACK_R8). Exported
 * wrapper around the inline alloc_dreg () helper.
 */
guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	return alloc_dreg (cfg, stack_type);
}
201 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
207 switch (type
->type
) {
210 case MONO_TYPE_BOOLEAN
:
222 case MONO_TYPE_FNPTR
:
224 case MONO_TYPE_CLASS
:
225 case MONO_TYPE_STRING
:
226 case MONO_TYPE_OBJECT
:
227 case MONO_TYPE_SZARRAY
:
228 case MONO_TYPE_ARRAY
:
232 #if SIZEOF_REGISTER == 8
241 case MONO_TYPE_VALUETYPE
:
242 if (type
->data
.klass
->enumtype
) {
243 type
= mono_class_enum_basetype (type
->data
.klass
);
246 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
249 case MONO_TYPE_TYPEDBYREF
:
251 case MONO_TYPE_GENERICINST
:
252 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
256 g_assert (cfg
->generic_sharing_context
);
259 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
/*
 * mono_print_bb:
 *
 *   Debugging helper: print the in/out edges of BB (block numbers and
 * depth-first numbers) followed by the instructions of the block,
 * prefixed with the MSG label.
 */
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
	int i;
	MonoInst *tree;

	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	printf (", OUT: ");
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	printf (" ]\n");
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
}
282 * Can't put this at the beginning, since other files reference stuff from this
287 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
289 #define GET_BBLOCK(cfg,tblock,ip) do { \
290 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
293 NEW_BBLOCK (cfg, (tblock)); \
294 (tblock)->cil_code = (ip); \
295 ADD_BBLOCK (cfg, (tblock)); \
299 #if defined(TARGET_X86) || defined(TARGET_AMD64)
300 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
301 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
302 (dest)->dreg = alloc_preg ((cfg)); \
303 (dest)->sreg1 = (sr1); \
304 (dest)->sreg2 = (sr2); \
305 (dest)->inst_imm = (imm); \
306 (dest)->backend.shift_amount = (shift); \
307 MONO_ADD_INS ((cfg)->cbb, (dest)); \
311 #if SIZEOF_REGISTER == 8
312 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
313 /* FIXME: Need to add many more cases */ \
314 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 int dr = alloc_preg (cfg); \
317 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
318 (ins)->sreg2 = widen->dreg; \
322 #define ADD_WIDEN_OP(ins, arg1, arg2)
325 #define ADD_BINOP(op) do { \
326 MONO_INST_NEW (cfg, ins, (op)); \
328 ins->sreg1 = sp [0]->dreg; \
329 ins->sreg2 = sp [1]->dreg; \
330 type_from_op (ins, sp [0], sp [1]); \
332 /* Have to insert a widening op */ \
333 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
334 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
335 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
347 *sp++ = mono_decompose_opcode (cfg, ins); \
350 #define ADD_BINCOND(next_block) do { \
353 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
354 cmp->sreg1 = sp [0]->dreg; \
355 cmp->sreg2 = sp [1]->dreg; \
356 type_from_op (cmp, sp [0], sp [1]); \
358 type_from_op (ins, sp [0], sp [1]); \
359 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
360 GET_BBLOCK (cfg, tblock, target); \
361 link_bblock (cfg, bblock, tblock); \
362 ins->inst_true_bb = tblock; \
363 if ((next_block)) { \
364 link_bblock (cfg, bblock, (next_block)); \
365 ins->inst_false_bb = (next_block); \
366 start_new_bblock = 1; \
368 GET_BBLOCK (cfg, tblock, ip); \
369 link_bblock (cfg, bblock, tblock); \
370 ins->inst_false_bb = tblock; \
371 start_new_bblock = 2; \
373 if (sp != stack_start) { \
374 handle_stack_args (cfg, stack_start, sp - stack_start); \
375 CHECK_UNVERIFIABLE (cfg); \
377 MONO_ADD_INS (bblock, cmp); \
378 MONO_ADD_INS (bblock, ins); \
382 * link_bblock: Links two basic blocks
384 * links two basic blocks in the control flow graph, the 'from'
385 * argument is the starting block and the 'to' argument is the block
386 * the control flow ends to after 'from'.
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	MonoBasicBlock **newa;
	int i, found;

#if 0
	/* Edge tracing for debugging, normally compiled out. */
	if (from->cil_code) {
		if (to->cil_code)
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		else
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
	} else {
		if (to->cil_code)
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		else
			printf ("edge from entry to exit\n");
	}
#endif

	/* Add TO to FROM's out edges, unless the edge already exists. */
	found = FALSE;
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		/*
		 * The edge array is grown by allocating a new, larger array from
		 * the cfg mempool; the old array is abandoned (mempool memory is
		 * released in bulk when the compile finishes).
		 */
		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
		for (i = 0; i < from->out_count; ++i) {
			newa [i] = from->out_bb [i];
		}
		newa [i] = to;
		from->out_count++;
		from->out_bb = newa;
	}

	/* Symmetrically, add FROM to TO's in edges. */
	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
		for (i = 0; i < to->in_count; ++i) {
			newa [i] = to->in_bb [i];
		}
		newa [i] = from;
		to->in_count++;
		to->in_bb = newa;
	}
}
/*
 * mono_link_bblock:
 *
 *   Public entry point: add the control flow edge FROM -> TO to the
 * CFG (duplicate edges are ignored by the underlying helper).
 */
void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	link_bblock (cfg, from, to);
}
450 * mono_find_block_region:
452 * We mark each basic block with a region ID. We use that to avoid BB
453 * optimizations when blocks are in different regions.
456 * A region token that encodes where this region is, and information
457 * about the clause owner for this block.
459 * The region encodes the try/catch/filter clause that owns this block
460 * as well as the type. -1 is a special value that represents a block
461 * that is in none of try/catch/filter.
static int
mono_find_block_region (MonoCompile *cfg, int offset)
{
	MonoMethod *method = cfg->method;
	MonoMethodHeader *header = mono_method_get_header (method);
	MonoExceptionClause *clause;
	int i;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		/* A filter expression runs from filter_offset up to handler_offset. */
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;

		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			/* Encode the clause index (biased by 1) in the high bits and
			 * the region kind plus clause flags in the low bits. */
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
			else
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
		}

		/* Inside the protected (try) range of the clause. */
		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
	}

	/* -1: the offset is in none of try/catch/filter. */
	return -1;
}
/*
 * mono_find_final_block:
 *
 *   Collect the handler basic blocks of every clause of kind TYPE
 * whose protected range contains IP but not TARGET — i.e. the
 * handlers that must run when control transfers from IP to TARGET
 * (e.g. finally blocks exited by a leave). Returns a GList of
 * MonoBasicBlock*, or NULL if none apply.
 */
static GList*
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
{
	MonoMethod *method = cfg->method;
	MonoMethodHeader *header = mono_method_get_header (method);
	MonoExceptionClause *clause;
	MonoBasicBlock *handler;
	int i;
	GList *res = NULL;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type) {
				handler = cfg->cil_offset_to_bb [clause->handler_offset];
				g_assert (handler);
				res = g_list_append (res, handler);
			}
		}
	}
	return res;
}
/*
 * mono_create_spvar_for_region:
 *
 *   Return the stack-pointer spill variable associated with REGION,
 * creating (and caching in cfg->spvars) an int-typed local on first
 * use. One variable is shared by all blocks of the same region.
 */
static MonoInst *
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
	MonoInst *var;

	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	if (var)
		return var;

	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);

	return var;
}
/*
 * mono_find_exvar_for_offset:
 *
 *   Look up the exception variable previously created for the handler
 * at OFFSET. Returns NULL if none was created yet.
 */
static MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the object-typed variable holding the exception object for
 * the handler at OFFSET, creating and caching it in cfg->exvars on
 * first use.
 */
static MonoInst *
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
	MonoInst *var;

	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	if (var)
		return var;

	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);

	return var;
}
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 *
 * Sets inst->type (the eval-stack type) and inst->klass from TYPE.
 * Enum and generic-instance types are unwrapped and re-dispatched via
 * the handle_enum label.
 */
void
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
	MonoClass *klass;

	inst->klass = klass = mono_class_from_mono_type (type);
	if (type->byref) {
		/* Byref arguments/locals are managed pointers on the stack. */
		inst->type = STACK_MP;
		return;
	}

handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		inst->type = STACK_INV;
		return;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		inst->type = STACK_I4;
		return;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
		return;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		return;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		inst->type = STACK_I8;
		return;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		inst->type = STACK_R8;
		return;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums behave like their underlying integral type. */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else {
			inst->klass = klass;
			inst->type = STACK_VTYPE;
			return;
		}
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
		return;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic type definition. */
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		inst->type = STACK_OBJ;
		return;
	default:
		g_error ("unknown type 0x%02x in eval stack type", type->type);
	}
}
639 * The following tables are used to quickly validate the IL code in type_from_op ().
642 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
643 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
644 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
645 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
646 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
647 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
648 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
649 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
650 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
655 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
658 /* reduce the size of this table */
660 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
661 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
662 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
663 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
664 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
665 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
666 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
667 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
668 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
672 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
673 /* Inv i L p F & O vt */
675 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
676 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
677 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
678 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
679 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
680 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
681 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
684 /* reduce the size of this table */
686 shift_table
[STACK_MAX
] [STACK_MAX
] = {
687 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
688 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
689 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
690 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
691 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
692 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
693 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
694 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
698 * Tables to map from the non-specific opcode to the matching
699 * type-specific opcode.
701 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
703 binops_op_map
[STACK_MAX
] = {
704 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
707 /* handles from CEE_NEG to CEE_CONV_U8 */
709 unops_op_map
[STACK_MAX
] = {
710 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
713 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
715 ovfops_op_map
[STACK_MAX
] = {
716 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
719 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
721 ovf2ops_op_map
[STACK_MAX
] = {
722 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
725 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
727 ovf3ops_op_map
[STACK_MAX
] = {
728 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
731 /* handles from CEE_BEQ to CEE_BLT_UN */
733 beqops_op_map
[STACK_MAX
] = {
734 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
737 /* handles from CEE_CEQ to CEE_CLT_UN */
739 ceqops_op_map
[STACK_MAX
] = {
740 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
744 * Sets ins->type (the type on the eval stack) according to the
745 * type of the opcode and the arguments to it.
746 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
748 * FIXME: this function sets ins->type unconditionally in some cases, but
749 * it should set it to invalid for some types (a conv.x on an object)
752 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
754 switch (ins
->opcode
) {
761 /* FIXME: check unverifiable args for STACK_MP */
762 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
763 ins
->opcode
+= binops_op_map
[ins
->type
];
770 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
771 ins
->opcode
+= binops_op_map
[ins
->type
];
776 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
777 ins
->opcode
+= binops_op_map
[ins
->type
];
782 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
783 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
784 ins
->opcode
= OP_LCOMPARE
;
785 else if (src1
->type
== STACK_R8
)
786 ins
->opcode
= OP_FCOMPARE
;
788 ins
->opcode
= OP_ICOMPARE
;
790 case OP_ICOMPARE_IMM
:
791 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
792 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
793 ins
->opcode
= OP_LCOMPARE_IMM
;
805 ins
->opcode
+= beqops_op_map
[src1
->type
];
808 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
809 ins
->opcode
+= ceqops_op_map
[src1
->type
];
815 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
816 ins
->opcode
+= ceqops_op_map
[src1
->type
];
820 ins
->type
= neg_table
[src1
->type
];
821 ins
->opcode
+= unops_op_map
[ins
->type
];
824 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
825 ins
->type
= src1
->type
;
827 ins
->type
= STACK_INV
;
828 ins
->opcode
+= unops_op_map
[ins
->type
];
834 ins
->type
= STACK_I4
;
835 ins
->opcode
+= unops_op_map
[src1
->type
];
838 ins
->type
= STACK_R8
;
839 switch (src1
->type
) {
842 ins
->opcode
= OP_ICONV_TO_R_UN
;
845 ins
->opcode
= OP_LCONV_TO_R_UN
;
849 case CEE_CONV_OVF_I1
:
850 case CEE_CONV_OVF_U1
:
851 case CEE_CONV_OVF_I2
:
852 case CEE_CONV_OVF_U2
:
853 case CEE_CONV_OVF_I4
:
854 case CEE_CONV_OVF_U4
:
855 ins
->type
= STACK_I4
;
856 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
858 case CEE_CONV_OVF_I_UN
:
859 case CEE_CONV_OVF_U_UN
:
860 ins
->type
= STACK_PTR
;
861 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
863 case CEE_CONV_OVF_I1_UN
:
864 case CEE_CONV_OVF_I2_UN
:
865 case CEE_CONV_OVF_I4_UN
:
866 case CEE_CONV_OVF_U1_UN
:
867 case CEE_CONV_OVF_U2_UN
:
868 case CEE_CONV_OVF_U4_UN
:
869 ins
->type
= STACK_I4
;
870 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
873 ins
->type
= STACK_PTR
;
874 switch (src1
->type
) {
876 ins
->opcode
= OP_ICONV_TO_U
;
880 #if SIZEOF_REGISTER == 8
881 ins
->opcode
= OP_LCONV_TO_U
;
883 ins
->opcode
= OP_MOVE
;
887 ins
->opcode
= OP_LCONV_TO_U
;
890 ins
->opcode
= OP_FCONV_TO_U
;
896 ins
->type
= STACK_I8
;
897 ins
->opcode
+= unops_op_map
[src1
->type
];
899 case CEE_CONV_OVF_I8
:
900 case CEE_CONV_OVF_U8
:
901 ins
->type
= STACK_I8
;
902 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
904 case CEE_CONV_OVF_U8_UN
:
905 case CEE_CONV_OVF_I8_UN
:
906 ins
->type
= STACK_I8
;
907 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
911 ins
->type
= STACK_R8
;
912 ins
->opcode
+= unops_op_map
[src1
->type
];
915 ins
->type
= STACK_R8
;
919 ins
->type
= STACK_I4
;
920 ins
->opcode
+= ovfops_op_map
[src1
->type
];
925 ins
->type
= STACK_PTR
;
926 ins
->opcode
+= ovfops_op_map
[src1
->type
];
934 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
935 ins
->opcode
+= ovfops_op_map
[src1
->type
];
936 if (ins
->type
== STACK_R8
)
937 ins
->type
= STACK_INV
;
939 case OP_LOAD_MEMBASE
:
940 ins
->type
= STACK_PTR
;
942 case OP_LOADI1_MEMBASE
:
943 case OP_LOADU1_MEMBASE
:
944 case OP_LOADI2_MEMBASE
:
945 case OP_LOADU2_MEMBASE
:
946 case OP_LOADI4_MEMBASE
:
947 case OP_LOADU4_MEMBASE
:
948 ins
->type
= STACK_PTR
;
950 case OP_LOADI8_MEMBASE
:
951 ins
->type
= STACK_I8
;
953 case OP_LOADR4_MEMBASE
:
954 case OP_LOADR8_MEMBASE
:
955 ins
->type
= STACK_R8
;
958 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
962 if (ins
->type
== STACK_MP
)
963 ins
->klass
= mono_defaults
.object_class
;
968 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
974 param_table
[STACK_MAX
] [STACK_MAX
] = {
979 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
983 switch (args
->type
) {
993 for (i
= 0; i
< sig
->param_count
; ++i
) {
994 switch (args
[i
].type
) {
998 if (!sig
->params
[i
]->byref
)
1002 if (sig
->params
[i
]->byref
)
1004 switch (sig
->params
[i
]->type
) {
1005 case MONO_TYPE_CLASS
:
1006 case MONO_TYPE_STRING
:
1007 case MONO_TYPE_OBJECT
:
1008 case MONO_TYPE_SZARRAY
:
1009 case MONO_TYPE_ARRAY
:
1016 if (sig
->params
[i
]->byref
)
1018 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1027 /*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
{
	/* Created lazily on first use; int_class->byval_arg is pointer sized. */
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;
}
/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling. Only meaningful on architectures that define
 * MONO_ARCH_NEED_GOT_VAR; elsewhere (and for JIT compiles) it is NULL.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
		return NULL;
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	}
	return cfg->got_var;
#else
	return NULL;
#endif
}
/*
 * mono_get_vtable_var:
 *
 *   Return the variable holding the runtime generic context (rgctx),
 * creating it lazily. Only valid when compiling with generic sharing.
 */
static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
	g_assert (cfg->generic_sharing_context);

	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
	}

	return cfg->rgctx_var;
}
/*
 * type_from_stack_type:
 *
 *   Map the eval-stack type of INS back to a MonoType*. For STACK_MP
 * and STACK_VTYPE the result depends on ins->klass, so it must be set.
 * Aborts via g_error () on an unhandled stack type.
 */
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
	case STACK_MP:
		/* Managed pointer: byref form of the pointee class. */
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
	default:
		g_error ("stack type %d to monotype not handled\n", ins->type);
	}
	return NULL;
}
/*
 * type_to_stack_type:
 *
 *   Return the eval-stack type (STACK_*) used when a value of type T
 * is loaded. Enums are handled by mono_type_get_underlying_type ().
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	switch (mono_type_get_underlying_type (t)->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by an ldelem.*/stelem.* CIL
 * OPCODE. Asserts on any opcode that is not an array access.
 */
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case CEE_LDELEM_I:
	case CEE_STELEM_I:
		return mono_defaults.int_class;
	case CEE_LDELEM_I1:
	case CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case CEE_LDELEM_I2:
	case CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case CEE_LDELEM_I4:
	case CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case CEE_LDELEM_I8:
	case CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case CEE_LDELEM_R4:
	case CEE_STELEM_R4:
		return mono_defaults.single_class;
	case CEE_LDELEM_R8:
	case CEE_STELEM_R8:
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible: for simple scalar stack
 * types the variable for a given (stack slot, type) pair is cached in
 * cfg->intvars and reused across basic block boundaries; other types
 * always get a fresh local.
 */
static MonoInst*
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= mono_method_get_header (cfg->method)->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* Index into the intvars cache: one entry per (slot, stack type). */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
/*
 * mono_save_token_info:
 *
 *   When AOT compiling, record the (IMAGE, TOKEN) pair used to load
 * KEY in cfg->token_info_hash, so the AOT compiler can re-resolve KEY
 * from metadata at load time. A no-op for JIT compiles.
 */
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single joint point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the joint point).
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
1245 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1248 MonoBasicBlock
*bb
= cfg
->cbb
;
1249 MonoBasicBlock
*outb
;
1250 MonoInst
*inst
, **locals
;
1255 if (cfg
->verbose_level
> 3)
1256 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
1257 if (!bb
->out_scount
) {
1258 bb
->out_scount
= count
;
1259 //printf ("bblock %d has out:", bb->block_num);
1261 for (i
= 0; i
< bb
->out_count
; ++i
) {
1262 outb
= bb
->out_bb
[i
];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1266 //printf (" %d", outb->block_num);
1267 if (outb
->in_stack
) {
1269 bb
->out_stack
= outb
->in_stack
;
1275 bb
->out_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1276 for (i
= 0; i
< count
; ++i
) {
1278 * try to reuse temps already allocated for this purpouse, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1289 if (cfg
->inlined_method
)
1290 bb
->out_stack
[i
] = mono_compile_create_var (cfg
, type_from_stack_type (sp
[i
]), OP_LOCAL
);
1292 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
1297 for (i
= 0; i
< bb
->out_count
; ++i
) {
1298 outb
= bb
->out_bb
[i
];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1302 if (outb
->in_scount
) {
1303 if (outb
->in_scount
!= bb
->out_scount
) {
1304 cfg
->unverifiable
= TRUE
;
1307 continue; /* check they are the same locals */
1309 outb
->in_scount
= count
;
1310 outb
->in_stack
= bb
->out_stack
;
1313 locals
= bb
->out_stack
;
1315 for (i
= 0; i
< count
; ++i
) {
1316 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1317 inst
->cil_code
= sp
[i
]->cil_code
;
1318 sp
[i
] = locals
[i
];
1319 if (cfg
->verbose_level
> 3)
1320 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1332 /* Find a bblock which has a different in_stack */
1334 while (bindex
< bb
->out_count
) {
1335 outb
= bb
->out_bb
[bindex
];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1341 if (outb
->in_stack
!= locals
) {
1342 for (i
= 0; i
< count
; ++i
) {
1343 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1344 inst
->cil_code
= sp
[i
]->cil_code
;
1345 sp
[i
] = locals
[i
];
1346 if (cfg
->verbose_level
> 3)
1347 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1349 locals
= outb
->in_stack
;
1358 /* Emit code which loads interface_offsets [klass->interface_id]
1359 * The array is stored in memory before vtable.
1362 mini_emit_load_intf_reg_vtable (MonoCompile
*cfg
, int intf_reg
, int vtable_reg
, MonoClass
*klass
)
1364 if (cfg
->compile_aot
) {
1365 int ioffset_reg
= alloc_preg (cfg
);
1366 int iid_reg
= alloc_preg (cfg
);
1368 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_ADJUSTED_IID
);
1369 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ioffset_reg
, iid_reg
, vtable_reg
);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, ioffset_reg
, 0);
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, intf_reg
, vtable_reg
, -((klass
->interface_id
+ 1) * SIZEOF_VOID_P
));
1378 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1379 * stored in "klass_reg" implements the interface "klass".
1382 mini_emit_load_intf_bit_reg_class (MonoCompile
*cfg
, int intf_bit_reg
, int klass_reg
, MonoClass
*klass
)
1384 int ibitmap_reg
= alloc_preg (cfg
);
1385 int ibitmap_byte_reg
= alloc_preg (cfg
);
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, interface_bitmap
));
1389 if (cfg
->compile_aot
) {
1390 int iid_reg
= alloc_preg (cfg
);
1391 int shifted_iid_reg
= alloc_preg (cfg
);
1392 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1393 int masked_iid_reg
= alloc_preg (cfg
);
1394 int iid_one_bit_reg
= alloc_preg (cfg
);
1395 int iid_bit_reg
= alloc_preg (cfg
);
1396 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1397 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1398 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1399 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1400 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, masked_iid_reg
, iid_reg
, 7);
1401 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1402 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1403 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
1411 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1412 * stored in "vtable_reg" implements the interface "klass".
1415 mini_emit_load_intf_bit_reg_vtable (MonoCompile
*cfg
, int intf_bit_reg
, int vtable_reg
, MonoClass
*klass
)
1417 int ibitmap_reg
= alloc_preg (cfg
);
1418 int ibitmap_byte_reg
= alloc_preg (cfg
);
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, ibitmap_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, interface_bitmap
));
1422 if (cfg
->compile_aot
) {
1423 int iid_reg
= alloc_preg (cfg
);
1424 int shifted_iid_reg
= alloc_preg (cfg
);
1425 int ibitmap_byte_address_reg
= alloc_preg (cfg
);
1426 int masked_iid_reg
= alloc_preg (cfg
);
1427 int iid_one_bit_reg
= alloc_preg (cfg
);
1428 int iid_bit_reg
= alloc_preg (cfg
);
1429 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_IMM
, shifted_iid_reg
, iid_reg
, 3);
1431 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, ibitmap_byte_address_reg
, ibitmap_reg
, shifted_iid_reg
);
1432 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, ibitmap_byte_reg
, ibitmap_byte_address_reg
, 0);
1433 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_AND_IMM
, masked_iid_reg
, iid_reg
, 7);
1434 MONO_EMIT_NEW_ICONST (cfg
, iid_one_bit_reg
, 1);
1435 MONO_EMIT_NEW_BIALU (cfg
, OP_ISHL
, iid_bit_reg
, iid_one_bit_reg
, masked_iid_reg
);
1436 MONO_EMIT_NEW_BIALU (cfg
, OP_IAND
, intf_bit_reg
, ibitmap_byte_reg
, iid_bit_reg
);
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, ibitmap_byte_reg
, ibitmap_reg
, klass
->interface_id
>> 3);
1439 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, intf_bit_reg
, ibitmap_byte_reg
, 1 << (klass
->interface_id
& 7));
 * Emit code which checks whether the interface id of @klass is smaller than
 * the value given by max_iid_reg.
1448 mini_emit_max_iid_check (MonoCompile
*cfg
, int max_iid_reg
, MonoClass
*klass
,
1449 MonoBasicBlock
*false_target
)
1451 if (cfg
->compile_aot
) {
1452 int iid_reg
= alloc_preg (cfg
);
1453 MONO_EMIT_NEW_AOTCONST (cfg
, iid_reg
, klass
, MONO_PATCH_INFO_IID
);
1454 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, max_iid_reg
, iid_reg
);
1457 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, max_iid_reg
, klass
->interface_id
);
1459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1461 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1464 /* Same as above, but obtains max_iid from a vtable */
1466 mini_emit_max_iid_check_vtable (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
,
1467 MonoBasicBlock
*false_target
)
1469 int max_iid_reg
= alloc_preg (cfg
);
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, max_interface_id
));
1472 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1475 /* Same as above, but obtains max_iid from a klass */
1477 mini_emit_max_iid_check_class (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
,
1478 MonoBasicBlock
*false_target
)
1480 int max_iid_reg
= alloc_preg (cfg
);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, max_iid_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, max_interface_id
));
1483 mini_emit_max_iid_check (cfg
, max_iid_reg
, klass
, false_target
);
1487 mini_emit_isninst_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1489 int idepth_reg
= alloc_preg (cfg
);
1490 int stypes_reg
= alloc_preg (cfg
);
1491 int stype
= alloc_preg (cfg
);
1493 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1495 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBLT_UN
, false_target
);
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1500 if (cfg
->compile_aot
) {
1501 int const_reg
= alloc_preg (cfg
);
1502 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1503 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, stype
, const_reg
);
1505 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, stype
, klass
);
1507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, true_target
);
1511 mini_emit_iface_cast (MonoCompile
*cfg
, int vtable_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1513 int intf_reg
= alloc_preg (cfg
);
1515 mini_emit_max_iid_check_vtable (cfg
, vtable_reg
, klass
, false_target
);
1516 mini_emit_load_intf_bit_reg_vtable (cfg
, intf_reg
, vtable_reg
, klass
);
1517 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_reg
, 0);
1519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1521 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1525 * Variant of the above that takes a register to the class, not the vtable.
1528 mini_emit_iface_class_cast (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*false_target
, MonoBasicBlock
*true_target
)
1530 int intf_bit_reg
= alloc_preg (cfg
);
1532 mini_emit_max_iid_check_class (cfg
, klass_reg
, klass
, false_target
);
1533 mini_emit_load_intf_bit_reg_class (cfg
, intf_bit_reg
, klass_reg
, klass
);
1534 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, intf_bit_reg
, 0);
1536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, true_target
);
1538 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
1542 mini_emit_class_check (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
)
1544 if (cfg
->compile_aot
) {
1545 int const_reg
= alloc_preg (cfg
);
1546 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1547 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1549 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1551 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1555 mini_emit_class_check_branch (MonoCompile
*cfg
, int klass_reg
, MonoClass
*klass
, int branch_op
, MonoBasicBlock
*target
)
1557 if (cfg
->compile_aot
) {
1558 int const_reg
= alloc_preg (cfg
);
1559 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
1560 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, const_reg
);
1562 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
1564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, branch_op
, target
);
1568 mini_emit_castclass (MonoCompile
*cfg
, int obj_reg
, int klass_reg
, MonoClass
*klass
, MonoBasicBlock
*object_is_null
)
1571 int rank_reg
= alloc_preg (cfg
);
1572 int eclass_reg
= alloc_preg (cfg
);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, rank
));
1575 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
1576 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1577 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
1579 if (klass
->cast_class
== mono_defaults
.object_class
) {
1580 int parent_reg
= alloc_preg (cfg
);
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
1582 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, object_is_null
);
1583 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1584 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
1585 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, object_is_null
);
1586 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1587 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
1588 mini_emit_class_check (cfg
, eclass_reg
, mono_defaults
.enum_class
);
1589 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
1590 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, NULL
, NULL
);
1592 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1593 mini_emit_castclass (cfg
, -1, eclass_reg
, klass
->cast_class
, object_is_null
);
1596 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
) && (obj_reg
!= -1)) {
1597 /* Check that the object is a vector too */
1598 int bounds_reg
= alloc_preg (cfg
);
1599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
1600 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
1601 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
1604 int idepth_reg
= alloc_preg (cfg
);
1605 int stypes_reg
= alloc_preg (cfg
);
1606 int stype
= alloc_preg (cfg
);
1608 if (klass
->idepth
> MONO_DEFAULT_SUPERTABLE_SIZE
) {
1609 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, idepth_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, idepth
));
1610 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, idepth_reg
, klass
->idepth
);
1611 MONO_EMIT_NEW_COND_EXC (cfg
, LT_UN
, "InvalidCastException");
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stypes_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, supertypes
));
1614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, stype
, stypes_reg
, ((klass
->idepth
- 1) * SIZEOF_VOID_P
));
1615 mini_emit_class_check (cfg
, stype
, klass
);
1620 mini_emit_memset (MonoCompile
*cfg
, int destreg
, int offset
, int size
, int val
, int align
)
1624 g_assert (val
== 0);
1629 if ((size
<= 4) && (size
<= align
)) {
1632 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, destreg
, offset
, val
);
1635 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI2_MEMBASE_IMM
, destreg
, offset
, val
);
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI4_MEMBASE_IMM
, destreg
, offset
, val
);
1640 #if SIZEOF_REGISTER == 8
1642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI8_MEMBASE_IMM
, destreg
, offset
, val
);
1648 val_reg
= alloc_preg (cfg
);
1650 if (SIZEOF_REGISTER
== 8)
1651 MONO_EMIT_NEW_I8CONST (cfg
, val_reg
, val
);
1653 MONO_EMIT_NEW_ICONST (cfg
, val_reg
, val
);
1656 /* This could be optimized further if neccesary */
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1665 #if !NO_UNALIGNED_ACCESS
1666 if (SIZEOF_REGISTER
== 8) {
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1673 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, offset
, val_reg
);
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, offset
, val_reg
);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, offset
, val_reg
);
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, offset
, val_reg
);
1697 #endif /* DISABLE_JIT */
1700 mini_emit_memcpy (MonoCompile
*cfg
, int destreg
, int doffset
, int srcreg
, int soffset
, int size
, int align
)
1708 /* This could be optimized further if neccesary */
1710 cur_reg
= alloc_preg (cfg
);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1719 #if !NO_UNALIGNED_ACCESS
1720 if (SIZEOF_REGISTER
== 8) {
1722 cur_reg
= alloc_preg (cfg
);
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI8_MEMBASE
, cur_reg
, srcreg
, soffset
);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1733 cur_reg
= alloc_preg (cfg
);
1734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, cur_reg
, srcreg
, soffset
);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1741 cur_reg
= alloc_preg (cfg
);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI2_MEMBASE
, cur_reg
, srcreg
, soffset
);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1749 cur_reg
= alloc_preg (cfg
);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI1_MEMBASE
, cur_reg
, srcreg
, soffset
);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, destreg
, doffset
, cur_reg
);
1761 ret_type_to_call_opcode (MonoType
*type
, int calli
, int virt
, MonoGenericSharingContext
*gsctx
)
1764 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1767 type
= mini_get_basic_type_from_generic (gsctx
, type
);
1768 switch (type
->type
) {
1769 case MONO_TYPE_VOID
:
1770 return calli
? OP_VOIDCALL_REG
: virt
? OP_VOIDCALLVIRT
: OP_VOIDCALL
;
1773 case MONO_TYPE_BOOLEAN
:
1776 case MONO_TYPE_CHAR
:
1779 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1783 case MONO_TYPE_FNPTR
:
1784 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1785 case MONO_TYPE_CLASS
:
1786 case MONO_TYPE_STRING
:
1787 case MONO_TYPE_OBJECT
:
1788 case MONO_TYPE_SZARRAY
:
1789 case MONO_TYPE_ARRAY
:
1790 return calli
? OP_CALL_REG
: virt
? OP_CALLVIRT
: OP_CALL
;
1793 return calli
? OP_LCALL_REG
: virt
? OP_LCALLVIRT
: OP_LCALL
;
1796 return calli
? OP_FCALL_REG
: virt
? OP_FCALLVIRT
: OP_FCALL
;
1797 case MONO_TYPE_VALUETYPE
:
1798 if (type
->data
.klass
->enumtype
) {
1799 type
= mono_class_enum_basetype (type
->data
.klass
);
1802 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1803 case MONO_TYPE_TYPEDBYREF
:
1804 return calli
? OP_VCALL_REG
: virt
? OP_VCALLVIRT
: OP_VCALL
;
1805 case MONO_TYPE_GENERICINST
:
1806 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
1809 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type
->type
);
1815 * target_type_is_incompatible:
1816 * @cfg: MonoCompile context
1818 * Check that the item @arg on the evaluation stack can be stored
1819 * in the target type (can be a local, or field, etc).
1820 * The cfg arg can be used to check if we need verification or just
1823 * Returns: non-0 value if arg can't be stored on a target.
1826 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1828 MonoType
*simple_type
;
1831 if (target
->byref
) {
1832 /* FIXME: check that the pointed to types match */
1833 if (arg
->type
== STACK_MP
)
1834 return arg
->klass
!= mono_class_from_mono_type (target
);
1835 if (arg
->type
== STACK_PTR
)
1840 simple_type
= mono_type_get_underlying_type (target
);
1841 switch (simple_type
->type
) {
1842 case MONO_TYPE_VOID
:
1846 case MONO_TYPE_BOOLEAN
:
1849 case MONO_TYPE_CHAR
:
1852 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1856 /* STACK_MP is needed when setting pinned locals */
1857 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1862 case MONO_TYPE_FNPTR
:
1863 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1866 case MONO_TYPE_CLASS
:
1867 case MONO_TYPE_STRING
:
1868 case MONO_TYPE_OBJECT
:
1869 case MONO_TYPE_SZARRAY
:
1870 case MONO_TYPE_ARRAY
:
1871 if (arg
->type
!= STACK_OBJ
)
1873 /* FIXME: check type compatibility */
1877 if (arg
->type
!= STACK_I8
)
1882 if (arg
->type
!= STACK_R8
)
1885 case MONO_TYPE_VALUETYPE
:
1886 if (arg
->type
!= STACK_VTYPE
)
1888 klass
= mono_class_from_mono_type (simple_type
);
1889 if (klass
!= arg
->klass
)
1892 case MONO_TYPE_TYPEDBYREF
:
1893 if (arg
->type
!= STACK_VTYPE
)
1895 klass
= mono_class_from_mono_type (simple_type
);
1896 if (klass
!= arg
->klass
)
1899 case MONO_TYPE_GENERICINST
:
1900 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
1901 if (arg
->type
!= STACK_VTYPE
)
1903 klass
= mono_class_from_mono_type (simple_type
);
1904 if (klass
!= arg
->klass
)
1908 if (arg
->type
!= STACK_OBJ
)
1910 /* FIXME: check type compatibility */
1914 case MONO_TYPE_MVAR
:
1915 /* FIXME: all the arguments must be references for now,
1916 * later look inside cfg and see if the arg num is
1917 * really a reference
1919 g_assert (cfg
->generic_sharing_context
);
1920 if (arg
->type
!= STACK_OBJ
)
1924 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
1930 * Prepare arguments for passing to a function call.
1931 * Return a non-zero value if the arguments can't be passed to the given
1933 * The type checks are not yet complete and some conversions may need
1934 * casts on 32 or 64 bit architectures.
1936 * FIXME: implement this using target_type_is_incompatible ()
1939 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
1941 MonoType
*simple_type
;
1945 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
1949 for (i
= 0; i
< sig
->param_count
; ++i
) {
1950 if (sig
->params
[i
]->byref
) {
1951 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
1955 simple_type
= sig
->params
[i
];
1956 simple_type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, simple_type
);
1958 switch (simple_type
->type
) {
1959 case MONO_TYPE_VOID
:
1964 case MONO_TYPE_BOOLEAN
:
1967 case MONO_TYPE_CHAR
:
1970 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
1976 case MONO_TYPE_FNPTR
:
1977 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
1980 case MONO_TYPE_CLASS
:
1981 case MONO_TYPE_STRING
:
1982 case MONO_TYPE_OBJECT
:
1983 case MONO_TYPE_SZARRAY
:
1984 case MONO_TYPE_ARRAY
:
1985 if (args
[i
]->type
!= STACK_OBJ
)
1990 if (args
[i
]->type
!= STACK_I8
)
1995 if (args
[i
]->type
!= STACK_R8
)
1998 case MONO_TYPE_VALUETYPE
:
1999 if (simple_type
->data
.klass
->enumtype
) {
2000 simple_type
= mono_class_enum_basetype (simple_type
->data
.klass
);
2003 if (args
[i
]->type
!= STACK_VTYPE
)
2006 case MONO_TYPE_TYPEDBYREF
:
2007 if (args
[i
]->type
!= STACK_VTYPE
)
2010 case MONO_TYPE_GENERICINST
:
2011 simple_type
= &simple_type
->data
.generic_class
->container_class
->byval_arg
;
2015 g_error ("unknown type 0x%02x in check_call_signature",
2023 callvirt_to_call (int opcode
)
2028 case OP_VOIDCALLVIRT
:
2037 g_assert_not_reached ();
2044 callvirt_to_call_membase (int opcode
)
2048 return OP_CALL_MEMBASE
;
2049 case OP_VOIDCALLVIRT
:
2050 return OP_VOIDCALL_MEMBASE
;
2052 return OP_FCALL_MEMBASE
;
2054 return OP_LCALL_MEMBASE
;
2056 return OP_VCALL_MEMBASE
;
2058 g_assert_not_reached ();
2064 #ifdef MONO_ARCH_HAVE_IMT
2066 emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoInst
*imt_arg
)
2068 #ifdef MONO_ARCH_IMT_REG
2069 int method_reg
= alloc_preg (cfg
);
2072 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, method_reg
, imt_arg
->dreg
);
2073 } else if (cfg
->compile_aot
) {
2074 MONO_EMIT_NEW_AOTCONST (cfg
, method_reg
, call
->method
, MONO_PATCH_INFO_METHODCONST
);
2077 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
2078 ins
->inst_p0
= call
->method
;
2079 ins
->dreg
= method_reg
;
2080 MONO_ADD_INS (cfg
->cbb
, ins
);
2083 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, MONO_ARCH_IMT_REG
, FALSE
);
2085 mono_arch_emit_imt_argument (cfg
, call
, imt_arg
);
2090 static MonoJumpInfo
*
2091 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2093 MonoJumpInfo
*ji
= mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2097 ji
->data
.target
= target
;
2102 inline static MonoInst
*
2103 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
);
2105 inline static MonoCallInst
*
2106 mono_emit_call_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
,
2107 MonoInst
**args
, int calli
, int virtual, int tail
)
2110 #ifdef MONO_ARCH_SOFT_FLOAT
2115 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
2117 MONO_INST_NEW_CALL (cfg
, call
, ret_type_to_call_opcode (sig
->ret
, calli
, virtual, cfg
->generic_sharing_context
));
2120 call
->signature
= sig
;
2122 type_to_eval_stack_type ((cfg
), sig
->ret
, &call
->inst
);
2125 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2126 call
->vret_var
= cfg
->vret_addr
;
2127 //g_assert_not_reached ();
2129 } else if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2130 MonoInst
*temp
= mono_compile_create_var (cfg
, sig
->ret
, OP_LOCAL
);
2133 temp
->backend
.is_pinvoke
= sig
->pinvoke
;
2136 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2137 * address of return value to increase optimization opportunities.
2138 * Before vtype decomposition, the dreg of the call ins itself represents the
2139 * fact the call modifies the return value. After decomposition, the call will
2140 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2141 * will be transformed into an LDADDR.
2143 MONO_INST_NEW (cfg
, loada
, OP_OUTARG_VTRETADDR
);
2144 loada
->dreg
= alloc_preg (cfg
);
2145 loada
->inst_p0
= temp
;
2146 /* We reference the call too since call->dreg could change during optimization */
2147 loada
->inst_p1
= call
;
2148 MONO_ADD_INS (cfg
->cbb
, loada
);
2150 call
->inst
.dreg
= temp
->dreg
;
2152 call
->vret_var
= loada
;
2153 } else if (!MONO_TYPE_IS_VOID (sig
->ret
))
2154 call
->inst
.dreg
= alloc_dreg (cfg
, call
->inst
.type
);
2156 #ifdef MONO_ARCH_SOFT_FLOAT
2158 * If the call has a float argument, we would need to do an r8->r4 conversion using
2159 * an icall, but that cannot be done during the call sequence since it would clobber
2160 * the call registers + the stack. So we do it before emitting the call.
2162 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2164 MonoInst
*in
= call
->args
[i
];
2166 if (i
>= sig
->hasthis
)
2167 t
= sig
->params
[i
- sig
->hasthis
];
2169 t
= &mono_defaults
.int_class
->byval_arg
;
2170 t
= mono_type_get_underlying_type (t
);
2172 if (!t
->byref
&& t
->type
== MONO_TYPE_R4
) {
2173 MonoInst
*iargs
[1];
2177 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
2179 /* The result will be in an int vreg */
2180 call
->args
[i
] = conv
;
2186 if (COMPILE_LLVM (cfg
))
2187 mono_llvm_emit_call (cfg
, call
);
2189 mono_arch_emit_call (cfg
, call
);
2191 mono_arch_emit_call (cfg
, call
);
2194 cfg
->param_area
= MAX (cfg
->param_area
, call
->stack_usage
);
2195 cfg
->flags
|= MONO_CFG_HAS_CALLS
;
2200 inline static MonoInst
*
2201 mono_emit_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
)
2203 MonoCallInst
*call
= mono_emit_call_args (cfg
, sig
, args
, TRUE
, FALSE
, FALSE
);
2205 call
->inst
.sreg1
= addr
->dreg
;
2207 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2209 return (MonoInst
*)call
;
2212 inline static MonoInst
*
2213 mono_emit_rgctx_calli (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
, MonoInst
*addr
, MonoInst
*rgctx_arg
)
2215 #ifdef MONO_ARCH_RGCTX_REG
2220 rgctx_reg
= mono_alloc_preg (cfg
);
2221 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, rgctx_arg
->dreg
);
2223 call
= (MonoCallInst
*)mono_emit_calli (cfg
, sig
, args
, addr
);
2225 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2226 cfg
->uses_rgctx_reg
= TRUE
;
2227 call
->rgctx_reg
= TRUE
;
2229 return (MonoInst
*)call
;
2231 g_assert_not_reached ();
2237 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
, MonoMethod
*cmethod
, int rgctx_type
);
2239 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
, MonoClass
*klass
, int rgctx_type
);
2242 mono_emit_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2243 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
)
2245 gboolean might_be_remote
;
2246 gboolean
virtual = this != NULL
;
2247 gboolean enable_for_aot
= TRUE
;
2251 if (method
->string_ctor
) {
2252 /* Create the real signature */
2253 /* FIXME: Cache these */
2254 MonoMethodSignature
*ctor_sig
= mono_metadata_signature_dup_mempool (cfg
->mempool
, sig
);
2255 ctor_sig
->ret
= &mono_defaults
.string_class
->byval_arg
;
2260 might_be_remote
= this && sig
->hasthis
&&
2261 (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) &&
2262 !(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && !MONO_CHECK_THIS (this);
2264 context_used
= mono_method_check_context_used (method
);
2265 if (might_be_remote
&& context_used
) {
2268 g_assert (cfg
->generic_sharing_context
);
2270 addr
= emit_get_rgctx_method (cfg
, context_used
, method
, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK
);
2272 return mono_emit_calli (cfg
, sig
, args
, addr
);
2275 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, virtual, FALSE
);
2277 if (might_be_remote
)
2278 call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2280 call
->method
= method
;
2281 call
->inst
.flags
|= MONO_INST_HAS_METHOD
;
2282 call
->inst
.inst_left
= this;
2285 int vtable_reg
, slot_reg
, this_reg
;
2287 this_reg
= this->dreg
;
2289 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2290 if ((method
->klass
->parent
== mono_defaults
.multicastdelegate_class
) && (!strcmp (method
->name
, "Invoke"))) {
2291 /* Make a call to delegate->invoke_impl */
2292 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2293 call
->inst
.inst_basereg
= this_reg
;
2294 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
);
2295 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2297 return (MonoInst
*)call
;
2301 if ((!cfg
->compile_aot
|| enable_for_aot
) &&
2302 (!(method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
2303 (MONO_METHOD_IS_FINAL (method
) &&
2304 method
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
))) {
2306 * the method is not virtual, we just need to ensure this is not null
2307 * and then we can call the method directly.
2309 if (method
->klass
->marshalbyref
|| method
->klass
== mono_defaults
.object_class
) {
2310 method
= call
->method
= mono_marshal_get_remoting_invoke_with_check (method
);
2313 if (!method
->string_ctor
) {
2314 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
2315 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, this_reg
);
2316 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, this_reg
);
2319 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2321 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2323 return (MonoInst
*)call
;
2326 if ((method
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && MONO_METHOD_IS_FINAL (method
)) {
2328 * the method is virtual, but we can statically dispatch since either
2329 * it's class or the method itself are sealed.
2330 * But first we need to ensure it's not a null reference.
2332 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
2333 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, this_reg
);
2334 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, this_reg
);
2336 call
->inst
.opcode
= callvirt_to_call (call
->inst
.opcode
);
2337 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2339 return (MonoInst
*)call
;
2342 call
->inst
.opcode
= callvirt_to_call_membase (call
->inst
.opcode
);
2344 vtable_reg
= alloc_preg (cfg
);
2345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, this_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2346 if (method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2348 #ifdef MONO_ARCH_HAVE_IMT
2350 guint32 imt_slot
= mono_method_get_imt_slot (method
);
2351 emit_imt_argument (cfg
, call
, imt_arg
);
2352 slot_reg
= vtable_reg
;
2353 call
->inst
.inst_offset
= ((gint32
)imt_slot
- MONO_IMT_SIZE
) * SIZEOF_VOID_P
;
2356 if (slot_reg
== -1) {
2357 slot_reg
= alloc_preg (cfg
);
2358 mini_emit_load_intf_reg_vtable (cfg
, slot_reg
, vtable_reg
, method
->klass
);
2359 call
->inst
.inst_offset
= mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
;
2362 slot_reg
= vtable_reg
;
2363 call
->inst
.inst_offset
= G_STRUCT_OFFSET (MonoVTable
, vtable
) +
2364 (mono_method_get_vtable_index (method
) * SIZEOF_VOID_P
);
2365 #ifdef MONO_ARCH_HAVE_IMT
2367 g_assert (mono_method_signature (method
)->generic_param_count
);
2368 emit_imt_argument (cfg
, call
, imt_arg
);
2373 call
->inst
.sreg1
= slot_reg
;
2374 call
->virtual = TRUE
;
2377 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2379 return (MonoInst
*)call
;
2383 mono_emit_rgctx_method_call_full (MonoCompile
*cfg
, MonoMethod
*method
, MonoMethodSignature
*sig
,
2384 MonoInst
**args
, MonoInst
*this, MonoInst
*imt_arg
, MonoInst
*vtable_arg
)
2391 #ifdef MONO_ARCH_RGCTX_REG
2392 rgctx_reg
= mono_alloc_preg (cfg
);
2393 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
2398 ins
= mono_emit_method_call_full (cfg
, method
, sig
, args
, this, imt_arg
);
2400 call
= (MonoCallInst
*)ins
;
2402 #ifdef MONO_ARCH_RGCTX_REG
2403 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
2404 cfg
->uses_rgctx_reg
= TRUE
;
2405 call
->rgctx_reg
= TRUE
;
2414 static inline MonoInst
*
2415 mono_emit_method_call (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
**args
, MonoInst
*this)
2417 return mono_emit_method_call_full (cfg
, method
, mono_method_signature (method
), args
, this, NULL
);
2421 mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
,
2428 call
= mono_emit_call_args (cfg
, sig
, args
, FALSE
, FALSE
, FALSE
);
2431 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
2433 return (MonoInst
*)call
;
2436 inline static MonoInst
*
2437 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2439 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2443 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2447 * mono_emit_abs_call:
2449 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2451 inline static MonoInst
*
2452 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2453 MonoMethodSignature
*sig
, MonoInst
**args
)
2455 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2459 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2462 if (cfg
->abs_patches
== NULL
)
2463 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2464 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2465 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2466 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2471 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2473 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2474 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2478 * Native code might return non register sized integers
2479 * without initializing the upper bits.
2481 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2482 case OP_LOADI1_MEMBASE
:
2483 widen_op
= OP_ICONV_TO_I1
;
2485 case OP_LOADU1_MEMBASE
:
2486 widen_op
= OP_ICONV_TO_U1
;
2488 case OP_LOADI2_MEMBASE
:
2489 widen_op
= OP_ICONV_TO_I2
;
2491 case OP_LOADU2_MEMBASE
:
2492 widen_op
= OP_ICONV_TO_U2
;
2498 if (widen_op
!= -1) {
2499 int dreg
= alloc_preg (cfg
);
2502 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2503 widen
->type
= ins
->type
;
2513 get_memcpy_method (void)
2515 static MonoMethod
*memcpy_method
= NULL
;
2516 if (!memcpy_method
) {
2517 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2519 g_error ("Old corlib found. Install a new one");
2521 return memcpy_method
;
2525 * Emit code to copy a valuetype of type @klass whose address is stored in
2526 * @src->dreg to memory whose address is stored at @dest->dreg.
2529 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2531 MonoInst
*iargs
[3];
2534 MonoMethod
*memcpy_method
;
2538 * This check breaks with spilled vars... need to handle it during verification anyway.
2539 * g_assert (klass && klass == src->klass && klass == dest->klass);
2543 n
= mono_class_native_size (klass
, &align
);
2545 n
= mono_class_value_size (klass
, &align
);
2547 #if HAVE_WRITE_BARRIERS
2548 /* if native is true there should be no references in the struct */
2549 if (klass
->has_references
&& !native
) {
2550 /* Avoid barriers when storing to the stack */
2551 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2552 (dest
->opcode
== OP_LDADDR
))) {
2553 int context_used
= 0;
2558 if (cfg
->generic_sharing_context
)
2559 context_used
= mono_class_check_context_used (klass
);
2561 iargs
[2] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
2563 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2564 mono_class_compute_gc_descriptor (klass
);
2567 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2572 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2573 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2574 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2578 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2580 memcpy_method
= get_memcpy_method ();
2581 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2586 get_memset_method (void)
2588 static MonoMethod
*memset_method
= NULL
;
2589 if (!memset_method
) {
2590 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2592 g_error ("Old corlib found. Install a new one");
2594 return memset_method
;
2598 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2600 MonoInst
*iargs
[3];
2603 MonoMethod
*memset_method
;
2605 /* FIXME: Optimize this for the case when dest is an LDADDR */
2607 mono_class_init (klass
);
2608 n
= mono_class_value_size (klass
, &align
);
2610 if (n
<= sizeof (gpointer
) * 5) {
2611 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2614 memset_method
= get_memset_method ();
2616 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2617 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2618 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2623 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2625 MonoInst
*this = NULL
;
2627 g_assert (cfg
->generic_sharing_context
);
2629 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2630 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2631 !method
->klass
->valuetype
)
2632 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2634 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2635 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2638 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2640 mrgctx_loc
= mono_get_vtable_var (cfg
);
2641 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2644 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2645 MonoInst
*vtable_loc
, *vtable_var
;
2649 vtable_loc
= mono_get_vtable_var (cfg
);
2650 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2652 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2653 MonoInst
*mrgctx_var
= vtable_var
;
2656 vtable_reg
= alloc_preg (cfg
);
2657 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2658 vtable_var
->type
= STACK_PTR
;
2664 int vtable_reg
, res_reg
;
2666 vtable_reg
= alloc_preg (cfg
);
2667 res_reg
= alloc_preg (cfg
);
2668 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2673 static MonoJumpInfoRgctxEntry
*
2674 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2676 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2677 res
->method
= method
;
2678 res
->in_mrgctx
= in_mrgctx
;
2679 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2680 res
->data
->type
= patch_type
;
2681 res
->data
->data
.target
= patch_data
;
2682 res
->info_type
= info_type
;
2687 static inline MonoInst
*
2688 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2690 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2694 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2695 MonoClass
*klass
, int rgctx_type
)
2697 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2698 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2700 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2704 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2705 MonoMethod
*cmethod
, int rgctx_type
)
2707 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2708 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2710 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2714 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2715 MonoClassField
*field
, int rgctx_type
)
2717 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2718 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2720 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2724 * On return the caller must check @klass for load errors.
2727 emit_generic_class_init (MonoCompile
*cfg
, MonoClass
*klass
)
2729 MonoInst
*vtable_arg
;
2731 int context_used
= 0;
2733 if (cfg
->generic_sharing_context
)
2734 context_used
= mono_class_check_context_used (klass
);
2737 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
2738 klass
, MONO_RGCTX_INFO_VTABLE
);
2740 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2744 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
2747 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable_arg
);
2748 #ifdef MONO_ARCH_VTABLE_REG
2749 mono_call_inst_add_outarg_reg (cfg
, call
, vtable_arg
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
2750 cfg
->uses_vtable_reg
= TRUE
;
2757 * On return the caller must check @array_class for load errors
2760 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2762 int vtable_reg
= alloc_preg (cfg
);
2763 int context_used
= 0;
2765 if (cfg
->generic_sharing_context
)
2766 context_used
= mono_class_check_context_used (array_class
);
2768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2770 if (cfg
->opt
& MONO_OPT_SHARED
) {
2771 int class_reg
= alloc_preg (cfg
);
2772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2773 if (cfg
->compile_aot
) {
2774 int klass_reg
= alloc_preg (cfg
);
2775 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
2776 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
2778 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
2780 } else if (context_used
) {
2781 MonoInst
*vtable_ins
;
2783 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2784 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2786 if (cfg
->compile_aot
) {
2790 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
2792 vt_reg
= alloc_preg (cfg
);
2793 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, vtable
);
2794 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2797 if (!(vtable
= mono_class_vtable (cfg
->domain
, array_class
)))
2799 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vtable
);
2803 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2807 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
2809 if (mini_get_debug_options ()->better_cast_details
) {
2810 int to_klass_reg
= alloc_preg (cfg
);
2811 int vtable_reg
= alloc_preg (cfg
);
2812 int klass_reg
= alloc_preg (cfg
);
2813 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2816 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
2820 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2824 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
2825 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
2826 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
2831 reset_cast_details (MonoCompile
*cfg
)
2833 /* Reset the variables holding the cast details */
2834 if (mini_get_debug_options ()->better_cast_details
) {
2835 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2837 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2838 /* It is enough to reset the from field */
2839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
2844 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2845 * generic code is generated.
2848 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
2850 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
2853 MonoInst
*rgctx
, *addr
;
2855 /* FIXME: What if the class is shared? We might not
2856 have to get the address of the method from the
2858 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2859 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2861 rgctx
= emit_get_rgctx (cfg
, method
, context_used
);
2863 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2865 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2870 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
2874 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
2875 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2876 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2877 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
2879 obj_reg
= sp
[0]->dreg
;
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2881 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2883 /* FIXME: generics */
2884 g_assert (klass
->rank
== 0);
2887 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
2888 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
2894 MonoInst
*element_class
;
2896 /* This assertion is from the unboxcast insn */
2897 g_assert (klass
->rank
== 0);
2899 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
2900 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
2902 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
2903 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2905 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
2906 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
2907 reset_cast_details (cfg
);
2910 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_PTR
), obj_reg
, sizeof (MonoObject
));
2911 MONO_ADD_INS (cfg
->cbb
, add
);
2912 add
->type
= STACK_MP
;
2919 * Returns NULL and set the cfg exception on error.
2922 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
)
2924 MonoInst
*iargs
[2];
2927 if (cfg
->opt
& MONO_OPT_SHARED
) {
2928 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2929 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
2931 alloc_ftn
= mono_object_new
;
2932 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
2933 /* This happens often in argument checking code, eg. throw new FooException... */
2934 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2935 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
2936 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
2938 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2939 MonoMethod
*managed_alloc
= NULL
;
2943 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
2944 cfg
->exception_ptr
= klass
;
2948 #ifndef MONO_CROSS_COMPILE
2949 managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
2952 if (managed_alloc
) {
2953 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2954 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2956 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
2958 guint32 lw
= vtable
->klass
->instance_size
;
2959 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
2960 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
2961 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
2964 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2968 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2972 handle_alloc_from_inst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*data_inst
,
2975 MonoInst
*iargs
[2];
2976 MonoMethod
*managed_alloc
= NULL
;
2980 FIXME: we cannot get managed_alloc here because we can't get
2981 the class's vtable (because it's not a closed class)
2983 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2984 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2987 if (cfg
->opt
& MONO_OPT_SHARED
) {
2988 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2989 iargs
[1] = data_inst
;
2990 alloc_ftn
= mono_object_new
;
2992 if (managed_alloc
) {
2993 iargs
[0] = data_inst
;
2994 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2997 iargs
[0] = data_inst
;
2998 alloc_ftn
= mono_object_new_specific
;
3001 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
3005 * Returns NULL and set the cfg exception on error.
3008 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
)
3010 MonoInst
*alloc
, *ins
;
3012 if (mono_class_is_nullable (klass
)) {
3013 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
3014 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
3017 alloc
= handle_alloc (cfg
, klass
, TRUE
);
3021 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
3027 handle_box_from_inst (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
, MonoInst
*data_inst
)
3029 MonoInst
*alloc
, *ins
;
3031 if (mono_class_is_nullable (klass
)) {
3032 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
3033 /* FIXME: What if the class is shared? We might not
3034 have to get the method address from the RGCTX. */
3035 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3036 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3037 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
3039 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
3041 alloc
= handle_alloc_from_inst (cfg
, klass
, data_inst
, TRUE
);
3043 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
3050 * Returns NULL and set the cfg exception on error.
3053 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3055 MonoBasicBlock
*is_null_bb
;
3056 int obj_reg
= src
->dreg
;
3057 int vtable_reg
= alloc_preg (cfg
);
3059 NEW_BBLOCK (cfg
, is_null_bb
);
3061 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
3064 save_cast_details (cfg
, klass
, obj_reg
);
3066 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3068 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
3070 int klass_reg
= alloc_preg (cfg
);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3074 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3075 /* the remoting code is broken, access the class for now */
3076 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3077 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3079 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3080 cfg
->exception_ptr
= klass
;
3083 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3086 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3088 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3091 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, is_null_bb
);
3095 MONO_START_BB (cfg
, is_null_bb
);
3097 reset_cast_details (cfg
);
3103 * Returns NULL and set the cfg exception on error.
3106 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3109 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
3110 int obj_reg
= src
->dreg
;
3111 int vtable_reg
= alloc_preg (cfg
);
3112 int res_reg
= alloc_preg (cfg
);
3114 NEW_BBLOCK (cfg
, is_null_bb
);
3115 NEW_BBLOCK (cfg
, false_bb
);
3116 NEW_BBLOCK (cfg
, end_bb
);
3118 /* Do the assignment at the beginning, so the other assignment can be if converted */
3119 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
3120 ins
->type
= STACK_OBJ
;
3123 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
3126 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3128 /* the is_null_bb target simply copies the input register to the output */
3129 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
3131 int klass_reg
= alloc_preg (cfg
);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3136 int rank_reg
= alloc_preg (cfg
);
3137 int eclass_reg
= alloc_preg (cfg
);
3139 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3140 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
3141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
3144 if (klass
->cast_class
== mono_defaults
.object_class
) {
3145 int parent_reg
= alloc_preg (cfg
);
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
3147 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
3148 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3150 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
3151 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
3152 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3154 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
3155 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3157 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3158 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3160 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
3161 /* Check that the object is a vector too */
3162 int bounds_reg
= alloc_preg (cfg
);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
3165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3168 /* the is_null_bb target simply copies the input register to the output */
3169 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3171 } else if (mono_class_is_nullable (klass
)) {
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3173 /* the is_null_bb target simply copies the input register to the output */
3174 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3176 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3177 /* the remoting code is broken, access the class for now */
3178 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3179 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3181 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3182 cfg
->exception_ptr
= klass
;
3185 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3188 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3193 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3194 /* the is_null_bb target simply copies the input register to the output */
3195 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, is_null_bb
);
3200 MONO_START_BB (cfg
, false_bb
);
3202 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3203 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3205 MONO_START_BB (cfg
, is_null_bb
);
3207 MONO_START_BB (cfg
, end_bb
);
3213 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3215 /* This opcode takes as input an object reference and a class, and returns:
3216 0) if the object is an instance of the class,
3217 1) if the object is not instance of the class,
3218 2) if the object is a proxy whose type cannot be determined */
3221 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3222 int obj_reg
= src
->dreg
;
3223 int dreg
= alloc_ireg (cfg
);
3225 int klass_reg
= alloc_preg (cfg
);
3227 NEW_BBLOCK (cfg
, true_bb
);
3228 NEW_BBLOCK (cfg
, false_bb
);
3229 NEW_BBLOCK (cfg
, false2_bb
);
3230 NEW_BBLOCK (cfg
, end_bb
);
3231 NEW_BBLOCK (cfg
, no_proxy_bb
);
3233 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3234 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3236 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3237 NEW_BBLOCK (cfg
, interface_fail_bb
);
3239 tmp_reg
= alloc_preg (cfg
);
3240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3241 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3242 MONO_START_BB (cfg
, interface_fail_bb
);
3243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3245 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3247 tmp_reg
= alloc_preg (cfg
);
3248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3249 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3252 tmp_reg
= alloc_preg (cfg
);
3253 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3254 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3256 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3257 tmp_reg
= alloc_preg (cfg
);
3258 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3259 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3261 tmp_reg
= alloc_preg (cfg
);
3262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3263 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3264 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3266 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3267 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3269 MONO_START_BB (cfg
, no_proxy_bb
);
3271 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3274 MONO_START_BB (cfg
, false_bb
);
3276 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3279 MONO_START_BB (cfg
, false2_bb
);
3281 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3284 MONO_START_BB (cfg
, true_bb
);
3286 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3288 MONO_START_BB (cfg
, end_bb
);
3291 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3293 ins
->type
= STACK_I4
;
3299 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3301 /* This opcode takes as input an object reference and a class, and returns:
3302 0) if the object is an instance of the class,
3303 1) if the object is a proxy whose type cannot be determined
3304 an InvalidCastException exception is thrown otherwhise*/
3307 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3308 int obj_reg
= src
->dreg
;
3309 int dreg
= alloc_ireg (cfg
);
3310 int tmp_reg
= alloc_preg (cfg
);
3311 int klass_reg
= alloc_preg (cfg
);
3313 NEW_BBLOCK (cfg
, end_bb
);
3314 NEW_BBLOCK (cfg
, ok_result_bb
);
3316 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3317 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3319 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3320 NEW_BBLOCK (cfg
, interface_fail_bb
);
3322 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3323 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3324 MONO_START_BB (cfg
, interface_fail_bb
);
3325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3327 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3329 tmp_reg
= alloc_preg (cfg
);
3330 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3331 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3332 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3334 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3335 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3338 NEW_BBLOCK (cfg
, no_proxy_bb
);
3340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3342 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3344 tmp_reg
= alloc_preg (cfg
);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3348 tmp_reg
= alloc_preg (cfg
);
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3350 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3353 NEW_BBLOCK (cfg
, fail_1_bb
);
3355 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3357 MONO_START_BB (cfg
, fail_1_bb
);
3359 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3360 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3362 MONO_START_BB (cfg
, no_proxy_bb
);
3364 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3367 MONO_START_BB (cfg
, ok_result_bb
);
3369 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3371 MONO_START_BB (cfg
, end_bb
);
3374 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3376 ins
->type
= STACK_I4
;
3382 * Returns NULL and set the cfg exception on error.
3384 static G_GNUC_UNUSED MonoInst
*
3385 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
)
3387 gpointer
*trampoline
;
3388 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3392 obj
= handle_alloc (cfg
, klass
, FALSE
);
3396 /* Inline the contents of mono_delegate_ctor */
3398 /* Set target field */
3399 /* Optimize away setting of NULL target */
3400 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0))
3401 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3403 /* Set method field */
3404 EMIT_NEW_METHODCONST (cfg
, method_ins
, method
);
3405 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3408 * To avoid looking up the compiled code belonging to the target method
3409 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3410 * store it, and we fill it after the method has been compiled.
3412 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3413 MonoInst
*code_slot_ins
;
3415 domain
= mono_domain_get ();
3416 mono_domain_lock (domain
);
3417 if (!domain_jit_info (domain
)->method_code_hash
)
3418 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3419 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3421 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3422 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3424 mono_domain_unlock (domain
);
3426 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3427 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3430 /* Set invoke_impl field */
3431 if (cfg
->compile_aot
) {
3432 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3434 trampoline
= mono_create_delegate_trampoline (klass
);
3435 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3437 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3439 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3445 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3447 MonoJitICallInfo
*info
;
3449 /* Need to register the icall so it gets an icall wrapper */
3450 info
= mono_get_array_new_va_icall (rank
);
3452 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3454 /* mono_array_new_va () needs a vararg calling convention */
3455 cfg
->disable_llvm
= TRUE
;
3457 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3458 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
3462 mono_emit_load_got_addr (MonoCompile
*cfg
)
3464 MonoInst
*getaddr
, *dummy_use
;
3466 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3469 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3470 getaddr
->dreg
= cfg
->got_var
->dreg
;
3472 /* Add it to the start of the first bblock */
3473 if (cfg
->bb_entry
->code
) {
3474 getaddr
->next
= cfg
->bb_entry
->code
;
3475 cfg
->bb_entry
->code
= getaddr
;
3478 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3480 cfg
->got_var_allocated
= TRUE
;
3483 * Add a dummy use to keep the got_var alive, since real uses might
3484 * only be generated by the back ends.
3485 * Add it to end_bblock, so the variable's lifetime covers the whole
3487 * It would be better to make the usage of the got var explicit in all
3488 * cases when the backend needs it (i.e. calls, throw etc.), so this
3489 * wouldn't be needed.
3491 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3492 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3495 static int inline_limit
;
3496 static gboolean inline_limit_inited
;
3499 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3501 MonoMethodHeader
*header
;
3503 #ifdef MONO_ARCH_SOFT_FLOAT
3504 MonoMethodSignature
*sig
= mono_method_signature (method
);
3508 if (cfg
->generic_sharing_context
)
3511 if (cfg
->inline_depth
> 10)
3514 #ifdef MONO_ARCH_HAVE_LMF_OPS
3515 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3516 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3517 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3521 if (method
->is_inflated
)
3522 /* Avoid inflating the header */
3523 header
= mono_method_get_header (((MonoMethodInflated
*)method
)->declaring
);
3525 header
= mono_method_get_header (method
);
3527 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_RUNTIME
) ||
3528 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3529 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3530 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3531 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
) ||
3532 (method
->klass
->marshalbyref
) ||
3533 !header
|| header
->num_clauses
)
3536 /* also consider num_locals? */
3537 /* Do the size check early to avoid creating vtables */
3538 if (!inline_limit_inited
) {
3539 if (getenv ("MONO_INLINELIMIT"))
3540 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3542 inline_limit
= INLINE_LENGTH_LIMIT
;
3543 inline_limit_inited
= TRUE
;
3545 if (header
->code_size
>= inline_limit
)
3549 * if we can initialize the class of the method right away, we do,
3550 * otherwise we don't allow inlining if the class needs initialization,
3551 * since it would mean inserting a call to mono_runtime_class_init()
3552 * inside the inlined code
3554 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3555 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3556 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3557 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3558 if (!method
->klass
->runtime_info
)
3559 /* No vtable created yet */
3561 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3564 /* This makes so that inline cannot trigger */
3565 /* .cctors: too many apps depend on them */
3566 /* running with a specific order... */
3567 if (! vtable
->initialized
)
3569 mono_runtime_class_init (vtable
);
3571 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3572 if (!method
->klass
->runtime_info
)
3573 /* No vtable created yet */
3575 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3578 if (!vtable
->initialized
)
3583 * If we're compiling for shared code
3584 * the cctor will need to be run at aot method load time, for example,
3585 * or at the end of the compilation of the inlining method.
3587 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
3592 * CAS - do not inline methods with declarative security
3593 * Note: this has to be before any possible return TRUE;
3595 if (mono_method_has_declsec (method
))
3598 #ifdef MONO_ARCH_SOFT_FLOAT
3600 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3602 for (i
= 0; i
< sig
->param_count
; ++i
)
3603 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3611 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
3613 if (vtable
->initialized
&& !cfg
->compile_aot
)
3616 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
3619 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
3622 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
3623 /* The initialization is already done before the method is called */
3630 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
)
3634 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
3636 mono_class_init (klass
);
3637 size
= mono_class_array_element_size (klass
);
3639 mult_reg
= alloc_preg (cfg
);
3640 array_reg
= arr
->dreg
;
3641 index_reg
= index
->dreg
;
3643 #if SIZEOF_REGISTER == 8
3644 /* The array reg is 64 bits but the index reg is only 32 */
3645 index2_reg
= alloc_preg (cfg
);
3646 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3648 if (index
->type
== STACK_I8
) {
3649 index2_reg
= alloc_preg (cfg
);
3650 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3652 index2_reg
= index_reg
;
3656 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
3658 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3659 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
3660 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3662 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
3663 ins
->type
= STACK_PTR
;
3669 add_reg
= alloc_preg (cfg
);
3671 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
3672 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
3673 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3674 ins
->type
= STACK_PTR
;
3675 MONO_ADD_INS (cfg
->cbb
, ins
);
3680 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3682 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
3684 int bounds_reg
= alloc_preg (cfg
);
3685 int add_reg
= alloc_preg (cfg
);
3686 int mult_reg
= alloc_preg (cfg
);
3687 int mult2_reg
= alloc_preg (cfg
);
3688 int low1_reg
= alloc_preg (cfg
);
3689 int low2_reg
= alloc_preg (cfg
);
3690 int high1_reg
= alloc_preg (cfg
);
3691 int high2_reg
= alloc_preg (cfg
);
3692 int realidx1_reg
= alloc_preg (cfg
);
3693 int realidx2_reg
= alloc_preg (cfg
);
3694 int sum_reg
= alloc_preg (cfg
);
3699 mono_class_init (klass
);
3700 size
= mono_class_array_element_size (klass
);
3702 index1
= index_ins1
->dreg
;
3703 index2
= index_ins2
->dreg
;
3705 /* range checking */
3706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
3707 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3709 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
3710 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3711 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
3712 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
3713 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3714 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
3715 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
3718 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3719 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
3720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
3721 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3722 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
3723 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3725 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
3726 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
3727 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
3728 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
3729 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3731 ins
->type
= STACK_MP
;
3733 MONO_ADD_INS (cfg
->cbb
, ins
);
3740 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
3744 MonoMethod
*addr_method
;
3747 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
3750 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1]);
3752 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3753 /* emit_ldelema_2 depends on OP_LMUL */
3754 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
3755 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
3759 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
3760 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
3761 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
3767 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
3769 MonoInst
*ins
= NULL
;
3771 static MonoClass
*runtime_helpers_class
= NULL
;
3772 if (! runtime_helpers_class
)
3773 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
3774 "System.Runtime.CompilerServices", "RuntimeHelpers");
3776 if (cmethod
->klass
== mono_defaults
.string_class
) {
3777 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
3778 int dreg
= alloc_ireg (cfg
);
3779 int index_reg
= alloc_preg (cfg
);
3780 int mult_reg
= alloc_preg (cfg
);
3781 int add_reg
= alloc_preg (cfg
);
3783 #if SIZEOF_REGISTER == 8
3784 /* The array reg is 64 bits but the index reg is only 32 */
3785 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
3787 index_reg
= args
[1]->dreg
;
3789 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
3791 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3792 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
3793 add_reg
= ins
->dreg
;
3794 /* Avoid a warning */
3796 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3799 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
3800 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3801 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3802 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
3804 type_from_op (ins
, NULL
, NULL
);
3806 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3807 int dreg
= alloc_ireg (cfg
);
3808 /* Decompose later to allow more optimizations */
3809 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
3810 ins
->type
= STACK_I4
;
3811 cfg
->cbb
->has_array_access
= TRUE
;
3812 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
3815 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
3816 int mult_reg
= alloc_preg (cfg
);
3817 int add_reg
= alloc_preg (cfg
);
3819 /* The corlib functions check for oob already. */
3820 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
3821 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
3825 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
3827 if (strcmp (cmethod
->name
, "GetType") == 0) {
3828 int dreg
= alloc_preg (cfg
);
3829 int vt_reg
= alloc_preg (cfg
);
3830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3831 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
3832 type_from_op (ins
, NULL
, NULL
);
3835 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3836 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0) {
3837 int dreg
= alloc_ireg (cfg
);
3838 int t1
= alloc_ireg (cfg
);
3840 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
3841 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
3842 ins
->type
= STACK_I4
;
3846 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
3847 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
3848 MONO_ADD_INS (cfg
->cbb
, ins
);
3852 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
3853 if (cmethod
->name
[0] != 'g')
3856 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
3857 int dreg
= alloc_ireg (cfg
);
3858 int vtable_reg
= alloc_preg (cfg
);
3859 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
3860 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3861 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
3862 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3863 type_from_op (ins
, NULL
, NULL
);
3866 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3867 int dreg
= alloc_ireg (cfg
);
3869 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
3870 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
3871 type_from_op (ins
, NULL
, NULL
);
3876 } else if (cmethod
->klass
== runtime_helpers_class
) {
3878 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
3879 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
3883 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
3884 if (strcmp (cmethod
->name
, "get_CurrentThread") == 0 && (ins
= mono_arch_get_thread_intrinsic (cfg
))) {
3885 ins
->dreg
= alloc_preg (cfg
);
3886 ins
->type
= STACK_OBJ
;
3887 MONO_ADD_INS (cfg
->cbb
, ins
);
3889 } else if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
3890 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
3891 MONO_ADD_INS (cfg
->cbb
, ins
);
3893 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
3894 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
3895 MONO_ADD_INS (cfg
->cbb
, ins
);
3898 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
3899 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3900 if (strcmp (cmethod
->name
, "Enter") == 0) {
3903 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
3904 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3905 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3906 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3908 return (MonoInst
*)call
;
3909 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
3912 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
3913 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3914 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3915 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3917 return (MonoInst
*)call
;
3919 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3920 MonoMethod
*fast_method
= NULL
;
3922 /* Avoid infinite recursion */
3923 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
3924 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
3925 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
3928 if (strcmp (cmethod
->name
, "Enter") == 0 ||
3929 strcmp (cmethod
->name
, "Exit") == 0)
3930 fast_method
= mono_monitor_get_fast_path (cmethod
);
3934 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
3936 } else if (mini_class_is_system_array (cmethod
->klass
) &&
3937 strcmp (cmethod
->name
, "GetGenericValueImpl") == 0) {
3938 MonoInst
*addr
, *store
, *load
;
3939 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[1]);
3941 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1]);
3942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
3943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
3945 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
3946 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
3947 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
3950 #if SIZEOF_REGISTER == 8
3951 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
3952 /* 64 bit reads are already atomic */
3953 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
3954 ins
->dreg
= mono_alloc_preg (cfg
);
3955 ins
->inst_basereg
= args
[0]->dreg
;
3956 ins
->inst_offset
= 0;
3957 MONO_ADD_INS (cfg
->cbb
, ins
);
3961 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3962 if (strcmp (cmethod
->name
, "Increment") == 0) {
3963 MonoInst
*ins_iconst
;
3966 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3967 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3968 #if SIZEOF_REGISTER == 8
3969 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3970 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3973 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3974 ins_iconst
->inst_c0
= 1;
3975 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3976 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3978 MONO_INST_NEW (cfg
, ins
, opcode
);
3979 ins
->dreg
= mono_alloc_ireg (cfg
);
3980 ins
->inst_basereg
= args
[0]->dreg
;
3981 ins
->inst_offset
= 0;
3982 ins
->sreg2
= ins_iconst
->dreg
;
3983 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3984 MONO_ADD_INS (cfg
->cbb
, ins
);
3986 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
3987 MonoInst
*ins_iconst
;
3990 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3991 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3992 #if SIZEOF_REGISTER == 8
3993 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3994 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3997 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3998 ins_iconst
->inst_c0
= -1;
3999 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
4000 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
4002 MONO_INST_NEW (cfg
, ins
, opcode
);
4003 ins
->dreg
= mono_alloc_ireg (cfg
);
4004 ins
->inst_basereg
= args
[0]->dreg
;
4005 ins
->inst_offset
= 0;
4006 ins
->sreg2
= ins_iconst
->dreg
;
4007 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4008 MONO_ADD_INS (cfg
->cbb
, ins
);
4010 } else if (strcmp (cmethod
->name
, "Add") == 0) {
4013 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4014 opcode
= OP_ATOMIC_ADD_NEW_I4
;
4015 #if SIZEOF_REGISTER == 8
4016 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
4017 opcode
= OP_ATOMIC_ADD_NEW_I8
;
4021 MONO_INST_NEW (cfg
, ins
, opcode
);
4022 ins
->dreg
= mono_alloc_ireg (cfg
);
4023 ins
->inst_basereg
= args
[0]->dreg
;
4024 ins
->inst_offset
= 0;
4025 ins
->sreg2
= args
[1]->dreg
;
4026 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
4027 MONO_ADD_INS (cfg
->cbb
, ins
);
4030 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4032 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4033 if (strcmp (cmethod
->name
, "Exchange") == 0) {
4035 gboolean is_ref
= fsig
->params
[0]->type
== MONO_TYPE_OBJECT
;
4037 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
4038 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4039 #if SIZEOF_REGISTER == 8
4040 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
4041 (fsig
->params
[0]->type
== MONO_TYPE_I
))
4042 opcode
= OP_ATOMIC_EXCHANGE_I8
;
4044 else if (is_ref
|| (fsig
->params
[0]->type
== MONO_TYPE_I
))
4045 opcode
= OP_ATOMIC_EXCHANGE_I4
;
4050 MONO_INST_NEW (cfg
, ins
, opcode
);
4051 ins
->dreg
= mono_alloc_ireg (cfg
);
4052 ins
->inst_basereg
= args
[0]->dreg
;
4053 ins
->inst_offset
= 0;
4054 ins
->sreg2
= args
[1]->dreg
;
4055 MONO_ADD_INS (cfg
->cbb
, ins
);
4057 switch (fsig
->params
[0]->type
) {
4059 ins
->type
= STACK_I4
;
4063 ins
->type
= STACK_I8
;
4065 case MONO_TYPE_OBJECT
:
4066 ins
->type
= STACK_OBJ
;
4069 g_assert_not_reached ();
4072 #if HAVE_WRITE_BARRIERS
4074 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
4075 mono_emit_method_call (cfg
, write_barrier
, &args
[0], NULL
);
4079 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4081 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4082 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
4084 gboolean is_ref
= MONO_TYPE_IS_REFERENCE (fsig
->params
[1]);
4085 if (fsig
->params
[1]->type
== MONO_TYPE_I4
)
4087 else if (is_ref
|| fsig
->params
[1]->type
== MONO_TYPE_I
)
4088 size
= sizeof (gpointer
);
4089 else if (sizeof (gpointer
) == 8 && fsig
->params
[1]->type
== MONO_TYPE_I4
)
4092 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I4
);
4093 ins
->dreg
= alloc_ireg (cfg
);
4094 ins
->sreg1
= args
[0]->dreg
;
4095 ins
->sreg2
= args
[1]->dreg
;
4096 ins
->sreg3
= args
[2]->dreg
;
4097 ins
->type
= STACK_I4
;
4098 MONO_ADD_INS (cfg
->cbb
, ins
);
4099 } else if (size
== 8) {
4100 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I8
);
4101 ins
->dreg
= alloc_ireg (cfg
);
4102 ins
->sreg1
= args
[0]->dreg
;
4103 ins
->sreg2
= args
[1]->dreg
;
4104 ins
->sreg3
= args
[2]->dreg
;
4105 ins
->type
= STACK_I8
;
4106 MONO_ADD_INS (cfg
->cbb
, ins
);
4108 /* g_assert_not_reached (); */
4110 #if HAVE_WRITE_BARRIERS
4112 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
4113 mono_emit_method_call (cfg
, write_barrier
, &args
[0], NULL
);
4117 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4121 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
4122 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
4123 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
4124 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
4125 MONO_ADD_INS (cfg
->cbb
, ins
);
4128 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
4129 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
4130 #ifdef PLATFORM_WIN32
4131 EMIT_NEW_ICONST (cfg
, ins
, 1);
4133 EMIT_NEW_ICONST (cfg
, ins
, 0);
4137 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
4139 * There is general branches code for Min/Max, but it does not work for
4141 * http://everything2.com/?node_id=1051618
4145 #ifdef MONO_ARCH_SIMD_INTRINSICS
4146 if (cfg
->opt
& MONO_OPT_SIMD
) {
4147 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4153 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
4157 * This entry point could be used later for arbitrary method
4160 inline static MonoInst
*
4161 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
4162 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
4164 if (method
->klass
== mono_defaults
.string_class
) {
4165 /* managed string allocation support */
4166 if (strcmp (method
->name
, "InternalAllocateStr") == 0) {
4167 MonoInst
*iargs
[2];
4168 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4169 MonoMethod
*managed_alloc
= NULL
;
4171 g_assert (vtable
); /*Should not fail since it System.String*/
4172 #ifndef MONO_CROSS_COMPILE
4173 managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
4177 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4178 iargs
[1] = args
[0];
4179 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
4186 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
4188 MonoInst
*store
, *temp
;
4191 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4192 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
4195 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4196 * would be different than the MonoInst's used to represent arguments, and
4197 * the ldelema implementation can't deal with that.
4198 * Solution: When ldelema is used on an inline argument, create a var for
4199 * it, emit ldelema on that var, and emit the saving code below in
4200 * inline_method () if needed.
4202 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
4203 cfg
->args
[i
] = temp
;
4204 /* This uses cfg->args [i] which is set by the preceeding line */
4205 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
4206 store
->cil_code
= sp
[0]->cil_code
;
4211 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4212 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env
 * var. With no limit set, every callee is allowed. The env value is cached
 * in a function-static on first use.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
		else
			limit = (char *) "";
	}

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining when the caller's full name starts
 * with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env
 * var. With no limit set, every caller is allowed. The env value is cached
 * in a function-static on first use.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4275 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4276 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_allways
)
4278 MonoInst
*ins
, *rvar
= NULL
;
4279 MonoMethodHeader
*cheader
;
4280 MonoBasicBlock
*ebblock
, *sbblock
;
4282 MonoMethod
*prev_inlined_method
;
4283 MonoInst
**prev_locals
, **prev_args
;
4284 MonoType
**prev_arg_types
;
4285 guint prev_real_offset
;
4286 GHashTable
*prev_cbb_hash
;
4287 MonoBasicBlock
**prev_cil_offset_to_bb
;
4288 MonoBasicBlock
*prev_cbb
;
4289 unsigned char* prev_cil_start
;
4290 guint32 prev_cil_offset_to_bb_len
;
4291 MonoMethod
*prev_current_method
;
4292 MonoGenericContext
*prev_generic_context
;
4293 gboolean ret_var_set
, prev_ret_var_set
;
4295 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4297 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4298 if ((! inline_allways
) && ! check_inline_called_method_name_limit (cmethod
))
4301 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4302 if ((! inline_allways
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4306 if (cfg
->verbose_level
> 2)
4307 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4309 if (!cmethod
->inline_info
) {
4310 mono_jit_stats
.inlineable_methods
++;
4311 cmethod
->inline_info
= 1;
4313 /* allocate space to store the return value */
4314 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4315 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4318 /* allocate local variables */
4319 cheader
= mono_method_get_header (cmethod
);
4320 prev_locals
= cfg
->locals
;
4321 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4322 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4323 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4325 /* allocate start and end blocks */
4326 /* This is needed so if the inline is aborted, we can clean up */
4327 NEW_BBLOCK (cfg
, sbblock
);
4328 sbblock
->real_offset
= real_offset
;
4330 NEW_BBLOCK (cfg
, ebblock
);
4331 ebblock
->block_num
= cfg
->num_bblocks
++;
4332 ebblock
->real_offset
= real_offset
;
4334 prev_args
= cfg
->args
;
4335 prev_arg_types
= cfg
->arg_types
;
4336 prev_inlined_method
= cfg
->inlined_method
;
4337 cfg
->inlined_method
= cmethod
;
4338 cfg
->ret_var_set
= FALSE
;
4339 cfg
->inline_depth
++;
4340 prev_real_offset
= cfg
->real_offset
;
4341 prev_cbb_hash
= cfg
->cbb_hash
;
4342 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4343 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4344 prev_cil_start
= cfg
->cil_start
;
4345 prev_cbb
= cfg
->cbb
;
4346 prev_current_method
= cfg
->current_method
;
4347 prev_generic_context
= cfg
->generic_context
;
4348 prev_ret_var_set
= cfg
->ret_var_set
;
4350 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, *ip
== CEE_CALLVIRT
);
4352 ret_var_set
= cfg
->ret_var_set
;
4354 cfg
->inlined_method
= prev_inlined_method
;
4355 cfg
->real_offset
= prev_real_offset
;
4356 cfg
->cbb_hash
= prev_cbb_hash
;
4357 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4358 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4359 cfg
->cil_start
= prev_cil_start
;
4360 cfg
->locals
= prev_locals
;
4361 cfg
->args
= prev_args
;
4362 cfg
->arg_types
= prev_arg_types
;
4363 cfg
->current_method
= prev_current_method
;
4364 cfg
->generic_context
= prev_generic_context
;
4365 cfg
->ret_var_set
= prev_ret_var_set
;
4366 cfg
->inline_depth
--;
4368 if ((costs
>= 0 && costs
< 60) || inline_allways
) {
4369 if (cfg
->verbose_level
> 2)
4370 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4372 mono_jit_stats
.inlined_methods
++;
4374 /* always add some code to avoid block split failures */
4375 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4376 MONO_ADD_INS (prev_cbb
, ins
);
4378 prev_cbb
->next_bb
= sbblock
;
4379 link_bblock (cfg
, prev_cbb
, sbblock
);
4382 * Get rid of the begin and end bblocks if possible to aid local
4385 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4387 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4388 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
4390 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4391 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4392 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4394 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4395 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4396 cfg
->cbb
= prev_cbb
;
4404 * If the inlined method contains only a throw, then the ret var is not
4405 * set, so set it to a dummy value.
4408 static double r8_0
= 0.0;
4410 switch (rvar
->type
) {
4412 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4415 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4420 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4423 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4424 ins
->type
= STACK_R8
;
4425 ins
->inst_p0
= (void*)&r8_0
;
4426 ins
->dreg
= rvar
->dreg
;
4427 MONO_ADD_INS (cfg
->cbb
, ins
);
4430 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (fsig
->ret
));
4433 g_assert_not_reached ();
4437 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
4442 if (cfg
->verbose_level
> 2)
4443 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
4444 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4445 mono_loader_clear_error ();
4447 /* This gets rid of the newly added bblocks */
4448 cfg
->cbb
= prev_cbb
;
4454 * Some of these comments may well be out-of-date.
4455 * Design decisions: we do a single pass over the IL code (and we do bblock
4456 * splitting/merging in the few cases when it's required: a back jump to an IL
4457 * address that was not already seen as bblock starting point).
4458 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4459 * Complex operations are decomposed in simpler ones right away. We need to let the
4460 * arch-specific code peek and poke inside this process somehow (except when the
4461 * optimizations can take advantage of the full semantic info of coarse opcodes).
4462 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4463 * MonoInst->opcode initially is the IL opcode or some simplification of that
4464 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4465 * opcode with value bigger than OP_LAST.
4466 * At this point the IR can be handed over to an interpreter, a dumb code generator
4467 * or to the optimizing code generator that will translate it to SSA form.
4469 * Profiling directed optimizations.
4470 * We may compile by default with few or no optimizations and instrument the code
4471 * or the user may indicate what methods to optimize the most either in a config file
4472 * or through repeated runs where the compiler applies offline the optimizations to
4473 * each method and then decides if it was worth it.
/*
 * IL-validation helper macros used throughout mono_method_to_ir ().
 * Each bails out via UNVERIFIED (or, for CHECK_TYPELOAD, via load_error)
 * when the IL being compiled is malformed, so the JIT never acts on
 * unvalidated input. They rely on locals of the enclosing function
 * (sp, stack_start, header, num_args, ip, end, cfg).
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4489 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
4491 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
4493 return b
== NULL
|| b
== bb
;
/*
 * get_basic_blocks:
 *
 *   Single forward pass over the IL in [START, END) that creates a
 * MonoBasicBlock (via GET_BBLOCK) at every branch target and at every
 * fall-through point after a branch, so the main IL->IR loop knows where
 * blocks begin. Also marks blocks ending in a throw as out-of-line.
 * Returns 0 on success; on malformed IL stores the failing address in
 * *POS and returns non-zero (via the UNVERIFIED machinery).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance IP by the operand size of each opcode class */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the end of the whole switch instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				/* Throwing blocks are cold: move them out of line */
				bblock->out_of_line = 1;
		}
	}
	*pos = ip;
	return 0;

unverified:
	*pos = ip;
	return 1;
}
4589 static inline MonoMethod
*
4590 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4594 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
)
4595 return mono_method_get_wrapper_data (m
, token
);
4597 method
= mono_get_method_full (m
->klass
->image
, token
, klass
, context
);
4602 static inline MonoMethod
*
4603 mini_get_method (MonoCompile
*cfg
, MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
4605 MonoMethod
*method
= mini_get_method_allow_open (m
, token
, klass
, context
);
4607 if (method
&& cfg
&& !cfg
->generic_sharing_context
&& mono_class_is_open_constructed_type (&method
->klass
->byval_arg
))
4613 static inline MonoClass
*
4614 mini_get_class (MonoMethod
*method
, guint32 token
, MonoGenericContext
*context
)
4618 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
4619 klass
= mono_method_get_wrapper_data (method
, token
);
4621 klass
= mono_class_get_full (method
->klass
->image
, token
, context
);
4623 mono_class_init (klass
);
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* When CALLER is being inlined (it differs from the method actually being
	 * compiled), a declarative-security callee cannot be checked here: abort
	 * the inline instead. */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		/* NOTE(review): the constant 4 is the error/action code passed to the
		 * managed LinkDemandSecurityException helper — confirm against the
		 * managed-side enum */
		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
	}

	return FALSE;
}
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager::ThrowException (exc) method, looked
 * up lazily and cached in a function-local static.
 * NOTE(review): the unsynchronized cache assumes callers serialize JIT
 * compilation (e.g. hold the loader/JIT lock) — confirm.
 */
static MonoMethod*
throw_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
	}
	g_assert (method);
	return method;
}
4676 emit_throw_exception (MonoCompile
*cfg
, MonoException
*ex
)
4678 MonoMethod
*thrower
= throw_exception ();
4681 EMIT_NEW_PCONST (cfg
, args
[0], ex
);
4682 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
4686 * Return the original method is a wrapper is specified. We can only access
4687 * the custom attributes from the original method.
4690 get_original_method (MonoMethod
*method
)
4692 if (method
->wrapper_type
== MONO_WRAPPER_NONE
)
4695 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4696 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
)
4699 /* in other cases we need to find the original method */
4700 return mono_marshal_method_from_wrapper (method
);
4704 ensure_method_is_allowed_to_access_field (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
,
4705 MonoBasicBlock
*bblock
, unsigned char *ip
)
4707 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4708 if (mono_security_core_clr_class_level (mono_field_get_parent (field
)) != MONO_SECURITY_CORE_CLR_CRITICAL
)
4711 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4712 caller
= get_original_method (caller
);
4716 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4717 if (mono_security_core_clr_method_level (caller
, TRUE
) == MONO_SECURITY_CORE_CLR_TRANSPARENT
)
4718 emit_throw_exception (cfg
, mono_get_exception_field_access ());
4722 ensure_method_is_allowed_to_call_method (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
,
4723 MonoBasicBlock
*bblock
, unsigned char *ip
)
4725 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4726 if (mono_security_core_clr_method_level (callee
, TRUE
) != MONO_SECURITY_CORE_CLR_CRITICAL
)
4729 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4730 caller
= get_original_method (caller
);
4734 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4735 if (mono_security_core_clr_method_level (caller
, TRUE
) == MONO_SECURITY_CORE_CLR_TRANSPARENT
)
4736 emit_throw_exception (cfg
, mono_get_exception_method_access ());
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
static gpointer
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* Pattern match: dup; ldtoken <field> (table 0x4); call InitializeArray */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* Only RuntimeHelpers.InitializeArray from corlib qualifies */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;

		/* Element size by underlying type; unsupported types punt to NULL */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The RVA blob must be at least as large as the array contents */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return (gpointer)data_ptr;
	}
	return NULL;
}
4824 set_exception_type_from_invalid_il (MonoCompile
*cfg
, MonoMethod
*method
, unsigned char *ip
)
4826 char *method_fname
= mono_method_full_name (method
, TRUE
);
4829 if (mono_method_get_header (method
)->code_size
== 0)
4830 method_code
= g_strdup ("method body is empty.");
4832 method_code
= mono_disasm_code_one (NULL
, method
, ip
, NULL
);
4833 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
4834 cfg
->exception_message
= g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname
, method_code
);
4835 g_free (method_fname
);
4836 g_free (method_code
);
/*
 * set_exception_object:
 *
 *   Abort compilation with a ready-made managed exception object. The
 * exception_ptr slot is registered as a GC root before the object is
 * stored, so the collector keeps it alive until the JIT reports it.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
4848 generic_class_is_reference_type (MonoCompile
*cfg
, MonoClass
*klass
)
4852 if (cfg
->generic_sharing_context
)
4853 type
= mini_get_basic_type_from_generic (cfg
->generic_sharing_context
, &klass
->byval_arg
);
4855 type
= &klass
->byval_arg
;
4856 return MONO_TYPE_IS_REFERENCE (type
);
/**
 * mono_decompose_array_access_opts:
 *
 *  Decompose array access opcodes.
 * This should be in decompose.c, but it emits calls so it has to stay here until
 * the old JIT is gone.
 */
void
mono_decompose_array_access_opts (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
	 * can be executed anytime. It should be run before decompose_long
	 */

	/**
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		MonoInst *dest;
		MonoInst *iargs [3];
		gboolean restart;

		/* Only visit blocks known to contain array access opcodes */
		if (!bb->has_array_access)
			continue;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				switch (ins->opcode) {
				case OP_LDLEN:
					/* Array length is an i4 load from MonoArray.max_length */
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
									  G_STRUCT_OFFSET (MonoArray, max_length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				case OP_BOUNDS_CHECK:
					MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
					break;
				case OP_NEWARR:
					if (cfg->opt & MONO_OPT_SHARED) {
						/* Shared code can't bake in a vtable: call the generic icall */
						EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
						EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
						MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
						iargs [2]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
						dest->dreg = ins->dreg;
					} else {
						MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));

						g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
						NEW_VTABLECONST (cfg, iargs [0], vtable);
						MONO_ADD_INS (cfg->cbb, iargs [0]);
						MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
						iargs [1]->dreg = ins->sreg1;

						dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
						dest->dreg = ins->dreg;
					}
					break;
				case OP_STRLEN:
					/* String length is an i4 load from MonoString.length */
					NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
									  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
					MONO_ADD_INS (cfg->cbb, dest);
					break;
				default:
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
	}
}
4967 #ifdef MONO_ARCH_SOFT_FLOAT
/**
 * mono_decompose_soft_float:
 *
 *  Soft float support on ARM. We store each double value in a pair of integer vregs,
 * similar to long support on 32 bit platforms. 32 bit float values require special
 * handling when used as locals, arguments, and in calls.
 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 */
void
mono_decompose_soft_float (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
	 */

	/**
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		gboolean restart;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				const char *spec = INS_INFO (ins->opcode);

				/* Most fp operations are handled automatically by opcode emulation */

				switch (ins->opcode) {
				case OP_R8CONST: {
					/* Reinterpret the double bits as an i8 constant (DVal union) */
					DVal d;
					d.vald = *(double*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_R4CONST: {
					DVal d;
					/* We load the r8 value */
					d.vald = *(float*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_FMOVE:
					/* fp values live in an integer vreg pair: move as a long */
					ins->opcode = OP_LMOVE;
					break;
				case OP_FGETLOW32:
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 1;
					break;
				case OP_FGETHIGH32:
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 2;
					break;
				case OP_SETFRET: {
					int reg = ins->sreg1;

					ins->opcode = OP_SETLRET;
					ins->dreg = -1;
					ins->sreg1 = reg + 1;
					ins->sreg2 = reg + 2;
					break;
				}
				case OP_LOADR8_MEMBASE:
					ins->opcode = OP_LOADI8_MEMBASE;
					break;
				case OP_STORER8_MEMBASE_REG:
					ins->opcode = OP_STOREI8_MEMBASE_REG;
					break;
				case OP_STORER4_MEMBASE_REG: {
					MonoInst *iargs [2];
					int addr_reg;

					/* Storing an r4 requires an r8->r4 conversion: go through an icall */
					/* Arg 1 is the double value */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					/* Arg 2 is the address to store to */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
					mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
					restart = TRUE;
					break;
				}
				case OP_LOADR4_MEMBASE: {
					MonoInst *iargs [1];
					MonoInst *conv;
					int addr_reg;

					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					break;
				}
				case OP_FCALL:
				case OP_FCALL_REG:
				case OP_FCALL_MEMBASE: {
					MonoCallInst *call = (MonoCallInst*)ins;
					if (call->signature->ret->type == MONO_TYPE_R4) {
						MonoCallInst *call2;
						MonoInst *iargs [1];
						MonoInst *conv;

						/* Convert the call into a call returning an int */
						MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
						memcpy (call2, call, sizeof (MonoCallInst));
						switch (ins->opcode) {
						case OP_FCALL:
							call2->inst.opcode = OP_CALL;
							break;
						case OP_FCALL_REG:
							call2->inst.opcode = OP_CALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							call2->inst.opcode = OP_CALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
						call2->inst.dreg = mono_alloc_ireg (cfg);
						MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);

						/* FIXME: Optimize this */

						/* Emit an r4->r8 conversion */
						EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
						conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
						conv->dreg = ins->dreg;
					} else {
						/* r8 returns are handled like long returns */
						switch (ins->opcode) {
						case OP_FCALL:
							ins->opcode = OP_LCALL;
							break;
						case OP_FCALL_REG:
							ins->opcode = OP_LCALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							ins->opcode = OP_LCALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
					}
					break;
				}
				case OP_FCOMPARE: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call, *cmp, *br;

					/* Convert fcompare+fbcc to icall+icompare+beq */

					info = mono_find_jit_opcode_emulation (ins->next->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 0;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_INST_NEW (cfg, br, OP_IBNE_UN);
					br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
					br->inst_true_bb = ins->next->inst_true_bb;
					br->inst_false_bb = ins->next->inst_false_bb;
					MONO_ADD_INS (cfg->cbb, br);

					/* The call sequence might include fp ins */
					restart = TRUE;

					/* Skip fbcc or fccc */
					NULLIFY_INS (ins->next);
					break;
				}
				case OP_FCEQ:
				case OP_FCLT:
				case OP_FCLT_UN:
				case OP_FCGT:
				case OP_FCGT_UN: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call;

					/* Convert fccc to icall+icompare+iceq */

					info = mono_find_jit_opcode_emulation (ins->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
					MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);

					/* The call sequence might include fp ins */
					restart = TRUE;
					break;
				}
				case OP_CKFINITE: {
					MonoInst *iargs [2];
					MonoInst *call, *cmp;

					/* Convert to icall+icompare+cond_exc+move */

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 1;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");

					/* Do the assignment if the value is finite */
					MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);

					restart = TRUE;
					break;
				}
				default:
					/* No fp-typed instruction may survive this pass */
					if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
						mono_print_ins (ins);
						g_assert_not_reached ();
					}
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	}

	mono_decompose_long_opts (cfg);
}
/*
 * emit_stloc_ir:
 *
 *   Emit IR storing the value on top of the stack (*SP) into local N.
 * When the value was produced by the instruction just emitted and it is a
 * plain constant, retarget that instruction's dreg instead of emitting a
 * separate move.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	/* Only safe when sp [0] really is the last emitted instruction */
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* Decode the ldloca(.s) operand: SIZE == 1 is the short form */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* ldloca immediately followed (in the same bblock) by initobj:
	 * initialize the local directly and consume both instructions. */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		/* Return the IP past the consumed initobj so the caller skips it */
		if (!skip)
			return ip + 6;
	}
 load_error:
	/* NULL: the pattern did not match (or the class failed to load);
	 * the caller emits a normal ldloca. */
	return NULL;
}
5310 is_exception_class (MonoClass
*class)
5313 if (class == mono_defaults
.exception_class
)
5315 class = class->parent
;
5321 * mono_method_to_ir:
5323 * Translate the .net IL into linear IR.
5326 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5327 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5328 guint inline_offset
, gboolean is_virtual_call
)
5330 MonoInst
*ins
, **sp
, **stack_start
;
5331 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5332 MonoMethod
*cmethod
, *method_definition
;
5333 MonoInst
**arg_array
;
5334 MonoMethodHeader
*header
;
5336 guint32 token
, ins_flag
;
5338 MonoClass
*constrained_call
= NULL
;
5339 unsigned char *ip
, *end
, *target
, *err_pos
;
5340 static double r8_0
= 0.0;
5341 MonoMethodSignature
*sig
;
5342 MonoGenericContext
*generic_context
= NULL
;
5343 MonoGenericContainer
*generic_container
= NULL
;
5344 MonoType
**param_types
;
5345 int i
, n
, start_new_bblock
, dreg
;
5346 int num_calls
= 0, inline_costs
= 0;
5347 int breakpoint_id
= 0;
5349 MonoBoolean security
, pinvoke
;
5350 MonoSecurityManager
* secman
= NULL
;
5351 MonoDeclSecurityActions actions
;
5352 GSList
*class_inits
= NULL
;
5353 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5355 gboolean init_locals
, seq_points
;
5357 /* serialization and xdomain stuff may need access to private fields and methods */
5358 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5359 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5360 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5361 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5362 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5363 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5365 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5367 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5368 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5369 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5370 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5372 image
= method
->klass
->image
;
5373 header
= mono_method_get_header (method
);
5374 generic_container
= mono_method_get_generic_container (method
);
5375 sig
= mono_method_signature (method
);
5376 num_args
= sig
->hasthis
+ sig
->param_count
;
5377 ip
= (unsigned char*)header
->code
;
5378 cfg
->cil_start
= ip
;
5379 end
= ip
+ header
->code_size
;
5380 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5381 init_locals
= header
->init_locals
;
5383 seq_points
= cfg
->gen_seq_points
&& cfg
->method
== method
;
5386 * Methods without init_locals set could cause asserts in various passes
5391 method_definition
= method
;
5392 while (method_definition
->is_inflated
) {
5393 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5394 method_definition
= imethod
->declaring
;
5397 /* SkipVerification is not allowed if core-clr is enabled */
5398 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5400 dont_verify_stloc
= TRUE
;
5403 if (!dont_verify
&& mini_method_verify (cfg
, method_definition
))
5404 goto exception_exit
;
5406 if (mono_debug_using_mono_debugger ())
5407 cfg
->keep_cil_nops
= TRUE
;
5409 if (sig
->is_inflated
)
5410 generic_context
= mono_method_get_context (method
);
5411 else if (generic_container
)
5412 generic_context
= &generic_container
->context
;
5413 cfg
->generic_context
= generic_context
;
5415 if (!cfg
->generic_sharing_context
)
5416 g_assert (!sig
->has_type_parameters
);
5418 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5419 g_assert (method
->is_inflated
);
5420 g_assert (mono_method_get_context (method
)->method_inst
);
5422 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5423 g_assert (sig
->generic_param_count
);
5425 if (cfg
->method
== method
) {
5426 cfg
->real_offset
= 0;
5428 cfg
->real_offset
= inline_offset
;
5431 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5432 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5434 cfg
->current_method
= method
;
5436 if (cfg
->verbose_level
> 2)
5437 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5439 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5441 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5442 for (n
= 0; n
< sig
->param_count
; ++n
)
5443 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5444 cfg
->arg_types
= param_types
;
5446 dont_inline
= g_list_prepend (dont_inline
, method
);
5447 if (cfg
->method
== method
) {
5449 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5450 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5453 NEW_BBLOCK (cfg
, start_bblock
);
5454 cfg
->bb_entry
= start_bblock
;
5455 start_bblock
->cil_code
= NULL
;
5456 start_bblock
->cil_length
= 0;
5459 NEW_BBLOCK (cfg
, end_bblock
);
5460 cfg
->bb_exit
= end_bblock
;
5461 end_bblock
->cil_code
= NULL
;
5462 end_bblock
->cil_length
= 0;
5463 g_assert (cfg
->num_bblocks
== 2);
5465 arg_array
= cfg
->args
;
5467 if (header
->num_clauses
) {
5468 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5469 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5471 /* handle exception clauses */
5472 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5473 MonoBasicBlock
*try_bb
;
5474 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5475 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5476 try_bb
->real_offset
= clause
->try_offset
;
5477 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5478 tblock
->real_offset
= clause
->handler_offset
;
5479 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5481 link_bblock (cfg
, try_bb
, tblock
);
5483 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5484 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5486 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5487 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5488 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5489 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5490 MONO_ADD_INS (tblock
, ins
);
5492 /* todo: is a fault block unsafe to optimize? */
5493 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5494 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5498 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5500 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5502 /* catch and filter blocks get the exception object on the stack */
5503 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5504 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5505 MonoInst
*dummy_use
;
5507 /* mostly like handle_stack_args (), but just sets the input args */
5508 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5509 tblock
->in_scount
= 1;
5510 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5511 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5514 * Add a dummy use for the exvar so its liveness info will be
5518 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5520 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5521 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5522 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5523 tblock
->real_offset
= clause
->data
.filter_offset
;
5524 tblock
->in_scount
= 1;
5525 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5526 /* The filter block shares the exvar with the handler block */
5527 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5528 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5529 MONO_ADD_INS (tblock
, ins
);
5533 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5534 clause
->data
.catch_class
&&
5535 cfg
->generic_sharing_context
&&
5536 mono_class_check_context_used (clause
->data
.catch_class
)) {
5538 * In shared generic code with catch
5539 * clauses containing type variables
5540 * the exception handling code has to
5541 * be able to get to the rgctx.
5542 * Therefore we have to make sure that
5543 * the vtable/mrgctx argument (for
5544 * static or generic methods) or the
5545 * "this" argument (for non-static
5546 * methods) are live.
5548 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5549 mini_method_get_context (method
)->method_inst
||
5550 method
->klass
->valuetype
) {
5551 mono_get_vtable_var (cfg
);
5553 MonoInst
*dummy_use
;
5555 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5560 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5561 cfg
->cbb
= start_bblock
;
5562 cfg
->args
= arg_array
;
5563 mono_save_args (cfg
, sig
, inline_args
);
5566 /* FIRST CODE BLOCK */
5567 NEW_BBLOCK (cfg
, bblock
);
5568 bblock
->cil_code
= ip
;
5572 ADD_BBLOCK (cfg
, bblock
);
5574 if (cfg
->method
== method
) {
5575 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5576 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5577 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5578 MONO_ADD_INS (bblock
, ins
);
5582 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5583 secman
= mono_security_manager_get_methods ();
5585 security
= (secman
&& mono_method_has_declsec (method
));
5586 /* at this point having security doesn't mean we have any code to generate */
5587 if (security
&& (cfg
->method
== method
)) {
5588 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5589 * And we do not want to enter the next section (with allocation) if we
5590 * have nothing to generate */
5591 security
= mono_declsec_get_demands (method
, &actions
);
5594 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5595 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5597 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5598 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5599 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5601 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5602 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5606 mono_custom_attrs_free (custom
);
5609 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5610 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5614 mono_custom_attrs_free (custom
);
5617 /* not a P/Invoke after all */
5622 if ((init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5623 /* we use a separate basic block for the initialization code */
5624 NEW_BBLOCK (cfg
, init_localsbb
);
5625 cfg
->bb_init
= init_localsbb
;
5626 init_localsbb
->real_offset
= cfg
->real_offset
;
5627 start_bblock
->next_bb
= init_localsbb
;
5628 init_localsbb
->next_bb
= bblock
;
5629 link_bblock (cfg
, start_bblock
, init_localsbb
);
5630 link_bblock (cfg
, init_localsbb
, bblock
);
5632 cfg
->cbb
= init_localsbb
;
5634 start_bblock
->next_bb
= bblock
;
5635 link_bblock (cfg
, start_bblock
, bblock
);
5638 /* at this point we know, if security is TRUE, that some code needs to be generated */
5639 if (security
&& (cfg
->method
== method
)) {
5642 mono_jit_stats
.cas_demand_generation
++;
5644 if (actions
.demand
.blob
) {
5645 /* Add code for SecurityAction.Demand */
5646 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5647 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5648 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5649 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5651 if (actions
.noncasdemand
.blob
) {
5652 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5653 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5654 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5655 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5656 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5657 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5659 if (actions
.demandchoice
.blob
) {
5660 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5661 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5662 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5663 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5664 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
5668 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5670 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
5673 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
5674 /* check if this is native code, e.g. an icall or a p/invoke */
5675 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
5676 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5678 gboolean pinvk
= (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
);
5679 gboolean icall
= (wrapped
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
);
5681 /* if this ia a native call then it can only be JITted from platform code */
5682 if ((icall
|| pinvk
) && method
->klass
&& method
->klass
->image
) {
5683 if (!mono_security_core_clr_is_platform_image (method
->klass
->image
)) {
5684 MonoException
*ex
= icall
? mono_get_exception_security () :
5685 mono_get_exception_method_access ();
5686 emit_throw_exception (cfg
, ex
);
5693 if (header
->code_size
== 0)
5696 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
5701 if (cfg
->method
== method
)
5702 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
5704 for (n
= 0; n
< header
->num_locals
; ++n
) {
5705 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
5710 /* We force the vtable variable here for all shared methods
5711 for the possibility that they might show up in a stack
5712 trace where their exact instantiation is needed. */
5713 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
5714 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5715 mini_method_get_context (method
)->method_inst
||
5716 method
->klass
->valuetype
) {
5717 mono_get_vtable_var (cfg
);
5719 /* FIXME: Is there a better way to do this?
5720 We need the variable live for the duration
5721 of the whole method. */
5722 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
5726 /* add a check for this != NULL to inlined methods */
5727 if (is_virtual_call
) {
5730 NEW_ARGLOAD (cfg
, arg_ins
, 0);
5731 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
5732 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
5733 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, arg_ins
->dreg
);
5734 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, arg_ins
->dreg
);
5737 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5738 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
5741 start_new_bblock
= 0;
5745 if (cfg
->method
== method
)
5746 cfg
->real_offset
= ip
- header
->code
;
5748 cfg
->real_offset
= inline_offset
;
5753 if (start_new_bblock
) {
5754 bblock
->cil_length
= ip
- bblock
->cil_code
;
5755 if (start_new_bblock
== 2) {
5756 g_assert (ip
== tblock
->cil_code
);
5758 GET_BBLOCK (cfg
, tblock
, ip
);
5760 bblock
->next_bb
= tblock
;
5763 start_new_bblock
= 0;
5764 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5765 if (cfg
->verbose_level
> 3)
5766 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5767 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5771 g_slist_free (class_inits
);
5774 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
5775 link_bblock (cfg
, bblock
, tblock
);
5776 if (sp
!= stack_start
) {
5777 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
5779 CHECK_UNVERIFIABLE (cfg
);
5781 bblock
->next_bb
= tblock
;
5784 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5785 if (cfg
->verbose_level
> 3)
5786 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5787 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5790 g_slist_free (class_inits
);
5796 * Sequence points are points where the debugger can place a breakpoint.
5797 * Currently, we generate these automatically at points where the IL
5800 if (seq_points
&& sp
== stack_start
) {
5801 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
5802 MONO_ADD_INS (cfg
->cbb
, ins
);
5805 bblock
->real_offset
= cfg
->real_offset
;
5807 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
5808 guint32 cil_offset
= ip
- header
->code
;
5809 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
5811 /* TODO: Use an increment here */
5812 #if defined(TARGET_X86)
5813 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
5814 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
5816 MONO_ADD_INS (cfg
->cbb
, ins
);
5818 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
5819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
5823 if (cfg
->verbose_level
> 3)
5824 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
5828 if (cfg
->keep_cil_nops
)
5829 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
5831 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5833 MONO_ADD_INS (bblock
, ins
);
5836 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5838 MONO_ADD_INS (bblock
, ins
);
5844 CHECK_STACK_OVF (1);
5845 n
= (*ip
)-CEE_LDARG_0
;
5847 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5855 CHECK_STACK_OVF (1);
5856 n
= (*ip
)-CEE_LDLOC_0
;
5858 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5867 n
= (*ip
)-CEE_STLOC_0
;
5870 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
5872 emit_stloc_ir (cfg
, sp
, header
, n
);
5879 CHECK_STACK_OVF (1);
5882 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5888 CHECK_STACK_OVF (1);
5891 NEW_ARGLOADA (cfg
, ins
, n
);
5892 MONO_ADD_INS (cfg
->cbb
, ins
);
5902 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
5904 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5909 CHECK_STACK_OVF (1);
5912 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5916 case CEE_LDLOCA_S
: {
5917 unsigned char *tmp_ip
;
5919 CHECK_STACK_OVF (1);
5920 CHECK_LOCAL (ip
[1]);
5922 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
5928 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
5937 CHECK_LOCAL (ip
[1]);
5938 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
5940 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
5945 CHECK_STACK_OVF (1);
5946 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
5947 ins
->type
= STACK_OBJ
;
5952 CHECK_STACK_OVF (1);
5953 EMIT_NEW_ICONST (cfg
, ins
, -1);
5966 CHECK_STACK_OVF (1);
5967 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
5973 CHECK_STACK_OVF (1);
5975 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
5981 CHECK_STACK_OVF (1);
5982 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
5988 CHECK_STACK_OVF (1);
5989 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
5990 ins
->type
= STACK_I8
;
5991 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
5993 ins
->inst_l
= (gint64
)read64 (ip
);
5994 MONO_ADD_INS (bblock
, ins
);
6000 gboolean use_aotconst
= FALSE
;
6002 #ifdef TARGET_POWERPC
6003 /* FIXME: Clean this up */
6004 if (cfg
->compile_aot
)
6005 use_aotconst
= TRUE
;
6008 /* FIXME: we should really allocate this only late in the compilation process */
6009 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
6011 CHECK_STACK_OVF (1);
6017 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
6019 dreg
= alloc_freg (cfg
);
6020 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
6021 ins
->type
= STACK_R8
;
6023 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
6024 ins
->type
= STACK_R8
;
6025 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6027 MONO_ADD_INS (bblock
, ins
);
6037 gboolean use_aotconst
= FALSE
;
6039 #ifdef TARGET_POWERPC
6040 /* FIXME: Clean this up */
6041 if (cfg
->compile_aot
)
6042 use_aotconst
= TRUE
;
6045 /* FIXME: we should really allocate this only late in the compilation process */
6046 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
6048 CHECK_STACK_OVF (1);
6054 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
6056 dreg
= alloc_freg (cfg
);
6057 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
6058 ins
->type
= STACK_R8
;
6060 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
6061 ins
->type
= STACK_R8
;
6062 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6064 MONO_ADD_INS (bblock
, ins
);
6073 MonoInst
*temp
, *store
;
6075 CHECK_STACK_OVF (1);
6079 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
6080 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
6082 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6085 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6098 if (sp
[0]->type
== STACK_R8
)
6099 /* we need to pop the value from the x86 FP stack */
6100 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
6109 if (stack_start
!= sp
)
6111 token
= read32 (ip
+ 1);
6112 /* FIXME: check the signature matches */
6113 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6118 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
6119 GENERIC_SHARING_FAILURE (CEE_JMP
);
6121 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6122 CHECK_CFG_EXCEPTION
;
6124 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6126 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
6129 /* Handle tail calls similarly to calls */
6130 n
= fsig
->param_count
+ fsig
->hasthis
;
6132 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6133 call
->method
= cmethod
;
6134 call
->tail_call
= TRUE
;
6135 call
->signature
= mono_method_signature (cmethod
);
6136 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6137 call
->inst
.inst_p0
= cmethod
;
6138 for (i
= 0; i
< n
; ++i
)
6139 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6141 mono_arch_emit_call (cfg
, call
);
6142 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
6145 for (i
= 0; i
< num_args
; ++i
)
6146 /* Prevent arguments from being optimized away */
6147 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6149 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6150 ins
= (MonoInst
*)call
;
6151 ins
->inst_p0
= cmethod
;
6152 MONO_ADD_INS (bblock
, ins
);
6156 start_new_bblock
= 1;
6161 case CEE_CALLVIRT
: {
6162 MonoInst
*addr
= NULL
;
6163 MonoMethodSignature
*fsig
= NULL
;
6165 int virtual = *ip
== CEE_CALLVIRT
;
6166 int calli
= *ip
== CEE_CALLI
;
6167 gboolean pass_imt_from_rgctx
= FALSE
;
6168 MonoInst
*imt_arg
= NULL
;
6169 gboolean pass_vtable
= FALSE
;
6170 gboolean pass_mrgctx
= FALSE
;
6171 MonoInst
*vtable_arg
= NULL
;
6172 gboolean check_this
= FALSE
;
6173 gboolean supported_tail_call
= FALSE
;
6176 token
= read32 (ip
+ 1);
6183 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
6184 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
6186 fsig
= mono_metadata_parse_signature (image
, token
);
6188 n
= fsig
->param_count
+ fsig
->hasthis
;
6190 MonoMethod
*cil_method
;
6192 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
6193 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
6194 cil_method
= cmethod
;
6195 } else if (constrained_call
) {
6196 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
6198 * This is needed since get_method_constrained can't find
6199 * the method in klass representing a type var.
6200 * The type var is guaranteed to be a reference type in this
6203 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6204 cil_method
= cmethod
;
6205 g_assert (!cmethod
->klass
->valuetype
);
6207 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
6210 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6211 cil_method
= cmethod
;
6216 if (!dont_verify
&& !cfg
->skip_visibility
) {
6217 MonoMethod
*target_method
= cil_method
;
6218 if (method
->is_inflated
) {
6219 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
6221 if (!mono_method_can_access_method (method_definition
, target_method
) &&
6222 !mono_method_can_access_method (method
, cil_method
))
6223 METHOD_ACCESS_FAILURE
;
6226 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
6227 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
6229 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
6230 /* MS.NET seems to silently convert this to a callvirt */
6233 if (!cmethod
->klass
->inited
)
6234 if (!mono_class_init (cmethod
->klass
))
6237 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
6238 mini_class_is_system_array (cmethod
->klass
)) {
6239 array_rank
= cmethod
->klass
->rank
;
6240 fsig
= mono_method_signature (cmethod
);
6242 if (mono_method_signature (cmethod
)->pinvoke
) {
6243 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
6244 check_for_pending_exc
, FALSE
);
6245 fsig
= mono_method_signature (wrapper
);
6246 } else if (constrained_call
) {
6247 fsig
= mono_method_signature (cmethod
);
6249 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
6253 mono_save_token_info (cfg
, image
, token
, cil_method
);
6255 n
= fsig
->param_count
+ fsig
->hasthis
;
6257 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
6258 if (check_linkdemand (cfg
, method
, cmethod
))
6260 CHECK_CFG_EXCEPTION
;
6263 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
6264 g_assert_not_reached ();
6267 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
6270 if (!cfg
->generic_sharing_context
&& cmethod
)
6271 g_assert (!mono_method_check_context_used (cmethod
));
6275 //g_assert (!virtual || fsig->hasthis);
6279 if (constrained_call
) {
6281 * We have the `constrained.' prefix opcode.
6283 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
6285 * The type parameter is instantiated as a valuetype,
6286 * but that type doesn't override the method we're
6287 * calling, so we need to box `this'.
6289 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &constrained_call
->byval_arg
, sp
[0]->dreg
, 0);
6290 ins
->klass
= constrained_call
;
6291 sp
[0] = handle_box (cfg
, ins
, constrained_call
);
6292 CHECK_CFG_EXCEPTION
;
6293 } else if (!constrained_call
->valuetype
) {
6294 int dreg
= alloc_preg (cfg
);
6297 * The type parameter is instantiated as a reference
6298 * type. We have a managed pointer on the stack, so
6299 * we need to dereference it here.
6301 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6302 ins
->type
= STACK_OBJ
;
6304 } else if (cmethod
->klass
->valuetype
)
6306 constrained_call
= NULL
;
6309 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6313 * If the callee is a shared method, then its static cctor
6314 * might not get called after the call was patched.
6316 if (cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
!= method
->klass
&& cmethod
->klass
->generic_class
&& mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
6317 emit_generic_class_init (cfg
, cmethod
->klass
);
6318 CHECK_TYPELOAD (cmethod
->klass
);
6321 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6322 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6323 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6324 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6325 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6328 * Pass vtable iff target method might
6329 * be shared, which means that sharing
6330 * is enabled for its class and its
6331 * context is sharable (and it's not a
6334 if (sharing_enabled
&& context_sharable
&&
6335 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6339 if (cmethod
&& mini_method_get_context (cmethod
) &&
6340 mini_method_get_context (cmethod
)->method_inst
) {
6341 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6342 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6343 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6345 g_assert (!pass_vtable
);
6347 if (sharing_enabled
&& context_sharable
)
6351 if (cfg
->generic_sharing_context
&& cmethod
) {
6352 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6354 context_used
= mono_method_check_context_used (cmethod
);
6356 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6357 /* Generic method interface
6358 calls are resolved via a
6359 helper function and don't
6361 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6362 pass_imt_from_rgctx
= TRUE
;
6366 * If a shared method calls another
6367 * shared method then the caller must
6368 * have a generic sharing context
6369 * because the magic trampoline
6370 * requires it. FIXME: We shouldn't
6371 * have to force the vtable/mrgctx
6372 * variable here. Instead there
6373 * should be a flag in the cfg to
6374 * request a generic sharing context.
6377 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6378 mono_get_vtable_var (cfg
);
6383 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6385 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6387 CHECK_TYPELOAD (cmethod
->klass
);
6388 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6393 g_assert (!vtable_arg
);
6396 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6398 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
6401 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6402 MONO_METHOD_IS_FINAL (cmethod
)) {
6409 if (pass_imt_from_rgctx
) {
6410 g_assert (!pass_vtable
);
6413 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6414 cmethod
, MONO_RGCTX_INFO_METHOD
);
6420 MONO_INST_NEW (cfg
, check
, OP_CHECK_THIS
);
6421 check
->sreg1
= sp
[0]->dreg
;
6422 MONO_ADD_INS (cfg
->cbb
, check
);
6425 /* Calling virtual generic methods */
6426 if (cmethod
&& virtual &&
6427 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6428 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6429 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6430 mono_method_signature (cmethod
)->generic_param_count
) {
6431 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6432 MonoInst
*iargs
[4];
6434 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6436 /* Prevent inlining of methods that contain indirect calls */
6439 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6440 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
&& mono_use_imt
) {
6441 g_assert (!imt_arg
);
6443 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6444 cmethod
, MONO_RGCTX_INFO_METHOD
);
6447 g_assert (cmethod
->is_inflated
);
6448 EMIT_NEW_METHODCONST (cfg
, imt_arg
, cmethod
);
6450 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
);
6454 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6455 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6456 MONO_ADD_INS (bblock
, store
);
6458 /* FIXME: This should be a managed pointer */
6459 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6461 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6463 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6464 cmethod
, MONO_RGCTX_INFO_METHOD
);
6465 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6466 addr
= mono_emit_jit_icall (cfg
,
6467 mono_helper_compile_generic_method
, iargs
);
6469 EMIT_NEW_METHODCONST (cfg
, iargs
[1], cmethod
);
6470 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6471 addr
= mono_emit_jit_icall (cfg
, mono_helper_compile_generic_method
, iargs
);
6474 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6476 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6479 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6480 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6487 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6488 supported_tail_call
= cmethod
&& MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method
), mono_method_signature (cmethod
));
6490 supported_tail_call
= cmethod
&& mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->ret
);
6494 /* FIXME: runtime generic context pointer for jumps? */
6495 /* FIXME: handle this for generic sharing eventually */
6496 if ((ins_flag
& MONO_INST_TAILCALL
) && !cfg
->generic_sharing_context
&& !vtable_arg
&& cmethod
&& (*ip
== CEE_CALL
) && supported_tail_call
) {
6499 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6502 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6503 /* Handle tail calls similarly to calls */
6504 call
= mono_emit_call_args (cfg
, mono_method_signature (cmethod
), sp
, FALSE
, FALSE
, TRUE
);
6506 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6507 call
->tail_call
= TRUE
;
6508 call
->method
= cmethod
;
6509 call
->signature
= mono_method_signature (cmethod
);
6512 * We implement tail calls by storing the actual arguments into the
6513 * argument variables, then emitting a CEE_JMP.
6515 for (i
= 0; i
< n
; ++i
) {
6516 /* Prevent argument from being register allocated */
6517 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6518 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6522 ins
= (MonoInst
*)call
;
6523 ins
->inst_p0
= cmethod
;
6524 ins
->inst_p1
= arg_array
[0];
6525 MONO_ADD_INS (bblock
, ins
);
6526 link_bblock (cfg
, bblock
, end_bblock
);
6527 start_new_bblock
= 1;
6528 /* skip CEE_RET as well */
6534 /* Conversion to a JIT intrinsic */
6535 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6536 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6537 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6548 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6549 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6550 mono_method_check_inlining (cfg
, cmethod
) &&
6551 !g_list_find (dont_inline
, cmethod
)) {
6553 gboolean allways
= FALSE
;
6555 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6556 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6557 /* Prevent inlining of methods that call wrappers */
6559 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6563 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, allways
))) {
6565 cfg
->real_offset
+= 5;
6568 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6569 /* *sp is already set by inline_method */
6572 inline_costs
+= costs
;
6578 inline_costs
+= 10 * num_calls
++;
6580 /* Tail recursion elimination */
6581 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6582 gboolean has_vtargs
= FALSE
;
6585 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6588 /* keep it simple */
6589 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6590 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6595 for (i
= 0; i
< n
; ++i
)
6596 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6597 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6598 MONO_ADD_INS (bblock
, ins
);
6599 tblock
= start_bblock
->out_bb
[0];
6600 link_bblock (cfg
, bblock
, tblock
);
6601 ins
->inst_target_bb
= tblock
;
6602 start_new_bblock
= 1;
6604 /* skip the CEE_RET, too */
6605 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6615 /* Generic sharing */
6616 /* FIXME: only do this for generic methods if
6617 they are not shared! */
6618 if (context_used
&& !imt_arg
&& !array_rank
&&
6619 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6620 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6621 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6622 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
6625 g_assert (cfg
->generic_sharing_context
&& cmethod
);
6629 * We are compiling a call to a
6630 * generic method from shared code,
6631 * which means that we have to look up
6632 * the method in the rgctx and do an
6635 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
6638 /* Indirect calls */
6640 g_assert (!imt_arg
);
6642 if (*ip
== CEE_CALL
)
6643 g_assert (context_used
);
6644 else if (*ip
== CEE_CALLI
)
6645 g_assert (!vtable_arg
);
6647 /* FIXME: what the hell is this??? */
6648 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
6649 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
6651 /* Prevent inlining of methods with indirect calls */
6655 #ifdef MONO_ARCH_RGCTX_REG
6657 int rgctx_reg
= mono_alloc_preg (cfg
);
6659 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
6660 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6661 call
= (MonoCallInst
*)ins
;
6662 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
6663 cfg
->uses_rgctx_reg
= TRUE
;
6664 call
->rgctx_reg
= TRUE
;
6669 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6671 * Instead of emitting an indirect call, emit a direct call
6672 * with the contents of the aotconst as the patch info.
6674 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
6676 } else if (addr
->opcode
== OP_GOT_ENTRY
&& addr
->inst_right
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6677 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_right
->inst_left
, fsig
, sp
);
6680 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6683 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6684 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6695 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
6696 if (sp
[fsig
->param_count
]->type
== STACK_OBJ
) {
6697 MonoInst
*iargs
[2];
6700 iargs
[1] = sp
[fsig
->param_count
];
6702 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
6705 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
6706 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, sp
[fsig
->param_count
]->dreg
);
6707 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
6708 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6710 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
6713 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
6714 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
6715 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
6716 CHECK_TYPELOAD (cmethod
->klass
);
6719 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6722 g_assert_not_reached ();
6730 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
6732 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6733 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6743 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
6745 } else if (imt_arg
) {
6746 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, imt_arg
);
6748 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, NULL
);
6751 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6752 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6759 if (cfg
->method
!= method
) {
6760 /* return from inlined method */
6762 * If in_count == 0, that means the ret is unreachable due to
6763 * being preceeded by a throw. In that case, inline_method () will
6764 * handle setting the return value
6765 * (test case: test_0_inline_throw ()).
6767 if (return_var
&& cfg
->cbb
->in_count
) {
6771 //g_assert (returnvar != -1);
6772 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
6773 cfg
->ret_var_set
= TRUE
;
6777 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
6779 g_assert (!return_var
);
6782 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
6785 if (!cfg
->vret_addr
) {
6788 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
6790 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
6792 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
6793 ins
->klass
= mono_class_from_mono_type (ret_type
);
6796 #ifdef MONO_ARCH_SOFT_FLOAT
6797 if (!ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
6798 MonoInst
*iargs
[1];
6802 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
6803 mono_arch_emit_setret (cfg
, method
, conv
);
6805 mono_arch_emit_setret (cfg
, method
, *sp
);
6808 mono_arch_emit_setret (cfg
, method
, *sp
);
6813 if (sp
!= stack_start
)
6815 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6817 ins
->inst_target_bb
= end_bblock
;
6818 MONO_ADD_INS (bblock
, ins
);
6819 link_bblock (cfg
, bblock
, end_bblock
);
6820 start_new_bblock
= 1;
6824 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6826 target
= ip
+ 1 + (signed char)(*ip
);
6828 GET_BBLOCK (cfg
, tblock
, target
);
6829 link_bblock (cfg
, bblock
, tblock
);
6830 ins
->inst_target_bb
= tblock
;
6831 if (sp
!= stack_start
) {
6832 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6834 CHECK_UNVERIFIABLE (cfg
);
6836 MONO_ADD_INS (bblock
, ins
);
6837 start_new_bblock
= 1;
6838 inline_costs
+= BRANCH_COST
;
6852 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
6854 target
= ip
+ 1 + *(signed char*)ip
;
6860 inline_costs
+= BRANCH_COST
;
6864 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6867 target
= ip
+ 4 + (gint32
)read32(ip
);
6869 GET_BBLOCK (cfg
, tblock
, target
);
6870 link_bblock (cfg
, bblock
, tblock
);
6871 ins
->inst_target_bb
= tblock
;
6872 if (sp
!= stack_start
) {
6873 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6875 CHECK_UNVERIFIABLE (cfg
);
6878 MONO_ADD_INS (bblock
, ins
);
6880 start_new_bblock
= 1;
6881 inline_costs
+= BRANCH_COST
;
6888 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
6889 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
6890 guint32 opsize
= is_short
? 1 : 4;
6892 CHECK_OPSIZE (opsize
);
6894 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
6897 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
6902 GET_BBLOCK (cfg
, tblock
, target
);
6903 link_bblock (cfg
, bblock
, tblock
);
6904 GET_BBLOCK (cfg
, tblock
, ip
);
6905 link_bblock (cfg
, bblock
, tblock
);
6907 if (sp
!= stack_start
) {
6908 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6909 CHECK_UNVERIFIABLE (cfg
);
6912 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
6913 cmp
->sreg1
= sp
[0]->dreg
;
6914 type_from_op (cmp
, sp
[0], NULL
);
6917 #if SIZEOF_REGISTER == 4
6918 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
6919 /* Convert it to OP_LCOMPARE */
6920 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6921 ins
->type
= STACK_I8
;
6922 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6924 MONO_ADD_INS (bblock
, ins
);
6925 cmp
->opcode
= OP_LCOMPARE
;
6926 cmp
->sreg2
= ins
->dreg
;
6929 MONO_ADD_INS (bblock
, cmp
);
6931 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
6932 type_from_op (ins
, sp
[0], NULL
);
6933 MONO_ADD_INS (bblock
, ins
);
6934 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
6935 GET_BBLOCK (cfg
, tblock
, target
);
6936 ins
->inst_true_bb
= tblock
;
6937 GET_BBLOCK (cfg
, tblock
, ip
);
6938 ins
->inst_false_bb
= tblock
;
6939 start_new_bblock
= 2;
6942 inline_costs
+= BRANCH_COST
;
6957 MONO_INST_NEW (cfg
, ins
, *ip
);
6959 target
= ip
+ 4 + (gint32
)read32(ip
);
6965 inline_costs
+= BRANCH_COST
;
6969 MonoBasicBlock
**targets
;
6970 MonoBasicBlock
*default_bblock
;
6971 MonoJumpInfoBBTable
*table
;
6972 int offset_reg
= alloc_preg (cfg
);
6973 int target_reg
= alloc_preg (cfg
);
6974 int table_reg
= alloc_preg (cfg
);
6975 int sum_reg
= alloc_preg (cfg
);
6976 gboolean use_op_switch
;
6980 n
= read32 (ip
+ 1);
6983 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
6987 CHECK_OPSIZE (n
* sizeof (guint32
));
6988 target
= ip
+ n
* sizeof (guint32
);
6990 GET_BBLOCK (cfg
, default_bblock
, target
);
6992 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
6993 for (i
= 0; i
< n
; ++i
) {
6994 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
6995 targets
[i
] = tblock
;
6999 if (sp
!= stack_start
) {
7001 * Link the current bb with the targets as well, so handle_stack_args
7002 * will set their in_stack correctly.
7004 link_bblock (cfg
, bblock
, default_bblock
);
7005 for (i
= 0; i
< n
; ++i
)
7006 link_bblock (cfg
, bblock
, targets
[i
]);
7008 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7010 CHECK_UNVERIFIABLE (cfg
);
7013 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
7014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
7017 for (i
= 0; i
< n
; ++i
)
7018 link_bblock (cfg
, bblock
, targets
[i
]);
7020 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
7021 table
->table
= targets
;
7022 table
->table_size
= n
;
7024 use_op_switch
= FALSE
;
7026 /* ARM implements SWITCH statements differently */
7027 /* FIXME: Make it use the generic implementation */
7028 if (!cfg
->compile_aot
)
7029 use_op_switch
= TRUE
;
7032 if (COMPILE_LLVM (cfg
))
7033 use_op_switch
= TRUE
;
7035 cfg
->cbb
->has_jump_table
= 1;
7037 if (use_op_switch
) {
7038 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
7039 ins
->sreg1
= src1
->dreg
;
7040 ins
->inst_p0
= table
;
7041 ins
->inst_many_bb
= targets
;
7042 ins
->klass
= GUINT_TO_POINTER (n
);
7043 MONO_ADD_INS (cfg
->cbb
, ins
);
7045 if (sizeof (gpointer
) == 8)
7046 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
7048 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
7050 #if SIZEOF_REGISTER == 8
7051 /* The upper word might not be zero, and we add it to a 64 bit address later */
7052 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
7055 if (cfg
->compile_aot
) {
7056 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
7058 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
7059 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
7060 ins
->inst_p0
= table
;
7061 ins
->dreg
= table_reg
;
7062 MONO_ADD_INS (cfg
->cbb
, ins
);
7065 /* FIXME: Use load_memindex */
7066 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
7067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
7068 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
7070 start_new_bblock
= 1;
7071 inline_costs
+= (BRANCH_COST
* 2);
7091 dreg
= alloc_freg (cfg
);
7094 dreg
= alloc_lreg (cfg
);
7097 dreg
= alloc_preg (cfg
);
7100 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
7101 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
7102 ins
->flags
|= ins_flag
;
7104 MONO_ADD_INS (bblock
, ins
);
7119 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
7120 ins
->flags
|= ins_flag
;
7122 MONO_ADD_INS (bblock
, ins
);
7124 #if HAVE_WRITE_BARRIERS
7125 if (*ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0))) {
7126 /* insert call to write barrier */
7127 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7128 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
7139 MONO_INST_NEW (cfg
, ins
, (*ip
));
7141 ins
->sreg1
= sp
[0]->dreg
;
7142 ins
->sreg2
= sp
[1]->dreg
;
7143 type_from_op (ins
, sp
[0], sp
[1]);
7145 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7147 /* Use the immediate opcodes if possible */
7148 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
7149 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
7150 if (imm_opcode
!= -1) {
7151 ins
->opcode
= imm_opcode
;
7152 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7155 sp
[1]->opcode
= OP_NOP
;
7159 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7161 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7178 MONO_INST_NEW (cfg
, ins
, (*ip
));
7180 ins
->sreg1
= sp
[0]->dreg
;
7181 ins
->sreg2
= sp
[1]->dreg
;
7182 type_from_op (ins
, sp
[0], sp
[1]);
7184 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
7185 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7187 /* FIXME: Pass opcode to is_inst_imm */
7189 /* Use the immediate opcodes if possible */
7190 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
7193 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
7194 if (imm_opcode
!= -1) {
7195 ins
->opcode
= imm_opcode
;
7196 if (sp
[1]->opcode
== OP_I8CONST
) {
7197 #if SIZEOF_REGISTER == 8
7198 ins
->inst_imm
= sp
[1]->inst_l
;
7200 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
7201 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
7205 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7208 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7209 if (sp
[1]->next
== NULL
)
7210 sp
[1]->opcode
= OP_NOP
;
7213 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7215 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7228 case CEE_CONV_OVF_I8
:
7229 case CEE_CONV_OVF_U8
:
7233 /* Special case this earlier so we have long constants in the IR */
7234 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
7235 int data
= sp
[-1]->inst_c0
;
7236 sp
[-1]->opcode
= OP_I8CONST
;
7237 sp
[-1]->type
= STACK_I8
;
7238 #if SIZEOF_REGISTER == 8
7239 if ((*ip
) == CEE_CONV_U8
)
7240 sp
[-1]->inst_c0
= (guint32
)data
;
7242 sp
[-1]->inst_c0
= data
;
7244 sp
[-1]->inst_ls_word
= data
;
7245 if ((*ip
) == CEE_CONV_U8
)
7246 sp
[-1]->inst_ms_word
= 0;
7248 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7250 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7257 case CEE_CONV_OVF_I4
:
7258 case CEE_CONV_OVF_I1
:
7259 case CEE_CONV_OVF_I2
:
7260 case CEE_CONV_OVF_I
:
7261 case CEE_CONV_OVF_U
:
7264 if (sp
[-1]->type
== STACK_R8
) {
7265 ADD_UNOP (CEE_CONV_OVF_I8
);
7272 case CEE_CONV_OVF_U1
:
7273 case CEE_CONV_OVF_U2
:
7274 case CEE_CONV_OVF_U4
:
7277 if (sp
[-1]->type
== STACK_R8
) {
7278 ADD_UNOP (CEE_CONV_OVF_U8
);
7285 case CEE_CONV_OVF_I1_UN
:
7286 case CEE_CONV_OVF_I2_UN
:
7287 case CEE_CONV_OVF_I4_UN
:
7288 case CEE_CONV_OVF_I8_UN
:
7289 case CEE_CONV_OVF_U1_UN
:
7290 case CEE_CONV_OVF_U2_UN
:
7291 case CEE_CONV_OVF_U4_UN
:
7292 case CEE_CONV_OVF_U8_UN
:
7293 case CEE_CONV_OVF_I_UN
:
7294 case CEE_CONV_OVF_U_UN
:
7304 case CEE_ADD_OVF_UN
:
7306 case CEE_MUL_OVF_UN
:
7308 case CEE_SUB_OVF_UN
:
7316 token
= read32 (ip
+ 1);
7317 klass
= mini_get_class (method
, token
, generic_context
);
7318 CHECK_TYPELOAD (klass
);
7320 if (generic_class_is_reference_type (cfg
, klass
)) {
7321 MonoInst
*store
, *load
;
7322 int dreg
= alloc_preg (cfg
);
7324 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7325 load
->flags
|= ins_flag
;
7326 MONO_ADD_INS (cfg
->cbb
, load
);
7328 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7329 store
->flags
|= ins_flag
;
7330 MONO_ADD_INS (cfg
->cbb
, store
);
7332 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7344 token
= read32 (ip
+ 1);
7345 klass
= mini_get_class (method
, token
, generic_context
);
7346 CHECK_TYPELOAD (klass
);
7348 /* Optimize the common ldobj+stloc combination */
7358 loc_index
= ip
[5] - CEE_STLOC_0
;
7365 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7366 CHECK_LOCAL (loc_index
);
7368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7369 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7375 /* Optimize the ldobj+stobj combination */
7376 /* The reference case ends up being a load+store anyway */
7377 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7382 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7389 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7398 CHECK_STACK_OVF (1);
7400 n
= read32 (ip
+ 1);
7402 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7403 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7404 ins
->type
= STACK_OBJ
;
7407 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7408 MonoInst
*iargs
[1];
7410 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7411 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7413 if (cfg
->opt
& MONO_OPT_SHARED
) {
7414 MonoInst
*iargs
[3];
7416 if (cfg
->compile_aot
) {
7417 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7419 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7420 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7421 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7422 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7423 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7425 if (bblock
->out_of_line
) {
7426 MonoInst
*iargs
[2];
7428 if (image
== mono_defaults
.corlib
) {
7430 * Avoid relocations in AOT and save some space by using a
7431 * version of helper_ldstr specialized to mscorlib.
7433 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7434 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7436 /* Avoid creating the string object */
7437 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7438 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7439 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7443 if (cfg
->compile_aot
) {
7444 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7446 MONO_ADD_INS (bblock
, ins
);
7449 NEW_PCONST (cfg
, ins
, NULL
);
7450 ins
->type
= STACK_OBJ
;
7451 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7453 MONO_ADD_INS (bblock
, ins
);
7462 MonoInst
*iargs
[2];
7463 MonoMethodSignature
*fsig
;
7466 MonoInst
*vtable_arg
= NULL
;
7469 token
= read32 (ip
+ 1);
7470 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7473 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7475 mono_save_token_info (cfg
, image
, token
, cmethod
);
7477 if (!mono_class_init (cmethod
->klass
))
7480 if (cfg
->generic_sharing_context
)
7481 context_used
= mono_method_check_context_used (cmethod
);
7483 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7484 if (check_linkdemand (cfg
, method
, cmethod
))
7486 CHECK_CFG_EXCEPTION
;
7487 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7488 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7491 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7492 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7493 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7495 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7496 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7498 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
7502 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7503 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7505 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7507 CHECK_TYPELOAD (cmethod
->klass
);
7508 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7513 n
= fsig
->param_count
;
7517 * Generate smaller code for the common newobj <exception> instruction in
7518 * argument checking code.
7520 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7521 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7522 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7523 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7524 MonoInst
*iargs
[3];
7526 g_assert (!vtable_arg
);
7530 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
7533 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
7537 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
7542 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
7545 g_assert_not_reached ();
7553 /* move the args to allow room for 'this' in the first position */
7559 /* check_call_signature () requires sp[0] to be set */
7560 this_ins
.type
= STACK_OBJ
;
7562 if (check_call_signature (cfg
, fsig
, sp
))
7567 if (mini_class_is_system_array (cmethod
->klass
)) {
7568 g_assert (!vtable_arg
);
7571 *sp
= emit_get_rgctx_method (cfg
, context_used
,
7572 cmethod
, MONO_RGCTX_INFO_METHOD
);
7574 EMIT_NEW_METHODCONST (cfg
, *sp
, cmethod
);
7577 /* Avoid varargs in the common case */
7578 if (fsig
->param_count
== 1)
7579 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
7580 else if (fsig
->param_count
== 2)
7581 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
7583 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
7584 } else if (cmethod
->string_ctor
) {
7585 g_assert (!context_used
);
7586 g_assert (!vtable_arg
);
7587 /* we simply pass a null pointer */
7588 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
7589 /* now call the string ctor */
7590 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
);
7592 MonoInst
* callvirt_this_arg
= NULL
;
7594 if (cmethod
->klass
->valuetype
) {
7595 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
7596 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
7597 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
7602 * The code generated by mini_emit_virtual_call () expects
7603 * iargs [0] to be a boxed instance, but luckily the vcall
7604 * will be transformed into a normal call there.
7606 } else if (context_used
) {
7610 if (cfg
->opt
& MONO_OPT_SHARED
)
7611 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7613 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7614 data
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, rgctx_info
);
7616 alloc
= handle_alloc_from_inst (cfg
, cmethod
->klass
, data
, FALSE
);
7619 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7621 CHECK_TYPELOAD (cmethod
->klass
);
7624 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7625 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7626 * As a workaround, we call class cctors before allocating objects.
7628 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7629 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7630 if (cfg
->verbose_level
> 2)
7631 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
7632 class_inits
= g_slist_prepend (class_inits
, vtable
);
7635 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
);
7638 CHECK_CFG_EXCEPTION
; /*for handle_alloc*/
7641 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
7643 /* Now call the actual ctor */
7644 /* Avoid virtual calls to ctors if possible */
7645 if (cmethod
->klass
->marshalbyref
)
7646 callvirt_this_arg
= sp
[0];
7648 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
7649 mono_method_check_inlining (cfg
, cmethod
) &&
7650 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
7651 !g_list_find (dont_inline
, cmethod
)) {
7654 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
7655 cfg
->real_offset
+= 5;
7658 inline_costs
+= costs
- 5;
7661 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
);
7663 } else if (context_used
&&
7664 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7665 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
7666 MonoInst
*cmethod_addr
;
7668 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
7669 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7671 mono_emit_rgctx_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
7674 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
,
7675 callvirt_this_arg
, NULL
, vtable_arg
);
7679 if (alloc
== NULL
) {
7681 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
7682 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
7696 token
= read32 (ip
+ 1);
7697 klass
= mini_get_class (method
, token
, generic_context
);
7698 CHECK_TYPELOAD (klass
);
7699 if (sp
[0]->type
!= STACK_OBJ
)
7702 if (cfg
->generic_sharing_context
)
7703 context_used
= mono_class_check_context_used (klass
);
7712 args
[1] = emit_get_rgctx_klass (cfg
, context_used
,
7713 klass
, MONO_RGCTX_INFO_KLASS
);
7715 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
7719 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7720 MonoMethod
*mono_castclass
;
7721 MonoInst
*iargs
[1];
7724 mono_castclass
= mono_marshal_get_castclass (klass
);
7727 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7728 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7729 g_assert (costs
> 0);
7732 cfg
->real_offset
+= 5;
7737 inline_costs
+= costs
;
7740 ins
= handle_castclass (cfg
, klass
, *sp
);
7741 CHECK_CFG_EXCEPTION
;
7751 token
= read32 (ip
+ 1);
7752 klass
= mini_get_class (method
, token
, generic_context
);
7753 CHECK_TYPELOAD (klass
);
7754 if (sp
[0]->type
!= STACK_OBJ
)
7757 if (cfg
->generic_sharing_context
)
7758 context_used
= mono_class_check_context_used (klass
);
7767 args
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7769 *sp
= mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
7773 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7774 MonoMethod
*mono_isinst
;
7775 MonoInst
*iargs
[1];
7778 mono_isinst
= mono_marshal_get_isinst (klass
);
7781 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
7782 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7783 g_assert (costs
> 0);
7786 cfg
->real_offset
+= 5;
7791 inline_costs
+= costs
;
7794 ins
= handle_isinst (cfg
, klass
, *sp
);
7795 CHECK_CFG_EXCEPTION
;
7802 case CEE_UNBOX_ANY
: {
7806 token
= read32 (ip
+ 1);
7807 klass
= mini_get_class (method
, token
, generic_context
);
7808 CHECK_TYPELOAD (klass
);
7810 mono_save_token_info (cfg
, image
, token
, klass
);
7812 if (cfg
->generic_sharing_context
)
7813 context_used
= mono_class_check_context_used (klass
);
7815 if (generic_class_is_reference_type (cfg
, klass
)) {
7816 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7818 MonoInst
*iargs
[2];
7823 iargs
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7824 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, iargs
);
7828 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7829 MonoMethod
*mono_castclass
;
7830 MonoInst
*iargs
[1];
7833 mono_castclass
= mono_marshal_get_castclass (klass
);
7836 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7837 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7839 g_assert (costs
> 0);
7842 cfg
->real_offset
+= 5;
7846 inline_costs
+= costs
;
7848 ins
= handle_castclass (cfg
, klass
, *sp
);
7849 CHECK_CFG_EXCEPTION
;
7857 if (mono_class_is_nullable (klass
)) {
7858 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7865 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7871 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7884 token
= read32 (ip
+ 1);
7885 klass
= mini_get_class (method
, token
, generic_context
);
7886 CHECK_TYPELOAD (klass
);
7888 mono_save_token_info (cfg
, image
, token
, klass
);
7890 if (cfg
->generic_sharing_context
)
7891 context_used
= mono_class_check_context_used (klass
);
7893 if (generic_class_is_reference_type (cfg
, klass
)) {
7899 if (klass
== mono_defaults
.void_class
)
7901 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
7903 /* frequent check in generic code: box (struct), brtrue */
7904 if (!mono_class_is_nullable (klass
) &&
7905 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) && (ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
)) {
7906 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7908 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7909 if (*ip
== CEE_BRTRUE_S
) {
7912 target
= ip
+ 1 + (signed char)(*ip
);
7917 target
= ip
+ 4 + (gint
)(read32 (ip
));
7920 GET_BBLOCK (cfg
, tblock
, target
);
7921 link_bblock (cfg
, bblock
, tblock
);
7922 ins
->inst_target_bb
= tblock
;
7923 GET_BBLOCK (cfg
, tblock
, ip
);
7925 * This leads to some inconsistency, since the two bblocks are
7926 * not really connected, but it is needed for handling stack
7927 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7928 * FIXME: This should only be needed if sp != stack_start, but that
7929 * doesn't work for some reason (test failure in mcs/tests on x86).
7931 link_bblock (cfg
, bblock
, tblock
);
7932 if (sp
!= stack_start
) {
7933 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7935 CHECK_UNVERIFIABLE (cfg
);
7937 MONO_ADD_INS (bblock
, ins
);
7938 start_new_bblock
= 1;
7946 if (cfg
->opt
& MONO_OPT_SHARED
)
7947 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7949 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7950 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
7951 *sp
++ = handle_box_from_inst (cfg
, val
, klass
, context_used
, data
);
7953 *sp
++ = handle_box (cfg
, val
, klass
);
7956 CHECK_CFG_EXCEPTION
;
7965 token
= read32 (ip
+ 1);
7966 klass
= mini_get_class (method
, token
, generic_context
);
7967 CHECK_TYPELOAD (klass
);
7969 mono_save_token_info (cfg
, image
, token
, klass
);
7971 if (cfg
->generic_sharing_context
)
7972 context_used
= mono_class_check_context_used (klass
);
7974 if (mono_class_is_nullable (klass
)) {
7977 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7978 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
7982 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7992 MonoClassField
*field
;
7996 if (*ip
== CEE_STFLD
) {
8003 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
8005 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
8008 token
= read32 (ip
+ 1);
8009 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8010 field
= mono_method_get_wrapper_data (method
, token
);
8011 klass
= field
->parent
;
8014 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8018 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8019 FIELD_ACCESS_FAILURE
;
8020 mono_class_init (klass
);
8022 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8023 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8024 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8025 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8028 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
8029 if (*ip
== CEE_STFLD
) {
8030 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
8032 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8033 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
8034 MonoInst
*iargs
[5];
8037 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8038 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8039 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
8043 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8044 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
8045 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8046 g_assert (costs
> 0);
8048 cfg
->real_offset
+= 5;
8051 inline_costs
+= costs
;
8053 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
8058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
8060 #if HAVE_WRITE_BARRIERS
8061 if (mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
8062 /* insert call to write barrier */
8063 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
8064 MonoInst
*iargs
[2];
8067 dreg
= alloc_preg (cfg
);
8068 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8070 mono_emit_method_call (cfg
, write_barrier
, iargs
, NULL
);
8074 store
->flags
|= ins_flag
;
8081 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
8082 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
8083 MonoInst
*iargs
[4];
8086 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8087 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
8088 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
8089 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
8090 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
8091 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
8093 g_assert (costs
> 0);
8095 cfg
->real_offset
+= 5;
8099 inline_costs
+= costs
;
8101 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
8105 if (sp
[0]->type
== STACK_VTYPE
) {
8108 /* Have to compute the address of the variable */
8110 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8112 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8114 g_assert (var
->klass
== klass
);
8116 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
8120 if (*ip
== CEE_LDFLDA
) {
8121 dreg
= alloc_preg (cfg
);
8123 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
8124 ins
->klass
= mono_class_from_mono_type (field
->type
);
8125 ins
->type
= STACK_MP
;
8130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
8131 load
->flags
|= ins_flag
;
8142 MonoClassField
*field
;
8143 gpointer addr
= NULL
;
8144 gboolean is_special_static
;
8147 token
= read32 (ip
+ 1);
8149 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8150 field
= mono_method_get_wrapper_data (method
, token
);
8151 klass
= field
->parent
;
8154 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8157 mono_class_init (klass
);
8158 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8159 FIELD_ACCESS_FAILURE
;
8161 /* if the class is Critical then transparent code cannot access its fields */
8162 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
8163 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
8166 * We can only support shared generic static
8167 * field access on architectures where the
8168 * trampoline code has been extended to handle
8169 * the generic class init.
8171 #ifndef MONO_ARCH_VTABLE_REG
8172 GENERIC_SHARING_FAILURE (*ip
);
8175 if (cfg
->generic_sharing_context
)
8176 context_used
= mono_class_check_context_used (klass
);
8178 g_assert (!(field
->type
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
8180 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8181 * to be called here.
8183 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
8184 mono_class_vtable (cfg
->domain
, klass
);
8185 CHECK_TYPELOAD (klass
);
8187 mono_domain_lock (cfg
->domain
);
8188 if (cfg
->domain
->special_static_fields
)
8189 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
8190 mono_domain_unlock (cfg
->domain
);
8192 is_special_static
= mono_class_field_is_special_static (field
);
8194 /* Generate IR to compute the field address */
8196 if ((cfg
->opt
& MONO_OPT_SHARED
) ||
8197 (cfg
->compile_aot
&& is_special_static
) ||
8198 (context_used
&& is_special_static
)) {
8199 MonoInst
*iargs
[2];
8201 g_assert (field
->parent
);
8202 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8204 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
8205 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
8207 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8209 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8210 } else if (context_used
) {
8211 MonoInst
*static_data
;
8214 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8215 method->klass->name_space, method->klass->name, method->name,
8216 depth, field->offset);
8219 if (mono_class_needs_cctor_run (klass
, method
)) {
8223 vtable
= emit_get_rgctx_klass (cfg
, context_used
,
8224 klass
, MONO_RGCTX_INFO_VTABLE
);
8226 // FIXME: This doesn't work since it tries to pass the argument
8227 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8229 * The vtable pointer is always passed in a register regardless of
8230 * the calling convention, so assign it manually, and make a call
8231 * using a signature without parameters.
8233 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable
);
8234 #ifdef MONO_ARCH_VTABLE_REG
8235 mono_call_inst_add_outarg_reg (cfg
, call
, vtable
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
8236 cfg
->uses_vtable_reg
= TRUE
;
8243 * The pointer we're computing here is
8245 * super_info.static_data + field->offset
8247 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
8248 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
8250 if (field
->offset
== 0) {
8253 int addr_reg
= mono_alloc_preg (cfg
);
8254 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
8256 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
8257 MonoInst
*iargs
[2];
8259 g_assert (field
->parent
);
8260 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8261 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8262 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8264 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
8266 CHECK_TYPELOAD (klass
);
8268 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8269 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8270 if (cfg
->verbose_level
> 2)
8271 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
8272 class_inits
= g_slist_prepend (class_inits
, vtable
);
8274 if (cfg
->run_cctors
) {
8276 /* This makes it so that inlining cannot trigger */
8277 /* .cctors: too many apps depend on them */
8278 /* running with a specific order... */
8279 if (! vtable
->initialized
)
8281 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
8283 set_exception_object (cfg
, ex
);
8284 goto exception_exit
;
8288 addr
= (char*)vtable
->data
+ field
->offset
;
8290 if (cfg
->compile_aot
)
8291 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8293 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8296 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8297 * This could be later optimized to do just a couple of
8298 * memory dereferences with constant offsets.
8300 MonoInst
*iargs
[1];
8301 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8302 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8306 /* Generate IR to do the actual load/store operation */
8308 if (*ip
== CEE_LDSFLDA
) {
8309 ins
->klass
= mono_class_from_mono_type (field
->type
);
8310 ins
->type
= STACK_PTR
;
8312 } else if (*ip
== CEE_STSFLD
) {
8317 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, ins
->dreg
, 0, sp
[0]->dreg
);
8318 store
->flags
|= ins_flag
;
8320 gboolean is_const
= FALSE
;
8321 MonoVTable
*vtable
= NULL
;
8323 if (!context_used
) {
8324 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8325 CHECK_TYPELOAD (klass
);
8327 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8328 vtable
->initialized
&& (field
->type
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8329 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8330 int ro_type
= field
->type
->type
;
8331 if (ro_type
== MONO_TYPE_VALUETYPE
&& field
->type
->data
.klass
->enumtype
) {
8332 ro_type
= mono_class_enum_basetype (field
->type
->data
.klass
)->type
;
8334 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8337 case MONO_TYPE_BOOLEAN
:
8339 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8343 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8346 case MONO_TYPE_CHAR
:
8348 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8352 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8357 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8361 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8364 #ifndef HAVE_MOVING_COLLECTOR
8367 case MONO_TYPE_STRING
:
8368 case MONO_TYPE_OBJECT
:
8369 case MONO_TYPE_CLASS
:
8370 case MONO_TYPE_SZARRAY
:
8372 case MONO_TYPE_FNPTR
:
8373 case MONO_TYPE_ARRAY
:
8374 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8375 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8381 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8386 case MONO_TYPE_VALUETYPE
:
8396 CHECK_STACK_OVF (1);
8398 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8399 load
->flags
|= ins_flag
;
8412 token
= read32 (ip
+ 1);
8413 klass
= mini_get_class (method
, token
, generic_context
);
8414 CHECK_TYPELOAD (klass
);
8415 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8416 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8427 const char *data_ptr
;
8429 guint32 field_token
;
8435 token
= read32 (ip
+ 1);
8437 klass
= mini_get_class (method
, token
, generic_context
);
8438 CHECK_TYPELOAD (klass
);
8440 if (cfg
->generic_sharing_context
)
8441 context_used
= mono_class_check_context_used (klass
);
8443 if (sp
[0]->type
== STACK_I8
|| (SIZEOF_VOID_P
== 8 && sp
[0]->type
== STACK_PTR
)) {
8444 MONO_INST_NEW (cfg
, ins
, OP_LCONV_TO_I4
);
8445 ins
->sreg1
= sp
[0]->dreg
;
8446 ins
->type
= STACK_I4
;
8447 ins
->dreg
= alloc_ireg (cfg
);
8448 MONO_ADD_INS (cfg
->cbb
, ins
);
8449 *sp
= mono_decompose_opcode (cfg
, ins
);
8455 /* FIXME: Decompose later to help abcrem */
8458 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
8459 mono_array_class_get (klass
, 1), MONO_RGCTX_INFO_VTABLE
);
8464 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
8466 if (cfg
->opt
& MONO_OPT_SHARED
) {
8467 /* Decompose now to avoid problems with references to the domainvar */
8468 MonoInst
*iargs
[3];
8470 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8471 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8474 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
8476 /* Decompose later since it is needed by abcrem */
8477 MonoClass
*array_type
= mono_array_class_get (klass
, 1);
8478 mono_class_vtable (cfg
->domain
, array_type
);
8479 CHECK_TYPELOAD (array_type
);
8481 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
8482 ins
->dreg
= alloc_preg (cfg
);
8483 ins
->sreg1
= sp
[0]->dreg
;
8484 ins
->inst_newa_class
= klass
;
8485 ins
->type
= STACK_OBJ
;
8487 MONO_ADD_INS (cfg
->cbb
, ins
);
8488 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8489 cfg
->cbb
->has_array_access
= TRUE
;
8491 /* Needed so mono_emit_load_get_addr () gets called */
8492 mono_get_got_var (cfg
);
8502 * we inline/optimize the initialization sequence if possible.
8503 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8504 * for small sizes open code the memcpy
8505 * ensure the rva field is big enough
8507 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
8508 MonoMethod
*memcpy_method
= get_memcpy_method ();
8509 MonoInst
*iargs
[3];
8510 int add_reg
= alloc_preg (cfg
);
8512 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
8513 if (cfg
->compile_aot
) {
8514 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
8516 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
8518 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
8519 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
8528 if (sp
[0]->type
!= STACK_OBJ
)
8531 dreg
= alloc_preg (cfg
);
8532 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
8533 ins
->dreg
= alloc_preg (cfg
);
8534 ins
->sreg1
= sp
[0]->dreg
;
8535 ins
->type
= STACK_I4
;
8536 MONO_ADD_INS (cfg
->cbb
, ins
);
8537 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8538 cfg
->cbb
->has_array_access
= TRUE
;
8546 if (sp
[0]->type
!= STACK_OBJ
)
8549 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8551 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
8552 CHECK_TYPELOAD (klass
);
8553 /* we need to make sure that this array is exactly the type it needs
8554 * to be for correctness. the wrappers are lax with their usage
8555 * so we need to ignore them here
8557 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
) {
8558 MonoClass
*array_class
= mono_array_class_get (klass
, 1);
8559 mini_emit_check_array_type (cfg
, sp
[0], array_class
);
8560 CHECK_TYPELOAD (array_class
);
8564 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8579 case CEE_LDELEM_REF
: {
8585 if (*ip
== CEE_LDELEM
) {
8587 token
= read32 (ip
+ 1);
8588 klass
= mini_get_class (method
, token
, generic_context
);
8589 CHECK_TYPELOAD (klass
);
8590 mono_class_init (klass
);
8593 klass
= array_access_to_klass (*ip
);
8595 if (sp
[0]->type
!= STACK_OBJ
)
8598 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8600 if (sp
[1]->opcode
== OP_ICONST
) {
8601 int array_reg
= sp
[0]->dreg
;
8602 int index_reg
= sp
[1]->dreg
;
8603 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8605 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8606 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
8608 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
8612 if (*ip
== CEE_LDELEM
)
8625 case CEE_STELEM_REF
:
8632 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8634 if (*ip
== CEE_STELEM
) {
8636 token
= read32 (ip
+ 1);
8637 klass
= mini_get_class (method
, token
, generic_context
);
8638 CHECK_TYPELOAD (klass
);
8639 mono_class_init (klass
);
8642 klass
= array_access_to_klass (*ip
);
8644 if (sp
[0]->type
!= STACK_OBJ
)
8647 /* storing a NULL doesn't need any of the complex checks in stelemref */
8648 if (generic_class_is_reference_type (cfg
, klass
) &&
8649 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
8650 MonoMethod
* helper
= mono_marshal_get_stelemref ();
8651 MonoInst
*iargs
[3];
8653 if (sp
[0]->type
!= STACK_OBJ
)
8655 if (sp
[2]->type
!= STACK_OBJ
)
8662 mono_emit_method_call (cfg
, helper
, iargs
, NULL
);
8664 if (sp
[1]->opcode
== OP_ICONST
) {
8665 int array_reg
= sp
[0]->dreg
;
8666 int index_reg
= sp
[1]->dreg
;
8667 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8669 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8670 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
8672 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8673 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
8677 if (*ip
== CEE_STELEM
)
8684 case CEE_CKFINITE
: {
8688 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
8689 ins
->sreg1
= sp
[0]->dreg
;
8690 ins
->dreg
= alloc_freg (cfg
);
8691 ins
->type
= STACK_R8
;
8692 MONO_ADD_INS (bblock
, ins
);
8694 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8699 case CEE_REFANYVAL
: {
8700 MonoInst
*src_var
, *src
;
8702 int klass_reg
= alloc_preg (cfg
);
8703 int dreg
= alloc_preg (cfg
);
8706 MONO_INST_NEW (cfg
, ins
, *ip
);
8709 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8710 CHECK_TYPELOAD (klass
);
8711 mono_class_init (klass
);
8713 if (cfg
->generic_sharing_context
)
8714 context_used
= mono_class_check_context_used (klass
);
8717 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8719 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8720 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
8721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
8724 MonoInst
*klass_ins
;
8726 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
8727 klass
, MONO_RGCTX_INFO_KLASS
);
8730 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
8731 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
8733 mini_emit_class_check (cfg
, klass_reg
, klass
);
8735 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
8736 ins
->type
= STACK_MP
;
8741 case CEE_MKREFANY
: {
8742 MonoInst
*loc
, *addr
;
8745 MONO_INST_NEW (cfg
, ins
, *ip
);
8748 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8749 CHECK_TYPELOAD (klass
);
8750 mono_class_init (klass
);
8752 if (cfg
->generic_sharing_context
)
8753 context_used
= mono_class_check_context_used (klass
);
8755 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
8756 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
8759 MonoInst
*const_ins
;
8760 int type_reg
= alloc_preg (cfg
);
8762 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
8763 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
8764 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8765 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8766 } else if (cfg
->compile_aot
) {
8767 int const_reg
= alloc_preg (cfg
);
8768 int type_reg
= alloc_preg (cfg
);
8770 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
8771 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
8772 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8773 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8775 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
8776 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
8778 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
8780 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
8781 ins
->type
= STACK_VTYPE
;
8782 ins
->klass
= mono_defaults
.typed_reference_class
;
8789 MonoClass
*handle_class
;
8791 CHECK_STACK_OVF (1);
8794 n
= read32 (ip
+ 1);
8796 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
8797 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
8798 handle
= mono_method_get_wrapper_data (method
, n
);
8799 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
8800 if (handle_class
== mono_defaults
.typehandle_class
)
8801 handle
= &((MonoClass
*)handle
)->byval_arg
;
8804 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
8808 mono_class_init (handle_class
);
8809 if (cfg
->generic_sharing_context
) {
8810 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
8811 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
8812 /* This case handles ldtoken
8813 of an open type, like for
8816 } else if (handle_class
== mono_defaults
.typehandle_class
) {
8817 /* If we get a MONO_TYPE_CLASS
8818 then we need to provide the
8820 instantiation of it. */
8821 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
8824 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
8825 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
8826 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
8827 else if (handle_class
== mono_defaults
.methodhandle_class
)
8828 context_used
= mono_method_check_context_used (handle
);
8830 g_assert_not_reached ();
8833 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
8834 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
8835 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
8836 MonoInst
*addr
, *vtvar
, *iargs
[3];
8837 int method_context_used
;
8839 if (cfg
->generic_sharing_context
)
8840 method_context_used
= mono_method_check_context_used (method
);
8842 method_context_used
= 0;
8844 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8846 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8847 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
8848 if (method_context_used
) {
8849 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
8850 method
, MONO_RGCTX_INFO_METHOD
);
8851 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
8853 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
8854 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
8856 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8858 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8860 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8862 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8863 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
8864 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
8865 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
8866 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
8867 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
8869 mono_class_init (tclass
);
8871 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8872 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
8873 } else if (cfg
->compile_aot
) {
8874 if (method
->wrapper_type
) {
8875 if (mono_class_get (tclass
->image
, tclass
->type_token
) == tclass
&& !generic_context
) {
8876 /* Special case for static synchronized wrappers */
8877 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, tclass
->image
, tclass
->type_token
, generic_context
);
8879 /* FIXME: n is not a normal token */
8880 cfg
->disable_aot
= TRUE
;
8881 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
8884 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
8887 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
8889 ins
->type
= STACK_OBJ
;
8890 ins
->klass
= cmethod
->klass
;
8893 MonoInst
*addr
, *vtvar
;
8895 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8898 if (handle_class
== mono_defaults
.typehandle_class
) {
8899 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8900 mono_class_from_mono_type (handle
),
8901 MONO_RGCTX_INFO_TYPE
);
8902 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
8903 ins
= emit_get_rgctx_method (cfg
, context_used
,
8904 handle
, MONO_RGCTX_INFO_METHOD
);
8905 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
8906 ins
= emit_get_rgctx_field (cfg
, context_used
,
8907 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
8909 g_assert_not_reached ();
8911 } else if (cfg
->compile_aot
) {
8912 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
8914 EMIT_NEW_PCONST (cfg
, ins
, handle
);
8916 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8917 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8918 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8928 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
8930 ins
->sreg1
= sp
[0]->dreg
;
8932 bblock
->out_of_line
= TRUE
;
8933 MONO_ADD_INS (bblock
, ins
);
8934 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
8935 MONO_ADD_INS (bblock
, ins
);
8938 link_bblock (cfg
, bblock
, end_bblock
);
8939 start_new_bblock
= 1;
8941 case CEE_ENDFINALLY
:
8942 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
8943 MONO_ADD_INS (bblock
, ins
);
8945 start_new_bblock
= 1;
8948 * Control will leave the method so empty the stack, otherwise
8949 * the next basic block will start with a nonempty stack.
8951 while (sp
!= stack_start
) {
8959 if (*ip
== CEE_LEAVE
) {
8961 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
8964 target
= ip
+ 2 + (signed char)(ip
[1]);
8967 /* empty the stack */
8968 while (sp
!= stack_start
) {
8973 * If this leave statement is in a catch block, check for a
8974 * pending exception, and rethrow it if necessary.
8976 for (i
= 0; i
< header
->num_clauses
; ++i
) {
8977 MonoExceptionClause
*clause
= &header
->clauses
[i
];
8980 * Use <= in the final comparison to handle clauses with multiple
8981 * leave statements, like in bug #78024.
8982 * The ordering of the exception clauses guarantees that we find the
8985 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
)) {
8987 MonoBasicBlock
*dont_throw
;
8992 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8995 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
8997 NEW_BBLOCK (cfg
, dont_throw
);
9000 * Currently, we always rethrow the abort exception, despite the
9001 * fact that this is not correct. See thread6.cs for an example.
9002 * But propagating the abort exception is more important than
9003 * getting the semantics right.
9005 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
9006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
9007 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
9009 MONO_START_BB (cfg
, dont_throw
);
9014 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
9016 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
9018 link_bblock (cfg
, bblock
, tblock
);
9019 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
9020 ins
->inst_target_bb
= tblock
;
9021 MONO_ADD_INS (bblock
, ins
);
9022 bblock
->has_call_handler
= 1;
9024 g_list_free (handlers
);
9027 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9028 MONO_ADD_INS (bblock
, ins
);
9029 GET_BBLOCK (cfg
, tblock
, target
);
9030 link_bblock (cfg
, bblock
, tblock
);
9031 ins
->inst_target_bb
= tblock
;
9032 start_new_bblock
= 1;
9034 if (*ip
== CEE_LEAVE
)
9043 * Mono specific opcodes
9045 case MONO_CUSTOM_PREFIX
: {
9047 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
9051 case CEE_MONO_ICALL
: {
9053 MonoJitICallInfo
*info
;
9055 token
= read32 (ip
+ 2);
9056 func
= mono_method_get_wrapper_data (method
, token
);
9057 info
= mono_find_jit_icall_by_addr (func
);
9060 CHECK_STACK (info
->sig
->param_count
);
9061 sp
-= info
->sig
->param_count
;
9063 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
9064 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
9068 inline_costs
+= 10 * num_calls
++;
9072 case CEE_MONO_LDPTR
: {
9075 CHECK_STACK_OVF (1);
9077 token
= read32 (ip
+ 2);
9079 ptr
= mono_method_get_wrapper_data (method
, token
);
9080 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
9081 MonoJitICallInfo
*callinfo
;
9082 const char *icall_name
;
9084 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
9085 g_assert (icall_name
);
9086 callinfo
= mono_find_jit_icall_by_name (icall_name
);
9087 g_assert (callinfo
);
9089 if (ptr
== callinfo
->func
) {
9090 /* Will be transformed into an AOTCONST later */
9091 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9097 /* FIXME: Generalize this */
9098 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
9099 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
9104 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9107 inline_costs
+= 10 * num_calls
++;
9108 /* Can't embed random pointers into AOT code */
9109 cfg
->disable_aot
= 1;
9112 case CEE_MONO_ICALL_ADDR
: {
9113 MonoMethod
*cmethod
;
9116 CHECK_STACK_OVF (1);
9118 token
= read32 (ip
+ 2);
9120 cmethod
= mono_method_get_wrapper_data (method
, token
);
9122 if (cfg
->compile_aot
) {
9123 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
9125 ptr
= mono_lookup_internal_call (cmethod
);
9127 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
9133 case CEE_MONO_VTADDR
: {
9134 MonoInst
*src_var
, *src
;
9140 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9141 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
9146 case CEE_MONO_NEWOBJ
: {
9147 MonoInst
*iargs
[2];
9149 CHECK_STACK_OVF (1);
9151 token
= read32 (ip
+ 2);
9152 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9153 mono_class_init (klass
);
9154 NEW_DOMAINCONST (cfg
, iargs
[0]);
9155 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
9156 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9157 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
9158 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
9160 inline_costs
+= 10 * num_calls
++;
9163 case CEE_MONO_OBJADDR
:
9166 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
9167 ins
->dreg
= alloc_preg (cfg
);
9168 ins
->sreg1
= sp
[0]->dreg
;
9169 ins
->type
= STACK_MP
;
9170 MONO_ADD_INS (cfg
->cbb
, ins
);
9174 case CEE_MONO_LDNATIVEOBJ
:
9176 * Similar to LDOBJ, but instead load the unmanaged
9177 * representation of the vtype to the stack.
9182 token
= read32 (ip
+ 2);
9183 klass
= mono_method_get_wrapper_data (method
, token
);
9184 g_assert (klass
->valuetype
);
9185 mono_class_init (klass
);
9188 MonoInst
*src
, *dest
, *temp
;
9191 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
9192 temp
->backend
.is_pinvoke
= 1;
9193 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
9194 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
9196 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
9197 dest
->type
= STACK_VTYPE
;
9198 dest
->klass
= klass
;
9204 case CEE_MONO_RETOBJ
: {
9206 * Same as RET, but return the native representation of a vtype
9209 g_assert (cfg
->ret
);
9210 g_assert (mono_method_signature (method
)->pinvoke
);
9215 token
= read32 (ip
+ 2);
9216 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9218 if (!cfg
->vret_addr
) {
9219 g_assert (cfg
->ret_var_is_local
);
9221 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
9223 EMIT_NEW_RETLOADA (cfg
, ins
);
9225 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
9227 if (sp
!= stack_start
)
9230 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9231 ins
->inst_target_bb
= end_bblock
;
9232 MONO_ADD_INS (bblock
, ins
);
9233 link_bblock (cfg
, bblock
, end_bblock
);
9234 start_new_bblock
= 1;
9238 case CEE_MONO_CISINST
:
9239 case CEE_MONO_CCASTCLASS
: {
9244 token
= read32 (ip
+ 2);
9245 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9246 if (ip
[1] == CEE_MONO_CISINST
)
9247 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
9249 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
9255 case CEE_MONO_SAVE_LMF
:
9256 case CEE_MONO_RESTORE_LMF
:
9257 #ifdef MONO_ARCH_HAVE_LMF_OPS
9258 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
9259 MONO_ADD_INS (bblock
, ins
);
9260 cfg
->need_lmf_area
= TRUE
;
9264 case CEE_MONO_CLASSCONST
:
9265 CHECK_STACK_OVF (1);
9267 token
= read32 (ip
+ 2);
9268 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
9271 inline_costs
+= 10 * num_calls
++;
9273 case CEE_MONO_NOT_TAKEN
:
9274 bblock
->out_of_line
= TRUE
;
9278 CHECK_STACK_OVF (1);
9280 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
9281 ins
->dreg
= alloc_preg (cfg
);
9282 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
9283 ins
->type
= STACK_PTR
;
9284 MONO_ADD_INS (bblock
, ins
);
9288 case CEE_MONO_DYN_CALL
: {
9291 /* It would be easier to call a trampoline, but that would put an
9292 * extra frame on the stack, confusing exception handling. So
9293 * implement it inline using an opcode for now.
9296 if (!cfg
->dyn_call_var
) {
9297 cfg
->dyn_call_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
9298 /* prevent it from being register allocated */
9299 cfg
->dyn_call_var
->flags
|= MONO_INST_INDIRECT
;
9302 /* Has to use a call inst since it local regalloc expects it */
9303 MONO_INST_NEW_CALL (cfg
, call
, OP_DYN_CALL
);
9304 ins
= (MonoInst
*)call
;
9306 ins
->sreg1
= sp
[0]->dreg
;
9307 ins
->sreg2
= sp
[1]->dreg
;
9308 MONO_ADD_INS (bblock
, ins
);
9310 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9311 cfg
->param_area
= MAX (cfg
->param_area
, MONO_ARCH_DYN_CALL_PARAM_AREA
);
9315 inline_costs
+= 10 * num_calls
++;
9320 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
9330 /* somewhat similar to LDTOKEN */
9331 MonoInst
*addr
, *vtvar
;
9332 CHECK_STACK_OVF (1);
9333 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
9335 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9336 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
9338 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9339 ins
->type
= STACK_VTYPE
;
9340 ins
->klass
= mono_defaults
.argumenthandle_class
;
9353 * The following transforms:
9354 * CEE_CEQ into OP_CEQ
9355 * CEE_CGT into OP_CGT
9356 * CEE_CGT_UN into OP_CGT_UN
9357 * CEE_CLT into OP_CLT
9358 * CEE_CLT_UN into OP_CLT_UN
9360 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9362 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9364 cmp
->sreg1
= sp
[0]->dreg
;
9365 cmp
->sreg2
= sp
[1]->dreg
;
9366 type_from_op (cmp
, sp
[0], sp
[1]);
9368 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9369 cmp
->opcode
= OP_LCOMPARE
;
9370 else if (sp
[0]->type
== STACK_R8
)
9371 cmp
->opcode
= OP_FCOMPARE
;
9373 cmp
->opcode
= OP_ICOMPARE
;
9374 MONO_ADD_INS (bblock
, cmp
);
9375 ins
->type
= STACK_I4
;
9376 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9377 type_from_op (ins
, sp
[0], sp
[1]);
9379 if (cmp
->opcode
== OP_FCOMPARE
) {
9381 * The backends expect the fceq opcodes to do the
9384 cmp
->opcode
= OP_NOP
;
9385 ins
->sreg1
= cmp
->sreg1
;
9386 ins
->sreg2
= cmp
->sreg2
;
9388 MONO_ADD_INS (bblock
, ins
);
9395 MonoMethod
*cil_method
;
9396 gboolean needs_static_rgctx_invoke
;
9398 CHECK_STACK_OVF (1);
9400 n
= read32 (ip
+ 2);
9401 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9404 mono_class_init (cmethod
->klass
);
9406 mono_save_token_info (cfg
, image
, n
, cmethod
);
9408 if (cfg
->generic_sharing_context
)
9409 context_used
= mono_method_check_context_used (cmethod
);
9411 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
9413 cil_method
= cmethod
;
9414 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
9415 METHOD_ACCESS_FAILURE
;
9417 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9418 if (check_linkdemand (cfg
, method
, cmethod
))
9420 CHECK_CFG_EXCEPTION
;
9421 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9422 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9426 * Optimize the common case of ldftn+delegate creation
9428 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9429 /* FIXME: SGEN support */
9430 /* FIXME: handle shared static generic methods */
9431 /* FIXME: handle this in shared code */
9432 if (!needs_static_rgctx_invoke
&& !context_used
&& (sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
9433 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
9434 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
9435 MonoInst
*target_ins
;
9438 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
9439 if (!invoke
|| !mono_method_signature (invoke
))
9443 if (cfg
->verbose_level
> 3)
9444 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9445 target_ins
= sp
[-1];
9447 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
);
9448 CHECK_CFG_EXCEPTION
;
9457 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
9459 EMIT_NEW_METHODCONST (cfg
, argconst
, cmethod
);
9461 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
9465 inline_costs
+= 10 * num_calls
++;
9468 case CEE_LDVIRTFTN
: {
9473 n
= read32 (ip
+ 2);
9474 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9477 mono_class_init (cmethod
->klass
);
9479 if (cfg
->generic_sharing_context
)
9480 context_used
= mono_method_check_context_used (cmethod
);
9482 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9483 if (check_linkdemand (cfg
, method
, cmethod
))
9485 CHECK_CFG_EXCEPTION
;
9486 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9487 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9494 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
9495 cmethod
, MONO_RGCTX_INFO_METHOD
);
9496 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
9498 EMIT_NEW_METHODCONST (cfg
, args
[1], cmethod
);
9499 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
9503 inline_costs
+= 10 * num_calls
++;
9507 CHECK_STACK_OVF (1);
9509 n
= read16 (ip
+ 2);
9511 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
9516 CHECK_STACK_OVF (1);
9518 n
= read16 (ip
+ 2);
9520 NEW_ARGLOADA (cfg
, ins
, n
);
9521 MONO_ADD_INS (cfg
->cbb
, ins
);
9529 n
= read16 (ip
+ 2);
9531 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
9533 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
9537 CHECK_STACK_OVF (1);
9539 n
= read16 (ip
+ 2);
9541 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
9546 unsigned char *tmp_ip
;
9547 CHECK_STACK_OVF (1);
9549 n
= read16 (ip
+ 2);
9552 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
9558 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
9567 n
= read16 (ip
+ 2);
9569 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
9571 emit_stloc_ir (cfg
, sp
, header
, n
);
9578 if (sp
!= stack_start
)
9580 if (cfg
->method
!= method
)
9582 * Inlining this into a loop in a parent could lead to
9583 * stack overflows which is different behavior than the
9584 * non-inlined case, thus disable inlining in this case.
9586 goto inline_failure
;
9588 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
9589 ins
->dreg
= alloc_preg (cfg
);
9590 ins
->sreg1
= sp
[0]->dreg
;
9591 ins
->type
= STACK_PTR
;
9592 MONO_ADD_INS (cfg
->cbb
, ins
);
9594 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
9596 ins
->flags
|= MONO_INST_INIT
;
9601 case CEE_ENDFILTER
: {
9602 MonoExceptionClause
*clause
, *nearest
;
9603 int cc
, nearest_num
;
9607 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
9609 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
9610 ins
->sreg1
= (*sp
)->dreg
;
9611 MONO_ADD_INS (bblock
, ins
);
9612 start_new_bblock
= 1;
9617 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
9618 clause
= &header
->clauses
[cc
];
9619 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
9620 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
9621 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
9627 if ((ip
- header
->code
) != nearest
->handler_offset
)
9632 case CEE_UNALIGNED_
:
9633 ins_flag
|= MONO_INST_UNALIGNED
;
9634 /* FIXME: record alignment? we can assume 1 for now */
9639 ins_flag
|= MONO_INST_VOLATILE
;
9643 ins_flag
|= MONO_INST_TAILCALL
;
9644 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
9645 /* Can't inline tail calls at this time */
9646 inline_costs
+= 100000;
9653 token
= read32 (ip
+ 2);
9654 klass
= mini_get_class (method
, token
, generic_context
);
9655 CHECK_TYPELOAD (klass
);
9656 if (generic_class_is_reference_type (cfg
, klass
))
9657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
9659 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
9663 case CEE_CONSTRAINED_
:
9665 token
= read32 (ip
+ 2);
9666 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
9667 constrained_call
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9669 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
9670 CHECK_TYPELOAD (constrained_call
);
9675 MonoInst
*iargs
[3];
9679 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
9680 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
9681 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
9682 /* emit_memset only works when val == 0 */
9683 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
9688 if (ip
[1] == CEE_CPBLK
) {
9689 MonoMethod
*memcpy_method
= get_memcpy_method ();
9690 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9692 MonoMethod
*memset_method
= get_memset_method ();
9693 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
9703 ins_flag
|= MONO_INST_NOTYPECHECK
;
9705 ins_flag
|= MONO_INST_NORANGECHECK
;
9706 /* we ignore the no-nullcheck for now since we
9707 * really do it explicitly only when doing callvirt->call
9713 int handler_offset
= -1;
9715 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9716 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9717 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
9718 handler_offset
= clause
->handler_offset
;
9723 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
9725 g_assert (handler_offset
!= -1);
9727 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
9728 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
9729 ins
->sreg1
= load
->dreg
;
9730 MONO_ADD_INS (bblock
, ins
);
9732 link_bblock (cfg
, bblock
, end_bblock
);
9733 start_new_bblock
= 1;
9741 CHECK_STACK_OVF (1);
9743 token
= read32 (ip
+ 2);
9744 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
) {
9745 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
9746 token
= mono_type_size (type
, &ialign
);
9748 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
9749 CHECK_TYPELOAD (klass
);
9750 mono_class_init (klass
);
9751 token
= mono_class_value_size (klass
, &align
);
9753 EMIT_NEW_ICONST (cfg
, ins
, token
);
9758 case CEE_REFANYTYPE
: {
9759 MonoInst
*src_var
, *src
;
9765 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9767 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9768 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
9787 g_warning ("opcode 0xfe 0x%02x not handled", ip
[1]);
9797 g_warning ("opcode 0x%02x not handled", *ip
);
9801 if (start_new_bblock
!= 1)
9804 bblock
->cil_length
= ip
- bblock
->cil_code
;
9805 bblock
->next_bb
= end_bblock
;
9807 if (cfg
->method
== method
&& cfg
->domainvar
) {
9809 MonoInst
*get_domain
;
9811 cfg
->cbb
= init_localsbb
;
9813 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
9814 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
9817 get_domain
->dreg
= alloc_preg (cfg
);
9818 MONO_ADD_INS (cfg
->cbb
, get_domain
);
9820 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
9821 MONO_ADD_INS (cfg
->cbb
, store
);
9824 #ifdef TARGET_POWERPC
9825 if (cfg
->compile_aot
)
9826 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9827 mono_get_got_var (cfg
);
9830 if (cfg
->method
== method
&& cfg
->got_var
)
9831 mono_emit_load_got_addr (cfg
);
9836 cfg
->cbb
= init_localsbb
;
9838 for (i
= 0; i
< header
->num_locals
; ++i
) {
9839 MonoType
*ptype
= header
->locals
[i
];
9840 int t
= ptype
->type
;
9841 dreg
= cfg
->locals
[i
]->dreg
;
9843 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
9844 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
9846 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9847 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
9848 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9849 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
9850 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9851 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
9852 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
9853 ins
->type
= STACK_R8
;
9854 ins
->inst_p0
= (void*)&r8_0
;
9855 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
9856 MONO_ADD_INS (init_localsbb
, ins
);
9857 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
9858 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
9859 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
9860 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
9862 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9867 /* Add a sequence point for method entry/exit events */
9869 NEW_SEQ_POINT (cfg
, ins
, METHOD_ENTRY_IL_OFFSET
, FALSE
);
9870 MONO_ADD_INS (init_localsbb
, ins
);
9871 NEW_SEQ_POINT (cfg
, ins
, METHOD_EXIT_IL_OFFSET
, FALSE
);
9872 MONO_ADD_INS (cfg
->bb_exit
, ins
);
9877 if (cfg
->method
== method
) {
9879 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
9880 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
9882 mono_create_spvar_for_region (cfg
, bb
->region
);
9883 if (cfg
->verbose_level
> 2)
9884 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
9888 g_slist_free (class_inits
);
9889 dont_inline
= g_list_remove (dont_inline
, method
);
9891 if (inline_costs
< 0) {
9894 /* Method is too large */
9895 mname
= mono_method_full_name (method
, TRUE
);
9896 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
9897 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
9902 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
9903 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
9905 return inline_costs
;
9908 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
9909 g_slist_free (class_inits
);
9910 dont_inline
= g_list_remove (dont_inline
, method
);
9914 g_slist_free (class_inits
);
9915 dont_inline
= g_list_remove (dont_inline
, method
);
9919 g_slist_free (class_inits
);
9920 dont_inline
= g_list_remove (dont_inline
, method
);
9921 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
9925 g_slist_free (class_inits
);
9926 dont_inline
= g_list_remove (dont_inline
, method
);
9927 set_exception_type_from_invalid_il (cfg
, method
, ip
);
9932 store_membase_reg_to_store_membase_imm (int opcode
)
9935 case OP_STORE_MEMBASE_REG
:
9936 return OP_STORE_MEMBASE_IMM
;
9937 case OP_STOREI1_MEMBASE_REG
:
9938 return OP_STOREI1_MEMBASE_IMM
;
9939 case OP_STOREI2_MEMBASE_REG
:
9940 return OP_STOREI2_MEMBASE_IMM
;
9941 case OP_STOREI4_MEMBASE_REG
:
9942 return OP_STOREI4_MEMBASE_IMM
;
9943 case OP_STOREI8_MEMBASE_REG
:
9944 return OP_STOREI8_MEMBASE_IMM
;
9946 g_assert_not_reached ();
9952 #endif /* DISABLE_JIT */
9955 mono_op_to_op_imm (int opcode
)
9965 return OP_IDIV_UN_IMM
;
9969 return OP_IREM_UN_IMM
;
9983 return OP_ISHR_UN_IMM
;
10000 return OP_LSHR_UN_IMM
;
10003 return OP_COMPARE_IMM
;
10005 return OP_ICOMPARE_IMM
;
10007 return OP_LCOMPARE_IMM
;
10009 case OP_STORE_MEMBASE_REG
:
10010 return OP_STORE_MEMBASE_IMM
;
10011 case OP_STOREI1_MEMBASE_REG
:
10012 return OP_STOREI1_MEMBASE_IMM
;
10013 case OP_STOREI2_MEMBASE_REG
:
10014 return OP_STOREI2_MEMBASE_IMM
;
10015 case OP_STOREI4_MEMBASE_REG
:
10016 return OP_STOREI4_MEMBASE_IMM
;
10018 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10020 return OP_X86_PUSH_IMM
;
10021 case OP_X86_COMPARE_MEMBASE_REG
:
10022 return OP_X86_COMPARE_MEMBASE_IMM
;
10024 #if defined(TARGET_AMD64)
10025 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
10026 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
10028 case OP_VOIDCALL_REG
:
10029 return OP_VOIDCALL
;
10037 return OP_LOCALLOC_IMM
;
10044 ldind_to_load_membase (int opcode
)
10048 return OP_LOADI1_MEMBASE
;
10050 return OP_LOADU1_MEMBASE
;
10052 return OP_LOADI2_MEMBASE
;
10054 return OP_LOADU2_MEMBASE
;
10056 return OP_LOADI4_MEMBASE
;
10058 return OP_LOADU4_MEMBASE
;
10060 return OP_LOAD_MEMBASE
;
10061 case CEE_LDIND_REF
:
10062 return OP_LOAD_MEMBASE
;
10064 return OP_LOADI8_MEMBASE
;
10066 return OP_LOADR4_MEMBASE
;
10068 return OP_LOADR8_MEMBASE
;
10070 g_assert_not_reached ();
10077 stind_to_store_membase (int opcode
)
10081 return OP_STOREI1_MEMBASE_REG
;
10083 return OP_STOREI2_MEMBASE_REG
;
10085 return OP_STOREI4_MEMBASE_REG
;
10087 case CEE_STIND_REF
:
10088 return OP_STORE_MEMBASE_REG
;
10090 return OP_STOREI8_MEMBASE_REG
;
10092 return OP_STORER4_MEMBASE_REG
;
10094 return OP_STORER8_MEMBASE_REG
;
10096 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-membase opcode to the variant which loads from an absolute
 * address (load-mem). Only implemented on x86/amd64; returns -1 on other
 * targets or for opcodes with no load-mem form.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode STORE_OPCODE whose value is produced by OPCODE,
 * return an x86/amd64 read-modify-write opcode which performs the ALU
 * operation directly on the memory destination, or -1 if the combination
 * cannot be fused.
 *
 * NOTE(review): case labels reconstructed from the visible return opcodes
 * of a garbled source — confirm against the upstream method-to-ir.c.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only 32 bit / pointer sized stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode OPCODE with a following one-byte store
 * STORE_OPCODE into an x86 setcc-to-memory opcode, or return -1 when the
 * pair cannot be fused.
 *
 * NOTE(review): the two case labels were reconstructed from a garbled
 * source — confirm the exact compare opcodes against upstream.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the first source of OPCODE into a
 * single x86/amd64 opcode which reads that operand from memory, or return
 * -1 when the pair cannot be fused. On amd64, 32 bit and 64 bit loads map
 * to different fused opcodes.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the second source of OPCODE into a
 * single x86/amd64 reg-op-memory opcode, or return -1 when the pair cannot
 * be fused. On amd64 each case checks that the load width matches the
 * operation width (I4/U4 loads for 32 bit ops, I8/pointer loads for 64 bit
 * ops).
 *
 * NOTE(review): case labels reconstructed from a garbled source; the
 * original amd64 arithmetic cases had no break between them (each case
 * only returns when its width test passes) — confirm against upstream.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * form would be software-emulated on this target (long shifts on 32 bit
 * registers, integer div/rem where the arch emulates them), so the caller
 * keeps the register form instead.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
10395 #ifndef DISABLE_JIT
10398 * mono_handle_global_vregs:
10400 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10404 mono_handle_global_vregs (MonoCompile
*cfg
)
10406 gint32
*vreg_to_bb
;
10407 MonoBasicBlock
*bb
;
10410 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
10412 #ifdef MONO_ARCH_SIMD_INTRINSICS
10413 if (cfg
->uses_simd_intrinsics
)
10414 mono_simd_simplify_indirection (cfg
);
10417 /* Find local vregs used in more than one bb */
10418 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10419 MonoInst
*ins
= bb
->code
;
10420 int block_num
= bb
->block_num
;
10422 if (cfg
->verbose_level
> 2)
10423 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
10426 for (; ins
; ins
= ins
->next
) {
10427 const char *spec
= INS_INFO (ins
->opcode
);
10428 int regtype
= 0, regindex
;
10431 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10432 mono_print_ins (ins
);
10434 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
10436 for (regindex
= 0; regindex
< 4; regindex
++) {
10439 if (regindex
== 0) {
10440 regtype
= spec
[MONO_INST_DEST
];
10441 if (regtype
== ' ')
10444 } else if (regindex
== 1) {
10445 regtype
= spec
[MONO_INST_SRC1
];
10446 if (regtype
== ' ')
10449 } else if (regindex
== 2) {
10450 regtype
= spec
[MONO_INST_SRC2
];
10451 if (regtype
== ' ')
10454 } else if (regindex
== 3) {
10455 regtype
= spec
[MONO_INST_SRC3
];
10456 if (regtype
== ' ')
10461 #if SIZEOF_REGISTER == 4
10462 /* In the LLVM case, the long opcodes are not decomposed */
10463 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
10465 * Since some instructions reference the original long vreg,
10466 * and some reference the two component vregs, it is quite hard
10467 * to determine when it needs to be global. So be conservative.
10469 if (!get_vreg_to_inst (cfg
, vreg
)) {
10470 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10472 if (cfg
->verbose_level
> 2)
10473 printf ("LONG VREG R%d made global.\n", vreg
);
10477 * Make the component vregs volatile since the optimizations can
10478 * get confused otherwise.
10480 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
10481 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
10485 g_assert (vreg
!= -1);
10487 prev_bb
= vreg_to_bb
[vreg
];
10488 if (prev_bb
== 0) {
10489 /* 0 is a valid block num */
10490 vreg_to_bb
[vreg
] = block_num
+ 1;
10491 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
10492 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
10495 if (!get_vreg_to_inst (cfg
, vreg
)) {
10496 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10497 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
10501 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
10504 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
10507 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
10510 g_assert_not_reached ();
10514 /* Flag as having been used in more than one bb */
10515 vreg_to_bb
[vreg
] = -1;
10521 /* If a variable is used in only one bblock, convert it into a local vreg */
10522 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10523 MonoInst
*var
= cfg
->varinfo
[i
];
10524 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
10526 switch (var
->type
) {
10532 #if SIZEOF_REGISTER == 8
10535 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10536 /* Enabling this screws up the fp stack on x86 */
10539 /* Arguments are implicitly global */
10540 /* Putting R4 vars into registers doesn't work currently */
10541 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
10543 * Make that the variable's liveness interval doesn't contain a call, since
10544 * that would cause the lvreg to be spilled, making the whole optimization
10547 /* This is too slow for JIT compilation */
10549 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
10551 int def_index
, call_index
, ins_index
;
10552 gboolean spilled
= FALSE
;
10557 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
10558 const char *spec
= INS_INFO (ins
->opcode
);
10560 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
10561 def_index
= ins_index
;
10563 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
10564 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
10565 if (call_index
> def_index
) {
10571 if (MONO_IS_CALL (ins
))
10572 call_index
= ins_index
;
10582 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10583 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
10584 var
->flags
|= MONO_INST_IS_DEAD
;
10585 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
10592 * Compress the varinfo and vars tables so the liveness computation is faster and
10593 * takes up less space.
10596 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10597 MonoInst
*var
= cfg
->varinfo
[i
];
10598 if (pos
< i
&& cfg
->locals_start
== i
)
10599 cfg
->locals_start
= pos
;
10600 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
10602 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
10603 cfg
->varinfo
[pos
]->inst_c0
= pos
;
10604 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
10605 cfg
->vars
[pos
].idx
= pos
;
10606 #if SIZEOF_REGISTER == 4
10607 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
10608 /* Modify the two component vars too */
10611 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
10612 var1
->inst_c0
= pos
;
10613 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
10614 var1
->inst_c0
= pos
;
10621 cfg
->num_varinfo
= pos
;
10622 if (cfg
->locals_start
> cfg
->num_varinfo
)
10623 cfg
->locals_start
= cfg
->num_varinfo
;
10627 * mono_spill_global_vars:
10629 * Generate spill code for variables which are not allocated to registers,
10630 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10631 * code is generated which could be optimized by the local optimization passes.
10634 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
10636 MonoBasicBlock
*bb
;
10638 int orig_next_vreg
;
10639 guint32
*vreg_to_lvreg
;
10641 guint32 i
, lvregs_len
;
10642 gboolean dest_has_lvreg
= FALSE
;
10643 guint32 stacktypes
[128];
10644 MonoInst
**live_range_start
, **live_range_end
;
10645 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
10647 *need_local_opts
= FALSE
;
10649 memset (spec2
, 0, sizeof (spec2
));
10651 /* FIXME: Move this function to mini.c */
10652 stacktypes
['i'] = STACK_PTR
;
10653 stacktypes
['l'] = STACK_I8
;
10654 stacktypes
['f'] = STACK_R8
;
10655 #ifdef MONO_ARCH_SIMD_INTRINSICS
10656 stacktypes
['x'] = STACK_VTYPE
;
10659 #if SIZEOF_REGISTER == 4
10660 /* Create MonoInsts for longs */
10661 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10662 MonoInst
*ins
= cfg
->varinfo
[i
];
10664 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
10665 switch (ins
->type
) {
10666 #ifdef MONO_ARCH_SOFT_FLOAT
10672 g_assert (ins
->opcode
== OP_REGOFFSET
);
10674 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
10676 tree
->opcode
= OP_REGOFFSET
;
10677 tree
->inst_basereg
= ins
->inst_basereg
;
10678 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
10680 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
10682 tree
->opcode
= OP_REGOFFSET
;
10683 tree
->inst_basereg
= ins
->inst_basereg
;
10684 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
10694 /* FIXME: widening and truncation */
10697 * As an optimization, when a variable allocated to the stack is first loaded into
10698 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10699 * the variable again.
10701 orig_next_vreg
= cfg
->next_vreg
;
10702 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
10703 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
10707 * These arrays contain the first and last instructions accessing a given
10709 * Since we emit bblocks in the same order we process them here, and we
10710 * don't split live ranges, these will precisely describe the live range of
10711 * the variable, i.e. the instruction range where a valid value can be found
10712 * in the variables location.
10714 /* FIXME: Only do this if debugging info is requested */
10715 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10716 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10717 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10718 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10720 /* Add spill loads/stores */
10721 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10724 if (cfg
->verbose_level
> 2)
10725 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
10727 /* Clear vreg_to_lvreg array */
10728 for (i
= 0; i
< lvregs_len
; i
++)
10729 vreg_to_lvreg
[lvregs
[i
]] = 0;
10733 MONO_BB_FOR_EACH_INS (bb
, ins
) {
10734 const char *spec
= INS_INFO (ins
->opcode
);
10735 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
10736 gboolean store
, no_lvreg
;
10737 int sregs
[MONO_MAX_SRC_REGS
];
10739 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10740 mono_print_ins (ins
);
10742 if (ins
->opcode
== OP_NOP
)
10746 * We handle LDADDR here as well, since it can only be decomposed
10747 * when variable addresses are known.
10749 if (ins
->opcode
== OP_LDADDR
) {
10750 MonoInst
*var
= ins
->inst_p0
;
10752 if (var
->opcode
== OP_VTARG_ADDR
) {
10753 /* Happens on SPARC/S390 where vtypes are passed by reference */
10754 MonoInst
*vtaddr
= var
->inst_left
;
10755 if (vtaddr
->opcode
== OP_REGVAR
) {
10756 ins
->opcode
= OP_MOVE
;
10757 ins
->sreg1
= vtaddr
->dreg
;
10759 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
10760 ins
->opcode
= OP_LOAD_MEMBASE
;
10761 ins
->inst_basereg
= vtaddr
->inst_basereg
;
10762 ins
->inst_offset
= vtaddr
->inst_offset
;
10766 g_assert (var
->opcode
== OP_REGOFFSET
);
10768 ins
->opcode
= OP_ADD_IMM
;
10769 ins
->sreg1
= var
->inst_basereg
;
10770 ins
->inst_imm
= var
->inst_offset
;
10773 *need_local_opts
= TRUE
;
10774 spec
= INS_INFO (ins
->opcode
);
10777 if (ins
->opcode
< MONO_CEE_LAST
) {
10778 mono_print_ins (ins
);
10779 g_assert_not_reached ();
10783 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10787 if (MONO_IS_STORE_MEMBASE (ins
)) {
10788 tmp_reg
= ins
->dreg
;
10789 ins
->dreg
= ins
->sreg2
;
10790 ins
->sreg2
= tmp_reg
;
10793 spec2
[MONO_INST_DEST
] = ' ';
10794 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10795 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10796 spec2
[MONO_INST_SRC3
] = ' ';
10798 } else if (MONO_IS_STORE_MEMINDEX (ins
))
10799 g_assert_not_reached ();
10804 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
10805 printf ("\t %.3s %d", spec
, ins
->dreg
);
10806 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
10807 for (srcindex
= 0; srcindex
< 3; ++srcindex
)
10808 printf (" %d", sregs
[srcindex
]);
10815 regtype
= spec
[MONO_INST_DEST
];
10816 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
10819 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
10820 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
10821 MonoInst
*store_ins
;
10823 MonoInst
*def_ins
= ins
;
10824 int dreg
= ins
->dreg
; /* The original vreg */
10826 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
10828 if (var
->opcode
== OP_REGVAR
) {
10829 ins
->dreg
= var
->dreg
;
10830 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
10832 * Instead of emitting a load+store, use a _membase opcode.
10834 g_assert (var
->opcode
== OP_REGOFFSET
);
10835 if (ins
->opcode
== OP_MOVE
) {
10839 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
10840 ins
->inst_basereg
= var
->inst_basereg
;
10841 ins
->inst_offset
= var
->inst_offset
;
10844 spec
= INS_INFO (ins
->opcode
);
10848 g_assert (var
->opcode
== OP_REGOFFSET
);
10850 prev_dreg
= ins
->dreg
;
10852 /* Invalidate any previous lvreg for this vreg */
10853 vreg_to_lvreg
[ins
->dreg
] = 0;
10857 #ifdef MONO_ARCH_SOFT_FLOAT
10858 if (store_opcode
== OP_STORER8_MEMBASE_REG
) {
10860 store_opcode
= OP_STOREI8_MEMBASE_REG
;
10864 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10866 if (regtype
== 'l') {
10867 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
10868 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10869 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
10870 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10871 def_ins
= store_ins
;
10874 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
10876 /* Try to fuse the store into the instruction itself */
10877 /* FIXME: Add more instructions */
10878 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
10879 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
10880 ins
->inst_imm
= ins
->inst_c0
;
10881 ins
->inst_destbasereg
= var
->inst_basereg
;
10882 ins
->inst_offset
= var
->inst_offset
;
10883 spec
= INS_INFO (ins
->opcode
);
10884 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
10885 ins
->opcode
= store_opcode
;
10886 ins
->inst_destbasereg
= var
->inst_basereg
;
10887 ins
->inst_offset
= var
->inst_offset
;
10891 tmp_reg
= ins
->dreg
;
10892 ins
->dreg
= ins
->sreg2
;
10893 ins
->sreg2
= tmp_reg
;
10896 spec2
[MONO_INST_DEST
] = ' ';
10897 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10898 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10899 spec2
[MONO_INST_SRC3
] = ' ';
10901 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
10902 // FIXME: The backends expect the base reg to be in inst_basereg
10903 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
10905 ins
->inst_basereg
= var
->inst_basereg
;
10906 ins
->inst_offset
= var
->inst_offset
;
10907 spec
= INS_INFO (ins
->opcode
);
10909 /* printf ("INS: "); mono_print_ins (ins); */
10910 /* Create a store instruction */
10911 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
10913 /* Insert it after the instruction */
10914 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10916 def_ins
= store_ins
;
10919 * We can't assign ins->dreg to var->dreg here, since the
10920 * sregs could use it. So set a flag, and do it after
10923 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
10924 dest_has_lvreg
= TRUE
;
10929 if (def_ins
&& !live_range_start
[dreg
]) {
10930 live_range_start
[dreg
] = def_ins
;
10931 live_range_start_bb
[dreg
] = bb
;
10938 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
10939 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
10940 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
10941 sreg
= sregs
[srcindex
];
10943 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
10944 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
10945 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
10946 MonoInst
*use_ins
= ins
;
10947 MonoInst
*load_ins
;
10948 guint32 load_opcode
;
10950 if (var
->opcode
== OP_REGVAR
) {
10951 sregs
[srcindex
] = var
->dreg
;
10952 //mono_inst_set_src_registers (ins, sregs);
10953 live_range_end
[sreg
] = use_ins
;
10954 live_range_end_bb
[sreg
] = bb
;
10958 g_assert (var
->opcode
== OP_REGOFFSET
);
10960 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
10962 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
10964 if (vreg_to_lvreg
[sreg
]) {
10965 g_assert (vreg_to_lvreg
[sreg
] != -1);
10967 /* The variable is already loaded to an lvreg */
10968 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10969 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
10970 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
10971 //mono_inst_set_src_registers (ins, sregs);
10975 /* Try to fuse the load into the instruction */
10976 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
10977 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
10978 sregs
[0] = var
->inst_basereg
;
10979 //mono_inst_set_src_registers (ins, sregs);
10980 ins
->inst_offset
= var
->inst_offset
;
10981 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
10982 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
10983 sregs
[1] = var
->inst_basereg
;
10984 //mono_inst_set_src_registers (ins, sregs);
10985 ins
->inst_offset
= var
->inst_offset
;
10987 if (MONO_IS_REAL_MOVE (ins
)) {
10988 ins
->opcode
= OP_NOP
;
10991 //printf ("%d ", srcindex); mono_print_ins (ins);
10993 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10995 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
10996 if (var
->dreg
== prev_dreg
) {
10998 * sreg refers to the value loaded by the load
10999 * emitted below, but we need to use ins->dreg
11000 * since it refers to the store emitted earlier.
11004 g_assert (sreg
!= -1);
11005 vreg_to_lvreg
[var
->dreg
] = sreg
;
11006 g_assert (lvregs_len
< 1024);
11007 lvregs
[lvregs_len
++] = var
->dreg
;
11011 sregs
[srcindex
] = sreg
;
11012 //mono_inst_set_src_registers (ins, sregs);
11014 if (regtype
== 'l') {
11015 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
11016 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11017 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
11018 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11019 use_ins
= load_ins
;
11022 #if SIZEOF_REGISTER == 4
11023 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
11025 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
11026 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
11027 use_ins
= load_ins
;
11031 if (var
->dreg
< orig_next_vreg
) {
11032 live_range_end
[var
->dreg
] = use_ins
;
11033 live_range_end_bb
[var
->dreg
] = bb
;
11037 mono_inst_set_src_registers (ins
, sregs
);
11039 if (dest_has_lvreg
) {
11040 g_assert (ins
->dreg
!= -1);
11041 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
11042 g_assert (lvregs_len
< 1024);
11043 lvregs
[lvregs_len
++] = prev_dreg
;
11044 dest_has_lvreg
= FALSE
;
11048 tmp_reg
= ins
->dreg
;
11049 ins
->dreg
= ins
->sreg2
;
11050 ins
->sreg2
= tmp_reg
;
11053 if (MONO_IS_CALL (ins
)) {
11054 /* Clear vreg_to_lvreg array */
11055 for (i
= 0; i
< lvregs_len
; i
++)
11056 vreg_to_lvreg
[lvregs
[i
]] = 0;
11058 } else if (ins
->opcode
== OP_NOP
) {
11060 MONO_INST_NULLIFY_SREGS (ins
);
11063 if (cfg
->verbose_level
> 2)
11064 mono_print_ins_index (1, ins
);
11068 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11070 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11071 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11073 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11074 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
11077 if (live_range_start
[vreg
]) {
11078 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
11080 ins
->inst_c1
= vreg
;
11081 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
11083 if (live_range_end
[vreg
]) {
11084 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
11086 ins
->inst_c1
= vreg
;
11087 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
11092 g_free (live_range_start
);
11093 g_free (live_range_end
);
11094 g_free (live_range_start_bb
);
11095 g_free (live_range_end_bb
);
11100 * - use 'iadd' instead of 'int_add'
11101 * - handling ovf opcodes: decompose in method_to_ir.
11102 * - unify iregs/fregs
11103 * -> partly done, the missing parts are:
11104 * - a more complete unification would involve unifying the hregs as well, so
11105 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11106 * would no longer map to the machine hregs, so the code generators would need to
11107 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11108 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11109 * fp/non-fp branches speeds it up by about 15%.
11110 * - use sext/zext opcodes instead of shifts
11112 * - get rid of TEMPLOADs if possible and use vregs instead
11113 * - clean up usage of OP_P/OP_ opcodes
11114 * - cleanup usage of DUMMY_USE
11115 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11117 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11118 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11119 * - make sure handle_stack_args () is called before the branch is emitted
11120 * - when the new IR is done, get rid of all unused stuff
11121 * - COMPARE/BEQ as separate instructions or unify them ?
11122 * - keeping them separate allows specialized compare instructions like
11123 * compare_imm, compare_membase
11124 * - most back ends unify fp compare+branch, fp compare+ceq
11125 * - integrate mono_save_args into inline_method
11126 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11127 * - handle long shift opts on 32 bit platforms somehow: they require
11128 * 3 sregs (2 for arg1 and 1 for arg2)
11129 * - make byref a 'normal' type.
11130 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11131 * variable if needed.
11132 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11133 * like inline_method.
11134 * - remove inlining restrictions
11135 * - fix LNEG and enable cfold of INEG
11136 * - generalize x86 optimizations like ldelema as a peephole optimization
11137 * - add store_mem_imm for amd64
11138 * - optimize the loading of the interruption flag in the managed->native wrappers
11139 * - avoid special handling of OP_NOP in passes
11140 * - move code inserting instructions into one function/macro.
11141 * - try a coalescing phase after liveness analysis
11142 * - add float -> vreg conversion + local optimizations on !x86
11143 * - figure out how to handle decomposed branches during optimizations, ie.
11144 * compare+branch, op_jump_table+op_br etc.
11145 * - promote RuntimeXHandles to vregs
11146 * - vtype cleanups:
11147 * - add a NEW_VARLOADA_VREG macro
11148 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11149 * accessing vtype fields.
11150 * - get rid of I8CONST on 64 bit platforms
11151 * - dealing with the increase in code size due to branches created during opcode
11153 * - use extended basic blocks
11154 * - all parts of the JIT
11155 * - handle_global_vregs () && local regalloc
11156 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11157 * - sources of increase in code size:
11160 * - isinst and castclass
11161 * - lvregs not allocated to global registers even if used multiple times
11162 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11164 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11165 * - add all micro optimizations from the old JIT
11166 * - put tree optimizations into the deadce pass
11167 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11168 * specific function.
11169 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11170 * fcompare + branchCC.
11171 * - create a helper function for allocating a stack slot, taking into account
11172 * MONO_CFG_HAS_SPILLUP.
11174 * - merge the ia64 switch changes.
11175 * - optimize mono_regstate2_alloc_int/float.
11176 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11177 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11178 * parts of the tree could be separated by other instructions, killing the tree
11179 * arguments, or stores killing loads etc. Also, should we fold loads into other
11180 * instructions if the result of the load is used multiple times ?
11181 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11182 * - LAST MERGE: 108395.
11183 * - when returning vtypes in registers, generate IR and append it to the end of the
11184 * last bb instead of doing it in the epilog.
11185 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11193 - When to decompose opcodes:
11194 - earlier: this makes some optimizations hard to implement, since the low level IR
11195 no longer contains the necessary information. But it is easier to do.
11196 - later: harder to implement, enables more optimizations.
11197 - Branches inside bblocks:
11198 - created when decomposing complex opcodes.
11199 - branches to another bblock: harmless, but not tracked by the branch
11200 optimizations, so need to branch to a label at the start of the bblock.
11201 - branches to inside the same bblock: very problematic, trips up the local
11202 reg allocator. Can be fixed by splitting the current bblock, but that is a
11203 complex operation, since some local vregs can become global vregs etc.
11204 - Local/global vregs:
11205 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11206 local register allocator.
11207 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11208 structure, created by mono_create_var (). Assigned to hregs or the stack by
11209 the global register allocator.
11210 - When to do optimizations like alu->alu_imm:
11211 - earlier -> saves work later on since the IR will be smaller/simpler
11212 - later -> can work on more instructions
11213 - Handling of valuetypes:
11214 - When a vtype is pushed on the stack, a new temporary is created, an
11215 instruction computing its address (LDADDR) is emitted and pushed on
11216 the stack. Need to optimize cases when the vtype is used immediately as in
11217 argument passing, stloc etc.
11218 - Instead of the to_end stuff in the old JIT, simply call the function handling
11219 the values on the stack before emitting the last instruction of the bb.
11222 #endif /* DISABLE_JIT */