2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode
);
102 static int stind_to_store_membase (int opcode
);
104 int mono_op_to_op_imm (int opcode
);
105 int mono_op_to_op_imm_noemul (int opcode
);
107 MonoInst
* mono_emit_native_call (MonoCompile
*cfg
, gconstpointer func
, MonoMethodSignature
*sig
, MonoInst
**args
);
108 void mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
);
109 void mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
);
111 /* helper methods signature */
112 extern MonoMethodSignature
*helper_sig_class_init_trampoline
;
113 extern MonoMethodSignature
*helper_sig_domain_get
;
114 extern MonoMethodSignature
*helper_sig_generic_class_init_trampoline
;
115 extern MonoMethodSignature
*helper_sig_rgctx_lazy_fetch_trampoline
;
116 extern MonoMethodSignature
*helper_sig_monitor_enter_exit_trampoline
;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 * This should contain the index of the last sreg + 1. This is not the same
151 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 const gint8 ins_sreg_counts
[] = {
154 #include "mini-ops.h"
159 extern GHashTable
*jit_icall_name_hash
;
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
168 mono_inst_set_src_registers (MonoInst
*ins
, int *regs
)
170 ins
->sreg1
= regs
[0];
171 ins
->sreg2
= regs
[1];
172 ins
->sreg3
= regs
[2];
176 mono_alloc_ireg (MonoCompile
*cfg
)
178 return alloc_ireg (cfg
);
182 mono_alloc_freg (MonoCompile
*cfg
)
184 return alloc_freg (cfg
);
188 mono_alloc_preg (MonoCompile
*cfg
)
190 return alloc_preg (cfg
);
194 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
196 return alloc_dreg (cfg
, stack_type
);
200 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
206 switch (type
->type
) {
209 case MONO_TYPE_BOOLEAN
:
221 case MONO_TYPE_FNPTR
:
223 case MONO_TYPE_CLASS
:
224 case MONO_TYPE_STRING
:
225 case MONO_TYPE_OBJECT
:
226 case MONO_TYPE_SZARRAY
:
227 case MONO_TYPE_ARRAY
:
231 #if SIZEOF_REGISTER == 8
240 case MONO_TYPE_VALUETYPE
:
241 if (type
->data
.klass
->enumtype
) {
242 type
= mono_class_enum_basetype (type
->data
.klass
);
245 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type (type
)))
248 case MONO_TYPE_TYPEDBYREF
:
250 case MONO_TYPE_GENERICINST
:
251 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
255 g_assert (cfg
->generic_sharing_context
);
258 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
264 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
269 printf ("\n%s %d: [IN: ", msg
, bb
->block_num
);
270 for (i
= 0; i
< bb
->in_count
; ++i
)
271 printf (" BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
273 for (i
= 0; i
< bb
->out_count
; ++i
)
274 printf (" BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
276 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
277 mono_print_ins_index (-1, tree
);
281 * Can't put this at the beginning, since other files reference stuff from this
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
310 #if SIZEOF_REGISTER == 8
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
349 #define ADD_BINCOND(next_block) do { \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
388 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
390 MonoBasicBlock
**newa
;
394 if (from
->cil_code
) {
396 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
398 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
401 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
403 printf ("edge from entry to exit\n");
408 for (i
= 0; i
< from
->out_count
; ++i
) {
409 if (to
== from
->out_bb
[i
]) {
415 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
416 for (i
= 0; i
< from
->out_count
; ++i
) {
417 newa
[i
] = from
->out_bb
[i
];
425 for (i
= 0; i
< to
->in_count
; ++i
) {
426 if (from
== to
->in_bb
[i
]) {
432 newa
= mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
433 for (i
= 0; i
< to
->in_count
; ++i
) {
434 newa
[i
] = to
->in_bb
[i
];
443 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
445 link_bblock (cfg
, from
, to
);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
463 mono_find_block_region (MonoCompile
*cfg
, int offset
)
465 MonoMethod
*method
= cfg
->method
;
466 MonoMethodHeader
*header
= mono_method_get_header (method
);
467 MonoExceptionClause
*clause
;
470 for (i
= 0; i
< header
->num_clauses
; ++i
) {
471 clause
= &header
->clauses
[i
];
472 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
473 (offset
< (clause
->handler_offset
)))
474 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
476 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
477 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
478 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
479 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
480 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
482 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
485 if (MONO_OFFSET_IN_CLAUSE (clause
, offset
))
486 return ((i
+ 1) << 8) | clause
->flags
;
493 mono_find_final_block (MonoCompile
*cfg
, unsigned char *ip
, unsigned char *target
, int type
)
495 MonoMethod
*method
= cfg
->method
;
496 MonoMethodHeader
*header
= mono_method_get_header (method
);
497 MonoExceptionClause
*clause
;
498 MonoBasicBlock
*handler
;
502 for (i
= 0; i
< header
->num_clauses
; ++i
) {
503 clause
= &header
->clauses
[i
];
504 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
506 if (clause
->flags
== type
) {
507 handler
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
509 res
= g_list_append (res
, handler
);
517 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
521 var
= g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
525 var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
526 /* prevent it from being register allocated */
527 var
->flags
|= MONO_INST_INDIRECT
;
529 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
533 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
535 return g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
539 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
543 var
= g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
547 var
= mono_compile_create_var (cfg
, &mono_defaults
.object_class
->byval_arg
, OP_LOCAL
);
548 /* prevent it from being register allocated */
549 var
->flags
|= MONO_INST_INDIRECT
;
551 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
561 type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
565 inst
->klass
= klass
= mono_class_from_mono_type (type
);
567 inst
->type
= STACK_MP
;
572 switch (type
->type
) {
574 inst
->type
= STACK_INV
;
578 case MONO_TYPE_BOOLEAN
:
584 inst
->type
= STACK_I4
;
589 case MONO_TYPE_FNPTR
:
590 inst
->type
= STACK_PTR
;
592 case MONO_TYPE_CLASS
:
593 case MONO_TYPE_STRING
:
594 case MONO_TYPE_OBJECT
:
595 case MONO_TYPE_SZARRAY
:
596 case MONO_TYPE_ARRAY
:
597 inst
->type
= STACK_OBJ
;
601 inst
->type
= STACK_I8
;
605 inst
->type
= STACK_R8
;
607 case MONO_TYPE_VALUETYPE
:
608 if (type
->data
.klass
->enumtype
) {
609 type
= mono_class_enum_basetype (type
->data
.klass
);
613 inst
->type
= STACK_VTYPE
;
616 case MONO_TYPE_TYPEDBYREF
:
617 inst
->klass
= mono_defaults
.typed_reference_class
;
618 inst
->type
= STACK_VTYPE
;
620 case MONO_TYPE_GENERICINST
:
621 type
= &type
->data
.generic_class
->container_class
->byval_arg
;
624 case MONO_TYPE_MVAR
:
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
629 g_assert (cfg
->generic_sharing_context
);
630 inst
->type
= STACK_OBJ
;
633 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
638 * The following tables are used to quickly validate the IL code in type_from_op ().
641 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
642 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
643 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
644 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
645 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
646 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
},
647 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
648 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
649 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
654 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
657 /* reduce the size of this table */
659 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
660 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
661 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
662 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
663 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
664 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
665 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
666 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
667 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
671 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
672 /* Inv i L p F & O vt */
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
685 shift_table
[STACK_MAX
] [STACK_MAX
] = {
686 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
687 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
688 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
689 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
690 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
691 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
692 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
693 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
697 * Tables to map from the non-specific opcode to the matching
698 * type-specific opcode.
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 binops_op_map
[STACK_MAX
] = {
703 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
708 unops_op_map
[STACK_MAX
] = {
709 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 ovfops_op_map
[STACK_MAX
] = {
715 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 ovf2ops_op_map
[STACK_MAX
] = {
721 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 ovf3ops_op_map
[STACK_MAX
] = {
727 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
732 beqops_op_map
[STACK_MAX
] = {
733 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
738 ceqops_op_map
[STACK_MAX
] = {
739 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
743 * Sets ins->type (the type on the eval stack) according to the
744 * type of the opcode and the arguments to it.
745 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 * FIXME: this function sets ins->type unconditionally in some cases, but
748 * it should set it to invalid for some types (a conv.x on an object)
751 type_from_op (MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
) {
753 switch (ins
->opcode
) {
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
762 ins
->opcode
+= binops_op_map
[ins
->type
];
769 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
770 ins
->opcode
+= binops_op_map
[ins
->type
];
775 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
776 ins
->opcode
+= binops_op_map
[ins
->type
];
781 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
782 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
783 ins
->opcode
= OP_LCOMPARE
;
784 else if (src1
->type
== STACK_R8
)
785 ins
->opcode
= OP_FCOMPARE
;
787 ins
->opcode
= OP_ICOMPARE
;
789 case OP_ICOMPARE_IMM
:
790 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
791 if ((src1
->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
792 ins
->opcode
= OP_LCOMPARE_IMM
;
804 ins
->opcode
+= beqops_op_map
[src1
->type
];
807 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
808 ins
->opcode
+= ceqops_op_map
[src1
->type
];
814 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
815 ins
->opcode
+= ceqops_op_map
[src1
->type
];
819 ins
->type
= neg_table
[src1
->type
];
820 ins
->opcode
+= unops_op_map
[ins
->type
];
823 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
824 ins
->type
= src1
->type
;
826 ins
->type
= STACK_INV
;
827 ins
->opcode
+= unops_op_map
[ins
->type
];
833 ins
->type
= STACK_I4
;
834 ins
->opcode
+= unops_op_map
[src1
->type
];
837 ins
->type
= STACK_R8
;
838 switch (src1
->type
) {
841 ins
->opcode
= OP_ICONV_TO_R_UN
;
844 ins
->opcode
= OP_LCONV_TO_R_UN
;
848 case CEE_CONV_OVF_I1
:
849 case CEE_CONV_OVF_U1
:
850 case CEE_CONV_OVF_I2
:
851 case CEE_CONV_OVF_U2
:
852 case CEE_CONV_OVF_I4
:
853 case CEE_CONV_OVF_U4
:
854 ins
->type
= STACK_I4
;
855 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
857 case CEE_CONV_OVF_I_UN
:
858 case CEE_CONV_OVF_U_UN
:
859 ins
->type
= STACK_PTR
;
860 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
862 case CEE_CONV_OVF_I1_UN
:
863 case CEE_CONV_OVF_I2_UN
:
864 case CEE_CONV_OVF_I4_UN
:
865 case CEE_CONV_OVF_U1_UN
:
866 case CEE_CONV_OVF_U2_UN
:
867 case CEE_CONV_OVF_U4_UN
:
868 ins
->type
= STACK_I4
;
869 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
872 ins
->type
= STACK_PTR
;
873 switch (src1
->type
) {
875 ins
->opcode
= OP_ICONV_TO_U
;
879 #if SIZEOF_REGISTER == 8
880 ins
->opcode
= OP_LCONV_TO_U
;
882 ins
->opcode
= OP_MOVE
;
886 ins
->opcode
= OP_LCONV_TO_U
;
889 ins
->opcode
= OP_FCONV_TO_U
;
895 ins
->type
= STACK_I8
;
896 ins
->opcode
+= unops_op_map
[src1
->type
];
898 case CEE_CONV_OVF_I8
:
899 case CEE_CONV_OVF_U8
:
900 ins
->type
= STACK_I8
;
901 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
903 case CEE_CONV_OVF_U8_UN
:
904 case CEE_CONV_OVF_I8_UN
:
905 ins
->type
= STACK_I8
;
906 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
910 ins
->type
= STACK_R8
;
911 ins
->opcode
+= unops_op_map
[src1
->type
];
914 ins
->type
= STACK_R8
;
918 ins
->type
= STACK_I4
;
919 ins
->opcode
+= ovfops_op_map
[src1
->type
];
924 ins
->type
= STACK_PTR
;
925 ins
->opcode
+= ovfops_op_map
[src1
->type
];
933 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
934 ins
->opcode
+= ovfops_op_map
[src1
->type
];
935 if (ins
->type
== STACK_R8
)
936 ins
->type
= STACK_INV
;
938 case OP_LOAD_MEMBASE
:
939 ins
->type
= STACK_PTR
;
941 case OP_LOADI1_MEMBASE
:
942 case OP_LOADU1_MEMBASE
:
943 case OP_LOADI2_MEMBASE
:
944 case OP_LOADU2_MEMBASE
:
945 case OP_LOADI4_MEMBASE
:
946 case OP_LOADU4_MEMBASE
:
947 ins
->type
= STACK_PTR
;
949 case OP_LOADI8_MEMBASE
:
950 ins
->type
= STACK_I8
;
952 case OP_LOADR4_MEMBASE
:
953 case OP_LOADR8_MEMBASE
:
954 ins
->type
= STACK_R8
;
957 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
961 if (ins
->type
== STACK_MP
)
962 ins
->klass
= mono_defaults
.object_class
;
967 STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_R8
, STACK_OBJ
973 param_table
[STACK_MAX
] [STACK_MAX
] = {
978 check_values_to_signature (MonoInst
*args
, MonoType
*this, MonoMethodSignature
*sig
) {
982 switch (args
->type
) {
992 for (i
= 0; i
< sig
->param_count
; ++i
) {
993 switch (args
[i
].type
) {
997 if (!sig
->params
[i
]->byref
)
1001 if (sig
->params
[i
]->byref
)
1003 switch (sig
->params
[i
]->type
) {
1004 case MONO_TYPE_CLASS
:
1005 case MONO_TYPE_STRING
:
1006 case MONO_TYPE_OBJECT
:
1007 case MONO_TYPE_SZARRAY
:
1008 case MONO_TYPE_ARRAY
:
1015 if (sig
->params
[i
]->byref
)
1017 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst
*
1039 mono_get_domainvar (MonoCompile
*cfg
)
1041 if (!cfg
->domainvar
)
1042 cfg
->domainvar
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1043 return cfg
->domainvar
;
1047 * The got_var contains the address of the Global Offset Table when AOT
1051 mono_get_got_var (MonoCompile
*cfg
)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg
->compile_aot
)
1056 if (!cfg
->got_var
) {
1057 cfg
->got_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1059 return cfg
->got_var
;
1066 mono_get_vtable_var (MonoCompile
*cfg
)
1068 g_assert (cfg
->generic_sharing_context
);
1070 if (!cfg
->rgctx_var
) {
1071 cfg
->rgctx_var
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1072 /* force the var to be stack allocated */
1073 cfg
->rgctx_var
->flags
|= MONO_INST_INDIRECT
;
1076 return cfg
->rgctx_var
;
1080 type_from_stack_type (MonoInst
*ins
) {
1081 switch (ins
->type
) {
1082 case STACK_I4
: return &mono_defaults
.int32_class
->byval_arg
;
1083 case STACK_I8
: return &mono_defaults
.int64_class
->byval_arg
;
1084 case STACK_PTR
: return &mono_defaults
.int_class
->byval_arg
;
1085 case STACK_R8
: return &mono_defaults
.double_class
->byval_arg
;
1087 return &ins
->klass
->this_arg
;
1088 case STACK_OBJ
: return &mono_defaults
.object_class
->byval_arg
;
1089 case STACK_VTYPE
: return &ins
->klass
->byval_arg
;
1091 g_error ("stack type %d to monotype not handled\n", ins
->type
);
1096 static G_GNUC_UNUSED
int
1097 type_to_stack_type (MonoType
*t
)
1099 switch (mono_type_get_underlying_type (t
)->type
) {
1102 case MONO_TYPE_BOOLEAN
:
1105 case MONO_TYPE_CHAR
:
1112 case MONO_TYPE_FNPTR
:
1114 case MONO_TYPE_CLASS
:
1115 case MONO_TYPE_STRING
:
1116 case MONO_TYPE_OBJECT
:
1117 case MONO_TYPE_SZARRAY
:
1118 case MONO_TYPE_ARRAY
:
1126 case MONO_TYPE_VALUETYPE
:
1127 case MONO_TYPE_TYPEDBYREF
:
1129 case MONO_TYPE_GENERICINST
:
1130 if (mono_type_generic_inst_is_valuetype (t
))
1136 g_assert_not_reached ();
1143 array_access_to_klass (int opcode
)
1147 return mono_defaults
.byte_class
;
1149 return mono_defaults
.uint16_class
;
1152 return mono_defaults
.int_class
;
1155 return mono_defaults
.sbyte_class
;
1158 return mono_defaults
.int16_class
;
1161 return mono_defaults
.int32_class
;
1163 return mono_defaults
.uint32_class
;
1166 return mono_defaults
.int64_class
;
1169 return mono_defaults
.single_class
;
1172 return mono_defaults
.double_class
;
1173 case CEE_LDELEM_REF
:
1174 case CEE_STELEM_REF
:
1175 return mono_defaults
.object_class
;
1177 g_assert_not_reached ();
1183 * We try to share variables when possible
1186 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1191 /* inlining can result in deeper stacks */
1192 if (slot
>= mono_method_get_header (cfg
->method
)->max_stack
)
1193 return mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1195 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1197 switch (ins
->type
) {
1204 if ((vnum
= cfg
->intvars
[pos
]))
1205 return cfg
->varinfo
[vnum
];
1206 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1207 cfg
->intvars
[pos
] = res
->inst_c0
;
1210 res
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
1216 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1219 * Don't use this if a generic_context is set, since that means AOT can't
1220 * look up the method using just the image+token.
1221 * table == 0 means this is a reference made from a wrapper.
1223 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
1224 MonoJumpInfoToken
*jump_info_token
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1225 jump_info_token
->image
= image
;
1226 jump_info_token
->token
= token
;
1227 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1232 * This function is called to handle items that are left on the evaluation stack
1233 * at basic block boundaries. What happens is that we save the values to local variables
1234 * and we reload them later when first entering the target basic block (with the
1235 * handle_loaded_temps () function).
1236 * A single joint point will use the same variables (stored in the array bb->out_stack or
1237 * bb->in_stack, if the basic block is before or after the joint point).
1239 * This function needs to be called _before_ emitting the last instruction of
1240 * the bb (i.e. before emitting a branch).
/*
 * handle_stack_args:
 *
 *   Merge the evaluation stack SP (COUNT entries) of the current bblock
 * (cfg->cbb) into the in-stacks of its successor bblocks, allocating shared
 * temporaries where needed and emitting stores into them.
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 *
 * NOTE(review): the numbered view elides several lines of this function
 * (loop-counter declarations, some braces/continues); only the visible
 * statements are reproduced below.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
	MonoBasicBlock *bb = cfg->cbb;	/* the block whose out-stack we are setting up */
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;

	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	/* First exit from this bblock: decide where its out-stack lives. */
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
			//printf (" %d", outb->block_num);
			/* Reuse an in-stack already assigned to a successor, if any. */
			if (outb->in_stack) {
				bb->out_stack = outb->in_stack;
		/* No successor had an in-stack yet: allocate fresh slots. */
		bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst *) * count);
		for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpouse, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 */
			if (cfg->inlined_method)
				bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);

	/* Propagate the chosen out-stack into each successor's in-stack. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				/* stack-height mismatch at a join point: flag as unverifiable */
				cfg->unverifiable = TRUE;
			continue; /* check they are the same locals */
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;

	locals = bb->out_stack;
	/* Spill the current stack values into the shared temporaries. */
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 */
	/* Find a bblock which has a different in_stack */
	while (bindex < bb->out_count) {
		outb = bb->out_bb [bindex];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER) {
		if (outb->in_stack != locals) {
			/* Store into the differing in-stack as well. */
			for (i = 0; i < count; ++i) {
				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
				inst->cil_code = sp [i]->cil_code;
				sp [i] = locals [i];
				if (cfg->verbose_level > 3)
					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
			locals = outb->in_stack;
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
	if (cfg->compile_aot) {
		/* AOT: the interface id is only known at run time; the
		 * ADJUSTED_IID patch presumably yields the (negative) byte
		 * offset of the slot relative to the vtable — TODO confirm. */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
		/* JIT: the interface id is a compile-time constant, so index
		 * the per-interface slot stored just before the vtable. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* ibitmap_reg := klass->interface_bitmap (one bit per interface id) */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* AOT: the interface id is a run-time constant, so compute
		 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) in code. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* JIT: byte index and bit mask are compile-time constants. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 * Same bitmap test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from the vtable instead of the class.
 */
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* ibitmap_reg := vtable->interface_bitmap (one bit per interface id) */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7))
		 * at run time from the IID patch constant. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* JIT: byte index and bit mask are compile-time constants. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Emit code which checks whenever the interface id of @klass is smaller than
 * than the value given by max_iid_reg.
 * On failure it branches to @false_target, or throws InvalidCastException
 * (the guard selecting between the two paths is elided in this view).
 */
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	if (cfg->compile_aot) {
		/* AOT: compare against the IID patch constant. */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
		/* JIT: the interface id is an immediate. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	/* branch-on-failure variant */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	/* throw-on-failure variant */
	MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);

	/* max_iid_reg := vtable->max_interface_id (unsigned 16-bit load) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same as above, but obtains max_iid from a klass */
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);

	/* max_iid_reg := klass->max_interface_id (unsigned 16-bit load) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an "is instance" test of the class in KLASS_REG against KLASS,
 * branching to TRUE_TARGET on success and FALSE_TARGET on failure, using
 * the class supertype table (klass->supertypes) at depth klass->idepth.
 * NOTE(review): some lines of this function are elided in this view.
 */
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	/* Deep hierarchies: first verify the candidate's idepth is large enough
	 * for the supertable lookup below to be in bounds. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);

	/* stype := klass_reg->supertypes [klass->idepth - 1] */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (cfg->compile_aot) {
		/* AOT: compare against a patched class constant. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		/* JIT: compare against the class pointer as an immediate. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check of the vtable in VTABLE_REG against the
 * interface KLASS: range-check the interface id, then test the interface
 * bitmap bit. The guard selecting branch vs. throw on the result is elided
 * in this view.
 */
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	/* branch variant: nonzero bit means the interface is implemented */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	/* throw variant: zero bit means the cast fails */
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Variant of the above that takes a register to the class, not the vtable.
 * The guard selecting branch vs. throw on the result is elided in this view.
 */
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_bit_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	/* branch variant: nonzero bit means the interface is implemented */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	/* throw variant: zero bit means the cast fails */
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 *   Emit code which throws InvalidCastException unless the class pointer in
 * KLASS_REG equals KLASS exactly.
 */
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
	if (cfg->compile_aot) {
		/* AOT: compare against a patched class constant. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		/* JIT: compare against the class pointer as an immediate. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class pointer in KLASS_REG with KLASS and branch to TARGET
 * using the given BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN).
 */
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
	if (cfg->compile_aot) {
		/* AOT: compare against a patched class constant. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		/* JIT: compare against the class pointer as an immediate. */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. OBJ_REG holds the object (or -1 to skip
 * the SZARRAY bounds check when recursing for arrays of arrays);
 * OBJECT_IS_NULL is the bblock to jump to when the check trivially passes.
 * The function handles the array case (rank + element-class check) and the
 * plain-class case (supertable check).
 * NOTE(review): some lines (the rank/else split, closing braces) are elided
 * in this view.
 */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
	/* --- array case --- */
	int rank_reg = alloc_preg (cfg);
	int eclass_reg = alloc_preg (cfg);

	/* The ranks must match exactly. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	/* eclass_reg := cast_class (the element class) of the candidate */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
	if (klass->cast_class == mono_defaults.object_class) {
		/* object[] also accepts enum element types whose parent chain ends
		 * at enum_class->parent; otherwise require exactly System.Enum. */
		int parent_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
		mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
		mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
	} else if (klass->cast_class == mono_defaults.enum_class->parent) {
		mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
		mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
	} else if (klass->cast_class == mono_defaults.enum_class) {
		mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
	} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
		/* interface element type: use the bitmap-based interface check */
		mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		// Pass -1 as obj_reg to skip the check below for arrays of arrays
		mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);

	if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
		/* Check that the object is a vector too */
		int bounds_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	/* --- non-array case: supertype-table check --- */
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* deep hierarchy: verify idepth before indexing the supertable */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");

	/* stype := klass_reg->supertypes [klass->idepth - 1]; it must equal klass */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline code that sets SIZE bytes at DESTREG+OFFSET to VAL (only
 * VAL == 0 is supported), respecting ALIGN. Small aligned sizes use
 * store-immediate opcodes; larger sizes store from a register, falling back
 * to byte stores when unaligned access is not allowed.
 * NOTE(review): the switch/loop scaffolding around the stores is elided in
 * this view; only the visible statements are reproduced.
 */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	/* only zeroing is supported */
	g_assert (val == 0);

	/* Small, sufficiently aligned sizes: one or a few immediate stores. */
	if ((size <= 4) && (size <= align)) {
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);

	/* General case: materialize VAL in a register first. */
	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	/* This could be optimized further if neccesary */
	/* byte-store fallback (unaligned-safe path) */
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* align to 8 with a 4-byte store, then use 8-byte stores */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);

	/* descending-width tail stores */
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
#endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inline code that copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET, respecting ALIGN: wide register-sized copies where
 * unaligned access is allowed, descending to 4/2/1-byte copies for the tail,
 * and a pure byte-copy fallback otherwise.
 * NOTE(review): the loop/size-decrement scaffolding is elided in this view;
 * only the visible load/store pairs are reproduced.
 */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/* This could be optimized further if neccesary */
	/* byte-copy fallback (unaligned-safe path) */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte copies on 64-bit targets */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);

	/* 4-byte copies */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);

	/* 2-byte copies */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);

	/* trailing single bytes */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map the managed return TYPE of a call to the IR call opcode family:
 * plain / long (L) / float (F) / valuetype (V) / void, each with the
 * _REG (indirect, CALLI) and VIRT variants selected by the flags.
 * GSCTX resolves generic-sharing type variables first.
 * NOTE(review): several case labels and 'break's are elided in this view.
 */
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
	/* byref returns are handled as native-int-sized values */
	return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	/* integral types up to native-int size use the plain call family */
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	/* reference types also use the plain call family */
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	/* 64-bit integer returns */
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	/* floating-point returns */
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums behave as their underlying integral type */
			type = mono_class_enum_basetype (type->data.klass);
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* retry with the generic container's open type */
		type = &type->data.generic_class->container_class->byval_arg;
	g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 *
 * Returns: non-0 value if arg can't be stored on a target.
 *
 * NOTE(review): several 'return' statements and case labels are elided in
 * this view.
 */
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
	/* small integral targets accept I4 or PTR stack entries */
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
	/* 64-bit target: require an I8 stack entry */
		if (arg->type != STACK_I8)
	/* floating-point target: require an R8 stack entry */
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
		/* reference generic instantiation */
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
	g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 *
 * NOTE(review): several 'return' statements, 'break's and case labels are
 * elided in this view.
 */
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;

	/* 'this' (args [0]) must be an object, managed pointer or native pointer */
	if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)

	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			/* byref parameters take a managed or native pointer */
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);

		switch (simple_type->type) {
		case MONO_TYPE_VOID:
		/* small integral parameters accept I4 or PTR */
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
		/* 64-bit parameter: require I8 */
			if (args [i]->type != STACK_I8)
		/* floating-point parameter: require R8 */
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* enums behave as their underlying integral type */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			/* retry with the generic container's open type */
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
		g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding direct-call opcode.
 * NOTE(review): most case labels and returns are elided in this view; only
 * the OP_VOIDCALLVIRT label and the unreachable default are visible.
 */
callvirt_to_call (int opcode)
	case OP_VOIDCALLVIRT:
		g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding _MEMBASE opcode (call
 * through a slot loaded from [basereg + offset], used for vtable/IMT calls).
 * NOTE(review): several case labels are elided in this view.
 */
callvirt_to_call_membase (int opcode)
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
		return OP_FCALL_MEMBASE;
		return OP_LCALL_MEMBASE;
		return OP_VCALL_MEMBASE;
		g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Set up the hidden IMT argument for an interface call: either copy
 * IMT_ARG, or materialize CALL->method as a constant (AOT patch or direct
 * pointer), then bind it to MONO_ARCH_IMT_REG. Falls back to the
 * arch-specific hook when there is no dedicated IMT register.
 */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	/* caller supplied the IMT value explicitly */
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* AOT: the method pointer comes from a patch */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		/* JIT: embed the method pointer directly */
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* no IMT register on this architecture: let the backend handle it */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch descriptor from MP with the given IP,
 * TYPE and TARGET. (The ip/type field assignments and the return are elided
 * in this view.)
 */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	ji->data.target = target;
/* Forward declaration: mono_emit_jit_icall is defined further below but is
 * needed by mono_emit_call_args (soft-float argument conversion). */
inline static MonoInst *
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects an indirect call, VIRTUAL a virtual one, TAIL a tail call.
 * Handles valuetype returns (via vret_addr or an OP_OUTARG_VTRETADDR temp),
 * soft-float r8->r4 argument conversion, and hands the call to the backend
 * (or LLVM). Records stack usage in cfg->param_area.
 * NOTE(review): some lines (local declarations, a few braces/else lines)
 * are elided in this view.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail)
#ifdef MONO_ARCH_SOFT_FLOAT
	/* tail-call path */
	MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	/* regular path: pick the opcode from the return type */
	MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	/* valuetype return when the backend provides a vret_addr var */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		call->vret_var = cfg->vret_addr;
		//g_assert_not_reached ();
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* valuetype return into a fresh local */
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	/*
	 * If the call has a float argument, we would need to do an r8->r4 conversion using
	 * an icall, but that cannot be done during the call sequence since it would clobber
	 * the call registers + the stack. So we do it before emitting the call.
	 */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoInst *in = call->args [i];

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
			/* the 'this' argument is a native int */
			t = &mono_defaults.int_class->byval_arg;
		t = mono_type_get_underlying_type (t);

		if (!t->byref && t->type == MONO_TYPE_R4) {
			MonoInst *iargs [1];

			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

			/* The result will be in an int vreg */
			call->args [i] = conv;

	/* lower the call in the backend (LLVM or native) */
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	mono_arch_emit_call (cfg, call);

	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the function pointer in ADDR with
 * signature SIG and arguments ARGS; returns the call instruction.
 */
inline static MonoInst *
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
	MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);

	/* the call target is the address value */
	call->inst.sreg1 = addr->dreg;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli, but additionally passes RGCTX_ARG in the
 * architecture's RGCTX register (generic sharing). Unreachable on
 * architectures without MONO_ARCH_RGCTX_REG.
 */
inline static MonoInst *
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
#ifdef MONO_ARCH_RGCTX_REG
	/* copy the rgctx value into a fresh vreg before the call sequence */
	rgctx_reg = mono_alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);

	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;

	return (MonoInst*)call;
	/* no RGCTX register: this path must not be reached */
	g_assert_not_reached ();
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. THIS is
 * the receiver for virtual calls (NULL for static calls); IMT_ARG carries
 * an explicit IMT value for interface calls. Handles remoting wrappers,
 * delegate Invoke fast paths, devirtualization of non-virtual/sealed
 * methods, and vtable/IMT-based virtual dispatch.
 * NOTE(review): some lines (local declarations, a few braces/else lines)
 * are elided in this view.
 */
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	/* transparent-proxy / remoting: route through the checked invoke wrapper */
	if (this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	/* --- virtual dispatch --- */
	int vtable_reg, slot_reg, this_reg;

	this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
	if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
		/* Make a call to delegate->invoke_impl */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
		call->inst.inst_basereg = this_reg;
		call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
		MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

		return (MonoInst*)call;

	/* devirtualize: non-virtual method, or sealed method (not a remoting wrapper) */
	if ((!cfg->compile_aot || enable_for_aot) &&
		(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
		 (MONO_METHOD_IS_FINAL (method) &&
		  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
		/*
		 * the method is not virtual, we just need to ensure this is not null
		 * and then we can call the method directly.
		 */
		if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
			method = call->method = mono_marshal_get_remoting_invoke_with_check (method);

		if (!method->string_ctor) {
			cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
			MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
			MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);

		call->inst.opcode = callvirt_to_call (call->inst.opcode);

		MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

		return (MonoInst*)call;

	if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
		/*
		 * the method is virtual, but we can statically dispatch since either
		 * it's class or the method itself are sealed.
		 * But first we need to ensure it's not a null reference.
		 */
		cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
		MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
		MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);

		call->inst.opcode = callvirt_to_call (call->inst.opcode);
		MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

		return (MonoInst*)call;

	/* true virtual call: dispatch through the vtable (or IMT for interfaces) */
	call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

	vtable_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifdef MONO_ARCH_HAVE_IMT
		/* IMT: the slot lives at a negative offset from the vtable */
		guint32 imt_slot = mono_method_get_imt_slot (method);
		emit_imt_argument (cfg, call, imt_arg);
		slot_reg = vtable_reg;
		call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
		if (slot_reg == -1) {
			/* no IMT: load the interface slot array from before the vtable */
			slot_reg = alloc_preg (cfg);
			mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
			call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
		/* plain virtual: index into the vtable itself */
		slot_reg = vtable_reg;
		call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
			(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
#ifdef MONO_ARCH_HAVE_IMT
		/* generic virtual methods also need the IMT argument */
		g_assert (mono_method_signature (method)->generic_param_count);
		emit_imt_argument (cfg, call, imt_arg);

	call->inst.sreg1 = slot_reg;
	call->virtual = TRUE;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full, but additionally passes VTABLE_ARG in
 * the architecture's RGCTX register (generic sharing).
 */
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
								  MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
#ifdef MONO_ARCH_RGCTX_REG
	/* copy the vtable/rgctx value into a fresh vreg before the call */
	rgctx_reg = mono_alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;

#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: call METHOD using its own signature and no
 * explicit IMT argument.
 */
static inline MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * (The fptr assignment is elided in this view.)
 */
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
2416 inline static MonoInst
*
2417 mono_emit_jit_icall (MonoCompile
*cfg
, gconstpointer func
, MonoInst
**args
)
2419 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (func
);
2423 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, args
);
2427 * mono_emit_abs_call:
2429 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2431 inline static MonoInst
*
2432 mono_emit_abs_call (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gconstpointer data
,
2433 MonoMethodSignature
*sig
, MonoInst
**args
)
2435 MonoJumpInfo
*ji
= mono_patch_info_new (cfg
->mempool
, 0, patch_type
, data
);
2439 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2442 if (cfg
->abs_patches
== NULL
)
2443 cfg
->abs_patches
= g_hash_table_new (NULL
, NULL
);
2444 g_hash_table_insert (cfg
->abs_patches
, ji
, ji
);
2445 ins
= mono_emit_native_call (cfg
, ji
, sig
, args
);
2446 ((MonoCallInst
*)ins
)->fptr_is_patch
= TRUE
;
2451 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2453 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2454 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2458 * Native code might return non register sized integers
2459 * without initializing the upper bits.
2461 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2462 case OP_LOADI1_MEMBASE
:
2463 widen_op
= OP_ICONV_TO_I1
;
2465 case OP_LOADU1_MEMBASE
:
2466 widen_op
= OP_ICONV_TO_U1
;
2468 case OP_LOADI2_MEMBASE
:
2469 widen_op
= OP_ICONV_TO_I2
;
2471 case OP_LOADU2_MEMBASE
:
2472 widen_op
= OP_ICONV_TO_U2
;
2478 if (widen_op
!= -1) {
2479 int dreg
= alloc_preg (cfg
);
2482 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2483 widen
->type
= ins
->type
;
2493 get_memcpy_method (void)
2495 static MonoMethod
*memcpy_method
= NULL
;
2496 if (!memcpy_method
) {
2497 memcpy_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memcpy", 3);
2499 g_error ("Old corlib found. Install a new one");
2501 return memcpy_method
;
2505 * Emit code to copy a valuetype of type @klass whose address is stored in
2506 * @src->dreg to memory whose address is stored at @dest->dreg.
2509 mini_emit_stobj (MonoCompile
*cfg
, MonoInst
*dest
, MonoInst
*src
, MonoClass
*klass
, gboolean native
)
2511 MonoInst
*iargs
[3];
2514 MonoMethod
*memcpy_method
;
2518 * This check breaks with spilled vars... need to handle it during verification anyway.
2519 * g_assert (klass && klass == src->klass && klass == dest->klass);
2523 n
= mono_class_native_size (klass
, &align
);
2525 n
= mono_class_value_size (klass
, &align
);
2527 #if HAVE_WRITE_BARRIERS
2528 /* if native is true there should be no references in the struct */
2529 if (klass
->has_references
&& !native
) {
2530 /* Avoid barriers when storing to the stack */
2531 if (!((dest
->opcode
== OP_ADD_IMM
&& dest
->sreg1
== cfg
->frame_reg
) ||
2532 (dest
->opcode
== OP_LDADDR
))) {
2535 EMIT_NEW_PCONST (cfg
, iargs
[2], klass
);
2537 mono_emit_jit_icall (cfg
, mono_value_copy
, iargs
);
2542 if ((cfg
->opt
& MONO_OPT_INTRINS
) && n
<= sizeof (gpointer
) * 5) {
2543 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2544 mini_emit_memcpy (cfg
, dest
->dreg
, 0, src
->dreg
, 0, n
, align
);
2548 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2550 memcpy_method
= get_memcpy_method ();
2551 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
2556 get_memset_method (void)
2558 static MonoMethod
*memset_method
= NULL
;
2559 if (!memset_method
) {
2560 memset_method
= mono_class_get_method_from_name (mono_defaults
.string_class
, "memset", 3);
2562 g_error ("Old corlib found. Install a new one");
2564 return memset_method
;
2568 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2570 MonoInst
*iargs
[3];
2573 MonoMethod
*memset_method
;
2575 /* FIXME: Optimize this for the case when dest is an LDADDR */
2577 mono_class_init (klass
);
2578 n
= mono_class_value_size (klass
, &align
);
2580 if (n
<= sizeof (gpointer
) * 5) {
2581 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2584 memset_method
= get_memset_method ();
2586 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2587 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2588 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2593 emit_get_rgctx (MonoCompile
*cfg
, MonoMethod
*method
, int context_used
)
2595 MonoInst
*this = NULL
;
2597 g_assert (cfg
->generic_sharing_context
);
2599 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2600 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2601 !method
->klass
->valuetype
)
2602 EMIT_NEW_ARGLOAD (cfg
, this, 0);
2604 if (context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) {
2605 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2608 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2610 mrgctx_loc
= mono_get_vtable_var (cfg
);
2611 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
2614 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| method
->klass
->valuetype
) {
2615 MonoInst
*vtable_loc
, *vtable_var
;
2619 vtable_loc
= mono_get_vtable_var (cfg
);
2620 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
2622 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2623 MonoInst
*mrgctx_var
= vtable_var
;
2626 vtable_reg
= alloc_preg (cfg
);
2627 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2628 vtable_var
->type
= STACK_PTR
;
2634 int vtable_reg
, res_reg
;
2636 vtable_reg
= alloc_preg (cfg
);
2637 res_reg
= alloc_preg (cfg
);
2638 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2643 static MonoJumpInfoRgctxEntry
*
2644 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, int info_type
)
2646 MonoJumpInfoRgctxEntry
*res
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2647 res
->method
= method
;
2648 res
->in_mrgctx
= in_mrgctx
;
2649 res
->data
= mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2650 res
->data
->type
= patch_type
;
2651 res
->data
->data
.target
= patch_data
;
2652 res
->info_type
= info_type
;
2657 static inline MonoInst
*
2658 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2660 return mono_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, helper_sig_rgctx_lazy_fetch_trampoline
, &rgctx
);
2664 emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2665 MonoClass
*klass
, int rgctx_type
)
2667 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2668 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2670 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2674 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2675 MonoMethod
*cmethod
, int rgctx_type
)
2677 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2678 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2680 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2684 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2685 MonoClassField
*field
, int rgctx_type
)
2687 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->current_method
, context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
, MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2688 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2690 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2694 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2696 int vtable_reg
= alloc_preg (cfg
);
2697 int context_used
= 0;
2699 if (cfg
->generic_sharing_context
)
2700 context_used
= mono_class_check_context_used (array_class
);
2702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj
->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2704 if (cfg
->opt
& MONO_OPT_SHARED
) {
2705 int class_reg
= alloc_preg (cfg
);
2706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2707 if (cfg
->compile_aot
) {
2708 int klass_reg
= alloc_preg (cfg
);
2709 MONO_EMIT_NEW_CLASSCONST (cfg
, klass_reg
, array_class
);
2710 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, klass_reg
);
2712 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, class_reg
, array_class
);
2714 } else if (context_used
) {
2715 MonoInst
*vtable_ins
;
2717 vtable_ins
= emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2718 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2720 if (cfg
->compile_aot
) {
2721 int vt_reg
= alloc_preg (cfg
);
2722 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2723 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2725 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, mono_class_vtable (cfg
->domain
, array_class
));
2729 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2733 save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
)
2735 if (mini_get_debug_options ()->better_cast_details
) {
2736 int to_klass_reg
= alloc_preg (cfg
);
2737 int vtable_reg
= alloc_preg (cfg
);
2738 int klass_reg
= alloc_preg (cfg
);
2739 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2742 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n.");
2746 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2750 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
2751 MONO_EMIT_NEW_PCONST (cfg
, to_klass_reg
, klass
);
2752 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), to_klass_reg
);
2757 reset_cast_details (MonoCompile
*cfg
)
2759 /* Reset the variables holding the cast details */
2760 if (mini_get_debug_options ()->better_cast_details
) {
2761 MonoInst
*tls_get
= mono_get_jit_tls_intrinsic (cfg
);
2763 MONO_ADD_INS (cfg
->cbb
, tls_get
);
2764 /* It is enough to reset the from field */
2765 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, G_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
2770 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2771 * generic code is generated.
2774 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
2776 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Unbox", 1);
2779 MonoInst
*rgctx
, *addr
;
2781 /* FIXME: What if the class is shared? We might not
2782 have to get the address of the method from the
2784 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2785 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2787 rgctx
= emit_get_rgctx (cfg
, method
, context_used
);
2789 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2791 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2796 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
2800 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
2801 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2802 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
2803 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
2805 obj_reg
= sp
[0]->dreg
;
2806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
2809 /* FIXME: generics */
2810 g_assert (klass
->rank
== 0);
2813 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
2814 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2816 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, element_class
));
2820 MonoInst
*element_class
;
2822 /* This assertion is from the unboxcast insn */
2823 g_assert (klass
->rank
== 0);
2825 element_class
= emit_get_rgctx_klass (cfg
, context_used
,
2826 klass
->element_class
, MONO_RGCTX_INFO_KLASS
);
2828 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
2829 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2831 save_cast_details (cfg
, klass
->element_class
, obj_reg
);
2832 mini_emit_class_check (cfg
, eclass_reg
, klass
->element_class
);
2833 reset_cast_details (cfg
);
2836 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_PTR
), obj_reg
, sizeof (MonoObject
));
2837 MONO_ADD_INS (cfg
->cbb
, add
);
2838 add
->type
= STACK_MP
;
2845 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
)
2847 MonoInst
*iargs
[2];
2850 if (cfg
->opt
& MONO_OPT_SHARED
) {
2851 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2852 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
2854 alloc_ftn
= mono_object_new
;
2855 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& klass
->type_token
&& klass
->image
== mono_defaults
.corlib
&& !klass
->generic_class
) {
2856 /* This happens often in argument checking code, eg. throw new FooException... */
2857 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2858 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (klass
->type_token
));
2859 return mono_emit_jit_icall (cfg
, mono_helper_newobj_mscorlib
, iargs
);
2861 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
2862 #ifdef MONO_CROSS_COMPILE
2863 MonoMethod
*managed_alloc
= NULL
;
2865 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, for_box
);
2869 if (managed_alloc
) {
2870 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2871 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2873 alloc_ftn
= mono_class_get_allocation_ftn (vtable
, for_box
, &pass_lw
);
2875 guint32 lw
= vtable
->klass
->instance_size
;
2876 lw
= ((lw
+ (sizeof (gpointer
) - 1)) & ~(sizeof (gpointer
) - 1)) / sizeof (gpointer
);
2877 EMIT_NEW_ICONST (cfg
, iargs
[0], lw
);
2878 EMIT_NEW_VTABLECONST (cfg
, iargs
[1], vtable
);
2881 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
2885 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2889 handle_alloc_from_inst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*data_inst
,
2892 MonoInst
*iargs
[2];
2893 MonoMethod
*managed_alloc
= NULL
;
2897 FIXME: we cannot get managed_alloc here because we can't get
2898 the class's vtable (because it's not a closed class)
2900 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2901 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2904 if (cfg
->opt
& MONO_OPT_SHARED
) {
2905 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
2906 iargs
[1] = data_inst
;
2907 alloc_ftn
= mono_object_new
;
2909 if (managed_alloc
) {
2910 iargs
[0] = data_inst
;
2911 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
2914 iargs
[0] = data_inst
;
2915 alloc_ftn
= mono_object_new_specific
;
2918 return mono_emit_jit_icall (cfg
, alloc_ftn
, iargs
);
2922 handle_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
)
2924 MonoInst
*alloc
, *ins
;
2926 if (mono_class_is_nullable (klass
)) {
2927 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2928 return mono_emit_method_call (cfg
, method
, &val
, NULL
);
2931 alloc
= handle_alloc (cfg
, klass
, TRUE
);
2933 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2939 handle_box_from_inst (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
, MonoInst
*data_inst
)
2941 MonoInst
*alloc
, *ins
;
2943 if (mono_class_is_nullable (klass
)) {
2944 MonoMethod
* method
= mono_class_get_method_from_name (klass
, "Box", 1);
2945 /* FIXME: What if the class is shared? We might not
2946 have to get the method address from the RGCTX. */
2947 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
2948 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
2949 MonoInst
*rgctx
= emit_get_rgctx (cfg
, cfg
->current_method
, context_used
);
2951 return mono_emit_rgctx_calli (cfg
, mono_method_signature (method
), &val
, addr
, rgctx
);
2953 alloc
= handle_alloc_from_inst (cfg
, klass
, data_inst
, TRUE
);
2955 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, alloc
->dreg
, sizeof (MonoObject
), val
->dreg
);
2962 handle_castclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
2964 MonoBasicBlock
*is_null_bb
;
2965 int obj_reg
= src
->dreg
;
2966 int vtable_reg
= alloc_preg (cfg
);
2968 NEW_BBLOCK (cfg
, is_null_bb
);
2970 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
2973 save_cast_details (cfg
, klass
, obj_reg
);
2975 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
2976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2977 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, NULL
, NULL
);
2979 int klass_reg
= alloc_preg (cfg
);
2981 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
2983 if (!klass
->rank
&& !cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
2984 /* the remoting code is broken, access the class for now */
2986 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
2987 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
2989 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2990 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
2992 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
2994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
2995 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, is_null_bb
);
2999 MONO_START_BB (cfg
, is_null_bb
);
3001 reset_cast_details (cfg
);
3007 handle_isinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3010 MonoBasicBlock
*is_null_bb
, *false_bb
, *end_bb
;
3011 int obj_reg
= src
->dreg
;
3012 int vtable_reg
= alloc_preg (cfg
);
3013 int res_reg
= alloc_preg (cfg
);
3015 NEW_BBLOCK (cfg
, is_null_bb
);
3016 NEW_BBLOCK (cfg
, false_bb
);
3017 NEW_BBLOCK (cfg
, end_bb
);
3019 /* Do the assignment at the beginning, so the other assignment can be if converted */
3020 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, res_reg
, obj_reg
);
3021 ins
->type
= STACK_OBJ
;
3024 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3025 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_null_bb
);
3027 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3029 /* the is_null_bb target simply copies the input register to the output */
3030 mini_emit_iface_cast (cfg
, vtable_reg
, klass
, false_bb
, is_null_bb
);
3032 int klass_reg
= alloc_preg (cfg
);
3034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3037 int rank_reg
= alloc_preg (cfg
);
3038 int eclass_reg
= alloc_preg (cfg
);
3040 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3041 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, klass
->rank
);
3042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, G_STRUCT_OFFSET (MonoClass
, cast_class
));
3045 if (klass
->cast_class
== mono_defaults
.object_class
) {
3046 int parent_reg
= alloc_preg (cfg
);
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, parent_reg
, eclass_reg
, G_STRUCT_OFFSET (MonoClass
, parent
));
3048 mini_emit_class_check_branch (cfg
, parent_reg
, mono_defaults
.enum_class
->parent
, OP_PBNE_UN
, is_null_bb
);
3049 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3051 } else if (klass
->cast_class
== mono_defaults
.enum_class
->parent
) {
3052 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
->parent
, OP_PBEQ
, is_null_bb
);
3053 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3055 } else if (klass
->cast_class
== mono_defaults
.enum_class
) {
3056 mini_emit_class_check_branch (cfg
, eclass_reg
, mono_defaults
.enum_class
, OP_PBEQ
, is_null_bb
);
3057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false_bb
);
3058 } else if (klass
->cast_class
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3059 mini_emit_iface_class_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3061 if ((klass
->rank
== 1) && (klass
->byval_arg
.type
== MONO_TYPE_SZARRAY
)) {
3062 /* Check that the object is a vector too */
3063 int bounds_reg
= alloc_preg (cfg
);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
, obj_reg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3065 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, bounds_reg
, 0);
3066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3069 /* the is_null_bb target simply copies the input register to the output */
3070 mini_emit_isninst_cast (cfg
, eclass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3072 } else if (mono_class_is_nullable (klass
)) {
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3074 /* the is_null_bb target simply copies the input register to the output */
3075 mini_emit_isninst_cast (cfg
, klass_reg
, klass
->cast_class
, false_bb
, is_null_bb
);
3077 if (!cfg
->compile_aot
&& !(cfg
->opt
& MONO_OPT_SHARED
) && (klass
->flags
& TYPE_ATTRIBUTE_SEALED
)) {
3078 /* the remoting code is broken, access the class for now */
3080 MonoVTable
*vt
= mono_class_vtable (cfg
->domain
, klass
);
3081 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, vt
);
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3084 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, klass_reg
, klass
);
3086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false_bb
);
3087 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, is_null_bb
);
3089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3090 /* the is_null_bb target simply copies the input register to the output */
3091 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, is_null_bb
);
3096 MONO_START_BB (cfg
, false_bb
);
3098 MONO_EMIT_NEW_PCONST (cfg
, res_reg
, 0);
3099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3101 MONO_START_BB (cfg
, is_null_bb
);
3103 MONO_START_BB (cfg
, end_bb
);
3109 handle_cisinst (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3111 /* This opcode takes as input an object reference and a class, and returns:
3112 0) if the object is an instance of the class,
3113 1) if the object is not instance of the class,
3114 2) if the object is a proxy whose type cannot be determined */
3117 MonoBasicBlock
*true_bb
, *false_bb
, *false2_bb
, *end_bb
, *no_proxy_bb
, *interface_fail_bb
;
3118 int obj_reg
= src
->dreg
;
3119 int dreg
= alloc_ireg (cfg
);
3121 int klass_reg
= alloc_preg (cfg
);
3123 NEW_BBLOCK (cfg
, true_bb
);
3124 NEW_BBLOCK (cfg
, false_bb
);
3125 NEW_BBLOCK (cfg
, false2_bb
);
3126 NEW_BBLOCK (cfg
, end_bb
);
3127 NEW_BBLOCK (cfg
, no_proxy_bb
);
3129 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3130 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, false_bb
);
3132 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3133 NEW_BBLOCK (cfg
, interface_fail_bb
);
3135 tmp_reg
= alloc_preg (cfg
);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3137 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, true_bb
);
3138 MONO_START_BB (cfg
, interface_fail_bb
);
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3141 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, false_bb
);
3143 tmp_reg
= alloc_preg (cfg
);
3144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3145 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, false2_bb
);
3148 tmp_reg
= alloc_preg (cfg
);
3149 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3152 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3153 tmp_reg
= alloc_preg (cfg
);
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3157 tmp_reg
= alloc_preg (cfg
);
3158 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3159 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3162 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false2_bb
, true_bb
);
3163 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, false2_bb
);
3165 MONO_START_BB (cfg
, no_proxy_bb
);
3167 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, false_bb
, true_bb
);
3170 MONO_START_BB (cfg
, false_bb
);
3172 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3175 MONO_START_BB (cfg
, false2_bb
);
3177 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 2);
3178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3180 MONO_START_BB (cfg
, true_bb
);
3182 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3184 MONO_START_BB (cfg
, end_bb
);
3187 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3189 ins
->type
= STACK_I4
;
3195 handle_ccastclass (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*src
)
3197 /* This opcode takes as input an object reference and a class, and returns:
3198 0) if the object is an instance of the class,
3199 1) if the object is a proxy whose type cannot be determined
3200 an InvalidCastException exception is thrown otherwhise*/
3203 MonoBasicBlock
*end_bb
, *ok_result_bb
, *no_proxy_bb
, *interface_fail_bb
, *fail_1_bb
;
3204 int obj_reg
= src
->dreg
;
3205 int dreg
= alloc_ireg (cfg
);
3206 int tmp_reg
= alloc_preg (cfg
);
3207 int klass_reg
= alloc_preg (cfg
);
3209 NEW_BBLOCK (cfg
, end_bb
);
3210 NEW_BBLOCK (cfg
, ok_result_bb
);
3212 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
3213 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, ok_result_bb
);
3215 if (klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3216 NEW_BBLOCK (cfg
, interface_fail_bb
);
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3219 mini_emit_iface_cast (cfg
, tmp_reg
, klass
, interface_fail_bb
, ok_result_bb
);
3220 MONO_START_BB (cfg
, interface_fail_bb
);
3221 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3223 mini_emit_class_check (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
);
3225 tmp_reg
= alloc_preg (cfg
);
3226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3227 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3228 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "InvalidCastException");
3230 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3234 NEW_BBLOCK (cfg
, no_proxy_bb
);
3236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoVTable
, klass
));
3238 mini_emit_class_check_branch (cfg
, klass_reg
, mono_defaults
.transparent_proxy_class
, OP_PBNE_UN
, no_proxy_bb
);
3240 tmp_reg
= alloc_preg (cfg
);
3241 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, remote_class
));
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, tmp_reg
, G_STRUCT_OFFSET (MonoRemoteClass
, proxy_class
));
3244 tmp_reg
= alloc_preg (cfg
);
3245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, tmp_reg
, obj_reg
, G_STRUCT_OFFSET (MonoTransparentProxy
, custom_type_info
));
3246 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, tmp_reg
, 0);
3247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, no_proxy_bb
);
3249 NEW_BBLOCK (cfg
, fail_1_bb
);
3251 mini_emit_isninst_cast (cfg
, klass_reg
, klass
, fail_1_bb
, ok_result_bb
);
3253 MONO_START_BB (cfg
, fail_1_bb
);
3255 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 1);
3256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3258 MONO_START_BB (cfg
, no_proxy_bb
);
3260 mini_emit_castclass (cfg
, obj_reg
, klass_reg
, klass
, ok_result_bb
);
3263 MONO_START_BB (cfg
, ok_result_bb
);
3265 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
3267 MONO_START_BB (cfg
, end_bb
);
3270 MONO_INST_NEW (cfg
, ins
, OP_ICONST
);
3272 ins
->type
= STACK_I4
;
3277 static G_GNUC_UNUSED MonoInst
*
3278 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
)
3280 gpointer
*trampoline
;
3281 MonoInst
*obj
, *method_ins
, *tramp_ins
;
3285 obj
= handle_alloc (cfg
, klass
, FALSE
);
3287 /* Inline the contents of mono_delegate_ctor */
3289 /* Set target field */
3290 /* Optimize away setting of NULL target */
3291 if (!(target
->opcode
== OP_PCONST
&& target
->inst_p0
== 0))
3292 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3294 /* Set method field */
3295 EMIT_NEW_METHODCONST (cfg
, method_ins
, method
);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3299 * To avoid looking up the compiled code belonging to the target method
3300 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3301 * store it, and we fill it after the method has been compiled.
3303 if (!cfg
->compile_aot
&& !method
->dynamic
) {
3304 MonoInst
*code_slot_ins
;
3306 domain
= mono_domain_get ();
3307 mono_domain_lock (domain
);
3308 if (!domain_jit_info (domain
)->method_code_hash
)
3309 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3310 code_slot
= g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3312 code_slot
= mono_domain_alloc0 (domain
, sizeof (gpointer
));
3313 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3315 mono_domain_unlock (domain
);
3317 EMIT_NEW_PCONST (cfg
, code_slot_ins
, code_slot
);
3318 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3321 /* Set invoke_impl field */
3322 if (cfg
->compile_aot
) {
3323 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, klass
);
3325 trampoline
= mono_create_delegate_trampoline (klass
);
3326 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3328 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, G_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3330 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3336 handle_array_new (MonoCompile
*cfg
, int rank
, MonoInst
**sp
, unsigned char *ip
)
3338 MonoJitICallInfo
*info
;
3340 /* Need to register the icall so it gets an icall wrapper */
3341 info
= mono_get_array_new_va_icall (rank
);
3343 cfg
->flags
|= MONO_CFG_HAS_VARARGS
;
3345 /* mono_array_new_va () needs a vararg calling convention */
3346 cfg
->disable_llvm
= TRUE
;
3348 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3349 return mono_emit_native_call (cfg
, mono_icall_get_wrapper (info
), info
->sig
, sp
);
3353 mono_emit_load_got_addr (MonoCompile
*cfg
)
3355 MonoInst
*getaddr
, *dummy_use
;
3357 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3360 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3361 getaddr
->dreg
= cfg
->got_var
->dreg
;
3363 /* Add it to the start of the first bblock */
3364 if (cfg
->bb_entry
->code
) {
3365 getaddr
->next
= cfg
->bb_entry
->code
;
3366 cfg
->bb_entry
->code
= getaddr
;
3369 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3371 cfg
->got_var_allocated
= TRUE
;
3374 * Add a dummy use to keep the got_var alive, since real uses might
3375 * only be generated by the back ends.
3376 * Add it to end_bblock, so the variable's lifetime covers the whole
3378 * It would be better to make the usage of the got var explicit in all
3379 * cases when the backend needs it (i.e. calls, throw etc.), so this
3380 * wouldn't be needed.
3382 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3383 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3386 static int inline_limit
;
3387 static gboolean inline_limit_inited
;
3390 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3392 MonoMethodHeader
*header
;
3394 #ifdef MONO_ARCH_SOFT_FLOAT
3395 MonoMethodSignature
*sig
= mono_method_signature (method
);
3399 if (cfg
->generic_sharing_context
)
3402 #ifdef MONO_ARCH_HAVE_LMF_OPS
3403 if (((method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3404 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) &&
3405 !MONO_TYPE_ISSTRUCT (signature
->ret
) && !mini_class_is_system_array (method
->klass
))
3409 if (method
->is_inflated
)
3410 /* Avoid inflating the header */
3411 header
= mono_method_get_header (((MonoMethodInflated
*)method
)->declaring
);
3413 header
= mono_method_get_header (method
);
3415 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_RUNTIME
) ||
3416 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
3417 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3418 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3419 (method
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
) ||
3420 (method
->klass
->marshalbyref
) ||
3421 !header
|| header
->num_clauses
)
3424 /* also consider num_locals? */
3425 /* Do the size check early to avoid creating vtables */
3426 if (!inline_limit_inited
) {
3427 if (getenv ("MONO_INLINELIMIT"))
3428 inline_limit
= atoi (getenv ("MONO_INLINELIMIT"));
3430 inline_limit
= INLINE_LENGTH_LIMIT
;
3431 inline_limit_inited
= TRUE
;
3433 if (header
->code_size
>= inline_limit
)
3437 * if we can initialize the class of the method right away, we do,
3438 * otherwise we don't allow inlining if the class needs initialization,
3439 * since it would mean inserting a call to mono_runtime_class_init()
3440 * inside the inlined code
3442 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3443 if (method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
) {
3444 if (cfg
->run_cctors
&& method
->klass
->has_cctor
) {
3445 if (!method
->klass
->runtime_info
)
3446 /* No vtable created yet */
3448 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3451 /* This makes so that inline cannot trigger */
3452 /* .cctors: too many apps depend on them */
3453 /* running with a specific order... */
3454 if (! vtable
->initialized
)
3456 mono_runtime_class_init (vtable
);
3458 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3459 if (!method
->klass
->runtime_info
)
3460 /* No vtable created yet */
3462 vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
3465 if (!vtable
->initialized
)
3470 * If we're compiling for shared code
3471 * the cctor will need to be run at aot method load time, for example,
3472 * or at the end of the compilation of the inlining method.
3474 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !((method
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)))
3479 * CAS - do not inline methods with declarative security
3480 * Note: this has to be before any possible return TRUE;
3482 if (mono_method_has_declsec (method
))
3485 #ifdef MONO_ARCH_SOFT_FLOAT
3487 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3489 for (i
= 0; i
< sig
->param_count
; ++i
)
3490 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3498 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoVTable
*vtable
)
3500 if (vtable
->initialized
&& !cfg
->compile_aot
)
3503 if (vtable
->klass
->flags
& TYPE_ATTRIBUTE_BEFORE_FIELD_INIT
)
3506 if (!mono_class_needs_cctor_run (vtable
->klass
, method
))
3509 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (vtable
->klass
== method
->klass
))
3510 /* The initialization is already done before the method is called */
3517 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
)
3521 int mult_reg
, add_reg
, array_reg
, index_reg
, index2_reg
;
3523 mono_class_init (klass
);
3524 size
= mono_class_array_element_size (klass
);
3526 mult_reg
= alloc_preg (cfg
);
3527 array_reg
= arr
->dreg
;
3528 index_reg
= index
->dreg
;
3530 #if SIZEOF_REGISTER == 8
3531 /* The array reg is 64 bits but the index reg is only 32 */
3532 index2_reg
= alloc_preg (cfg
);
3533 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3535 if (index
->type
== STACK_I8
) {
3536 index2_reg
= alloc_preg (cfg
);
3537 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3539 index2_reg
= index_reg
;
3543 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
3545 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3546 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
3547 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3549 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], G_STRUCT_OFFSET (MonoArray
, vector
));
3550 ins
->type
= STACK_PTR
;
3556 add_reg
= alloc_preg (cfg
);
3558 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
3559 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
3560 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3561 ins
->type
= STACK_PTR
;
3562 MONO_ADD_INS (cfg
->cbb
, ins
);
3567 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3569 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
3571 int bounds_reg
= alloc_preg (cfg
);
3572 int add_reg
= alloc_preg (cfg
);
3573 int mult_reg
= alloc_preg (cfg
);
3574 int mult2_reg
= alloc_preg (cfg
);
3575 int low1_reg
= alloc_preg (cfg
);
3576 int low2_reg
= alloc_preg (cfg
);
3577 int high1_reg
= alloc_preg (cfg
);
3578 int high2_reg
= alloc_preg (cfg
);
3579 int realidx1_reg
= alloc_preg (cfg
);
3580 int realidx2_reg
= alloc_preg (cfg
);
3581 int sum_reg
= alloc_preg (cfg
);
3586 mono_class_init (klass
);
3587 size
= mono_class_array_element_size (klass
);
3589 index1
= index_ins1
->dreg
;
3590 index2
= index_ins2
->dreg
;
3592 /* range checking */
3593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
3594 arr
->dreg
, G_STRUCT_OFFSET (MonoArray
, bounds
));
3596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
3597 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3598 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
3599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
3600 bounds_reg
, G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3601 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
3602 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3604 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
3605 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
3606 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
3607 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
3608 bounds_reg
, sizeof (MonoArrayBounds
) + G_STRUCT_OFFSET (MonoArrayBounds
, length
));
3609 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
3610 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
3612 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
3613 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
3614 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
3615 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
3616 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, G_STRUCT_OFFSET (MonoArray
, vector
));
3618 ins
->type
= STACK_MP
;
3620 MONO_ADD_INS (cfg
->cbb
, ins
);
3627 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, unsigned char *ip
, gboolean is_set
)
3631 MonoMethod
*addr_method
;
3634 rank
= mono_method_signature (cmethod
)->param_count
- (is_set
? 1: 0);
3637 return mini_emit_ldelema_1_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1]);
3639 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3640 /* emit_ldelema_2 depends on OP_LMUL */
3641 if (rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
)) {
3642 return mini_emit_ldelema_2_ins (cfg
, cmethod
->klass
->element_class
, sp
[0], sp
[1], sp
[2]);
3646 element_size
= mono_class_array_element_size (cmethod
->klass
->element_class
);
3647 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
3648 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
3654 mini_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
3656 MonoInst
*ins
= NULL
;
3658 static MonoClass
*runtime_helpers_class
= NULL
;
3659 if (! runtime_helpers_class
)
3660 runtime_helpers_class
= mono_class_from_name (mono_defaults
.corlib
,
3661 "System.Runtime.CompilerServices", "RuntimeHelpers");
3663 if (cmethod
->klass
== mono_defaults
.string_class
) {
3664 if (strcmp (cmethod
->name
, "get_Chars") == 0) {
3665 int dreg
= alloc_ireg (cfg
);
3666 int index_reg
= alloc_preg (cfg
);
3667 int mult_reg
= alloc_preg (cfg
);
3668 int add_reg
= alloc_preg (cfg
);
3670 #if SIZEOF_REGISTER == 8
3671 /* The array reg is 64 bits but the index reg is only 32 */
3672 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index_reg
, args
[1]->dreg
);
3674 index_reg
= args
[1]->dreg
;
3676 MONO_EMIT_BOUNDS_CHECK (cfg
, args
[0]->dreg
, MonoString
, length
, index_reg
);
3678 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3679 EMIT_NEW_X86_LEA (cfg
, ins
, args
[0]->dreg
, index_reg
, 1, G_STRUCT_OFFSET (MonoString
, chars
));
3680 add_reg
= ins
->dreg
;
3681 /* Avoid a warning */
3683 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3686 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, index_reg
, 1);
3687 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3688 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU2_MEMBASE
, dreg
,
3689 add_reg
, G_STRUCT_OFFSET (MonoString
, chars
));
3691 type_from_op (ins
, NULL
, NULL
);
3693 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3694 int dreg
= alloc_ireg (cfg
);
3695 /* Decompose later to allow more optimizations */
3696 EMIT_NEW_UNALU (cfg
, ins
, OP_STRLEN
, dreg
, args
[0]->dreg
);
3697 ins
->type
= STACK_I4
;
3698 cfg
->cbb
->has_array_access
= TRUE
;
3699 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
3702 } else if (strcmp (cmethod
->name
, "InternalSetChar") == 0) {
3703 int mult_reg
= alloc_preg (cfg
);
3704 int add_reg
= alloc_preg (cfg
);
3706 /* The corlib functions check for oob already. */
3707 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, mult_reg
, args
[1]->dreg
, 1);
3708 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult_reg
, args
[0]->dreg
);
3709 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI2_MEMBASE_REG
, add_reg
, G_STRUCT_OFFSET (MonoString
, chars
), args
[2]->dreg
);
3712 } else if (cmethod
->klass
== mono_defaults
.object_class
) {
3714 if (strcmp (cmethod
->name
, "GetType") == 0) {
3715 int dreg
= alloc_preg (cfg
);
3716 int vt_reg
= alloc_preg (cfg
);
3717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vt_reg
, args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3718 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, vt_reg
, G_STRUCT_OFFSET (MonoVTable
, type
));
3719 type_from_op (ins
, NULL
, NULL
);
3722 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3723 } else if (strcmp (cmethod
->name
, "InternalGetHashCode") == 0) {
3724 int dreg
= alloc_ireg (cfg
);
3725 int t1
= alloc_ireg (cfg
);
3727 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, t1
, args
[0]->dreg
, 3);
3728 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_MUL_IMM
, dreg
, t1
, 2654435761u);
3729 ins
->type
= STACK_I4
;
3733 } else if (strcmp (cmethod
->name
, ".ctor") == 0) {
3734 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
3735 MONO_ADD_INS (cfg
->cbb
, ins
);
3739 } else if (cmethod
->klass
== mono_defaults
.array_class
) {
3740 if (cmethod
->name
[0] != 'g')
3743 if (strcmp (cmethod
->name
, "get_Rank") == 0) {
3744 int dreg
= alloc_ireg (cfg
);
3745 int vtable_reg
= alloc_preg (cfg
);
3746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, vtable_reg
,
3747 args
[0]->dreg
, G_STRUCT_OFFSET (MonoObject
, vtable
));
3748 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADU1_MEMBASE
, dreg
,
3749 vtable_reg
, G_STRUCT_OFFSET (MonoVTable
, rank
));
3750 type_from_op (ins
, NULL
, NULL
);
3753 } else if (strcmp (cmethod
->name
, "get_Length") == 0) {
3754 int dreg
= alloc_ireg (cfg
);
3756 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADI4_MEMBASE
, dreg
,
3757 args
[0]->dreg
, G_STRUCT_OFFSET (MonoArray
, max_length
));
3758 type_from_op (ins
, NULL
, NULL
);
3763 } else if (cmethod
->klass
== runtime_helpers_class
) {
3765 if (strcmp (cmethod
->name
, "get_OffsetToStringData") == 0) {
3766 EMIT_NEW_ICONST (cfg
, ins
, G_STRUCT_OFFSET (MonoString
, chars
));
3770 } else if (cmethod
->klass
== mono_defaults
.thread_class
) {
3771 if (strcmp (cmethod
->name
, "get_CurrentThread") == 0 && (ins
= mono_arch_get_thread_intrinsic (cfg
))) {
3772 ins
->dreg
= alloc_preg (cfg
);
3773 ins
->type
= STACK_OBJ
;
3774 MONO_ADD_INS (cfg
->cbb
, ins
);
3776 } else if (strcmp (cmethod
->name
, "SpinWait_nop") == 0) {
3777 MONO_INST_NEW (cfg
, ins
, OP_RELAXED_NOP
);
3778 MONO_ADD_INS (cfg
->cbb
, ins
);
3780 } else if (strcmp (cmethod
->name
, "MemoryBarrier") == 0) {
3781 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
3782 MONO_ADD_INS (cfg
->cbb
, ins
);
3785 } else if (cmethod
->klass
== mono_defaults
.monitor_class
) {
3786 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3787 if (strcmp (cmethod
->name
, "Enter") == 0) {
3790 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_ENTER
,
3791 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3792 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3793 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3795 return (MonoInst
*)call
;
3796 } else if (strcmp (cmethod
->name
, "Exit") == 0) {
3799 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_MONITOR_EXIT
,
3800 NULL
, helper_sig_monitor_enter_exit_trampoline
, NULL
);
3801 mono_call_inst_add_outarg_reg (cfg
, call
, args
[0]->dreg
,
3802 MONO_ARCH_MONITOR_OBJECT_REG
, FALSE
);
3804 return (MonoInst
*)call
;
3806 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3807 MonoMethod
*fast_method
= NULL
;
3809 /* Avoid infinite recursion */
3810 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
&&
3811 (strcmp (cfg
->method
->name
, "FastMonitorEnter") == 0 ||
3812 strcmp (cfg
->method
->name
, "FastMonitorExit") == 0))
3815 if (strcmp (cmethod
->name
, "Enter") == 0 ||
3816 strcmp (cmethod
->name
, "Exit") == 0)
3817 fast_method
= mono_monitor_get_fast_path (cmethod
);
3821 return (MonoInst
*)mono_emit_method_call (cfg
, fast_method
, args
, NULL
);
3823 } else if (mini_class_is_system_array (cmethod
->klass
) &&
3824 strcmp (cmethod
->name
, "GetGenericValueImpl") == 0) {
3825 MonoInst
*addr
, *store
, *load
;
3826 MonoClass
*eklass
= mono_class_from_mono_type (fsig
->params
[1]);
3828 addr
= mini_emit_ldelema_1_ins (cfg
, eklass
, args
[0], args
[1]);
3829 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, &eklass
->byval_arg
, addr
->dreg
, 0);
3830 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, &eklass
->byval_arg
, args
[2]->dreg
, 0, load
->dreg
);
3832 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
&&
3833 (strcmp (cmethod
->klass
->name_space
, "System.Threading") == 0) &&
3834 (strcmp (cmethod
->klass
->name
, "Interlocked") == 0)) {
3837 #if SIZEOF_REGISTER == 8
3838 if (strcmp (cmethod
->name
, "Read") == 0 && (fsig
->params
[0]->type
== MONO_TYPE_I8
)) {
3839 /* 64 bit reads are already atomic */
3840 MONO_INST_NEW (cfg
, ins
, OP_LOADI8_MEMBASE
);
3841 ins
->dreg
= mono_alloc_preg (cfg
);
3842 ins
->inst_basereg
= args
[0]->dreg
;
3843 ins
->inst_offset
= 0;
3844 MONO_ADD_INS (cfg
->cbb
, ins
);
3848 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3849 if (strcmp (cmethod
->name
, "Increment") == 0) {
3850 MonoInst
*ins_iconst
;
3853 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3854 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3855 #if SIZEOF_REGISTER == 8
3856 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3857 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3860 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3861 ins_iconst
->inst_c0
= 1;
3862 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3863 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3865 MONO_INST_NEW (cfg
, ins
, opcode
);
3866 ins
->dreg
= mono_alloc_ireg (cfg
);
3867 ins
->inst_basereg
= args
[0]->dreg
;
3868 ins
->inst_offset
= 0;
3869 ins
->sreg2
= ins_iconst
->dreg
;
3870 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3871 MONO_ADD_INS (cfg
->cbb
, ins
);
3873 } else if (strcmp (cmethod
->name
, "Decrement") == 0) {
3874 MonoInst
*ins_iconst
;
3877 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3878 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3879 #if SIZEOF_REGISTER == 8
3880 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3881 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3884 MONO_INST_NEW (cfg
, ins_iconst
, OP_ICONST
);
3885 ins_iconst
->inst_c0
= -1;
3886 ins_iconst
->dreg
= mono_alloc_ireg (cfg
);
3887 MONO_ADD_INS (cfg
->cbb
, ins_iconst
);
3889 MONO_INST_NEW (cfg
, ins
, opcode
);
3890 ins
->dreg
= mono_alloc_ireg (cfg
);
3891 ins
->inst_basereg
= args
[0]->dreg
;
3892 ins
->inst_offset
= 0;
3893 ins
->sreg2
= ins_iconst
->dreg
;
3894 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3895 MONO_ADD_INS (cfg
->cbb
, ins
);
3897 } else if (strcmp (cmethod
->name
, "Add") == 0) {
3900 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3901 opcode
= OP_ATOMIC_ADD_NEW_I4
;
3902 #if SIZEOF_REGISTER == 8
3903 else if (fsig
->params
[0]->type
== MONO_TYPE_I8
)
3904 opcode
= OP_ATOMIC_ADD_NEW_I8
;
3908 MONO_INST_NEW (cfg
, ins
, opcode
);
3909 ins
->dreg
= mono_alloc_ireg (cfg
);
3910 ins
->inst_basereg
= args
[0]->dreg
;
3911 ins
->inst_offset
= 0;
3912 ins
->sreg2
= args
[1]->dreg
;
3913 ins
->type
= (opcode
== OP_ATOMIC_ADD_NEW_I4
) ? STACK_I4
: STACK_I8
;
3914 MONO_ADD_INS (cfg
->cbb
, ins
);
3917 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3919 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3920 if (strcmp (cmethod
->name
, "Exchange") == 0) {
3923 if (fsig
->params
[0]->type
== MONO_TYPE_I4
)
3924 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3925 #if SIZEOF_REGISTER == 8
3926 else if ((fsig
->params
[0]->type
== MONO_TYPE_I8
) ||
3927 (fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3928 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3929 opcode
= OP_ATOMIC_EXCHANGE_I8
;
3931 else if ((fsig
->params
[0]->type
== MONO_TYPE_I
) ||
3932 (fsig
->params
[0]->type
== MONO_TYPE_OBJECT
))
3933 opcode
= OP_ATOMIC_EXCHANGE_I4
;
3938 MONO_INST_NEW (cfg
, ins
, opcode
);
3939 ins
->dreg
= mono_alloc_ireg (cfg
);
3940 ins
->inst_basereg
= args
[0]->dreg
;
3941 ins
->inst_offset
= 0;
3942 ins
->sreg2
= args
[1]->dreg
;
3943 MONO_ADD_INS (cfg
->cbb
, ins
);
3945 switch (fsig
->params
[0]->type
) {
3947 ins
->type
= STACK_I4
;
3951 ins
->type
= STACK_I8
;
3953 case MONO_TYPE_OBJECT
:
3954 ins
->type
= STACK_OBJ
;
3957 g_assert_not_reached ();
3960 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3962 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3963 if ((strcmp (cmethod
->name
, "CompareExchange") == 0)) {
3965 if (fsig
->params
[1]->type
== MONO_TYPE_I4
)
3967 else if (fsig
->params
[1]->type
== MONO_TYPE_I
|| MONO_TYPE_IS_REFERENCE (fsig
->params
[1]))
3968 size
= sizeof (gpointer
);
3969 else if (sizeof (gpointer
) == 8 && fsig
->params
[1]->type
== MONO_TYPE_I4
)
3972 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I4
);
3973 ins
->dreg
= alloc_ireg (cfg
);
3974 ins
->sreg1
= args
[0]->dreg
;
3975 ins
->sreg2
= args
[1]->dreg
;
3976 ins
->sreg3
= args
[2]->dreg
;
3977 ins
->type
= STACK_I4
;
3978 MONO_ADD_INS (cfg
->cbb
, ins
);
3979 } else if (size
== 8) {
3980 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_CAS_I8
);
3981 ins
->dreg
= alloc_ireg (cfg
);
3982 ins
->sreg1
= args
[0]->dreg
;
3983 ins
->sreg2
= args
[1]->dreg
;
3984 ins
->sreg3
= args
[2]->dreg
;
3985 ins
->type
= STACK_I8
;
3986 MONO_ADD_INS (cfg
->cbb
, ins
);
3988 /* g_assert_not_reached (); */
3991 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3995 } else if (cmethod
->klass
->image
== mono_defaults
.corlib
) {
3996 if (cmethod
->name
[0] == 'B' && strcmp (cmethod
->name
, "Break") == 0
3997 && strcmp (cmethod
->klass
->name
, "Debugger") == 0) {
3998 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
3999 MONO_ADD_INS (cfg
->cbb
, ins
);
4002 if (cmethod
->name
[0] == 'g' && strcmp (cmethod
->name
, "get_IsRunningOnWindows") == 0
4003 && strcmp (cmethod
->klass
->name
, "Environment") == 0) {
4004 #ifdef PLATFORM_WIN32
4005 EMIT_NEW_ICONST (cfg
, ins
, 1);
4007 EMIT_NEW_ICONST (cfg
, ins
, 0);
4011 } else if (cmethod
->klass
== mono_defaults
.math_class
) {
4013 * There is general branches code for Min/Max, but it does not work for
4015 * http://everything2.com/?node_id=1051618
4019 #ifdef MONO_ARCH_SIMD_INTRINSICS
4020 if (cfg
->opt
& MONO_OPT_SIMD
) {
4021 ins
= mono_emit_simd_intrinsics (cfg
, cmethod
, fsig
, args
);
4027 return mono_arch_emit_inst_for_method (cfg
, cmethod
, fsig
, args
);
4031 * This entry point could be used later for arbitrary method
4034 inline static MonoInst
*
4035 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
4036 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this)
4038 if (method
->klass
== mono_defaults
.string_class
) {
4039 /* managed string allocation support */
4040 if (strcmp (method
->name
, "InternalAllocateStr") == 0) {
4041 MonoInst
*iargs
[2];
4042 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, method
->klass
);
4043 #ifdef MONO_CROSS_COMPILE
4044 MonoMethod
*managed_alloc
= NULL
;
4046 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (vtable
, FALSE
);
4050 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4051 iargs
[1] = args
[0];
4052 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this);
4059 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
4061 MonoInst
*store
, *temp
;
4064 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4065 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
4068 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4069 * would be different than the MonoInst's used to represent arguments, and
4070 * the ldelema implementation can't deal with that.
4071 * Solution: When ldelema is used on an inline argument, create a var for
4072 * it, emit ldelema on that var, and emit the saving code below in
4073 * inline_method () if needed.
4075 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
4076 cfg
->args
[i
] = temp
;
4077 /* This uses cfg->args [i] which is set by the preceeding line */
4078 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
4079 store
->cil_code
= sp
[0]->cil_code
;
4084 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4085 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4087 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4089 check_inline_called_method_name_limit (MonoMethod
*called_method
)
4092 static char *limit
= NULL
;
4094 if (limit
== NULL
) {
4095 char *limit_string
= getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4097 if (limit_string
!= NULL
)
4098 limit
= limit_string
;
4100 limit
= (char *) "";
4103 if (limit
[0] != '\0') {
4104 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
4106 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
4107 g_free (called_method_name
);
4109 //return (strncmp_result <= 0);
4110 return (strncmp_result
== 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Return TRUE if CALLER_METHOD is allowed to inline callees according to
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable: when set,
 * only callers whose full name starts with that prefix perform inlining.
 * The limit string is cached on first use.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4148 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4149 guchar
*ip
, guint real_offset
, GList
*dont_inline
, gboolean inline_allways
)
4151 MonoInst
*ins
, *rvar
= NULL
;
4152 MonoMethodHeader
*cheader
;
4153 MonoBasicBlock
*ebblock
, *sbblock
;
4155 MonoMethod
*prev_inlined_method
;
4156 MonoInst
**prev_locals
, **prev_args
;
4157 MonoType
**prev_arg_types
;
4158 guint prev_real_offset
;
4159 GHashTable
*prev_cbb_hash
;
4160 MonoBasicBlock
**prev_cil_offset_to_bb
;
4161 MonoBasicBlock
*prev_cbb
;
4162 unsigned char* prev_cil_start
;
4163 guint32 prev_cil_offset_to_bb_len
;
4164 MonoMethod
*prev_current_method
;
4165 MonoGenericContext
*prev_generic_context
;
4166 gboolean ret_var_set
, prev_ret_var_set
;
4168 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
4170 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4171 if ((! inline_allways
) && ! check_inline_called_method_name_limit (cmethod
))
4174 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4175 if ((! inline_allways
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4179 if (cfg
->verbose_level
> 2)
4180 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4182 if (!cmethod
->inline_info
) {
4183 mono_jit_stats
.inlineable_methods
++;
4184 cmethod
->inline_info
= 1;
4186 /* allocate space to store the return value */
4187 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4188 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
4191 /* allocate local variables */
4192 cheader
= mono_method_get_header (cmethod
);
4193 prev_locals
= cfg
->locals
;
4194 cfg
->locals
= mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4195 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4196 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4198 /* allocate start and end blocks */
4199 /* This is needed so if the inline is aborted, we can clean up */
4200 NEW_BBLOCK (cfg
, sbblock
);
4201 sbblock
->real_offset
= real_offset
;
4203 NEW_BBLOCK (cfg
, ebblock
);
4204 ebblock
->block_num
= cfg
->num_bblocks
++;
4205 ebblock
->real_offset
= real_offset
;
4207 prev_args
= cfg
->args
;
4208 prev_arg_types
= cfg
->arg_types
;
4209 prev_inlined_method
= cfg
->inlined_method
;
4210 cfg
->inlined_method
= cmethod
;
4211 cfg
->ret_var_set
= FALSE
;
4212 prev_real_offset
= cfg
->real_offset
;
4213 prev_cbb_hash
= cfg
->cbb_hash
;
4214 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4215 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4216 prev_cil_start
= cfg
->cil_start
;
4217 prev_cbb
= cfg
->cbb
;
4218 prev_current_method
= cfg
->current_method
;
4219 prev_generic_context
= cfg
->generic_context
;
4220 prev_ret_var_set
= cfg
->ret_var_set
;
4222 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, dont_inline
, sp
, real_offset
, *ip
== CEE_CALLVIRT
);
4224 ret_var_set
= cfg
->ret_var_set
;
4226 cfg
->inlined_method
= prev_inlined_method
;
4227 cfg
->real_offset
= prev_real_offset
;
4228 cfg
->cbb_hash
= prev_cbb_hash
;
4229 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4230 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4231 cfg
->cil_start
= prev_cil_start
;
4232 cfg
->locals
= prev_locals
;
4233 cfg
->args
= prev_args
;
4234 cfg
->arg_types
= prev_arg_types
;
4235 cfg
->current_method
= prev_current_method
;
4236 cfg
->generic_context
= prev_generic_context
;
4237 cfg
->ret_var_set
= prev_ret_var_set
;
4239 if ((costs
>= 0 && costs
< 60) || inline_allways
) {
4240 if (cfg
->verbose_level
> 2)
4241 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4243 mono_jit_stats
.inlined_methods
++;
4245 /* always add some code to avoid block split failures */
4246 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4247 MONO_ADD_INS (prev_cbb
, ins
);
4249 prev_cbb
->next_bb
= sbblock
;
4250 link_bblock (cfg
, prev_cbb
, sbblock
);
4253 * Get rid of the begin and end bblocks if possible to aid local
4256 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4258 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4259 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
4261 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4262 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4263 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4265 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4266 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4267 cfg
->cbb
= prev_cbb
;
4275 * If the inlined method contains only a throw, then the ret var is not
4276 * set, so set it to a dummy value.
4279 static double r8_0
= 0.0;
4281 switch (rvar
->type
) {
4283 MONO_EMIT_NEW_ICONST (cfg
, rvar
->dreg
, 0);
4286 MONO_EMIT_NEW_I8CONST (cfg
, rvar
->dreg
, 0);
4291 MONO_EMIT_NEW_PCONST (cfg
, rvar
->dreg
, 0);
4294 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4295 ins
->type
= STACK_R8
;
4296 ins
->inst_p0
= (void*)&r8_0
;
4297 ins
->dreg
= rvar
->dreg
;
4298 MONO_ADD_INS (cfg
->cbb
, ins
);
4301 MONO_EMIT_NEW_VZERO (cfg
, rvar
->dreg
, mono_class_from_mono_type (fsig
->ret
));
4304 g_assert_not_reached ();
4308 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
4313 if (cfg
->verbose_level
> 2)
4314 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod
, TRUE
));
4315 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4316 mono_loader_clear_error ();
4318 /* This gets rid of the newly added bblocks */
4319 cfg
->cbb
= prev_cbb
;
4325 * Some of these comments may well be out-of-date.
4326 * Design decisions: we do a single pass over the IL code (and we do bblock
4327 * splitting/merging in the few cases when it's required: a back jump to an IL
4328 * address that was not already seen as bblock starting point).
4329 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4330 * Complex operations are decomposed in simpler ones right away. We need to let the
4331 * arch-specific code peek and poke inside this process somehow (except when the
4332 * optimizations can take advantage of the full semantic info of coarse opcodes).
4333 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4334 * MonoInst->opcode initially is the IL opcode or some simplification of that
4335 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4336 * opcode with value bigger than OP_LAST.
4337 * At this point the IR can be handed over to an interpreter, a dumb code generator
4338 * or to the optimizing code generator that will translate it to SSA form.
4340 * Profiling directed optimizations.
4341 * We may compile by default with few or no optimizations and instrument the code
4342 * or the user may indicate what methods to optimize the most either in a config file
4343 * or through repeated runs where the compiler applies offline the optimizations to
4344 * each method and then decides if it was worth it.
4347 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4348 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4349 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4350 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4351 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4352 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4353 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4354 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4356 /* offset from br.s -> br like opcodes */
4357 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return a non-zero value if the IL address IP can be considered part of
 * basic block BB: either no bblock has been assigned to that IL offset yet,
 * or the assigned bblock is BB itself.
 * NOTE(review): return-type line was missing from the extraction and has been
 * reconstructed as 'static int' — verify against the original file.
 */
static int
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
{
	/* cil_offset_to_bb maps IL offsets (relative to cil_start) to bblocks */
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];

	return b == NULL || b == bb;
}
/*
 * get_basic_blocks:
 *
 *   Make one linear pass over the IL code in [START, END), creating a
 * MonoBasicBlock (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch. Basic blocks that contain only a throw
 * are flagged out-of-line so later passes can treat them as cold code.
 * NOTE(review): recovered from a line-mangled extraction; the scan loop,
 * per-case ip advancement, 'break's and the unverified exit were missing
 * and have been reconstructed — verify against the original method-to-ir.c.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1-byte signed displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			/* the fall-through instruction starts a bblock too */
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			/* 4-byte signed displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* the default (fall-through) target follows the jump table */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			/* each table entry is a signed displacement from the end of the switch */
			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start--;
			}
			if (bblock)
				/* move throw-only bblocks out of the hot code path */
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M to a MonoMethod, allowing the
 * result to be an open constructed method. For wrapper methods the token
 * indexes the wrapper's private data instead of metadata.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method;

	/* wrappers keep their "tokens" in per-wrapper data, not in the image metadata */
	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);

	method = mono_get_method_full (m->klass->image, token, klass, context);

	return method;
}
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared generic
 * code, reject a method whose class is still an open constructed type.
 * NOTE(review): the rejection-branch body was missing from the extraction and
 * is reconstructed as 'method = NULL' — verify against the original file.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	/* outside generic sharing, open constructed types cannot be compiled */
	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
		method = NULL;

	return method;
}
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD (using the wrapper
 * data table for wrapper methods) and initialize the class before returning.
 * NOTE(review): declaration/return lines were missing from the extraction and
 * have been reconstructed — verify against the original file.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
{
	MonoClass *klass;

	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
	else
		klass = mono_class_get_full (method->klass->image, token, context);
	if (klass)
		mono_class_init (klass);
	return klass;
}
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 * NOTE(review): the early-return, declarations and final return were missing
 * from the extraction and have been reconstructed — verify against upstream.
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* when inlining (cfg->method != caller) any declarative security on the
	 * callee makes inlining unsafe, so abort it */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
	}

	return FALSE;
}
/*
 * method_access_exception:
 *
 *   Lazily look up and cache the SecurityManager method used to throw a
 * MethodAccessException (the 2-argument overload).
 * NOTE(review): the lazy-init guard and return were missing from the
 * extraction and have been reconstructed — verify against upstream.
 */
static MonoMethod*
method_access_exception (void)
{
	/* cached across calls; looked up once from the security manager class */
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "MethodAccessException", 2);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_method_access_exception:
 *
 *   Emit IR that calls SecurityManager.MethodAccessException (caller, callee),
 * raising the exception before the inaccessible call would execute.
 */
static void
emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
				    MonoBasicBlock *bblock, unsigned char *ip)
{
	MonoMethod *thrower = method_access_exception ();
	MonoInst *args [2];

	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	EMIT_NEW_METHODCONST (cfg, args [1], callee);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
/*
 * field_access_exception:
 *
 *   Lazily look up and cache the SecurityManager method used to throw a
 * FieldAccessException (the 2-argument overload).
 * NOTE(review): the lazy-init guard and return were missing from the
 * extraction and have been reconstructed — verify against upstream.
 */
static MonoMethod*
field_access_exception (void)
{
	/* cached across calls; looked up once from the security manager class */
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager,
							  "FieldAccessException", 2);
	}
	g_assert (method);
	return method;
}
/*
 * emit_throw_field_access_exception:
 *
 *   Emit IR that calls SecurityManager.FieldAccessException (caller, field),
 * raising the exception before the inaccessible field access would execute.
 */
static void
emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
				   MonoBasicBlock *bblock, unsigned char *ip)
{
	MonoMethod *thrower = field_access_exception ();
	MonoInst *args [2];

	EMIT_NEW_METHODCONST (cfg, args [0], caller);
	/* the field pointer is passed through the same constant mechanism */
	EMIT_NEW_METHODCONST (cfg, args [1], field);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
/*
 * Return the original method is a wrapper is specified. We can only access
 * the custom attributes from the original method.
 * Returns NULL for native-to-managed wrappers, which have no managed caller
 * whose attributes could be inspected.
 */
static MonoMethod*
get_original_method (MonoMethod *method)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return method;

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		return NULL;

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
}
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if FIELD's declaring class is Critical and CALLER
 * is Transparent, emit IR that throws a FieldAccessException at the access
 * site. No code is emitted for allowed accesses.
 * NOTE(review): early-return lines were missing from the extraction and have
 * been reconstructed — verify against upstream.
 */
static void
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
{
	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
		return;

	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	if (!caller)
		return;

	/* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
}
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLEE is Critical and CALLER is Transparent,
 * emit IR that throws a MethodAccessException at the call site. No code is
 * emitted for allowed calls.
 * NOTE(review): early-return lines were missing from the extraction and have
 * been reconstructed — verify against upstream.
 */
static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
{
	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
		return;

	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	if (!caller)
		return;

	/* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
}
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * On success *out_size receives the byte size and *out_field_token the
 * metadata token of the RVA-backed field; for AOT compilation the RVA itself
 * is returned (looked up at load time). Returns NULL when the pattern does
 * not match or the element type cannot be handled.
 * NOTE(review): recovered from a line-mangled extraction; the element-size
 * switch entries, declarations and several guards were missing and have been
 * reconstructed — verify against the original method-to-ir.c.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* the call target must really be RuntimeHelpers.InitializeArray from corlib */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1;
			break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2;
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4;
			break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8;
			break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* the static field must be large enough to hold the whole array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the IL instruction at
 * IP (or noting an empty method body). The message owns copies of the
 * formatted names, so the temporaries are freed here.
 * NOTE(review): declaration/else lines were missing from the extraction and
 * have been reconstructed — verify against upstream.
 */
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *method_code;

	if (mono_method_get_header (method)->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	else
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	/* g_strdup_printf copied the strings, so the temporaries can go */
	g_free (method_fname);
	g_free (method_code);
}
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG. The exception_ptr
 * slot is registered as a GC root first so the object is kept alive while
 * the compile aborts.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving generic type
 * variables through the generic sharing context when one is active.
 */
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
	MonoType *type;

	/* under generic sharing a type variable must be mapped to its basic type first */
	if (cfg->generic_sharing_context)
		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
	else
		type = &klass->byval_arg;
	return MONO_TYPE_IS_REFERENCE (type);
}
/**
 * mono_decompose_array_access_opts:
 *
 *  Decompose array access opcodes.
 * This should be in decompose.c, but it emits calls so it has to stay here until
 * the old JIT is gone.
 * NOTE(review): recovered from a line-mangled extraction; case labels
 * (LDLEN/NEWARR/STRLEN), 'break's, declarations and closing braces were
 * missing and have been reconstructed — verify against the original file.
 */
void
mono_decompose_array_access_opts (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
	 * can be executed anytime. It should be run before decompose_long
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		MonoInst *dest;
		MonoInst *iargs [3];

		/* only look at bblocks that actually contain array access opcodes */
		if (!bb->has_array_access)
			continue;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;

		for (ins = bb->code; ins; ins = ins->next) {
			switch (ins->opcode) {
			case OP_LDLEN:
				/* the array length lives in MonoArray.max_length */
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
						  G_STRUCT_OFFSET (MonoArray, max_length));
				MONO_ADD_INS (cfg->cbb, dest);
				break;
			case OP_BOUNDS_CHECK:
				MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
				break;
			case OP_NEWARR:
				if (cfg->opt & MONO_OPT_SHARED) {
					/* domain-shared code cannot bake in a vtable */
					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
					MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
					iargs [2]->dreg = ins->sreg1;

					dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
					dest->dreg = ins->dreg;
				} else {
					MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));

					NEW_VTABLECONST (cfg, iargs [0], vtable);
					MONO_ADD_INS (cfg->cbb, iargs [0]);
					MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
					iargs [1]->dreg = ins->sreg1;

					dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
					dest->dreg = ins->dreg;
				}
				break;
			case OP_STRLEN:
				/* the string length lives in MonoString.length */
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
						  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
				MONO_ADD_INS (cfg->cbb, dest);
				break;
			default:
				break;
			}

			g_assert (cfg->cbb == first_bb);

			if (cfg->cbb->code || (cfg->cbb != first_bb)) {
				/* Replace the original instruction with the new code sequence */

				mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
				first_bb->code = first_bb->last_ins = NULL;
				first_bb->in_count = first_bb->out_count = 0;
				cfg->cbb = first_bb;
			}
			else
				prev = ins;
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
	}
}
4867 #ifdef MONO_ARCH_SOFT_FLOAT
/**
 * mono_decompose_soft_float:
 *
 *  Soft float support on ARM. We store each double value in a pair of integer vregs,
 * similar to long support on 32 bit platforms. 32 bit float values require special
 * handling when used as locals, arguments, and in calls.
 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 * NOTE(review): recovered from a line-mangled extraction; many case labels,
 * the 'restart' loop scaffolding, 'break's and closing braces were missing
 * and have been reconstructed — verify against the original method-to-ir.c.
 */
void
mono_decompose_soft_float (MonoCompile *cfg)
{
	MonoBasicBlock *bb, *first_bb;

	/*
	 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
	 */

	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;
		MonoInst *prev = NULL;
		gboolean restart;

		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");

		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		restart = TRUE;

		while (restart) {
			restart = FALSE;

			for (ins = bb->code; ins; ins = ins->next) {
				const char *spec = INS_INFO (ins->opcode);

				/* Most fp operations are handled automatically by opcode emulation */

				switch (ins->opcode) {
				case OP_R8CONST: {
					DVal d;
					/* reinterpret the double's bits as an i8 constant */
					d.vald = *(double*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_R4CONST: {
					DVal d;
					/* We load the r8 value */
					d.vald = *(float*)ins->inst_p0;
					MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
					break;
				}
				case OP_FMOVE:
					/* doubles live in integer vreg pairs, so a move is a long move */
					ins->opcode = OP_LMOVE;
					break;
				case OP_FGETLOW32:
					/* low half of the pair is at sreg1 + 1 */
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 1;
					break;
				case OP_FGETHIGH32:
					/* high half of the pair is at sreg1 + 2 */
					ins->opcode = OP_MOVE;
					ins->sreg1 = ins->sreg1 + 2;
					break;
				case OP_SETFRET: {
					int reg = ins->sreg1;

					ins->opcode = OP_SETLRET;
					ins->dreg = -1;
					ins->sreg1 = reg + 1;
					ins->sreg2 = reg + 2;
					break;
				}
				case OP_LOADR8_MEMBASE:
					ins->opcode = OP_LOADI8_MEMBASE;
					break;
				case OP_STORER8_MEMBASE_REG:
					ins->opcode = OP_STOREI8_MEMBASE_REG;
					break;
				case OP_STORER4_MEMBASE_REG: {
					MonoInst *iargs [2];
					int addr_reg;

					/* Convert the store into a call to mono_fstore_r4 () */

					/* Arg 1 is the double value */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					/* Arg 2 is the address to store to */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
					mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
					restart = TRUE;
					break;
				}
				case OP_LOADR4_MEMBASE: {
					MonoInst *iargs [1];
					MonoInst *conv;
					int addr_reg;

					/* Convert the load into a call to mono_fload_r4 () */
					addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					break;
				}
				case OP_FCALL:
				case OP_FCALL_REG:
				case OP_FCALL_MEMBASE: {
					MonoCallInst *call = (MonoCallInst*)ins;
					if (call->signature->ret->type == MONO_TYPE_R4) {
						MonoCallInst *call2;
						MonoInst *iargs [1];
						MonoInst *conv;

						/* Convert the call into a call returning an int */
						MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
						memcpy (call2, call, sizeof (MonoCallInst));
						switch (ins->opcode) {
						case OP_FCALL:
							call2->inst.opcode = OP_CALL;
							break;
						case OP_FCALL_REG:
							call2->inst.opcode = OP_CALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							call2->inst.opcode = OP_CALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
						call2->inst.dreg = mono_alloc_ireg (cfg);
						MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);

						/* FIXME: Optimize this */

						/* Emit an r4->r8 conversion */
						EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
						conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
						conv->dreg = ins->dreg;
					} else {
						/* an r8 return is simply a long return */
						switch (ins->opcode) {
						case OP_FCALL:
							ins->opcode = OP_LCALL;
							break;
						case OP_FCALL_REG:
							ins->opcode = OP_LCALL_REG;
							break;
						case OP_FCALL_MEMBASE:
							ins->opcode = OP_LCALL_MEMBASE;
							break;
						default:
							g_assert_not_reached ();
						}
					}
					restart = TRUE;
					break;
				}
				case OP_FCOMPARE: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call, *cmp, *br;

					/* Convert fcompare+fbcc to icall+icompare+beq */

					info = mono_find_jit_opcode_emulation (ins->next->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 0;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_INST_NEW (cfg, br, OP_IBNE_UN);
					br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
					br->inst_true_bb = ins->next->inst_true_bb;
					br->inst_false_bb = ins->next->inst_false_bb;
					MONO_ADD_INS (cfg->cbb, br);

					/* The call sequence might include fp ins */
					restart = TRUE;

					/* Skip fbcc or fccc */
					NULLIFY_INS (ins->next);
					break;
				}
				case OP_FCEQ:
				case OP_FCGT:
				case OP_FCGT_UN:
				case OP_FCLT:
				case OP_FCLT_UN: {
					MonoJitICallInfo *info;
					MonoInst *iargs [2];
					MonoInst *call;

					/* Convert fccc to icall+icompare+iceq */

					info = mono_find_jit_opcode_emulation (ins->opcode);
					g_assert (info);

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;
					MONO_INST_NEW (cfg, iargs [1], OP_ARG);
					iargs [1]->dreg = ins->sreg2;

					call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);

					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
					MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);

					/* The call sequence might include fp ins */
					restart = TRUE;
					break;
				}
				case OP_CKFINITE: {
					MonoInst *iargs [2];
					MonoInst *call, *cmp;

					/* Convert to icall+icompare+cond_exc+move */

					/* Create dummy MonoInst's for the arguments */
					MONO_INST_NEW (cfg, iargs [0], OP_ARG);
					iargs [0]->dreg = ins->sreg1;

					call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);

					MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
					cmp->sreg1 = call->dreg;
					cmp->inst_imm = 1;
					MONO_ADD_INS (cfg->cbb, cmp);

					MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");

					/* Do the assignment if the value is finite */
					MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);

					restart = TRUE;
					break;
				}
				default:
					/* nothing else should still reference fp vregs at this point */
					if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
						mono_print_ins (ins);
						g_assert_not_reached ();
					}
					break;
				}

				g_assert (cfg->cbb == first_bb);

				if (cfg->cbb->code || (cfg->cbb != first_bb)) {
					/* Replace the original instruction with the new code sequence */

					mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
					first_bb->code = first_bb->last_ins = NULL;
					first_bb->in_count = first_bb->out_count = 0;
					cfg->cbb = first_bb;
				}
				else
					prev = ins;
			}
		}

		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	}

	mono_decompose_long_opts (cfg);
}
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value SP[0] into local N. When the store
 * would be a plain reg-reg move and SP[0] is the last emitted instruction and
 * a simple constant, the move is folded by retargeting the constant's dreg.
 * NOTE(review): declaration and else-branch lines were missing from the
 * extraction and have been reconstructed — verify against upstream.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases. Recognizes the ldloca + initobj sequence and replaces it with a
 * direct NULL store / VZERO on the local, returning the IL address after the
 * consumed initobj on success or NULL when the pattern does not apply.
 * NOTE(review): recovered from a line-mangled extraction; the size dispatch,
 * 'skip' fall-through and load_error exit were missing and have been
 * reconstructed — verify against the original method-to-ir.c.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* size selects between ldloca.s (1-byte index) and ldloca (2-byte index) */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
/*
 * is_exception_class:
 *
 *   Walk the inheritance chain of CLASS and return TRUE if it derives from
 * (or is) System.Exception.
 * NOTE(review): loop wrapper and returns were missing from the extraction and
 * have been reconstructed — verify against upstream.
 */
static gboolean
is_exception_class (MonoClass *class)
{
	while (class) {
		if (class == mono_defaults.exception_class)
			return TRUE;
		class = class->parent;
	}
	return FALSE;
}
5221 * mono_method_to_ir:
5223 * Translate the .net IL into linear IR.
5226 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5227 MonoInst
*return_var
, GList
*dont_inline
, MonoInst
**inline_args
,
5228 guint inline_offset
, gboolean is_virtual_call
)
5230 MonoInst
*ins
, **sp
, **stack_start
;
5231 MonoBasicBlock
*bblock
, *tblock
= NULL
, *init_localsbb
= NULL
;
5232 MonoMethod
*cmethod
, *method_definition
;
5233 MonoInst
**arg_array
;
5234 MonoMethodHeader
*header
;
5236 guint32 token
, ins_flag
;
5238 MonoClass
*constrained_call
= NULL
;
5239 unsigned char *ip
, *end
, *target
, *err_pos
;
5240 static double r8_0
= 0.0;
5241 MonoMethodSignature
*sig
;
5242 MonoGenericContext
*generic_context
= NULL
;
5243 MonoGenericContainer
*generic_container
= NULL
;
5244 MonoType
**param_types
;
5245 int i
, n
, start_new_bblock
, dreg
;
5246 int num_calls
= 0, inline_costs
= 0;
5247 int breakpoint_id
= 0;
5249 MonoBoolean security
, pinvoke
;
5250 MonoSecurityManager
* secman
= NULL
;
5251 MonoDeclSecurityActions actions
;
5252 GSList
*class_inits
= NULL
;
5253 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5255 gboolean init_locals
;
5257 /* serialization and xdomain stuff may need access to private fields and methods */
5258 dont_verify
= method
->klass
->image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5259 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5260 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5261 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5262 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5263 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5265 dont_verify
|= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK
;
5267 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5268 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5269 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_UNKNOWN
;
5270 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
5272 image
= method
->klass
->image
;
5273 header
= mono_method_get_header (method
);
5274 generic_container
= mono_method_get_generic_container (method
);
5275 sig
= mono_method_signature (method
);
5276 num_args
= sig
->hasthis
+ sig
->param_count
;
5277 ip
= (unsigned char*)header
->code
;
5278 cfg
->cil_start
= ip
;
5279 end
= ip
+ header
->code_size
;
5280 mono_jit_stats
.cil_code_size
+= header
->code_size
;
5281 init_locals
= header
->init_locals
;
5284 * Methods without init_locals set could cause asserts in various passes
5289 method_definition
= method
;
5290 while (method_definition
->is_inflated
) {
5291 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
5292 method_definition
= imethod
->declaring
;
5295 /* SkipVerification is not allowed if core-clr is enabled */
5296 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
5298 dont_verify_stloc
= TRUE
;
5301 if (!dont_verify
&& mini_method_verify (cfg
, method_definition
))
5302 goto exception_exit
;
5304 if (mono_debug_using_mono_debugger ())
5305 cfg
->keep_cil_nops
= TRUE
;
5307 if (sig
->is_inflated
)
5308 generic_context
= mono_method_get_context (method
);
5309 else if (generic_container
)
5310 generic_context
= &generic_container
->context
;
5311 cfg
->generic_context
= generic_context
;
5313 if (!cfg
->generic_sharing_context
)
5314 g_assert (!sig
->has_type_parameters
);
5316 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
5317 g_assert (method
->is_inflated
);
5318 g_assert (mono_method_get_context (method
)->method_inst
);
5320 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
5321 g_assert (sig
->generic_param_count
);
5323 if (cfg
->method
== method
) {
5324 cfg
->real_offset
= 0;
5326 cfg
->real_offset
= inline_offset
;
5329 cfg
->cil_offset_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
5330 cfg
->cil_offset_to_bb_len
= header
->code_size
;
5332 cfg
->current_method
= method
;
5334 if (cfg
->verbose_level
> 2)
5335 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
5337 param_types
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
5339 param_types
[0] = method
->klass
->valuetype
?&method
->klass
->this_arg
:&method
->klass
->byval_arg
;
5340 for (n
= 0; n
< sig
->param_count
; ++n
)
5341 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
5342 cfg
->arg_types
= param_types
;
5344 dont_inline
= g_list_prepend (dont_inline
, method
);
5345 if (cfg
->method
== method
) {
5347 if (cfg
->prof_options
& MONO_PROFILE_INS_COVERAGE
)
5348 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
5351 NEW_BBLOCK (cfg
, start_bblock
);
5352 cfg
->bb_entry
= start_bblock
;
5353 start_bblock
->cil_code
= NULL
;
5354 start_bblock
->cil_length
= 0;
5357 NEW_BBLOCK (cfg
, end_bblock
);
5358 cfg
->bb_exit
= end_bblock
;
5359 end_bblock
->cil_code
= NULL
;
5360 end_bblock
->cil_length
= 0;
5361 g_assert (cfg
->num_bblocks
== 2);
5363 arg_array
= cfg
->args
;
5365 if (header
->num_clauses
) {
5366 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5367 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5369 /* handle exception clauses */
5370 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5371 MonoBasicBlock
*try_bb
;
5372 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5373 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5374 try_bb
->real_offset
= clause
->try_offset
;
5375 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5376 tblock
->real_offset
= clause
->handler_offset
;
5377 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5379 link_bblock (cfg
, try_bb
, tblock
);
5381 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5382 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5384 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5385 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5386 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5387 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5388 MONO_ADD_INS (tblock
, ins
);
5390 /* todo: is a fault block unsafe to optimize? */
5391 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5392 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5396 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5398 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5400 /* catch and filter blocks get the exception object on the stack */
5401 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5402 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5403 MonoInst
*dummy_use
;
5405 /* mostly like handle_stack_args (), but just sets the input args */
5406 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5407 tblock
->in_scount
= 1;
5408 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5409 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5412 * Add a dummy use for the exvar so its liveness info will be
5416 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5418 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5419 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5420 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5421 tblock
->real_offset
= clause
->data
.filter_offset
;
5422 tblock
->in_scount
= 1;
5423 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5424 /* The filter block shares the exvar with the handler block */
5425 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5426 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5427 MONO_ADD_INS (tblock
, ins
);
5431 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5432 clause
->data
.catch_class
&&
5433 cfg
->generic_sharing_context
&&
5434 mono_class_check_context_used (clause
->data
.catch_class
)) {
5436 * In shared generic code with catch
5437 * clauses containing type variables
5438 * the exception handling code has to
5439 * be able to get to the rgctx.
5440 * Therefore we have to make sure that
5441 * the vtable/mrgctx argument (for
5442 * static or generic methods) or the
5443 * "this" argument (for non-static
5444 * methods) are live.
5446 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5447 mini_method_get_context (method
)->method_inst
||
5448 method
->klass
->valuetype
) {
5449 mono_get_vtable_var (cfg
);
5451 MonoInst
*dummy_use
;
5453 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
5458 arg_array
= (MonoInst
**) alloca (sizeof (MonoInst
*) * num_args
);
5459 cfg
->cbb
= start_bblock
;
5460 cfg
->args
= arg_array
;
5461 mono_save_args (cfg
, sig
, inline_args
);
5464 /* FIRST CODE BLOCK */
5465 NEW_BBLOCK (cfg
, bblock
);
5466 bblock
->cil_code
= ip
;
5470 ADD_BBLOCK (cfg
, bblock
);
5472 if (cfg
->method
== method
) {
5473 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
5474 if (breakpoint_id
&& (mono_debug_format
!= MONO_DEBUG_FORMAT_DEBUGGER
)) {
5475 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5476 MONO_ADD_INS (bblock
, ins
);
5480 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
5481 secman
= mono_security_manager_get_methods ();
5483 security
= (secman
&& mono_method_has_declsec (method
));
5484 /* at this point having security doesn't mean we have any code to generate */
5485 if (security
&& (cfg
->method
== method
)) {
5486 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5487 * And we do not want to enter the next section (with allocation) if we
5488 * have nothing to generate */
5489 security
= mono_declsec_get_demands (method
, &actions
);
5492 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5493 pinvoke
= (secman
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
));
5495 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5496 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5497 MonoCustomAttrInfo
* custom
= mono_custom_attrs_from_method (wrapped
);
5499 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5500 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5504 mono_custom_attrs_free (custom
);
5507 custom
= mono_custom_attrs_from_class (wrapped
->klass
);
5508 if (custom
&& mono_custom_attrs_has_attr (custom
, secman
->suppressunmanagedcodesecurity
)) {
5512 mono_custom_attrs_free (custom
);
5515 /* not a P/Invoke after all */
5520 if ((init_locals
|| (cfg
->method
== method
&& (cfg
->opt
& MONO_OPT_SHARED
))) || cfg
->compile_aot
|| security
|| pinvoke
) {
5521 /* we use a separate basic block for the initialization code */
5522 NEW_BBLOCK (cfg
, init_localsbb
);
5523 cfg
->bb_init
= init_localsbb
;
5524 init_localsbb
->real_offset
= cfg
->real_offset
;
5525 start_bblock
->next_bb
= init_localsbb
;
5526 init_localsbb
->next_bb
= bblock
;
5527 link_bblock (cfg
, start_bblock
, init_localsbb
);
5528 link_bblock (cfg
, init_localsbb
, bblock
);
5530 cfg
->cbb
= init_localsbb
;
5532 start_bblock
->next_bb
= bblock
;
5533 link_bblock (cfg
, start_bblock
, bblock
);
5536 /* at this point we know, if security is TRUE, that some code needs to be generated */
5537 if (security
&& (cfg
->method
== method
)) {
5540 mono_jit_stats
.cas_demand_generation
++;
5542 if (actions
.demand
.blob
) {
5543 /* Add code for SecurityAction.Demand */
5544 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demand
);
5545 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demand
.size
);
5546 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5547 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5549 if (actions
.noncasdemand
.blob
) {
5550 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5551 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5552 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.noncasdemand
);
5553 EMIT_NEW_ICONST (cfg
, args
[1], actions
.noncasdemand
.size
);
5554 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5555 mono_emit_method_call (cfg
, secman
->demand
, args
, NULL
);
5557 if (actions
.demandchoice
.blob
) {
5558 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5559 EMIT_NEW_DECLSECCONST (cfg
, args
[0], image
, actions
.demandchoice
);
5560 EMIT_NEW_ICONST (cfg
, args
[1], actions
.demandchoice
.size
);
5561 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5562 mono_emit_method_call (cfg
, secman
->demandchoice
, args
, NULL
);
5566 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5568 mono_emit_method_call (cfg
, secman
->demandunmanaged
, NULL
, NULL
);
5571 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
5572 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
5573 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
5574 if (wrapped
&& (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
5575 if (!(method
->klass
&& method
->klass
->image
&&
5576 mono_security_core_clr_is_platform_image (method
->klass
->image
))) {
5577 emit_throw_method_access_exception (cfg
, method
, wrapped
, bblock
, ip
);
5583 if (header
->code_size
== 0)
5586 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
5591 if (cfg
->method
== method
)
5592 mono_debug_init_method (cfg
, bblock
, breakpoint_id
);
5594 for (n
= 0; n
< header
->num_locals
; ++n
) {
5595 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
5600 /* We force the vtable variable here for all shared methods
5601 for the possibility that they might show up in a stack
5602 trace where their exact instantiation is needed. */
5603 if (cfg
->generic_sharing_context
&& method
== cfg
->method
) {
5604 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5605 mini_method_get_context (method
)->method_inst
||
5606 method
->klass
->valuetype
) {
5607 mono_get_vtable_var (cfg
);
5609 /* FIXME: Is there a better way to do this?
5610 We need the variable live for the duration
5611 of the whole method. */
5612 cfg
->args
[0]->flags
|= MONO_INST_INDIRECT
;
5616 /* add a check for this != NULL to inlined methods */
5617 if (is_virtual_call
) {
5620 NEW_ARGLOAD (cfg
, arg_ins
, 0);
5621 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
5622 cfg
->flags
|= MONO_CFG_HAS_CHECK_THIS
;
5623 MONO_EMIT_NEW_UNALU (cfg
, OP_CHECK_THIS
, -1, arg_ins
->dreg
);
5624 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, arg_ins
->dreg
);
5627 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5628 stack_start
= sp
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
5631 start_new_bblock
= 0;
5635 if (cfg
->method
== method
)
5636 cfg
->real_offset
= ip
- header
->code
;
5638 cfg
->real_offset
= inline_offset
;
5643 if (start_new_bblock
) {
5644 bblock
->cil_length
= ip
- bblock
->cil_code
;
5645 if (start_new_bblock
== 2) {
5646 g_assert (ip
== tblock
->cil_code
);
5648 GET_BBLOCK (cfg
, tblock
, ip
);
5650 bblock
->next_bb
= tblock
;
5653 start_new_bblock
= 0;
5654 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5655 if (cfg
->verbose_level
> 3)
5656 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5657 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5661 g_slist_free (class_inits
);
5664 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= bblock
)) {
5665 link_bblock (cfg
, bblock
, tblock
);
5666 if (sp
!= stack_start
) {
5667 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
5669 CHECK_UNVERIFIABLE (cfg
);
5671 bblock
->next_bb
= tblock
;
5674 for (i
= 0; i
< bblock
->in_scount
; ++i
) {
5675 if (cfg
->verbose_level
> 3)
5676 printf ("loading %d from temp %d\n", i
, (int)bblock
->in_stack
[i
]->inst_c0
);
5677 EMIT_NEW_TEMPLOAD (cfg
, ins
, bblock
->in_stack
[i
]->inst_c0
);
5680 g_slist_free (class_inits
);
5685 bblock
->real_offset
= cfg
->real_offset
;
5687 if ((cfg
->method
== method
) && cfg
->coverage_info
) {
5688 guint32 cil_offset
= ip
- header
->code
;
5689 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
5691 /* TODO: Use an increment here */
5692 #if defined(TARGET_X86)
5693 MONO_INST_NEW (cfg
, ins
, OP_STORE_MEM_IMM
);
5694 ins
->inst_p0
= &(cfg
->coverage_info
->data
[cil_offset
].count
);
5696 MONO_ADD_INS (cfg
->cbb
, ins
);
5698 EMIT_NEW_PCONST (cfg
, ins
, &(cfg
->coverage_info
->data
[cil_offset
].count
));
5699 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
5703 if (cfg
->verbose_level
> 3)
5704 printf ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
5708 if (cfg
->keep_cil_nops
)
5709 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
5711 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
5713 MONO_ADD_INS (bblock
, ins
);
5716 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
5718 MONO_ADD_INS (bblock
, ins
);
5724 CHECK_STACK_OVF (1);
5725 n
= (*ip
)-CEE_LDARG_0
;
5727 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5735 CHECK_STACK_OVF (1);
5736 n
= (*ip
)-CEE_LDLOC_0
;
5738 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5747 n
= (*ip
)-CEE_STLOC_0
;
5750 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
5752 emit_stloc_ir (cfg
, sp
, header
, n
);
5759 CHECK_STACK_OVF (1);
5762 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
5768 CHECK_STACK_OVF (1);
5771 NEW_ARGLOADA (cfg
, ins
, n
);
5772 MONO_ADD_INS (cfg
->cbb
, ins
);
5782 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[ip
[1]], *sp
))
5784 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5789 CHECK_STACK_OVF (1);
5792 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
5796 case CEE_LDLOCA_S
: {
5797 unsigned char *tmp_ip
;
5799 CHECK_STACK_OVF (1);
5800 CHECK_LOCAL (ip
[1]);
5802 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 1))) {
5808 EMIT_NEW_LOCLOADA (cfg
, ins
, ip
[1]);
5817 CHECK_LOCAL (ip
[1]);
5818 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[ip
[1]], *sp
))
5820 emit_stloc_ir (cfg
, sp
, header
, ip
[1]);
5825 CHECK_STACK_OVF (1);
5826 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
5827 ins
->type
= STACK_OBJ
;
5832 CHECK_STACK_OVF (1);
5833 EMIT_NEW_ICONST (cfg
, ins
, -1);
5846 CHECK_STACK_OVF (1);
5847 EMIT_NEW_ICONST (cfg
, ins
, (*ip
) - CEE_LDC_I4_0
);
5853 CHECK_STACK_OVF (1);
5855 EMIT_NEW_ICONST (cfg
, ins
, *((signed char*)ip
));
5861 CHECK_STACK_OVF (1);
5862 EMIT_NEW_ICONST (cfg
, ins
, (gint32
)read32 (ip
+ 1));
5868 CHECK_STACK_OVF (1);
5869 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
5870 ins
->type
= STACK_I8
;
5871 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
5873 ins
->inst_l
= (gint64
)read64 (ip
);
5874 MONO_ADD_INS (bblock
, ins
);
5880 gboolean use_aotconst
= FALSE
;
5882 #ifdef TARGET_POWERPC
5883 /* FIXME: Clean this up */
5884 if (cfg
->compile_aot
)
5885 use_aotconst
= TRUE
;
5888 /* FIXME: we should really allocate this only late in the compilation process */
5889 f
= mono_domain_alloc (cfg
->domain
, sizeof (float));
5891 CHECK_STACK_OVF (1);
5897 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
5899 dreg
= alloc_freg (cfg
);
5900 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
5901 ins
->type
= STACK_R8
;
5903 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
5904 ins
->type
= STACK_R8
;
5905 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5907 MONO_ADD_INS (bblock
, ins
);
5917 gboolean use_aotconst
= FALSE
;
5919 #ifdef TARGET_POWERPC
5920 /* FIXME: Clean this up */
5921 if (cfg
->compile_aot
)
5922 use_aotconst
= TRUE
;
5925 /* FIXME: we should really allocate this only late in the compilation process */
5926 d
= mono_domain_alloc (cfg
->domain
, sizeof (double));
5928 CHECK_STACK_OVF (1);
5934 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
5936 dreg
= alloc_freg (cfg
);
5937 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
5938 ins
->type
= STACK_R8
;
5940 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
5941 ins
->type
= STACK_R8
;
5942 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
5944 MONO_ADD_INS (bblock
, ins
);
5953 MonoInst
*temp
, *store
;
5955 CHECK_STACK_OVF (1);
5959 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
5960 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
5962 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5965 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
5978 if (sp
[0]->type
== STACK_R8
)
5979 /* we need to pop the value from the x86 FP stack */
5980 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
5989 if (stack_start
!= sp
)
5991 token
= read32 (ip
+ 1);
5992 /* FIXME: check the signature matches */
5993 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
5998 if (cfg
->generic_sharing_context
&& mono_method_check_context_used (cmethod
))
5999 GENERIC_SHARING_FAILURE (CEE_JMP
);
6001 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
)
6002 CHECK_CFG_EXCEPTION
;
6004 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6006 MonoMethodSignature
*fsig
= mono_method_signature (cmethod
);
6009 /* Handle tail calls similarly to calls */
6010 n
= fsig
->param_count
+ fsig
->hasthis
;
6012 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6013 call
->method
= cmethod
;
6014 call
->tail_call
= TRUE
;
6015 call
->signature
= mono_method_signature (cmethod
);
6016 call
->args
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6017 call
->inst
.inst_p0
= cmethod
;
6018 for (i
= 0; i
< n
; ++i
)
6019 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6021 mono_arch_emit_call (cfg
, call
);
6022 MONO_ADD_INS (bblock
, (MonoInst
*)call
);
6025 for (i
= 0; i
< num_args
; ++i
)
6026 /* Prevent arguments from being optimized away */
6027 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6029 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6030 ins
= (MonoInst
*)call
;
6031 ins
->inst_p0
= cmethod
;
6032 MONO_ADD_INS (bblock
, ins
);
6036 start_new_bblock
= 1;
6041 case CEE_CALLVIRT
: {
6042 MonoInst
*addr
= NULL
;
6043 MonoMethodSignature
*fsig
= NULL
;
6045 int virtual = *ip
== CEE_CALLVIRT
;
6046 int calli
= *ip
== CEE_CALLI
;
6047 gboolean pass_imt_from_rgctx
= FALSE
;
6048 MonoInst
*imt_arg
= NULL
;
6049 gboolean pass_vtable
= FALSE
;
6050 gboolean pass_mrgctx
= FALSE
;
6051 MonoInst
*vtable_arg
= NULL
;
6052 gboolean check_this
= FALSE
;
6053 gboolean supported_tail_call
= FALSE
;
6056 token
= read32 (ip
+ 1);
6063 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
)
6064 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
6066 fsig
= mono_metadata_parse_signature (image
, token
);
6068 n
= fsig
->param_count
+ fsig
->hasthis
;
6070 MonoMethod
*cil_method
;
6072 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
6073 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
6074 cil_method
= cmethod
;
6075 } else if (constrained_call
) {
6076 if ((constrained_call
->byval_arg
.type
== MONO_TYPE_VAR
|| constrained_call
->byval_arg
.type
== MONO_TYPE_MVAR
) && cfg
->generic_sharing_context
) {
6078 * This is needed since get_method_constrained can't find
6079 * the method in klass representing a type var.
6080 * The type var is guaranteed to be a reference type in this
6083 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6084 cil_method
= cmethod
;
6085 g_assert (!cmethod
->klass
->valuetype
);
6087 cmethod
= mono_get_method_constrained (image
, token
, constrained_call
, generic_context
, &cil_method
);
6090 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6091 cil_method
= cmethod
;
6096 if (!dont_verify
&& !cfg
->skip_visibility
) {
6097 MonoMethod
*target_method
= cil_method
;
6098 if (method
->is_inflated
) {
6099 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
));
6101 if (!mono_method_can_access_method (method_definition
, target_method
) &&
6102 !mono_method_can_access_method (method
, cil_method
))
6103 METHOD_ACCESS_FAILURE
;
6106 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
6107 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
, bblock
, ip
);
6109 if (!virtual && (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
))
6110 /* MS.NET seems to silently convert this to a callvirt */
6113 if (!cmethod
->klass
->inited
)
6114 if (!mono_class_init (cmethod
->klass
))
6117 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
6118 mini_class_is_system_array (cmethod
->klass
)) {
6119 array_rank
= cmethod
->klass
->rank
;
6120 fsig
= mono_method_signature (cmethod
);
6122 if (mono_method_signature (cmethod
)->pinvoke
) {
6123 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
,
6124 check_for_pending_exc
, FALSE
);
6125 fsig
= mono_method_signature (wrapper
);
6126 } else if (constrained_call
) {
6127 fsig
= mono_method_signature (cmethod
);
6129 fsig
= mono_method_get_signature_full (cmethod
, image
, token
, generic_context
);
6133 mono_save_token_info (cfg
, image
, token
, cil_method
);
6135 n
= fsig
->param_count
+ fsig
->hasthis
;
6137 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
6138 if (check_linkdemand (cfg
, method
, cmethod
))
6140 CHECK_CFG_EXCEPTION
;
6143 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
6144 g_assert_not_reached ();
6147 if (!cfg
->generic_sharing_context
&& cmethod
&& cmethod
->klass
->generic_container
)
6150 if (!cfg
->generic_sharing_context
&& cmethod
)
6151 g_assert (!mono_method_check_context_used (cmethod
));
6155 //g_assert (!virtual || fsig->hasthis);
6159 if (constrained_call
) {
6161 * We have the `constrained.' prefix opcode.
6163 if (constrained_call
->valuetype
&& !cmethod
->klass
->valuetype
) {
6167 * The type parameter is instantiated as a valuetype,
6168 * but that type doesn't override the method we're
6169 * calling, so we need to box `this'.
6171 dreg
= alloc_dreg (cfg
, STACK_VTYPE
);
6172 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADV_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6173 ins
->klass
= constrained_call
;
6174 sp
[0] = handle_box (cfg
, ins
, constrained_call
);
6175 } else if (!constrained_call
->valuetype
) {
6176 int dreg
= alloc_preg (cfg
);
6179 * The type parameter is instantiated as a reference
6180 * type. We have a managed pointer on the stack, so
6181 * we need to dereference it here.
6183 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
6184 ins
->type
= STACK_OBJ
;
6186 } else if (cmethod
->klass
->valuetype
)
6188 constrained_call
= NULL
;
6191 if (*ip
!= CEE_CALLI
&& check_call_signature (cfg
, fsig
, sp
))
6195 if (cmethod
&& ((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || cmethod
->klass
->valuetype
) &&
6196 (cmethod
->klass
->generic_class
|| cmethod
->klass
->generic_container
)) {
6197 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6198 MonoGenericContext
*context
= mini_class_get_context (cmethod
->klass
);
6199 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6202 * Pass vtable iff target method might
6203 * be shared, which means that sharing
6204 * is enabled for its class and its
6205 * context is sharable (and it's not a
6208 if (sharing_enabled
&& context_sharable
&&
6209 !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
6213 if (cmethod
&& mini_method_get_context (cmethod
) &&
6214 mini_method_get_context (cmethod
)->method_inst
) {
6215 gboolean sharing_enabled
= mono_class_generic_sharing_enabled (cmethod
->klass
);
6216 MonoGenericContext
*context
= mini_method_get_context (cmethod
);
6217 gboolean context_sharable
= mono_generic_context_is_sharable (context
, TRUE
);
6219 g_assert (!pass_vtable
);
6221 if (sharing_enabled
&& context_sharable
)
6225 if (cfg
->generic_sharing_context
&& cmethod
) {
6226 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
6228 context_used
= mono_method_check_context_used (cmethod
);
6230 if (context_used
&& (cmethod
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
)) {
6231 /* Generic method interface
6232 calls are resolved via a
6233 helper function and don't
6235 if (!cmethod_context
|| !cmethod_context
->method_inst
)
6236 pass_imt_from_rgctx
= TRUE
;
6240 * If a shared method calls another
6241 * shared method then the caller must
6242 * have a generic sharing context
6243 * because the magic trampoline
6244 * requires it. FIXME: We shouldn't
6245 * have to force the vtable/mrgctx
6246 * variable here. Instead there
6247 * should be a flag in the cfg to
6248 * request a generic sharing context.
6251 ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) || method
->klass
->valuetype
))
6252 mono_get_vtable_var (cfg
);
6257 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
6259 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
6261 CHECK_TYPELOAD (cmethod
->klass
);
6262 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
6267 g_assert (!vtable_arg
);
6270 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
6272 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
6275 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
6276 MONO_METHOD_IS_FINAL (cmethod
)) {
6283 if (pass_imt_from_rgctx
) {
6284 g_assert (!pass_vtable
);
6287 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6288 cmethod
, MONO_RGCTX_INFO_METHOD
);
6294 MONO_INST_NEW (cfg
, check
, OP_CHECK_THIS
);
6295 check
->sreg1
= sp
[0]->dreg
;
6296 MONO_ADD_INS (cfg
->cbb
, check
);
6299 /* Calling virtual generic methods */
6300 if (cmethod
&& virtual &&
6301 (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
6302 !(MONO_METHOD_IS_FINAL (cmethod
) &&
6303 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
6304 mono_method_signature (cmethod
)->generic_param_count
) {
6305 MonoInst
*this_temp
, *this_arg_temp
, *store
;
6306 MonoInst
*iargs
[4];
6308 g_assert (mono_method_signature (cmethod
)->is_inflated
);
6310 /* Prevent inlining of methods that contain indirect calls */
6313 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6314 if (cmethod
->wrapper_type
== MONO_WRAPPER_NONE
&& mono_use_imt
) {
6315 g_assert (!imt_arg
);
6317 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
6318 cmethod
, MONO_RGCTX_INFO_METHOD
);
6321 g_assert (cmethod
->is_inflated
);
6322 EMIT_NEW_METHODCONST (cfg
, imt_arg
, cmethod
);
6324 ins
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, sp
[0], imt_arg
);
6328 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
6329 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
6330 MONO_ADD_INS (bblock
, store
);
6332 /* FIXME: This should be a managed pointer */
6333 this_arg_temp
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
6335 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
6337 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
,
6338 cmethod
, MONO_RGCTX_INFO_METHOD
);
6339 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6340 addr
= mono_emit_jit_icall (cfg
,
6341 mono_helper_compile_generic_method
, iargs
);
6343 EMIT_NEW_METHODCONST (cfg
, iargs
[1], cmethod
);
6344 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
6345 addr
= mono_emit_jit_icall (cfg
, mono_helper_compile_generic_method
, iargs
);
6348 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
6350 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6353 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6354 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6361 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6362 supported_tail_call
= cmethod
&& MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method
), mono_method_signature (cmethod
));
6364 supported_tail_call
= cmethod
&& mono_metadata_signature_equal (mono_method_signature (method
), mono_method_signature (cmethod
)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->ret
);
6368 /* FIXME: runtime generic context pointer for jumps? */
6369 /* FIXME: handle this for generic sharing eventually */
6370 if ((ins_flag
& MONO_INST_TAILCALL
) && !cfg
->generic_sharing_context
&& !vtable_arg
&& cmethod
&& (*ip
== CEE_CALL
) && supported_tail_call
) {
6373 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6376 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6377 /* Handle tail calls similarly to calls */
6378 call
= mono_emit_call_args (cfg
, mono_method_signature (cmethod
), sp
, FALSE
, FALSE
, TRUE
);
6380 MONO_INST_NEW_CALL (cfg
, call
, OP_JMP
);
6381 call
->tail_call
= TRUE
;
6382 call
->method
= cmethod
;
6383 call
->signature
= mono_method_signature (cmethod
);
6386 * We implement tail calls by storing the actual arguments into the
6387 * argument variables, then emitting a CEE_JMP.
6389 for (i
= 0; i
< n
; ++i
) {
6390 /* Prevent argument from being register allocated */
6391 arg_array
[i
]->flags
|= MONO_INST_VOLATILE
;
6392 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6396 ins
= (MonoInst
*)call
;
6397 ins
->inst_p0
= cmethod
;
6398 ins
->inst_p1
= arg_array
[0];
6399 MONO_ADD_INS (bblock
, ins
);
6400 link_bblock (cfg
, bblock
, end_bblock
);
6401 start_new_bblock
= 1;
6402 /* skip CEE_RET as well */
6408 /* Conversion to a JIT intrinsic */
6409 if (cmethod
&& (cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
6410 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
6411 type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
6422 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&&
6423 (!virtual || !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
6424 mono_method_check_inlining (cfg
, cmethod
) &&
6425 !g_list_find (dont_inline
, cmethod
)) {
6427 gboolean allways
= FALSE
;
6429 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
6430 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
6431 /* Prevent inlining of methods that call wrappers */
6433 cmethod
= mono_marshal_get_native_wrapper (cmethod
, check_for_pending_exc
, FALSE
);
6437 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, allways
))) {
6439 cfg
->real_offset
+= 5;
6442 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6443 /* *sp is already set by inline_method */
6446 inline_costs
+= costs
;
6452 inline_costs
+= 10 * num_calls
++;
6454 /* Tail recursion elimination */
6455 if ((cfg
->opt
& MONO_OPT_TAILC
) && *ip
== CEE_CALL
&& cmethod
== method
&& ip
[5] == CEE_RET
&& !vtable_arg
) {
6456 gboolean has_vtargs
= FALSE
;
6459 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6462 /* keep it simple */
6463 for (i
= fsig
->param_count
- 1; i
>= 0; i
--) {
6464 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod
)->params
[i
]))
6469 for (i
= 0; i
< n
; ++i
)
6470 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
6471 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6472 MONO_ADD_INS (bblock
, ins
);
6473 tblock
= start_bblock
->out_bb
[0];
6474 link_bblock (cfg
, bblock
, tblock
);
6475 ins
->inst_target_bb
= tblock
;
6476 start_new_bblock
= 1;
6478 /* skip the CEE_RET, too */
6479 if (ip_in_bb (cfg
, bblock
, ip
+ 5))
6489 /* Generic sharing */
6490 /* FIXME: only do this for generic methods if
6491 they are not shared! */
6492 if (context_used
&& !imt_arg
&& !array_rank
&&
6493 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
6494 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
6495 (!virtual || MONO_METHOD_IS_FINAL (cmethod
) ||
6496 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
6499 g_assert (cfg
->generic_sharing_context
&& cmethod
);
6503 * We are compiling a call to a
6504 * generic method from shared code,
6505 * which means that we have to look up
6506 * the method in the rgctx and do an
6509 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
6512 /* Indirect calls */
6514 g_assert (!imt_arg
);
6516 if (*ip
== CEE_CALL
)
6517 g_assert (context_used
);
6518 else if (*ip
== CEE_CALLI
)
6519 g_assert (!vtable_arg
);
6521 /* FIXME: what the hell is this??? */
6522 g_assert (cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
||
6523 !(cmethod
->flags
& METHOD_ATTRIBUTE_FINAL
));
6525 /* Prevent inlining of methods with indirect calls */
6529 #ifdef MONO_ARCH_RGCTX_REG
6531 int rgctx_reg
= mono_alloc_preg (cfg
);
6533 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, rgctx_reg
, vtable_arg
->dreg
);
6534 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6535 call
= (MonoCallInst
*)ins
;
6536 mono_call_inst_add_outarg_reg (cfg
, call
, rgctx_reg
, MONO_ARCH_RGCTX_REG
, FALSE
);
6537 cfg
->uses_rgctx_reg
= TRUE
;
6538 call
->rgctx_reg
= TRUE
;
6543 if (addr
->opcode
== OP_AOTCONST
&& addr
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6545 * Instead of emitting an indirect call, emit a direct call
6546 * with the contents of the aotconst as the patch info.
6548 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_p0
, fsig
, sp
);
6550 } else if (addr
->opcode
== OP_GOT_ENTRY
&& addr
->inst_right
->inst_c1
== MONO_PATCH_INFO_ICALL_ADDR
) {
6551 ins
= (MonoInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR
, addr
->inst_right
->inst_left
, fsig
, sp
);
6554 ins
= (MonoInst
*)mono_emit_calli (cfg
, fsig
, sp
, addr
);
6557 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6558 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6569 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
6570 if (sp
[fsig
->param_count
]->type
== STACK_OBJ
) {
6571 MonoInst
*iargs
[2];
6574 iargs
[1] = sp
[fsig
->param_count
];
6576 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
6579 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
6580 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, sp
[fsig
->param_count
]->dreg
);
6581 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
6582 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6584 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
6587 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
6588 if (!cmethod
->klass
->element_class
->valuetype
&& !readonly
)
6589 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
6592 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
6595 g_assert_not_reached ();
6603 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
);
6605 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6606 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6616 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
,
6618 } else if (imt_arg
) {
6619 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, imt_arg
);
6621 ins
= (MonoInst
*)mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, virtual ? sp
[0] : NULL
, NULL
);
6624 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
6625 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
6632 if (cfg
->method
!= method
) {
6633 /* return from inlined method */
6635 * If in_count == 0, that means the ret is unreachable due to
6636 * being preceeded by a throw. In that case, inline_method () will
6637 * handle setting the return value
6638 * (test case: test_0_inline_throw ()).
6640 if (return_var
&& cfg
->cbb
->in_count
) {
6644 //g_assert (returnvar != -1);
6645 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
6646 cfg
->ret_var_set
= TRUE
;
6650 MonoType
*ret_type
= mono_method_signature (method
)->ret
;
6652 g_assert (!return_var
);
6655 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
6658 if (!cfg
->vret_addr
) {
6661 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, (*sp
));
6663 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
6665 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, (*sp
)->dreg
);
6666 ins
->klass
= mono_class_from_mono_type (ret_type
);
6669 #ifdef MONO_ARCH_SOFT_FLOAT
6670 if (!ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
6671 MonoInst
*iargs
[1];
6675 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
6676 mono_arch_emit_setret (cfg
, method
, conv
);
6678 mono_arch_emit_setret (cfg
, method
, *sp
);
6681 mono_arch_emit_setret (cfg
, method
, *sp
);
6686 if (sp
!= stack_start
)
6688 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6690 ins
->inst_target_bb
= end_bblock
;
6691 MONO_ADD_INS (bblock
, ins
);
6692 link_bblock (cfg
, bblock
, end_bblock
);
6693 start_new_bblock
= 1;
6697 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6699 target
= ip
+ 1 + (signed char)(*ip
);
6701 GET_BBLOCK (cfg
, tblock
, target
);
6702 link_bblock (cfg
, bblock
, tblock
);
6703 ins
->inst_target_bb
= tblock
;
6704 if (sp
!= stack_start
) {
6705 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6707 CHECK_UNVERIFIABLE (cfg
);
6709 MONO_ADD_INS (bblock
, ins
);
6710 start_new_bblock
= 1;
6711 inline_costs
+= BRANCH_COST
;
6725 MONO_INST_NEW (cfg
, ins
, *ip
+ BIG_BRANCH_OFFSET
);
6727 target
= ip
+ 1 + *(signed char*)ip
;
6733 inline_costs
+= BRANCH_COST
;
6737 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6740 target
= ip
+ 4 + (gint32
)read32(ip
);
6742 GET_BBLOCK (cfg
, tblock
, target
);
6743 link_bblock (cfg
, bblock
, tblock
);
6744 ins
->inst_target_bb
= tblock
;
6745 if (sp
!= stack_start
) {
6746 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6748 CHECK_UNVERIFIABLE (cfg
);
6751 MONO_ADD_INS (bblock
, ins
);
6753 start_new_bblock
= 1;
6754 inline_costs
+= BRANCH_COST
;
6761 gboolean is_short
= ((*ip
) == CEE_BRFALSE_S
) || ((*ip
) == CEE_BRTRUE_S
);
6762 gboolean is_true
= ((*ip
) == CEE_BRTRUE_S
) || ((*ip
) == CEE_BRTRUE
);
6763 guint32 opsize
= is_short
? 1 : 4;
6765 CHECK_OPSIZE (opsize
);
6767 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
6770 target
= ip
+ opsize
+ (is_short
? *(signed char*)ip
: (gint32
)read32(ip
));
6775 GET_BBLOCK (cfg
, tblock
, target
);
6776 link_bblock (cfg
, bblock
, tblock
);
6777 GET_BBLOCK (cfg
, tblock
, ip
);
6778 link_bblock (cfg
, bblock
, tblock
);
6780 if (sp
!= stack_start
) {
6781 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6782 CHECK_UNVERIFIABLE (cfg
);
6785 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
6786 cmp
->sreg1
= sp
[0]->dreg
;
6787 type_from_op (cmp
, sp
[0], NULL
);
6790 #if SIZEOF_REGISTER == 4
6791 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
6792 /* Convert it to OP_LCOMPARE */
6793 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6794 ins
->type
= STACK_I8
;
6795 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6797 MONO_ADD_INS (bblock
, ins
);
6798 cmp
->opcode
= OP_LCOMPARE
;
6799 cmp
->sreg2
= ins
->dreg
;
6802 MONO_ADD_INS (bblock
, cmp
);
6804 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
6805 type_from_op (ins
, sp
[0], NULL
);
6806 MONO_ADD_INS (bblock
, ins
);
6807 ins
->inst_many_bb
= mono_mempool_alloc (cfg
->mempool
, sizeof(gpointer
)*2);
6808 GET_BBLOCK (cfg
, tblock
, target
);
6809 ins
->inst_true_bb
= tblock
;
6810 GET_BBLOCK (cfg
, tblock
, ip
);
6811 ins
->inst_false_bb
= tblock
;
6812 start_new_bblock
= 2;
6815 inline_costs
+= BRANCH_COST
;
6830 MONO_INST_NEW (cfg
, ins
, *ip
);
6832 target
= ip
+ 4 + (gint32
)read32(ip
);
6838 inline_costs
+= BRANCH_COST
;
6842 MonoBasicBlock
**targets
;
6843 MonoBasicBlock
*default_bblock
;
6844 MonoJumpInfoBBTable
*table
;
6845 int offset_reg
= alloc_preg (cfg
);
6846 int target_reg
= alloc_preg (cfg
);
6847 int table_reg
= alloc_preg (cfg
);
6848 int sum_reg
= alloc_preg (cfg
);
6849 gboolean use_op_switch
;
6853 n
= read32 (ip
+ 1);
6856 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
6860 CHECK_OPSIZE (n
* sizeof (guint32
));
6861 target
= ip
+ n
* sizeof (guint32
);
6863 GET_BBLOCK (cfg
, default_bblock
, target
);
6865 targets
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
6866 for (i
= 0; i
< n
; ++i
) {
6867 GET_BBLOCK (cfg
, tblock
, target
+ (gint32
)read32(ip
));
6868 targets
[i
] = tblock
;
6872 if (sp
!= stack_start
) {
6874 * Link the current bb with the targets as well, so handle_stack_args
6875 * will set their in_stack correctly.
6877 link_bblock (cfg
, bblock
, default_bblock
);
6878 for (i
= 0; i
< n
; ++i
)
6879 link_bblock (cfg
, bblock
, targets
[i
]);
6881 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6883 CHECK_UNVERIFIABLE (cfg
);
6886 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
6887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
6890 for (i
= 0; i
< n
; ++i
)
6891 link_bblock (cfg
, bblock
, targets
[i
]);
6893 table
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
6894 table
->table
= targets
;
6895 table
->table_size
= n
;
6897 use_op_switch
= FALSE
;
6899 /* ARM implements SWITCH statements differently */
6900 /* FIXME: Make it use the generic implementation */
6901 if (!cfg
->compile_aot
)
6902 use_op_switch
= TRUE
;
6905 if (COMPILE_LLVM (cfg
))
6906 use_op_switch
= TRUE
;
6908 cfg
->cbb
->has_jump_table
= 1;
6910 if (use_op_switch
) {
6911 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
6912 ins
->sreg1
= src1
->dreg
;
6913 ins
->inst_p0
= table
;
6914 ins
->inst_many_bb
= targets
;
6915 ins
->klass
= GUINT_TO_POINTER (n
);
6916 MONO_ADD_INS (cfg
->cbb
, ins
);
6918 if (sizeof (gpointer
) == 8)
6919 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
6921 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
6923 #if SIZEOF_REGISTER == 8
6924 /* The upper word might not be zero, and we add it to a 64 bit address later */
6925 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
6928 if (cfg
->compile_aot
) {
6929 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
6931 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
6932 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
6933 ins
->inst_p0
= table
;
6934 ins
->dreg
= table_reg
;
6935 MONO_ADD_INS (cfg
->cbb
, ins
);
6938 /* FIXME: Use load_memindex */
6939 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
6940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
6941 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
6943 start_new_bblock
= 1;
6944 inline_costs
+= (BRANCH_COST
* 2);
6964 dreg
= alloc_freg (cfg
);
6967 dreg
= alloc_lreg (cfg
);
6970 dreg
= alloc_preg (cfg
);
6973 NEW_LOAD_MEMBASE (cfg
, ins
, ldind_to_load_membase (*ip
), dreg
, sp
[0]->dreg
, 0);
6974 ins
->type
= ldind_type
[*ip
- CEE_LDIND_I1
];
6975 ins
->flags
|= ins_flag
;
6977 MONO_ADD_INS (bblock
, ins
);
6992 #if HAVE_WRITE_BARRIERS
6993 if (*ip
== CEE_STIND_REF
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !((sp
[1]->opcode
== OP_PCONST
) && (sp
[1]->inst_p0
== 0))) {
6994 /* insert call to write barrier */
6995 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
6996 mono_emit_method_call (cfg
, write_barrier
, sp
, NULL
);
7003 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (*ip
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
7004 ins
->flags
|= ins_flag
;
7006 MONO_ADD_INS (bblock
, ins
);
7014 MONO_INST_NEW (cfg
, ins
, (*ip
));
7016 ins
->sreg1
= sp
[0]->dreg
;
7017 ins
->sreg2
= sp
[1]->dreg
;
7018 type_from_op (ins
, sp
[0], sp
[1]);
7020 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7022 /* Use the immediate opcodes if possible */
7023 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
7024 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
7025 if (imm_opcode
!= -1) {
7026 ins
->opcode
= imm_opcode
;
7027 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7030 sp
[1]->opcode
= OP_NOP
;
7034 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7036 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7053 MONO_INST_NEW (cfg
, ins
, (*ip
));
7055 ins
->sreg1
= sp
[0]->dreg
;
7056 ins
->sreg2
= sp
[1]->dreg
;
7057 type_from_op (ins
, sp
[0], sp
[1]);
7059 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
7060 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7062 /* FIXME: Pass opcode to is_inst_imm */
7064 /* Use the immediate opcodes if possible */
7065 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
7068 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
7069 if (imm_opcode
!= -1) {
7070 ins
->opcode
= imm_opcode
;
7071 if (sp
[1]->opcode
== OP_I8CONST
) {
7072 #if SIZEOF_REGISTER == 8
7073 ins
->inst_imm
= sp
[1]->inst_l
;
7075 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
7076 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
7080 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7083 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7084 if (sp
[1]->next
== NULL
)
7085 sp
[1]->opcode
= OP_NOP
;
7088 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7090 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7103 case CEE_CONV_OVF_I8
:
7104 case CEE_CONV_OVF_U8
:
7108 /* Special case this earlier so we have long constants in the IR */
7109 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
7110 int data
= sp
[-1]->inst_c0
;
7111 sp
[-1]->opcode
= OP_I8CONST
;
7112 sp
[-1]->type
= STACK_I8
;
7113 #if SIZEOF_REGISTER == 8
7114 if ((*ip
) == CEE_CONV_U8
)
7115 sp
[-1]->inst_c0
= (guint32
)data
;
7117 sp
[-1]->inst_c0
= data
;
7119 sp
[-1]->inst_ls_word
= data
;
7120 if ((*ip
) == CEE_CONV_U8
)
7121 sp
[-1]->inst_ms_word
= 0;
7123 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7125 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7132 case CEE_CONV_OVF_I4
:
7133 case CEE_CONV_OVF_I1
:
7134 case CEE_CONV_OVF_I2
:
7135 case CEE_CONV_OVF_I
:
7136 case CEE_CONV_OVF_U
:
7139 if (sp
[-1]->type
== STACK_R8
) {
7140 ADD_UNOP (CEE_CONV_OVF_I8
);
7147 case CEE_CONV_OVF_U1
:
7148 case CEE_CONV_OVF_U2
:
7149 case CEE_CONV_OVF_U4
:
7152 if (sp
[-1]->type
== STACK_R8
) {
7153 ADD_UNOP (CEE_CONV_OVF_U8
);
7160 case CEE_CONV_OVF_I1_UN
:
7161 case CEE_CONV_OVF_I2_UN
:
7162 case CEE_CONV_OVF_I4_UN
:
7163 case CEE_CONV_OVF_I8_UN
:
7164 case CEE_CONV_OVF_U1_UN
:
7165 case CEE_CONV_OVF_U2_UN
:
7166 case CEE_CONV_OVF_U4_UN
:
7167 case CEE_CONV_OVF_U8_UN
:
7168 case CEE_CONV_OVF_I_UN
:
7169 case CEE_CONV_OVF_U_UN
:
7179 case CEE_ADD_OVF_UN
:
7181 case CEE_MUL_OVF_UN
:
7183 case CEE_SUB_OVF_UN
:
7191 token
= read32 (ip
+ 1);
7192 klass
= mini_get_class (method
, token
, generic_context
);
7193 CHECK_TYPELOAD (klass
);
7195 if (generic_class_is_reference_type (cfg
, klass
)) {
7196 MonoInst
*store
, *load
;
7197 int dreg
= alloc_preg (cfg
);
7199 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7200 load
->flags
|= ins_flag
;
7201 MONO_ADD_INS (cfg
->cbb
, load
);
7203 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7204 store
->flags
|= ins_flag
;
7205 MONO_ADD_INS (cfg
->cbb
, store
);
7207 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7219 token
= read32 (ip
+ 1);
7220 klass
= mini_get_class (method
, token
, generic_context
);
7221 CHECK_TYPELOAD (klass
);
7223 /* Optimize the common ldobj+stloc combination */
7233 loc_index
= ip
[5] - CEE_STLOC_0
;
7240 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7241 CHECK_LOCAL (loc_index
);
7243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7244 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7250 /* Optimize the ldobj+stobj combination */
7251 /* The reference case ends up being a load+store anyway */
7252 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7257 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7264 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7273 CHECK_STACK_OVF (1);
7275 n
= read32 (ip
+ 1);
7277 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
7278 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
7279 ins
->type
= STACK_OBJ
;
7282 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7283 MonoInst
*iargs
[1];
7285 EMIT_NEW_PCONST (cfg
, iargs
[0], mono_method_get_wrapper_data (method
, n
));
7286 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper
, iargs
);
7288 if (cfg
->opt
& MONO_OPT_SHARED
) {
7289 MonoInst
*iargs
[3];
7291 if (cfg
->compile_aot
) {
7292 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
7294 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
7295 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
7296 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
7297 *sp
= mono_emit_jit_icall (cfg
, mono_ldstr
, iargs
);
7298 mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7300 if (bblock
->out_of_line
) {
7301 MonoInst
*iargs
[2];
7303 if (image
== mono_defaults
.corlib
) {
7305 * Avoid relocations in AOT and save some space by using a
7306 * version of helper_ldstr specialized to mscorlib.
7308 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
7309 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
7311 /* Avoid creating the string object */
7312 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
7313 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
7314 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
7318 if (cfg
->compile_aot
) {
7319 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
7321 MONO_ADD_INS (bblock
, ins
);
7324 NEW_PCONST (cfg
, ins
, NULL
);
7325 ins
->type
= STACK_OBJ
;
7326 ins
->inst_p0
= mono_ldstr (cfg
->domain
, image
, mono_metadata_token_index (n
));
7328 MONO_ADD_INS (bblock
, ins
);
7337 MonoInst
*iargs
[2];
7338 MonoMethodSignature
*fsig
;
7341 MonoInst
*vtable_arg
= NULL
;
7344 token
= read32 (ip
+ 1);
7345 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7348 fsig
= mono_method_get_signature (cmethod
, image
, token
);
7350 mono_save_token_info (cfg
, image
, token
, cmethod
);
7352 if (!mono_class_init (cmethod
->klass
))
7355 if (cfg
->generic_sharing_context
)
7356 context_used
= mono_method_check_context_used (cmethod
);
7358 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
7359 if (check_linkdemand (cfg
, method
, cmethod
))
7361 CHECK_CFG_EXCEPTION
;
7362 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
7363 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
7366 if (cmethod
->klass
->valuetype
&& mono_class_generic_sharing_enabled (cmethod
->klass
) &&
7367 mono_method_is_generic_sharable_impl (cmethod
, TRUE
)) {
7368 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
7370 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
7371 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7373 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, vtable_arg
, cmethod
);
7377 vtable_arg
= emit_get_rgctx_klass (cfg
, context_used
,
7378 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7380 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7382 CHECK_TYPELOAD (cmethod
->klass
);
7383 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7388 n
= fsig
->param_count
;
7392 * Generate smaller code for the common newobj <exception> instruction in
7393 * argument checking code.
7395 if (bblock
->out_of_line
&& cmethod
->klass
->image
== mono_defaults
.corlib
&&
7396 is_exception_class (cmethod
->klass
) && n
<= 2 &&
7397 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
7398 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
7399 MonoInst
*iargs
[3];
7401 g_assert (!vtable_arg
);
7405 EMIT_NEW_ICONST (cfg
, iargs
[0], cmethod
->klass
->type_token
);
7408 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
7412 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
7417 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
7420 g_assert_not_reached ();
7428 /* move the args to allow room for 'this' in the first position */
7434 /* check_call_signature () requires sp[0] to be set */
7435 this_ins
.type
= STACK_OBJ
;
7437 if (check_call_signature (cfg
, fsig
, sp
))
7442 if (mini_class_is_system_array (cmethod
->klass
)) {
7443 g_assert (!vtable_arg
);
7446 *sp
= emit_get_rgctx_method (cfg
, context_used
,
7447 cmethod
, MONO_RGCTX_INFO_METHOD
);
7449 EMIT_NEW_METHODCONST (cfg
, *sp
, cmethod
);
7452 /* Avoid varargs in the common case */
7453 if (fsig
->param_count
== 1)
7454 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_1
, sp
);
7455 else if (fsig
->param_count
== 2)
7456 alloc
= mono_emit_jit_icall (cfg
, mono_array_new_2
, sp
);
7458 alloc
= handle_array_new (cfg
, fsig
->param_count
, sp
, ip
);
7459 } else if (cmethod
->string_ctor
) {
7460 g_assert (!context_used
);
7461 g_assert (!vtable_arg
);
7462 /* we simply pass a null pointer */
7463 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
7464 /* now call the string ctor */
7465 alloc
= mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, NULL
, NULL
);
7467 MonoInst
* callvirt_this_arg
= NULL
;
7469 if (cmethod
->klass
->valuetype
) {
7470 iargs
[0] = mono_compile_create_var (cfg
, &cmethod
->klass
->byval_arg
, OP_LOCAL
);
7471 MONO_EMIT_NEW_VZERO (cfg
, iargs
[0]->dreg
, cmethod
->klass
);
7472 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
7477 * The code generated by mini_emit_virtual_call () expects
7478 * iargs [0] to be a boxed instance, but luckily the vcall
7479 * will be transformed into a normal call there.
7481 } else if (context_used
) {
7485 if (cfg
->opt
& MONO_OPT_SHARED
)
7486 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7488 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7489 data
= emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, rgctx_info
);
7491 alloc
= handle_alloc_from_inst (cfg
, cmethod
->klass
, data
, FALSE
);
7494 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, cmethod
->klass
);
7496 CHECK_TYPELOAD (cmethod
->klass
);
7499 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7500 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7501 * As a workaround, we call class cctors before allocating objects.
7503 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
7504 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
7505 if (cfg
->verbose_level
> 2)
7506 printf ("class %s.%s needs init call for ctor\n", cmethod
->klass
->name_space
, cmethod
->klass
->name
);
7507 class_inits
= g_slist_prepend (class_inits
, vtable
);
7510 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
);
7515 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
7517 /* Now call the actual ctor */
7518 /* Avoid virtual calls to ctors if possible */
7519 if (cmethod
->klass
->marshalbyref
)
7520 callvirt_this_arg
= sp
[0];
7522 if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
7523 mono_method_check_inlining (cfg
, cmethod
) &&
7524 !mono_class_is_subclass_of (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
) &&
7525 !g_list_find (dont_inline
, cmethod
)) {
7528 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, dont_inline
, FALSE
))) {
7529 cfg
->real_offset
+= 5;
7532 inline_costs
+= costs
- 5;
7535 mono_emit_method_call_full (cfg
, cmethod
, fsig
, sp
, callvirt_this_arg
, NULL
);
7537 } else if (context_used
&&
7538 (!mono_method_is_generic_sharable_impl (cmethod
, TRUE
) ||
7539 !mono_class_generic_sharing_enabled (cmethod
->klass
))) {
7540 MonoInst
*cmethod_addr
;
7542 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
7543 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7545 mono_emit_rgctx_calli (cfg
, fsig
, sp
, cmethod_addr
, vtable_arg
);
7548 ins
= mono_emit_rgctx_method_call_full (cfg
, cmethod
, fsig
, sp
,
7549 callvirt_this_arg
, NULL
, vtable_arg
);
7550 if (mono_method_is_generic_sharable_impl (cmethod
, TRUE
) && ((MonoCallInst
*)ins
)->method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
)
7551 GENERIC_SHARING_FAILURE (*ip
);
7555 if (alloc
== NULL
) {
7557 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
7558 type_to_eval_stack_type (cfg
, &ins
->klass
->byval_arg
, ins
);
7572 token
= read32 (ip
+ 1);
7573 klass
= mini_get_class (method
, token
, generic_context
);
7574 CHECK_TYPELOAD (klass
);
7575 if (sp
[0]->type
!= STACK_OBJ
)
7578 if (cfg
->generic_sharing_context
)
7579 context_used
= mono_class_check_context_used (klass
);
7588 args
[1] = emit_get_rgctx_klass (cfg
, context_used
,
7589 klass
, MONO_RGCTX_INFO_KLASS
);
7591 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, args
);
7595 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7596 MonoMethod
*mono_castclass
;
7597 MonoInst
*iargs
[1];
7600 mono_castclass
= mono_marshal_get_castclass (klass
);
7603 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7604 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7605 g_assert (costs
> 0);
7608 cfg
->real_offset
+= 5;
7613 inline_costs
+= costs
;
7616 ins
= handle_castclass (cfg
, klass
, *sp
);
7626 token
= read32 (ip
+ 1);
7627 klass
= mini_get_class (method
, token
, generic_context
);
7628 CHECK_TYPELOAD (klass
);
7629 if (sp
[0]->type
!= STACK_OBJ
)
7632 if (cfg
->generic_sharing_context
)
7633 context_used
= mono_class_check_context_used (klass
);
7642 args
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7644 *sp
= mono_emit_jit_icall (cfg
, mono_object_isinst
, args
);
7648 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7649 MonoMethod
*mono_isinst
;
7650 MonoInst
*iargs
[1];
7653 mono_isinst
= mono_marshal_get_isinst (klass
);
7656 costs
= inline_method (cfg
, mono_isinst
, mono_method_signature (mono_isinst
),
7657 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7658 g_assert (costs
> 0);
7661 cfg
->real_offset
+= 5;
7666 inline_costs
+= costs
;
7669 ins
= handle_isinst (cfg
, klass
, *sp
);
7676 case CEE_UNBOX_ANY
: {
7680 token
= read32 (ip
+ 1);
7681 klass
= mini_get_class (method
, token
, generic_context
);
7682 CHECK_TYPELOAD (klass
);
7684 mono_save_token_info (cfg
, image
, token
, klass
);
7686 if (cfg
->generic_sharing_context
)
7687 context_used
= mono_class_check_context_used (klass
);
7689 if (generic_class_is_reference_type (cfg
, klass
)) {
7692 MonoInst
*iargs
[2];
7697 iargs
[1] = emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
7698 ins
= mono_emit_jit_icall (cfg
, mono_object_castclass
, iargs
);
7702 } else if (klass
->marshalbyref
|| klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
7703 MonoMethod
*mono_castclass
;
7704 MonoInst
*iargs
[1];
7707 mono_castclass
= mono_marshal_get_castclass (klass
);
7710 costs
= inline_method (cfg
, mono_castclass
, mono_method_signature (mono_castclass
),
7711 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7713 g_assert (costs
> 0);
7716 cfg
->real_offset
+= 5;
7720 inline_costs
+= costs
;
7722 ins
= handle_castclass (cfg
, klass
, *sp
);
7730 if (mono_class_is_nullable (klass
)) {
7731 ins
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7738 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7757 token
= read32 (ip
+ 1);
7758 klass
= mini_get_class (method
, token
, generic_context
);
7759 CHECK_TYPELOAD (klass
);
7761 mono_save_token_info (cfg
, image
, token
, klass
);
7763 if (cfg
->generic_sharing_context
)
7764 context_used
= mono_class_check_context_used (klass
);
7766 if (generic_class_is_reference_type (cfg
, klass
)) {
7772 if (klass
== mono_defaults
.void_class
)
7774 if (target_type_is_incompatible (cfg
, &klass
->byval_arg
, *sp
))
7776 /* frequent check in generic code: box (struct), brtrue */
7777 if (!mono_class_is_nullable (klass
) &&
7778 ip
+ 5 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 5) && (ip
[5] == CEE_BRTRUE
|| ip
[5] == CEE_BRTRUE_S
)) {
7779 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7781 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7782 if (*ip
== CEE_BRTRUE_S
) {
7785 target
= ip
+ 1 + (signed char)(*ip
);
7790 target
= ip
+ 4 + (gint
)(read32 (ip
));
7793 GET_BBLOCK (cfg
, tblock
, target
);
7794 link_bblock (cfg
, bblock
, tblock
);
7795 ins
->inst_target_bb
= tblock
;
7796 GET_BBLOCK (cfg
, tblock
, ip
);
7798 * This leads to some inconsistency, since the two bblocks are
7799 * not really connected, but it is needed for handling stack
7800 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7801 * FIXME: This should only be needed if sp != stack_start, but that
7802 * doesn't work for some reason (test failure in mcs/tests on x86).
7804 link_bblock (cfg
, bblock
, tblock
);
7805 if (sp
!= stack_start
) {
7806 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7808 CHECK_UNVERIFIABLE (cfg
);
7810 MONO_ADD_INS (bblock
, ins
);
7811 start_new_bblock
= 1;
7819 if (cfg
->opt
& MONO_OPT_SHARED
)
7820 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
7822 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
7823 data
= emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
7824 *sp
++ = handle_box_from_inst (cfg
, val
, klass
, context_used
, data
);
7826 *sp
++ = handle_box (cfg
, val
, klass
);
7837 token
= read32 (ip
+ 1);
7838 klass
= mini_get_class (method
, token
, generic_context
);
7839 CHECK_TYPELOAD (klass
);
7841 mono_save_token_info (cfg
, image
, token
, klass
);
7843 if (cfg
->generic_sharing_context
)
7844 context_used
= mono_class_check_context_used (klass
);
7846 if (mono_class_is_nullable (klass
)) {
7849 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
7850 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), &val
->klass
->byval_arg
);
7854 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
7864 MonoClassField
*field
;
7868 if (*ip
== CEE_STFLD
) {
7875 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
7877 if (*ip
!= CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
7880 token
= read32 (ip
+ 1);
7881 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7882 field
= mono_method_get_wrapper_data (method
, token
);
7883 klass
= field
->parent
;
7886 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
7890 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
7891 FIELD_ACCESS_FAILURE
;
7892 mono_class_init (klass
);
7894 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7895 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7896 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7897 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7900 foffset
= klass
->valuetype
? field
->offset
- sizeof (MonoObject
): field
->offset
;
7901 if (*ip
== CEE_STFLD
) {
7902 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
7904 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7905 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
7906 MonoInst
*iargs
[5];
7909 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7910 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7911 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) :
7915 if (cfg
->opt
& MONO_OPT_INLINE
) {
7916 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature (stfld_wrapper
),
7917 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7918 g_assert (costs
> 0);
7920 cfg
->real_offset
+= 5;
7923 inline_costs
+= costs
;
7925 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
7930 #if HAVE_WRITE_BARRIERS
7931 if (mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !(sp
[1]->opcode
== OP_PCONST
&& sp
[1]->inst_c0
== 0)) {
7932 /* insert call to write barrier */
7933 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
7934 MonoInst
*iargs
[2];
7937 dreg
= alloc_preg (cfg
);
7938 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7940 mono_emit_method_call (cfg
, write_barrier
, iargs
, NULL
);
7944 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
7946 store
->flags
|= ins_flag
;
7953 if ((klass
->marshalbyref
&& !MONO_CHECK_THIS (sp
[0])) || klass
->contextbound
|| klass
== mono_defaults
.marshalbyrefobject_class
) {
7954 MonoMethod
*wrapper
= (*ip
== CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
7955 MonoInst
*iargs
[4];
7958 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
7959 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
7960 EMIT_NEW_ICONST (cfg
, iargs
[3], klass
->valuetype
? field
->offset
- sizeof (MonoObject
) : field
->offset
);
7961 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
7962 costs
= inline_method (cfg
, wrapper
, mono_method_signature (wrapper
),
7963 iargs
, ip
, cfg
->real_offset
, dont_inline
, TRUE
);
7965 g_assert (costs
> 0);
7967 cfg
->real_offset
+= 5;
7971 inline_costs
+= costs
;
7973 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
7977 if (sp
[0]->type
== STACK_VTYPE
) {
7980 /* Have to compute the address of the variable */
7982 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
7984 var
= mono_compile_create_var_for_vreg (cfg
, &klass
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
7986 g_assert (var
->klass
== klass
);
7988 EMIT_NEW_VARLOADA (cfg
, ins
, var
, &var
->klass
->byval_arg
);
7992 if (*ip
== CEE_LDFLDA
) {
7993 dreg
= alloc_preg (cfg
);
7995 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
7996 ins
->klass
= mono_class_from_mono_type (field
->type
);
7997 ins
->type
= STACK_MP
;
8002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, sp
[0]->dreg
, foffset
);
8003 load
->flags
|= ins_flag
;
8014 MonoClassField
*field
;
8015 gpointer addr
= NULL
;
8016 gboolean is_special_static
;
8019 token
= read32 (ip
+ 1);
8021 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8022 field
= mono_method_get_wrapper_data (method
, token
);
8023 klass
= field
->parent
;
8026 field
= mono_field_from_token (image
, token
, &klass
, generic_context
);
8029 mono_class_init (klass
);
8030 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
8031 FIELD_ACCESS_FAILURE
;
8033 /* if the class is Critical then transparent code cannot access it's fields */
8034 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
)
8035 ensure_method_is_allowed_to_access_field (cfg
, method
, field
, bblock
, ip
);
8038 * We can only support shared generic static
8039 * field access on architectures where the
8040 * trampoline code has been extended to handle
8041 * the generic class init.
8043 #ifndef MONO_ARCH_VTABLE_REG
8044 GENERIC_SHARING_FAILURE (*ip
);
8047 if (cfg
->generic_sharing_context
)
8048 context_used
= mono_class_check_context_used (klass
);
8050 g_assert (!(field
->type
->attrs
& FIELD_ATTRIBUTE_LITERAL
));
8052 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8053 * to be called here.
8055 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
8056 mono_class_vtable (cfg
->domain
, klass
);
8057 CHECK_TYPELOAD (klass
);
8059 mono_domain_lock (cfg
->domain
);
8060 if (cfg
->domain
->special_static_fields
)
8061 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
8062 mono_domain_unlock (cfg
->domain
);
8064 is_special_static
= mono_class_field_is_special_static (field
);
8066 /* Generate IR to compute the field address */
8068 if ((cfg
->opt
& MONO_OPT_SHARED
) ||
8069 (cfg
->compile_aot
&& is_special_static
) ||
8070 (context_used
&& is_special_static
)) {
8071 MonoInst
*iargs
[2];
8073 g_assert (field
->parent
);
8074 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8076 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
8077 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
8079 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8081 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8082 } else if (context_used
) {
8083 MonoInst
*static_data
;
8086 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8087 method->klass->name_space, method->klass->name, method->name,
8088 depth, field->offset);
8091 if (mono_class_needs_cctor_run (klass
, method
)) {
8095 vtable
= emit_get_rgctx_klass (cfg
, context_used
,
8096 klass
, MONO_RGCTX_INFO_VTABLE
);
8098 // FIXME: This doesn't work since it tries to pass the argument
8099 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8101 * The vtable pointer is always passed in a register regardless of
8102 * the calling convention, so assign it manually, and make a call
8103 * using a signature without parameters.
8105 call
= (MonoCallInst
*)mono_emit_abs_call (cfg
, MONO_PATCH_INFO_GENERIC_CLASS_INIT
, NULL
, helper_sig_generic_class_init_trampoline
, &vtable
);
8106 #ifdef MONO_ARCH_VTABLE_REG
8107 mono_call_inst_add_outarg_reg (cfg
, call
, vtable
->dreg
, MONO_ARCH_VTABLE_REG
, FALSE
);
8108 cfg
->uses_vtable_reg
= TRUE
;
8115 * The pointer we're computing here is
8117 * super_info.static_data + field->offset
8119 static_data
= emit_get_rgctx_klass (cfg
, context_used
,
8120 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
8122 if (field
->offset
== 0) {
8125 int addr_reg
= mono_alloc_preg (cfg
);
8126 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
8128 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
8129 MonoInst
*iargs
[2];
8131 g_assert (field
->parent
);
8132 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8133 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
8134 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
8136 MonoVTable
*vtable
= mono_class_vtable (cfg
->domain
, klass
);
8138 CHECK_TYPELOAD (klass
);
8140 if (mini_field_access_needs_cctor_run (cfg
, method
, vtable
) && !(g_slist_find (class_inits
, vtable
))) {
8141 mono_emit_abs_call (cfg
, MONO_PATCH_INFO_CLASS_INIT
, vtable
->klass
, helper_sig_class_init_trampoline
, NULL
);
8142 if (cfg
->verbose_level
> 2)
8143 printf ("class %s.%s needs init call for %s\n", klass
->name_space
, klass
->name
, mono_field_get_name (field
));
8144 class_inits
= g_slist_prepend (class_inits
, vtable
);
8146 if (cfg
->run_cctors
) {
8148 /* This makes so that inline cannot trigger */
8149 /* .cctors: too many apps depend on them */
8150 /* running with a specific order... */
8151 if (! vtable
->initialized
)
8153 ex
= mono_runtime_class_init_full (vtable
, FALSE
);
8155 set_exception_object (cfg
, ex
);
8156 goto exception_exit
;
8160 addr
= (char*)vtable
->data
+ field
->offset
;
8162 if (cfg
->compile_aot
)
8163 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
8165 EMIT_NEW_PCONST (cfg
, ins
, addr
);
8168 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8169 * This could be later optimized to do just a couple of
8170 * memory dereferences with constant offsets.
8172 MonoInst
*iargs
[1];
8173 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
8174 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
8178 /* Generate IR to do the actual load/store operation */
8180 if (*ip
== CEE_LDSFLDA
) {
8181 ins
->klass
= mono_class_from_mono_type (field
->type
);
8182 ins
->type
= STACK_PTR
;
8184 } else if (*ip
== CEE_STSFLD
) {
8189 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, ins
->dreg
, 0, sp
[0]->dreg
);
8190 store
->flags
|= ins_flag
;
8192 gboolean is_const
= FALSE
;
8193 MonoVTable
*vtable
= NULL
;
8195 if (!context_used
) {
8196 vtable
= mono_class_vtable (cfg
->domain
, klass
);
8197 CHECK_TYPELOAD (klass
);
8199 if (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) &&
8200 vtable
->initialized
&& (field
->type
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
)) {
8201 gpointer addr
= (char*)vtable
->data
+ field
->offset
;
8202 int ro_type
= field
->type
->type
;
8203 if (ro_type
== MONO_TYPE_VALUETYPE
&& field
->type
->data
.klass
->enumtype
) {
8204 ro_type
= mono_class_enum_basetype (field
->type
->data
.klass
)->type
;
8206 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8209 case MONO_TYPE_BOOLEAN
:
8211 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
8215 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
8218 case MONO_TYPE_CHAR
:
8220 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
8224 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
8229 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
8233 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
8236 #ifndef HAVE_MOVING_COLLECTOR
8239 case MONO_TYPE_STRING
:
8240 case MONO_TYPE_OBJECT
:
8241 case MONO_TYPE_CLASS
:
8242 case MONO_TYPE_SZARRAY
:
8244 case MONO_TYPE_FNPTR
:
8245 case MONO_TYPE_ARRAY
:
8246 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
8247 type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
8253 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
8258 case MONO_TYPE_VALUETYPE
:
8268 CHECK_STACK_OVF (1);
8270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
8271 load
->flags
|= ins_flag
;
8284 token
= read32 (ip
+ 1);
8285 klass
= mini_get_class (method
, token
, generic_context
);
8286 CHECK_TYPELOAD (klass
);
8287 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8288 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0, sp
[1]->dreg
);
8299 const char *data_ptr
;
8301 guint32 field_token
;
8307 token
= read32 (ip
+ 1);
8309 klass
= mini_get_class (method
, token
, generic_context
);
8310 CHECK_TYPELOAD (klass
);
8312 if (cfg
->generic_sharing_context
)
8313 context_used
= mono_class_check_context_used (klass
);
8318 /* FIXME: Decompose later to help abcrem */
8321 args
[0] = emit_get_rgctx_klass (cfg
, context_used
,
8322 mono_array_class_get (klass
, 1), MONO_RGCTX_INFO_VTABLE
);
8327 ins
= mono_emit_jit_icall (cfg
, mono_array_new_specific
, args
);
8329 if (cfg
->opt
& MONO_OPT_SHARED
) {
8330 /* Decompose now to avoid problems with references to the domainvar */
8331 MonoInst
*iargs
[3];
8333 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8334 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
8337 ins
= mono_emit_jit_icall (cfg
, mono_array_new
, iargs
);
8339 /* Decompose later since it is needed by abcrem */
8340 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
8341 ins
->dreg
= alloc_preg (cfg
);
8342 ins
->sreg1
= sp
[0]->dreg
;
8343 ins
->inst_newa_class
= klass
;
8344 ins
->type
= STACK_OBJ
;
8346 MONO_ADD_INS (cfg
->cbb
, ins
);
8347 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8348 cfg
->cbb
->has_array_access
= TRUE
;
8350 /* Needed so mono_emit_load_get_addr () gets called */
8351 mono_get_got_var (cfg
);
8361 * we inline/optimize the initialization sequence if possible.
8362 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8363 * for small sizes open code the memcpy
8364 * ensure the rva field is big enough
8366 if ((cfg
->opt
& MONO_OPT_INTRINS
) && ip
+ 6 < end
&& ip_in_bb (cfg
, bblock
, ip
+ 6) && (len_ins
->opcode
== OP_ICONST
) && (data_ptr
= initialize_array_data (method
, cfg
->compile_aot
, ip
, klass
, len_ins
->inst_c0
, &data_size
, &field_token
))) {
8367 MonoMethod
*memcpy_method
= get_memcpy_method ();
8368 MonoInst
*iargs
[3];
8369 int add_reg
= alloc_preg (cfg
);
8371 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, G_STRUCT_OFFSET (MonoArray
, vector
));
8372 if (cfg
->compile_aot
) {
8373 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, method
->klass
->image
, GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
8375 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
8377 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
8378 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
8387 if (sp
[0]->type
!= STACK_OBJ
)
8390 dreg
= alloc_preg (cfg
);
8391 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
8392 ins
->dreg
= alloc_preg (cfg
);
8393 ins
->sreg1
= sp
[0]->dreg
;
8394 ins
->type
= STACK_I4
;
8395 MONO_ADD_INS (cfg
->cbb
, ins
);
8396 cfg
->flags
|= MONO_CFG_HAS_ARRAY_ACCESS
;
8397 cfg
->cbb
->has_array_access
= TRUE
;
8405 if (sp
[0]->type
!= STACK_OBJ
)
8408 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8410 klass
= mini_get_class (method
, read32 (ip
+ 1), generic_context
);
8411 CHECK_TYPELOAD (klass
);
8412 /* we need to make sure that this array is exactly the type it needs
8413 * to be for correctness. the wrappers are lax with their usage
8414 * so we need to ignore them here
8416 if (!klass
->valuetype
&& method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
)
8417 mini_emit_check_array_type (cfg
, sp
[0], mono_array_class_get (klass
, 1));
8420 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8424 case CEE_LDELEM_ANY
:
8435 case CEE_LDELEM_REF
: {
8441 if (*ip
== CEE_LDELEM_ANY
) {
8443 token
= read32 (ip
+ 1);
8444 klass
= mini_get_class (method
, token
, generic_context
);
8445 CHECK_TYPELOAD (klass
);
8446 mono_class_init (klass
);
8449 klass
= array_access_to_klass (*ip
);
8451 if (sp
[0]->type
!= STACK_OBJ
)
8454 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8456 if (sp
[1]->opcode
== OP_ICONST
) {
8457 int array_reg
= sp
[0]->dreg
;
8458 int index_reg
= sp
[1]->dreg
;
8459 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8461 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8462 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
);
8464 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8465 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0);
8468 if (*ip
== CEE_LDELEM_ANY
)
8481 case CEE_STELEM_REF
:
8482 case CEE_STELEM_ANY
: {
8488 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
8490 if (*ip
== CEE_STELEM_ANY
) {
8492 token
= read32 (ip
+ 1);
8493 klass
= mini_get_class (method
, token
, generic_context
);
8494 CHECK_TYPELOAD (klass
);
8495 mono_class_init (klass
);
8498 klass
= array_access_to_klass (*ip
);
8500 if (sp
[0]->type
!= STACK_OBJ
)
8503 /* storing a NULL doesn't need any of the complex checks in stelemref */
8504 if (generic_class_is_reference_type (cfg
, klass
) &&
8505 !(sp
[2]->opcode
== OP_PCONST
&& sp
[2]->inst_p0
== NULL
)) {
8506 MonoMethod
* helper
= mono_marshal_get_stelemref ();
8507 MonoInst
*iargs
[3];
8509 if (sp
[0]->type
!= STACK_OBJ
)
8511 if (sp
[2]->type
!= STACK_OBJ
)
8518 mono_emit_method_call (cfg
, helper
, iargs
, NULL
);
8520 if (sp
[1]->opcode
== OP_ICONST
) {
8521 int array_reg
= sp
[0]->dreg
;
8522 int index_reg
= sp
[1]->dreg
;
8523 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + G_STRUCT_OFFSET (MonoArray
, vector
);
8525 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
8526 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, array_reg
, offset
, sp
[2]->dreg
);
8528 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1]);
8529 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, addr
->dreg
, 0, sp
[2]->dreg
);
8533 if (*ip
== CEE_STELEM_ANY
)
8540 case CEE_CKFINITE
: {
8544 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
8545 ins
->sreg1
= sp
[0]->dreg
;
8546 ins
->dreg
= alloc_freg (cfg
);
8547 ins
->type
= STACK_R8
;
8548 MONO_ADD_INS (bblock
, ins
);
8550 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8555 case CEE_REFANYVAL
: {
8556 MonoInst
*src_var
, *src
;
8558 int klass_reg
= alloc_preg (cfg
);
8559 int dreg
= alloc_preg (cfg
);
8562 MONO_INST_NEW (cfg
, ins
, *ip
);
8565 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8566 CHECK_TYPELOAD (klass
);
8567 mono_class_init (klass
);
8569 if (cfg
->generic_sharing_context
)
8570 context_used
= mono_class_check_context_used (klass
);
8573 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8575 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
8576 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
8577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
));
8580 MonoInst
*klass_ins
;
8582 klass_ins
= emit_get_rgctx_klass (cfg
, context_used
,
8583 klass
, MONO_RGCTX_INFO_KLASS
);
8586 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
8587 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
8589 mini_emit_class_check (cfg
, klass_reg
, klass
);
8591 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
));
8592 ins
->type
= STACK_MP
;
8597 case CEE_MKREFANY
: {
8598 MonoInst
*loc
, *addr
;
8601 MONO_INST_NEW (cfg
, ins
, *ip
);
8604 klass
= mono_class_get_full (image
, read32 (ip
+ 1), generic_context
);
8605 CHECK_TYPELOAD (klass
);
8606 mono_class_init (klass
);
8608 if (cfg
->generic_sharing_context
)
8609 context_used
= mono_class_check_context_used (klass
);
8611 loc
= mono_compile_create_var (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
);
8612 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
8615 MonoInst
*const_ins
;
8616 int type_reg
= alloc_preg (cfg
);
8618 const_ins
= emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
8619 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
8620 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8621 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8622 } else if (cfg
->compile_aot
) {
8623 int const_reg
= alloc_preg (cfg
);
8624 int type_reg
= alloc_preg (cfg
);
8626 MONO_EMIT_NEW_CLASSCONST (cfg
, const_reg
, klass
);
8627 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), const_reg
);
8628 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_reg
, G_STRUCT_OFFSET (MonoClass
, byval_arg
));
8629 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
8631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
), &klass
->byval_arg
);
8632 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREP_MEMBASE_IMM
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, klass
), klass
);
8634 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
8636 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
8637 ins
->type
= STACK_VTYPE
;
8638 ins
->klass
= mono_defaults
.typed_reference_class
;
8645 MonoClass
*handle_class
;
8647 CHECK_STACK_OVF (1);
8650 n
= read32 (ip
+ 1);
8652 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
8653 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
8654 handle
= mono_method_get_wrapper_data (method
, n
);
8655 handle_class
= mono_method_get_wrapper_data (method
, n
+ 1);
8656 if (handle_class
== mono_defaults
.typehandle_class
)
8657 handle
= &((MonoClass
*)handle
)->byval_arg
;
8660 handle
= mono_ldtoken (image
, n
, &handle_class
, generic_context
);
8664 mono_class_init (handle_class
);
8665 if (cfg
->generic_sharing_context
) {
8666 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
8667 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
8668 /* This case handles ldtoken
8669 of an open type, like for
8672 } else if (handle_class
== mono_defaults
.typehandle_class
) {
8673 /* If we get a MONO_TYPE_CLASS
8674 then we need to provide the
8676 instantiation of it. */
8677 if (mono_type_get_type (handle
) == MONO_TYPE_CLASS
)
8680 context_used
= mono_class_check_context_used (mono_class_from_mono_type (handle
));
8681 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
8682 context_used
= mono_class_check_context_used (((MonoClassField
*)handle
)->parent
);
8683 else if (handle_class
== mono_defaults
.methodhandle_class
)
8684 context_used
= mono_method_check_context_used (handle
);
8686 g_assert_not_reached ();
8689 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
8690 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
8691 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
8692 MonoInst
*addr
, *vtvar
, *iargs
[3];
8693 int method_context_used
;
8695 if (cfg
->generic_sharing_context
)
8696 method_context_used
= mono_method_check_context_used (method
);
8698 method_context_used
= 0;
8700 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8702 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8703 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
8704 if (method_context_used
) {
8705 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
8706 method
, MONO_RGCTX_INFO_METHOD
);
8707 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
8709 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
8710 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
8712 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8714 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8716 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8718 if ((ip
+ 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 5) &&
8719 ((ip
[5] == CEE_CALL
) || (ip
[5] == CEE_CALLVIRT
)) &&
8720 (cmethod
= mini_get_method (cfg
, method
, read32 (ip
+ 6), NULL
, generic_context
)) &&
8721 (cmethod
->klass
== mono_defaults
.monotype_class
->parent
) &&
8722 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
8723 MonoClass
*tclass
= mono_class_from_mono_type (handle
);
8725 mono_class_init (tclass
);
8727 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8728 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
8729 } else if (cfg
->compile_aot
) {
8730 if (method
->wrapper_type
) {
8731 /* FIXME: n is not a normal token */
8732 cfg
->disable_aot
= TRUE
;
8733 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
8735 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
8738 EMIT_NEW_PCONST (cfg
, ins
, mono_type_get_object (cfg
->domain
, handle
));
8740 ins
->type
= STACK_OBJ
;
8741 ins
->klass
= cmethod
->klass
;
8744 MonoInst
*addr
, *vtvar
;
8746 vtvar
= mono_compile_create_var (cfg
, &handle_class
->byval_arg
, OP_LOCAL
);
8749 if (handle_class
== mono_defaults
.typehandle_class
) {
8750 ins
= emit_get_rgctx_klass (cfg
, context_used
,
8751 mono_class_from_mono_type (handle
),
8752 MONO_RGCTX_INFO_TYPE
);
8753 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
8754 ins
= emit_get_rgctx_method (cfg
, context_used
,
8755 handle
, MONO_RGCTX_INFO_METHOD
);
8756 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
8757 ins
= emit_get_rgctx_field (cfg
, context_used
,
8758 handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
8760 g_assert_not_reached ();
8762 } else if (cfg
->compile_aot
) {
8763 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
);
8765 EMIT_NEW_PCONST (cfg
, ins
, handle
);
8767 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
8768 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
8769 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
8779 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
8781 ins
->sreg1
= sp
[0]->dreg
;
8783 bblock
->out_of_line
= TRUE
;
8784 MONO_ADD_INS (bblock
, ins
);
8785 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
8786 MONO_ADD_INS (bblock
, ins
);
8789 link_bblock (cfg
, bblock
, end_bblock
);
8790 start_new_bblock
= 1;
8792 case CEE_ENDFINALLY
:
8793 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
8794 MONO_ADD_INS (bblock
, ins
);
8796 start_new_bblock
= 1;
8799 * Control will leave the method so empty the stack, otherwise
8800 * the next basic block will start with a nonempty stack.
8802 while (sp
!= stack_start
) {
8810 if (*ip
== CEE_LEAVE
) {
8812 target
= ip
+ 5 + (gint32
)read32(ip
+ 1);
8815 target
= ip
+ 2 + (signed char)(ip
[1]);
8818 /* empty the stack */
8819 while (sp
!= stack_start
) {
8824 * If this leave statement is in a catch block, check for a
8825 * pending exception, and rethrow it if necessary.
8827 for (i
= 0; i
< header
->num_clauses
; ++i
) {
8828 MonoExceptionClause
*clause
= &header
->clauses
[i
];
8831 * Use <= in the final comparison to handle clauses with multiple
8832 * leave statements, like in bug #78024.
8833 * The ordering of the exception clauses guarantees that we find the
8836 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((*ip
== CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
)) {
8838 MonoBasicBlock
*dont_throw
;
8843 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8846 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
8848 NEW_BBLOCK (cfg
, dont_throw
);
8851 * Currently, we allways rethrow the abort exception, despite the
8852 * fact that this is not correct. See thread6.cs for an example.
8853 * But propagating the abort exception is more important than
8854 * getting the sematics right.
8856 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
8857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
8858 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
8860 MONO_START_BB (cfg
, dont_throw
);
8865 if ((handlers
= mono_find_final_block (cfg
, ip
, target
, MONO_EXCEPTION_CLAUSE_FINALLY
))) {
8867 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
8869 link_bblock (cfg
, bblock
, tblock
);
8870 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
8871 ins
->inst_target_bb
= tblock
;
8872 MONO_ADD_INS (bblock
, ins
);
8873 bblock
->has_call_handler
= 1;
8875 g_list_free (handlers
);
8878 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8879 MONO_ADD_INS (bblock
, ins
);
8880 GET_BBLOCK (cfg
, tblock
, target
);
8881 link_bblock (cfg
, bblock
, tblock
);
8882 ins
->inst_target_bb
= tblock
;
8883 start_new_bblock
= 1;
8885 if (*ip
== CEE_LEAVE
)
8894 * Mono specific opcodes
8896 case MONO_CUSTOM_PREFIX
: {
8898 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
8902 case CEE_MONO_ICALL
: {
8904 MonoJitICallInfo
*info
;
8906 token
= read32 (ip
+ 2);
8907 func
= mono_method_get_wrapper_data (method
, token
);
8908 info
= mono_find_jit_icall_by_addr (func
);
8911 CHECK_STACK (info
->sig
->param_count
);
8912 sp
-= info
->sig
->param_count
;
8914 ins
= mono_emit_jit_icall (cfg
, info
->func
, sp
);
8915 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
8919 inline_costs
+= 10 * num_calls
++;
8923 case CEE_MONO_LDPTR
: {
8926 CHECK_STACK_OVF (1);
8928 token
= read32 (ip
+ 2);
8930 ptr
= mono_method_get_wrapper_data (method
, token
);
8931 if (cfg
->compile_aot
&& (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) && (strstr (method
->name
, "__icall_wrapper_") == method
->name
)) {
8932 MonoJitICallInfo
*callinfo
;
8933 const char *icall_name
;
8935 icall_name
= method
->name
+ strlen ("__icall_wrapper_");
8936 g_assert (icall_name
);
8937 callinfo
= mono_find_jit_icall_by_name (icall_name
);
8938 g_assert (callinfo
);
8940 if (ptr
== callinfo
->func
) {
8941 /* Will be transformed into an AOTCONST later */
8942 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8948 /* FIXME: Generalize this */
8949 if (cfg
->compile_aot
&& ptr
== mono_thread_interruption_request_flag ()) {
8950 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
, NULL
);
8955 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8958 inline_costs
+= 10 * num_calls
++;
8959 /* Can't embed random pointers into AOT code */
8960 cfg
->disable_aot
= 1;
8963 case CEE_MONO_ICALL_ADDR
: {
8964 MonoMethod
*cmethod
;
8967 CHECK_STACK_OVF (1);
8969 token
= read32 (ip
+ 2);
8971 cmethod
= mono_method_get_wrapper_data (method
, token
);
8973 if (cfg
->compile_aot
) {
8974 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
8976 ptr
= mono_lookup_internal_call (cmethod
);
8978 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
8984 case CEE_MONO_VTADDR
: {
8985 MonoInst
*src_var
, *src
;
8991 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
8992 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
8997 case CEE_MONO_NEWOBJ
: {
8998 MonoInst
*iargs
[2];
9000 CHECK_STACK_OVF (1);
9002 token
= read32 (ip
+ 2);
9003 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9004 mono_class_init (klass
);
9005 NEW_DOMAINCONST (cfg
, iargs
[0]);
9006 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
9007 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9008 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
9009 *sp
++ = mono_emit_jit_icall (cfg
, mono_object_new
, iargs
);
9011 inline_costs
+= 10 * num_calls
++;
9014 case CEE_MONO_OBJADDR
:
9017 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
9018 ins
->dreg
= alloc_preg (cfg
);
9019 ins
->sreg1
= sp
[0]->dreg
;
9020 ins
->type
= STACK_MP
;
9021 MONO_ADD_INS (cfg
->cbb
, ins
);
9025 case CEE_MONO_LDNATIVEOBJ
:
9027 * Similar to LDOBJ, but instead load the unmanaged
9028 * representation of the vtype to the stack.
9033 token
= read32 (ip
+ 2);
9034 klass
= mono_method_get_wrapper_data (method
, token
);
9035 g_assert (klass
->valuetype
);
9036 mono_class_init (klass
);
9039 MonoInst
*src
, *dest
, *temp
;
9042 temp
= mono_compile_create_var (cfg
, &klass
->byval_arg
, OP_LOCAL
);
9043 temp
->backend
.is_pinvoke
= 1;
9044 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
9045 mini_emit_stobj (cfg
, dest
, src
, klass
, TRUE
);
9047 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
9048 dest
->type
= STACK_VTYPE
;
9049 dest
->klass
= klass
;
9055 case CEE_MONO_RETOBJ
: {
9057 * Same as RET, but return the native representation of a vtype
9060 g_assert (cfg
->ret
);
9061 g_assert (mono_method_signature (method
)->pinvoke
);
9066 token
= read32 (ip
+ 2);
9067 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9069 if (!cfg
->vret_addr
) {
9070 g_assert (cfg
->ret_var_is_local
);
9072 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
9074 EMIT_NEW_RETLOADA (cfg
, ins
);
9076 mini_emit_stobj (cfg
, ins
, sp
[0], klass
, TRUE
);
9078 if (sp
!= stack_start
)
9081 MONO_INST_NEW (cfg
, ins
, OP_BR
);
9082 ins
->inst_target_bb
= end_bblock
;
9083 MONO_ADD_INS (bblock
, ins
);
9084 link_bblock (cfg
, bblock
, end_bblock
);
9085 start_new_bblock
= 1;
9089 case CEE_MONO_CISINST
:
9090 case CEE_MONO_CCASTCLASS
: {
9095 token
= read32 (ip
+ 2);
9096 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
9097 if (ip
[1] == CEE_MONO_CISINST
)
9098 ins
= handle_cisinst (cfg
, klass
, sp
[0]);
9100 ins
= handle_ccastclass (cfg
, klass
, sp
[0]);
9106 case CEE_MONO_SAVE_LMF
:
9107 case CEE_MONO_RESTORE_LMF
:
9108 #ifdef MONO_ARCH_HAVE_LMF_OPS
9109 MONO_INST_NEW (cfg
, ins
, (ip
[1] == CEE_MONO_SAVE_LMF
) ? OP_SAVE_LMF
: OP_RESTORE_LMF
);
9110 MONO_ADD_INS (bblock
, ins
);
9111 cfg
->need_lmf_area
= TRUE
;
9115 case CEE_MONO_CLASSCONST
:
9116 CHECK_STACK_OVF (1);
9118 token
= read32 (ip
+ 2);
9119 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
9122 inline_costs
+= 10 * num_calls
++;
9124 case CEE_MONO_NOT_TAKEN
:
9125 bblock
->out_of_line
= TRUE
;
9129 CHECK_STACK_OVF (1);
9131 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
9132 ins
->dreg
= alloc_preg (cfg
);
9133 ins
->inst_offset
= (gint32
)read32 (ip
+ 2);
9134 ins
->type
= STACK_PTR
;
9135 MONO_ADD_INS (bblock
, ins
);
9140 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX
, ip
[1]);
9150 /* somewhat similar to LDTOKEN */
9151 MonoInst
*addr
, *vtvar
;
9152 CHECK_STACK_OVF (1);
9153 vtvar
= mono_compile_create_var (cfg
, &mono_defaults
.argumenthandle_class
->byval_arg
, OP_LOCAL
);
9155 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9156 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
9158 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9159 ins
->type
= STACK_VTYPE
;
9160 ins
->klass
= mono_defaults
.argumenthandle_class
;
9173 * The following transforms:
9174 * CEE_CEQ into OP_CEQ
9175 * CEE_CGT into OP_CGT
9176 * CEE_CGT_UN into OP_CGT_UN
9177 * CEE_CLT into OP_CLT
9178 * CEE_CLT_UN into OP_CLT_UN
9180 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
9182 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
9184 cmp
->sreg1
= sp
[0]->dreg
;
9185 cmp
->sreg2
= sp
[1]->dreg
;
9186 type_from_op (cmp
, sp
[0], sp
[1]);
9188 if ((sp
[0]->type
== STACK_I8
) || ((SIZEOF_REGISTER
== 8) && ((sp
[0]->type
== STACK_PTR
) || (sp
[0]->type
== STACK_OBJ
) || (sp
[0]->type
== STACK_MP
))))
9189 cmp
->opcode
= OP_LCOMPARE
;
9190 else if (sp
[0]->type
== STACK_R8
)
9191 cmp
->opcode
= OP_FCOMPARE
;
9193 cmp
->opcode
= OP_ICOMPARE
;
9194 MONO_ADD_INS (bblock
, cmp
);
9195 ins
->type
= STACK_I4
;
9196 ins
->dreg
= alloc_dreg (cfg
, ins
->type
);
9197 type_from_op (ins
, sp
[0], sp
[1]);
9199 if (cmp
->opcode
== OP_FCOMPARE
) {
9201 * The backends expect the fceq opcodes to do the
9204 cmp
->opcode
= OP_NOP
;
9205 ins
->sreg1
= cmp
->sreg1
;
9206 ins
->sreg2
= cmp
->sreg2
;
9208 MONO_ADD_INS (bblock
, ins
);
9215 MonoMethod
*cil_method
;
9216 gboolean needs_static_rgctx_invoke
;
9218 CHECK_STACK_OVF (1);
9220 n
= read32 (ip
+ 2);
9221 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9224 mono_class_init (cmethod
->klass
);
9226 mono_save_token_info (cfg
, image
, n
, cmethod
);
9228 if (cfg
->generic_sharing_context
)
9229 context_used
= mono_method_check_context_used (cmethod
);
9231 needs_static_rgctx_invoke
= mono_method_needs_static_rgctx_invoke (cmethod
, TRUE
);
9233 cil_method
= cmethod
;
9234 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
9235 METHOD_ACCESS_FAILURE
;
9237 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9238 if (check_linkdemand (cfg
, method
, cmethod
))
9240 CHECK_CFG_EXCEPTION
;
9241 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9242 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9246 * Optimize the common case of ldftn+delegate creation
9248 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9249 /* FIXME: SGEN support */
9250 /* FIXME: handle shared static generic methods */
9251 /* FIXME: handle this in shared code */
9252 if (!needs_static_rgctx_invoke
&& !context_used
&& (sp
> stack_start
) && (ip
+ 6 + 5 < end
) && ip_in_bb (cfg
, bblock
, ip
+ 6) && (ip
[6] == CEE_NEWOBJ
)) {
9253 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (ip
+ 7), NULL
, generic_context
);
9254 if (ctor_method
&& (ctor_method
->klass
->parent
== mono_defaults
.multicastdelegate_class
)) {
9255 MonoInst
*target_ins
;
9258 invoke
= mono_get_delegate_invoke (ctor_method
->klass
);
9259 if (!invoke
|| !mono_method_signature (invoke
))
9263 if (cfg
->verbose_level
> 3)
9264 g_print ("converting (in B%d: stack: %d) %s", bblock
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
9265 target_ins
= sp
[-1];
9267 *sp
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
);
9276 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
9278 EMIT_NEW_METHODCONST (cfg
, argconst
, cmethod
);
9280 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
9284 inline_costs
+= 10 * num_calls
++;
9287 case CEE_LDVIRTFTN
: {
9292 n
= read32 (ip
+ 2);
9293 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
9296 mono_class_init (cmethod
->klass
);
9298 if (cfg
->generic_sharing_context
)
9299 context_used
= mono_method_check_context_used (cmethod
);
9301 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS
) {
9302 if (check_linkdemand (cfg
, method
, cmethod
))
9304 CHECK_CFG_EXCEPTION
;
9305 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR
) {
9306 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
, bblock
, ip
);
9313 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
9314 cmethod
, MONO_RGCTX_INFO_METHOD
);
9315 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
9317 EMIT_NEW_METHODCONST (cfg
, args
[1], cmethod
);
9318 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
9322 inline_costs
+= 10 * num_calls
++;
9326 CHECK_STACK_OVF (1);
9328 n
= read16 (ip
+ 2);
9330 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
9335 CHECK_STACK_OVF (1);
9337 n
= read16 (ip
+ 2);
9339 NEW_ARGLOADA (cfg
, ins
, n
);
9340 MONO_ADD_INS (cfg
->cbb
, ins
);
9348 n
= read16 (ip
+ 2);
9350 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
9352 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
9356 CHECK_STACK_OVF (1);
9358 n
= read16 (ip
+ 2);
9360 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
9365 unsigned char *tmp_ip
;
9366 CHECK_STACK_OVF (1);
9368 n
= read16 (ip
+ 2);
9371 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, ip
, end
, 2))) {
9377 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
9386 n
= read16 (ip
+ 2);
9388 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
9390 emit_stloc_ir (cfg
, sp
, header
, n
);
9397 if (sp
!= stack_start
)
9399 if (cfg
->method
!= method
)
9401 * Inlining this into a loop in a parent could lead to
9402 * stack overflows which is different behavior than the
9403 * non-inlined case, thus disable inlining in this case.
9405 goto inline_failure
;
9407 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
9408 ins
->dreg
= alloc_preg (cfg
);
9409 ins
->sreg1
= sp
[0]->dreg
;
9410 ins
->type
= STACK_PTR
;
9411 MONO_ADD_INS (cfg
->cbb
, ins
);
9413 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
9415 ins
->flags
|= MONO_INST_INIT
;
9420 case CEE_ENDFILTER
: {
9421 MonoExceptionClause
*clause
, *nearest
;
9422 int cc
, nearest_num
;
9426 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
9428 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
9429 ins
->sreg1
= (*sp
)->dreg
;
9430 MONO_ADD_INS (bblock
, ins
);
9431 start_new_bblock
= 1;
9436 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
9437 clause
= &header
->clauses
[cc
];
9438 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
9439 ((ip
- header
->code
) > clause
->data
.filter_offset
&& (ip
- header
->code
) <= clause
->handler_offset
) &&
9440 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
))) {
9446 if ((ip
- header
->code
) != nearest
->handler_offset
)
9451 case CEE_UNALIGNED_
:
9452 ins_flag
|= MONO_INST_UNALIGNED
;
9453 /* FIXME: record alignment? we can assume 1 for now */
9458 ins_flag
|= MONO_INST_VOLATILE
;
9462 ins_flag
|= MONO_INST_TAILCALL
;
9463 cfg
->flags
|= MONO_CFG_HAS_TAIL
;
9464 /* Can't inline tail calls at this time */
9465 inline_costs
+= 100000;
9472 token
= read32 (ip
+ 2);
9473 klass
= mini_get_class (method
, token
, generic_context
);
9474 CHECK_TYPELOAD (klass
);
9475 if (generic_class_is_reference_type (cfg
, klass
))
9476 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
9478 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
9482 case CEE_CONSTRAINED_
:
9484 token
= read32 (ip
+ 2);
9485 constrained_call
= mono_class_get_full (image
, token
, generic_context
);
9486 CHECK_TYPELOAD (constrained_call
);
9491 MonoInst
*iargs
[3];
9495 if ((ip
[1] == CEE_CPBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5)) {
9496 mini_emit_memcpy (cfg
, sp
[0]->dreg
, 0, sp
[1]->dreg
, 0, sp
[2]->inst_c0
, 0);
9497 } else if ((ip
[1] == CEE_INITBLK
) && (cfg
->opt
& MONO_OPT_INTRINS
) && (sp
[2]->opcode
== OP_ICONST
) && ((n
= sp
[2]->inst_c0
) <= sizeof (gpointer
) * 5) && (sp
[1]->opcode
== OP_ICONST
) && (sp
[1]->inst_c0
== 0)) {
9498 /* emit_memset only works when val == 0 */
9499 mini_emit_memset (cfg
, sp
[0]->dreg
, 0, sp
[2]->inst_c0
, sp
[1]->inst_c0
, 0);
9504 if (ip
[1] == CEE_CPBLK
) {
9505 MonoMethod
*memcpy_method
= get_memcpy_method ();
9506 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9508 MonoMethod
*memset_method
= get_memset_method ();
9509 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
9519 ins_flag
|= MONO_INST_NOTYPECHECK
;
9521 ins_flag
|= MONO_INST_NORANGECHECK
;
9522 /* we ignore the no-nullcheck for now since we
9523 * really do it explicitly only when doing callvirt->call
9529 int handler_offset
= -1;
9531 for (i
= 0; i
< header
->num_clauses
; ++i
) {
9532 MonoExceptionClause
*clause
= &header
->clauses
[i
];
9533 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
9534 handler_offset
= clause
->handler_offset
;
9539 bblock
->flags
|= BB_EXCEPTION_UNSAFE
;
9541 g_assert (handler_offset
!= -1);
9543 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
9544 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
9545 ins
->sreg1
= load
->dreg
;
9546 MONO_ADD_INS (bblock
, ins
);
9548 link_bblock (cfg
, bblock
, end_bblock
);
9549 start_new_bblock
= 1;
9557 CHECK_STACK_OVF (1);
9559 token
= read32 (ip
+ 2);
9560 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
) {
9561 MonoType
*type
= mono_type_create_from_typespec (image
, token
);
9562 token
= mono_type_size (type
, &ialign
);
9564 MonoClass
*klass
= mono_class_get_full (image
, token
, generic_context
);
9565 CHECK_TYPELOAD (klass
);
9566 mono_class_init (klass
);
9567 token
= mono_class_value_size (klass
, &align
);
9569 EMIT_NEW_ICONST (cfg
, ins
, token
);
9574 case CEE_REFANYTYPE
: {
9575 MonoInst
*src_var
, *src
;
9581 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9583 src_var
= mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.typed_reference_class
->byval_arg
, OP_LOCAL
, sp
[0]->dreg
);
9584 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9585 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &mono_defaults
.typehandle_class
->byval_arg
, src
->dreg
, G_STRUCT_OFFSET (MonoTypedRef
, type
));
9595 g_error ("opcode 0xfe 0x%02x not handled", ip
[1]);
9600 g_error ("opcode 0x%02x not handled", *ip
);
9603 if (start_new_bblock
!= 1)
9606 bblock
->cil_length
= ip
- bblock
->cil_code
;
9607 bblock
->next_bb
= end_bblock
;
9609 if (cfg
->method
== method
&& cfg
->domainvar
) {
9611 MonoInst
*get_domain
;
9613 cfg
->cbb
= init_localsbb
;
9615 if (! (get_domain
= mono_arch_get_domain_intrinsic (cfg
))) {
9616 get_domain
= mono_emit_jit_icall (cfg
, mono_domain_get
, NULL
);
9619 get_domain
->dreg
= alloc_preg (cfg
);
9620 MONO_ADD_INS (cfg
->cbb
, get_domain
);
9622 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
9623 MONO_ADD_INS (cfg
->cbb
, store
);
9626 #ifdef TARGET_POWERPC
9627 if (cfg
->compile_aot
)
9628 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9629 mono_get_got_var (cfg
);
9632 if (cfg
->method
== method
&& cfg
->got_var
)
9633 mono_emit_load_got_addr (cfg
);
9638 cfg
->cbb
= init_localsbb
;
9640 for (i
= 0; i
< header
->num_locals
; ++i
) {
9641 MonoType
*ptype
= header
->locals
[i
];
9642 int t
= ptype
->type
;
9643 dreg
= cfg
->locals
[i
]->dreg
;
9645 if (t
== MONO_TYPE_VALUETYPE
&& ptype
->data
.klass
->enumtype
)
9646 t
= mono_class_enum_basetype (ptype
->data
.klass
)->type
;
9648 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9649 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
9650 MONO_EMIT_NEW_ICONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9651 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
9652 MONO_EMIT_NEW_I8CONST (cfg
, cfg
->locals
[i
]->dreg
, 0);
9653 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
9654 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
9655 ins
->type
= STACK_R8
;
9656 ins
->inst_p0
= (void*)&r8_0
;
9657 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
9658 MONO_ADD_INS (init_localsbb
, ins
);
9659 EMIT_NEW_LOCSTORE (cfg
, store
, i
, ins
);
9660 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
9661 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (ptype
))) {
9662 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type (ptype
));
9664 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
9671 if (cfg
->method
== method
) {
9673 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
9674 bb
->region
= mono_find_block_region (cfg
, bb
->real_offset
);
9676 mono_create_spvar_for_region (cfg
, bb
->region
);
9677 if (cfg
->verbose_level
> 2)
9678 printf ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
9682 g_slist_free (class_inits
);
9683 dont_inline
= g_list_remove (dont_inline
, method
);
9685 if (inline_costs
< 0) {
9688 /* Method is too large */
9689 mname
= mono_method_full_name (method
, TRUE
);
9690 cfg
->exception_type
= MONO_EXCEPTION_INVALID_PROGRAM
;
9691 cfg
->exception_message
= g_strdup_printf ("Method %s is too complex.", mname
);
9696 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
9697 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
9699 return inline_costs
;
9702 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
9703 g_slist_free (class_inits
);
9704 dont_inline
= g_list_remove (dont_inline
, method
);
9708 g_slist_free (class_inits
);
9709 dont_inline
= g_list_remove (dont_inline
, method
);
9713 g_slist_free (class_inits
);
9714 dont_inline
= g_list_remove (dont_inline
, method
);
9715 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
9719 g_slist_free (class_inits
);
9720 dont_inline
= g_list_remove (dont_inline
, method
);
9721 set_exception_type_from_invalid_il (cfg
, method
, ip
);
9726 store_membase_reg_to_store_membase_imm (int opcode
)
9729 case OP_STORE_MEMBASE_REG
:
9730 return OP_STORE_MEMBASE_IMM
;
9731 case OP_STOREI1_MEMBASE_REG
:
9732 return OP_STOREI1_MEMBASE_IMM
;
9733 case OP_STOREI2_MEMBASE_REG
:
9734 return OP_STOREI2_MEMBASE_IMM
;
9735 case OP_STOREI4_MEMBASE_REG
:
9736 return OP_STOREI4_MEMBASE_IMM
;
9737 case OP_STOREI8_MEMBASE_REG
:
9738 return OP_STOREI8_MEMBASE_IMM
;
9740 g_assert_not_reached ();
9746 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking an
 * immediate second operand (e.g. OP_STOREI4_MEMBASE_REG ->
 * OP_STOREI4_MEMBASE_IMM).
 *
 * NOTE(review): the extraction dropped many original lines here (the fused
 * original line numbers jump, e.g. 9749 -> 9759), so most of the case labels
 * selecting these return values — and the switch/default scaffolding — are
 * not visible.  The code below is kept byte-for-byte as extracted.
 */
9749 mono_op_to_op_imm (int opcode
)
/* Integer division / remainder / shift immediates (labels lost in extraction). */
9759 return OP_IDIV_UN_IMM
;
9763 return OP_IREM_UN_IMM
;
9777 return OP_ISHR_UN_IMM
;
/* 64-bit shift immediate. */
9794 return OP_LSHR_UN_IMM
;
/* Compare immediates: native word, int32 and int64 variants. */
9797 return OP_COMPARE_IMM
;
9799 return OP_ICOMPARE_IMM
;
9801 return OP_LCOMPARE_IMM
;
/* Store-to-memory opcodes: switch the stored register for an immediate. */
9803 case OP_STORE_MEMBASE_REG
:
9804 return OP_STORE_MEMBASE_IMM
;
9805 case OP_STOREI1_MEMBASE_REG
:
9806 return OP_STOREI1_MEMBASE_IMM
;
9807 case OP_STOREI2_MEMBASE_REG
:
9808 return OP_STOREI2_MEMBASE_IMM
;
9809 case OP_STOREI4_MEMBASE_REG
:
9810 return OP_STOREI4_MEMBASE_IMM
;
/* x86/amd64-specific opcodes with immediate forms. */
9812 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9814 return OP_X86_PUSH_IMM
;
9815 case OP_X86_COMPARE_MEMBASE_REG
:
9816 return OP_X86_COMPARE_MEMBASE_IMM
;
9818 #if defined(TARGET_AMD64)
9819 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
9820 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* NOTE(review): lines between OP_VOIDCALL_REG and the OP_LOCALLOC_IMM
 * return were dropped by the extraction. */
9822 case OP_VOIDCALL_REG
:
9831 return OP_LOCALLOC_IMM
;
9838 ldind_to_load_membase (int opcode
)
9842 return OP_LOADI1_MEMBASE
;
9844 return OP_LOADU1_MEMBASE
;
9846 return OP_LOADI2_MEMBASE
;
9848 return OP_LOADU2_MEMBASE
;
9850 return OP_LOADI4_MEMBASE
;
9852 return OP_LOADU4_MEMBASE
;
9854 return OP_LOAD_MEMBASE
;
9856 return OP_LOAD_MEMBASE
;
9858 return OP_LOADI8_MEMBASE
;
9860 return OP_LOADR4_MEMBASE
;
9862 return OP_LOADR8_MEMBASE
;
9864 g_assert_not_reached ();
9871 stind_to_store_membase (int opcode
)
9875 return OP_STOREI1_MEMBASE_REG
;
9877 return OP_STOREI2_MEMBASE_REG
;
9879 return OP_STOREI4_MEMBASE_REG
;
9882 return OP_STORE_MEMBASE_REG
;
9884 return OP_STOREI8_MEMBASE_REG
;
9886 return OP_STORER4_MEMBASE_REG
;
9888 return OP_STORER8_MEMBASE_REG
;
9890 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) load to the
 * absolute-address OP_LOAD*_MEM form.  Only provided on x86/amd64; the
 * 8-byte variant is additionally gated on SIZEOF_REGISTER == 8.
 *
 * NOTE(review): extraction dropped lines here (fused numbers skip, e.g.
 * 9900 -> 9902): the switch scaffolding, some case labels, the #endif lines
 * and the fallback return are missing.  Code kept byte-for-byte as
 * extracted.
 */
9897 mono_load_membase_to_load_mem (int opcode
)
9899 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9900 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* NOTE(review): OP_LOAD_MEMBASE's return line was dropped by the extraction. */
9902 case OP_LOAD_MEMBASE
:
9904 case OP_LOADU1_MEMBASE
:
9905 return OP_LOADU1_MEM
;
9906 case OP_LOADU2_MEMBASE
:
9907 return OP_LOADU2_MEM
;
9908 case OP_LOADI4_MEMBASE
:
9909 return OP_LOADI4_MEM
;
9910 case OP_LOADU4_MEMBASE
:
9911 return OP_LOADU4_MEM
;
/* 8-byte absolute loads only exist on 64 bit registers. */
9912 #if SIZEOF_REGISTER == 8
9913 case OP_LOADI8_MEMBASE
:
9914 return OP_LOADI8_MEM
;
/*
 * op_to_op_dest_membase:
 *
 *   When an instruction's destination is a stack slot, try to replace a
 * load/op/store sequence with a single x86/amd64 read-modify-write _MEMBASE
 * opcode.  STORE_OPCODE is the store that would write the result back; only
 * word-sized stores (and 8-byte stores on amd64) are eligible — anything
 * else is rejected up front.
 *
 * NOTE(review): the extraction dropped the case labels (e.g. presumably
 * OP_IADD before OP_X86_ADD_MEMBASE_REG — fused line numbers skip) and the
 * switch/#endif/fallback scaffolding.  Code kept byte-for-byte as extracted.
 */
9923 op_to_op_dest_membase (int store_opcode
, int opcode
)
9925 #if defined(TARGET_X86)
/* Only full-word destination stores can be fused on x86. */
9926 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
/* Register-source ALU ops fused into memory-destination forms. */
9931 return OP_X86_ADD_MEMBASE_REG
;
9933 return OP_X86_SUB_MEMBASE_REG
;
9935 return OP_X86_AND_MEMBASE_REG
;
9937 return OP_X86_OR_MEMBASE_REG
;
9939 return OP_X86_XOR_MEMBASE_REG
;
/* Immediate-source ALU ops fused into memory-destination forms. */
9942 return OP_X86_ADD_MEMBASE_IMM
;
9945 return OP_X86_SUB_MEMBASE_IMM
;
9948 return OP_X86_AND_MEMBASE_IMM
;
9951 return OP_X86_OR_MEMBASE_IMM
;
9954 return OP_X86_XOR_MEMBASE_IMM
;
9960 #if defined(TARGET_AMD64)
/* amd64 also accepts 8-byte destination stores. */
9961 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
/* 32-bit register-source forms. */
9966 return OP_X86_ADD_MEMBASE_REG
;
9968 return OP_X86_SUB_MEMBASE_REG
;
9970 return OP_X86_AND_MEMBASE_REG
;
9972 return OP_X86_OR_MEMBASE_REG
;
9974 return OP_X86_XOR_MEMBASE_REG
;
/* 32-bit immediate-source forms. */
9976 return OP_X86_ADD_MEMBASE_IMM
;
9978 return OP_X86_SUB_MEMBASE_IMM
;
9980 return OP_X86_AND_MEMBASE_IMM
;
9982 return OP_X86_OR_MEMBASE_IMM
;
9984 return OP_X86_XOR_MEMBASE_IMM
;
/* 64-bit register-source forms. */
9986 return OP_AMD64_ADD_MEMBASE_REG
;
9988 return OP_AMD64_SUB_MEMBASE_REG
;
9990 return OP_AMD64_AND_MEMBASE_REG
;
9992 return OP_AMD64_OR_MEMBASE_REG
;
9994 return OP_AMD64_XOR_MEMBASE_REG
;
/* 64-bit immediate-source forms. */
9997 return OP_AMD64_ADD_MEMBASE_IMM
;
10000 return OP_AMD64_SUB_MEMBASE_IMM
;
10003 return OP_AMD64_AND_MEMBASE_IMM
;
10006 return OP_AMD64_OR_MEMBASE_IMM
;
10009 return OP_AMD64_XOR_MEMBASE_IMM
;
/*
 * op_to_op_store_membase:
 *
 *   Try to fuse an instruction whose result feeds a one-byte store into a
 * single x86/amd64 SETcc-to-memory opcode; only STOREI1_MEMBASE_REG
 * destinations qualify.
 *
 * NOTE(review): the case labels selecting SETEQ vs. SETNE (presumably the
 * compare-result opcodes) and the switch/fallback scaffolding were dropped
 * by the extraction — confirm against the pristine file.  Code kept
 * byte-for-byte as extracted.
 */
10019 op_to_op_store_membase (int store_opcode
, int opcode
)
10021 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10024 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10025 return OP_X86_SETEQ_MEMBASE
;
10027 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
10028 return OP_X86_SETNE_MEMBASE
;
/*
 * op_to_op_src1_membase:
 *
 *   Try to fold a load (LOAD_OPCODE) into the first source operand of
 * OPCODE, producing an x86/amd64 opcode that reads that operand directly
 * from memory (push/compare forms).  Returns the fused opcode, or -1-style
 * rejection paths (scaffolding not visible here).
 *
 * NOTE(review): extraction dropped several lines (fused numbers skip, e.g.
 * 10036 -> 10039), including the #ifdef TARGET_X86 opener, some case labels
 * and the function tail.  Code kept byte-for-byte as extracted.
 */
10036 op_to_op_src1_membase (int load_opcode
, int opcode
)
10039 /* FIXME: This has sign extension issues */
/* Special case: byte-sized unsigned load + 32-bit compare-immediate. */
10041 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10042 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only word-sized loads can be folded on x86. */
10045 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10050 return OP_X86_PUSH_MEMBASE
;
10051 case OP_COMPARE_IMM
:
10052 case OP_ICOMPARE_IMM
:
10053 return OP_X86_COMPARE_MEMBASE_IMM
;
10056 return OP_X86_COMPARE_MEMBASE_REG
;
10060 #ifdef TARGET_AMD64
10061 /* FIXME: This has sign extension issues */
10063 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10064 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Native-word/8-byte loads can be folded into a push on amd64. */
10069 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10070 return OP_X86_PUSH_MEMBASE
;
10072 /* FIXME: This only works for 32 bit immediates
10073 case OP_COMPARE_IMM:
10074 case OP_LCOMPARE_IMM:
10075 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10076 return OP_AMD64_COMPARE_MEMBASE_IMM;
10078 case OP_ICOMPARE_IMM
:
10079 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10080 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* Register compares: 64-bit vs. 32-bit variants keyed on the load width. */
10084 if ((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI8_MEMBASE
))
10085 return OP_AMD64_COMPARE_MEMBASE_REG
;
10088 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10089 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
/*
 * op_to_op_src2_membase:
 *
 *   Try to fold a load (LOAD_OPCODE) into the second source operand of
 * OPCODE, producing an x86/amd64 opcode that reads that operand directly
 * from memory (compare/add/sub/and/or/xor REG_MEMBASE forms).  On amd64 the
 * 32-bit (X86_*/AMD64_I*) vs. 64-bit (AMD64_*) form is selected by the
 * width of the folded load.
 *
 * NOTE(review): extraction dropped the case labels, #ifdef TARGET_X86
 * opener and fallback scaffolding (fused line numbers skip).  Code kept
 * byte-for-byte as extracted.
 */
10098 op_to_op_src2_membase (int load_opcode
, int opcode
)
/* x86: only word-sized loads can be folded. */
10101 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
10107 return OP_X86_COMPARE_REG_MEMBASE
;
10109 return OP_X86_ADD_REG_MEMBASE
;
10111 return OP_X86_SUB_REG_MEMBASE
;
10113 return OP_X86_AND_REG_MEMBASE
;
10115 return OP_X86_OR_REG_MEMBASE
;
10117 return OP_X86_XOR_REG_MEMBASE
;
10121 #ifdef TARGET_AMD64
/* 32-bit compare when a 32-bit value was loaded... */
10124 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10125 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
/* ...64-bit compare when a native-word/8-byte value was loaded. */
10129 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10130 return OP_AMD64_COMPARE_REG_MEMBASE
;
/* 32-bit ALU forms. */
10133 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10134 return OP_X86_ADD_REG_MEMBASE
;
10136 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10137 return OP_X86_SUB_REG_MEMBASE
;
10139 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10140 return OP_X86_AND_REG_MEMBASE
;
10142 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10143 return OP_X86_OR_REG_MEMBASE
;
10145 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
10146 return OP_X86_XOR_REG_MEMBASE
;
/* 64-bit ALU forms. */
10148 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10149 return OP_AMD64_ADD_REG_MEMBASE
;
10151 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10152 return OP_AMD64_SUB_REG_MEMBASE
;
10154 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10155 return OP_AMD64_AND_REG_MEMBASE
;
10157 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10158 return OP_AMD64_OR_REG_MEMBASE
;
10160 if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
))
10161 return OP_AMD64_XOR_REG_MEMBASE
;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (the default path below), except that opcodes
 * which this architecture emulates in software — long shifts on 32 bit
 * registers, and mul/div when MONO_ARCH_EMULATE_MUL_DIV/_DIV is defined —
 * are excluded (presumably rejected before falling through; the case labels
 * inside the #if guards were dropped by the extraction — confirm against
 * the pristine file).
 */
10169 mono_op_to_op_imm_noemul (int opcode
)
10172 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10177 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Anything not software-emulated gets the normal immediate mapping. */
10185 return mono_op_to_op_imm (opcode
);
10189 #ifndef DISABLE_JIT
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * variable for them, and conversely mark variables used in only one bblock
 * as dead so they can become local vregs.  Finally compress the
 * varinfo/vars tables.
 *
 * NOTE(review): the extraction dropped many lines of this function (fused
 * line numbers skip throughout, and the closing brace is not visible), so
 * some statements, braces and comment openers are missing below.  The
 * surviving code is kept byte-for-byte as extracted; comments added only.
 */
10192 * mono_handle_global_vregs:
10194 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10198 mono_handle_global_vregs (MonoCompile
*cfg
)
/* vreg_to_bb[vreg]: 0 = unseen, block_num+1 = single block, -1 = global. */
10200 gint32
*vreg_to_bb
;
10201 MonoBasicBlock
*bb
;
10204 vreg_to_bb
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
10206 #ifdef MONO_ARCH_SIMD_INTRINSICS
10207 if (cfg
->uses_simd_intrinsics
)
10208 mono_simd_simplify_indirection (cfg
);
10211 /* Find local vregs used in more than one bb */
10212 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10213 MonoInst
*ins
= bb
->code
;
10214 int block_num
= bb
->block_num
;
10216 if (cfg
->verbose_level
> 2)
10217 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
10220 for (; ins
; ins
= ins
->next
) {
10221 const char *spec
= INS_INFO (ins
->opcode
);
10222 int regtype
, regindex
;
10225 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10226 mono_print_ins (ins
);
10228 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
/* regindex 0 = dest, 1-3 = the three source registers. */
10230 for (regindex
= 0; regindex
< 4; regindex
++) {
10233 if (regindex
== 0) {
10234 regtype
= spec
[MONO_INST_DEST
];
10235 if (regtype
== ' ')
10238 } else if (regindex
== 1) {
10239 regtype
= spec
[MONO_INST_SRC1
];
10240 if (regtype
== ' ')
10243 } else if (regindex
== 2) {
10244 regtype
= spec
[MONO_INST_SRC2
];
10245 if (regtype
== ' ')
10248 } else if (regindex
== 3) {
10249 regtype
= spec
[MONO_INST_SRC3
];
10250 if (regtype
== ' ')
10255 #if SIZEOF_REGISTER == 4
10256 /* In the LLVM case, the long opcodes are not decomposed */
10257 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
10259 * Since some instructions reference the original long vreg,
10260 * and some reference the two component vregs, it is quite hard
10261 * to determine when it needs to be global. So be conservative.
10263 if (!get_vreg_to_inst (cfg
, vreg
)) {
10264 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int64_class
->byval_arg
, OP_LOCAL
, vreg
);
10266 if (cfg
->verbose_level
> 2)
10267 printf ("LONG VREG R%d made global.\n", vreg
);
10271 * Make the component vregs volatile since the optimizations can
10272 * get confused otherwise.
10274 get_vreg_to_inst (cfg
, vreg
+ 1)->flags
|= MONO_INST_VOLATILE
;
10275 get_vreg_to_inst (cfg
, vreg
+ 2)->flags
|= MONO_INST_VOLATILE
;
10279 g_assert (vreg
!= -1);
10281 prev_bb
= vreg_to_bb
[vreg
];
10282 if (prev_bb
== 0) {
10283 /* 0 is a valid block num */
10284 vreg_to_bb
[vreg
] = block_num
+ 1;
10285 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
/* Hard registers are already global — skip them. */
10286 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
10289 if (!get_vreg_to_inst (cfg
, vreg
)) {
10290 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10291 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
/* Allocate a variable of the right kind for the vreg (int/float/vtype). */
10295 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
, vreg
);
10298 mono_compile_create_var_for_vreg (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
, vreg
);
10301 mono_compile_create_var_for_vreg (cfg
, &ins
->klass
->byval_arg
, OP_LOCAL
, vreg
);
10304 g_assert_not_reached ();
10308 /* Flag as having been used in more than one bb */
10309 vreg_to_bb
[vreg
] = -1;
10315 /* If a variable is used in only one bblock, convert it into a local vreg */
10316 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10317 MonoInst
*var
= cfg
->varinfo
[i
];
10318 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
10320 switch (var
->type
) {
10326 #if SIZEOF_REGISTER == 8
10329 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10330 /* Enabling this screws up the fp stack on x86 */
10333 /* Arguments are implicitly global */
10334 /* Putting R4 vars into registers doesn't work currently */
10335 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (var
->klass
->byval_arg
.type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
) {
10337 * Make that the variable's liveness interval doesn't contain a call, since
10338 * that would cause the lvreg to be spilled, making the whole optimization
10341 /* This is too slow for JIT compilation */
10343 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
10345 int def_index
, call_index
, ins_index
;
10346 gboolean spilled
= FALSE
;
/* Scan the defining block for a call between def and use. */
10351 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
10352 const char *spec
= INS_INFO (ins
->opcode
);
10354 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
10355 def_index
= ins_index
;
10357 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
10358 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
10359 if (call_index
> def_index
) {
10365 if (MONO_IS_CALL (ins
))
10366 call_index
= ins_index
;
10376 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10377 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
/* Mark the variable dead; its vreg becomes a plain local vreg. */
10378 var
->flags
|= MONO_INST_IS_DEAD
;
10379 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
10386 * Compress the varinfo and vars tables so the liveness computation is faster and
10387 * takes up less space.
10390 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10391 MonoInst
*var
= cfg
->varinfo
[i
];
10392 if (pos
< i
&& cfg
->locals_start
== i
)
10393 cfg
->locals_start
= pos
;
10394 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
10396 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
10397 cfg
->varinfo
[pos
]->inst_c0
= pos
;
10398 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
10399 cfg
->vars
[pos
].idx
= pos
;
10400 #if SIZEOF_REGISTER == 4
10401 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
10402 /* Modify the two component vars too */
10405 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 1);
10406 var1
->inst_c0
= pos
;
10407 var1
= get_vreg_to_inst (cfg
, cfg
->varinfo
[pos
]->dreg
+ 2);
10408 var1
->inst_c0
= pos
;
10415 cfg
->num_varinfo
= pos
;
10416 if (cfg
->locals_start
> cfg
->num_varinfo
)
10417 cfg
->locals_start
= cfg
->num_varinfo
;
10421 * mono_spill_global_vars:
10423 * Generate spill code for variables which are not allocated to registers,
10424 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10425 * code is generated which could be optimized by the local optimization passes.
10428 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
10430 MonoBasicBlock
*bb
;
10432 int orig_next_vreg
;
10433 guint32
*vreg_to_lvreg
;
10435 guint32 i
, lvregs_len
;
10436 gboolean dest_has_lvreg
= FALSE
;
10437 guint32 stacktypes
[128];
10438 MonoInst
**live_range_start
, **live_range_end
;
10439 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
10441 *need_local_opts
= FALSE
;
10443 memset (spec2
, 0, sizeof (spec2
));
10445 /* FIXME: Move this function to mini.c */
10446 stacktypes
['i'] = STACK_PTR
;
10447 stacktypes
['l'] = STACK_I8
;
10448 stacktypes
['f'] = STACK_R8
;
10449 #ifdef MONO_ARCH_SIMD_INTRINSICS
10450 stacktypes
['x'] = STACK_VTYPE
;
10453 #if SIZEOF_REGISTER == 4
10454 /* Create MonoInsts for longs */
10455 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
10456 MonoInst
*ins
= cfg
->varinfo
[i
];
10458 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
10459 switch (ins
->type
) {
10460 #ifdef MONO_ARCH_SOFT_FLOAT
10466 g_assert (ins
->opcode
== OP_REGOFFSET
);
10468 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 1);
10470 tree
->opcode
= OP_REGOFFSET
;
10471 tree
->inst_basereg
= ins
->inst_basereg
;
10472 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
10474 tree
= get_vreg_to_inst (cfg
, ins
->dreg
+ 2);
10476 tree
->opcode
= OP_REGOFFSET
;
10477 tree
->inst_basereg
= ins
->inst_basereg
;
10478 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
10488 /* FIXME: widening and truncation */
10491 * As an optimization, when a variable allocated to the stack is first loaded into
10492 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10493 * the variable again.
10495 orig_next_vreg
= cfg
->next_vreg
;
10496 vreg_to_lvreg
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
10497 lvregs
= mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * 1024);
10501 * These arrays contain the first and last instructions accessing a given
10503 * Since we emit bblocks in the same order we process them here, and we
10504 * don't split live ranges, these will precisely describe the live range of
10505 * the variable, i.e. the instruction range where a valid value can be found
10506 * in the variables location.
10508 /* FIXME: Only do this if debugging info is requested */
10509 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10510 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
10511 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10512 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
10514 /* Add spill loads/stores */
10515 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
10518 if (cfg
->verbose_level
> 2)
10519 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
10521 /* Clear vreg_to_lvreg array */
10522 for (i
= 0; i
< lvregs_len
; i
++)
10523 vreg_to_lvreg
[lvregs
[i
]] = 0;
10527 MONO_BB_FOR_EACH_INS (bb
, ins
) {
10528 const char *spec
= INS_INFO (ins
->opcode
);
10529 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
10530 gboolean store
, no_lvreg
;
10531 int sregs
[MONO_MAX_SRC_REGS
];
10533 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10534 mono_print_ins (ins
);
10536 if (ins
->opcode
== OP_NOP
)
10540 * We handle LDADDR here as well, since it can only be decomposed
10541 * when variable addresses are known.
10543 if (ins
->opcode
== OP_LDADDR
) {
10544 MonoInst
*var
= ins
->inst_p0
;
10546 if (var
->opcode
== OP_VTARG_ADDR
) {
10547 /* Happens on SPARC/S390 where vtypes are passed by reference */
10548 MonoInst
*vtaddr
= var
->inst_left
;
10549 if (vtaddr
->opcode
== OP_REGVAR
) {
10550 ins
->opcode
= OP_MOVE
;
10551 ins
->sreg1
= vtaddr
->dreg
;
10553 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
10554 ins
->opcode
= OP_LOAD_MEMBASE
;
10555 ins
->inst_basereg
= vtaddr
->inst_basereg
;
10556 ins
->inst_offset
= vtaddr
->inst_offset
;
10560 g_assert (var
->opcode
== OP_REGOFFSET
);
10562 ins
->opcode
= OP_ADD_IMM
;
10563 ins
->sreg1
= var
->inst_basereg
;
10564 ins
->inst_imm
= var
->inst_offset
;
10567 *need_local_opts
= TRUE
;
10568 spec
= INS_INFO (ins
->opcode
);
10571 if (ins
->opcode
< MONO_CEE_LAST
) {
10572 mono_print_ins (ins
);
10573 g_assert_not_reached ();
10577 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10581 if (MONO_IS_STORE_MEMBASE (ins
)) {
10582 tmp_reg
= ins
->dreg
;
10583 ins
->dreg
= ins
->sreg2
;
10584 ins
->sreg2
= tmp_reg
;
10587 spec2
[MONO_INST_DEST
] = ' ';
10588 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10589 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10590 spec2
[MONO_INST_SRC3
] = ' ';
10592 } else if (MONO_IS_STORE_MEMINDEX (ins
))
10593 g_assert_not_reached ();
10598 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
10599 printf ("\t %.3s %d", spec
, ins
->dreg
);
10600 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
10601 for (srcindex
= 0; srcindex
< 3; ++srcindex
)
10602 printf (" %d", sregs
[srcindex
]);
10609 regtype
= spec
[MONO_INST_DEST
];
10610 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
10613 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
10614 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
10615 MonoInst
*store_ins
;
10617 MonoInst
*def_ins
= ins
;
10618 int dreg
= ins
->dreg
; /* The original vreg */
10620 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
10622 if (var
->opcode
== OP_REGVAR
) {
10623 ins
->dreg
= var
->dreg
;
10624 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
10626 * Instead of emitting a load+store, use a _membase opcode.
10628 g_assert (var
->opcode
== OP_REGOFFSET
);
10629 if (ins
->opcode
== OP_MOVE
) {
10633 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
10634 ins
->inst_basereg
= var
->inst_basereg
;
10635 ins
->inst_offset
= var
->inst_offset
;
10638 spec
= INS_INFO (ins
->opcode
);
10642 g_assert (var
->opcode
== OP_REGOFFSET
);
10644 prev_dreg
= ins
->dreg
;
10646 /* Invalidate any previous lvreg for this vreg */
10647 vreg_to_lvreg
[ins
->dreg
] = 0;
10651 #ifdef MONO_ARCH_SOFT_FLOAT
10652 if (store_opcode
== OP_STORER8_MEMBASE_REG
) {
10654 store_opcode
= OP_STOREI8_MEMBASE_REG
;
10658 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10660 if (regtype
== 'l') {
10661 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, ins
->dreg
+ 1);
10662 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10663 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, ins
->dreg
+ 2);
10664 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10665 def_ins
= store_ins
;
10668 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
10670 /* Try to fuse the store into the instruction itself */
10671 /* FIXME: Add more instructions */
10672 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
10673 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
10674 ins
->inst_imm
= ins
->inst_c0
;
10675 ins
->inst_destbasereg
= var
->inst_basereg
;
10676 ins
->inst_offset
= var
->inst_offset
;
10677 spec
= INS_INFO (ins
->opcode
);
10678 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
))) {
10679 ins
->opcode
= store_opcode
;
10680 ins
->inst_destbasereg
= var
->inst_basereg
;
10681 ins
->inst_offset
= var
->inst_offset
;
10685 tmp_reg
= ins
->dreg
;
10686 ins
->dreg
= ins
->sreg2
;
10687 ins
->sreg2
= tmp_reg
;
10690 spec2
[MONO_INST_DEST
] = ' ';
10691 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
10692 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
10693 spec2
[MONO_INST_SRC3
] = ' ';
10695 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
10696 // FIXME: The backends expect the base reg to be in inst_basereg
10697 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
10699 ins
->inst_basereg
= var
->inst_basereg
;
10700 ins
->inst_offset
= var
->inst_offset
;
10701 spec
= INS_INFO (ins
->opcode
);
10703 /* printf ("INS: "); mono_print_ins (ins); */
10704 /* Create a store instruction */
10705 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
10707 /* Insert it after the instruction */
10708 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
10710 def_ins
= store_ins
;
10713 * We can't assign ins->dreg to var->dreg here, since the
10714 * sregs could use it. So set a flag, and do it after
10717 if ((!MONO_ARCH_USE_FPSTACK
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
10718 dest_has_lvreg
= TRUE
;
10723 if (def_ins
&& !live_range_start
[dreg
]) {
10724 live_range_start
[dreg
] = def_ins
;
10725 live_range_start_bb
[dreg
] = bb
;
10732 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
10733 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
10734 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
10735 sreg
= sregs
[srcindex
];
10737 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
10738 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
10739 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
10740 MonoInst
*use_ins
= ins
;
10741 MonoInst
*load_ins
;
10742 guint32 load_opcode
;
10744 if (var
->opcode
== OP_REGVAR
) {
10745 sregs
[srcindex
] = var
->dreg
;
10746 //mono_inst_set_src_registers (ins, sregs);
10747 live_range_end
[sreg
] = use_ins
;
10748 live_range_end_bb
[sreg
] = bb
;
10752 g_assert (var
->opcode
== OP_REGOFFSET
);
10754 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
10756 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
10758 if (vreg_to_lvreg
[sreg
]) {
10759 g_assert (vreg_to_lvreg
[sreg
] != -1);
10761 /* The variable is already loaded to an lvreg */
10762 if (G_UNLIKELY (cfg
->verbose_level
> 2))
10763 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
10764 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
10765 //mono_inst_set_src_registers (ins, sregs);
10769 /* Try to fuse the load into the instruction */
10770 if ((srcindex
== 0) && (op_to_op_src1_membase (load_opcode
, ins
->opcode
) != -1)) {
10771 ins
->opcode
= op_to_op_src1_membase (load_opcode
, ins
->opcode
);
10772 sregs
[0] = var
->inst_basereg
;
10773 //mono_inst_set_src_registers (ins, sregs);
10774 ins
->inst_offset
= var
->inst_offset
;
10775 } else if ((srcindex
== 1) && (op_to_op_src2_membase (load_opcode
, ins
->opcode
) != -1)) {
10776 ins
->opcode
= op_to_op_src2_membase (load_opcode
, ins
->opcode
);
10777 sregs
[1] = var
->inst_basereg
;
10778 //mono_inst_set_src_registers (ins, sregs);
10779 ins
->inst_offset
= var
->inst_offset
;
10781 if (MONO_IS_REAL_MOVE (ins
)) {
10782 ins
->opcode
= OP_NOP
;
10785 //printf ("%d ", srcindex); mono_print_ins (ins);
10787 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
10789 if ((!MONO_ARCH_USE_FPSTACK
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
10790 if (var
->dreg
== prev_dreg
) {
10792 * sreg refers to the value loaded by the load
10793 * emitted below, but we need to use ins->dreg
10794 * since it refers to the store emitted earlier.
10798 g_assert (sreg
!= -1);
10799 vreg_to_lvreg
[var
->dreg
] = sreg
;
10800 g_assert (lvregs_len
< 1024);
10801 lvregs
[lvregs_len
++] = var
->dreg
;
10805 sregs
[srcindex
] = sreg
;
10806 //mono_inst_set_src_registers (ins, sregs);
10808 if (regtype
== 'l') {
10809 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 2, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
10810 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10811 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, sreg
+ 1, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
10812 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10813 use_ins
= load_ins
;
10816 #if SIZEOF_REGISTER == 4
10817 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
10819 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
10820 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
10821 use_ins
= load_ins
;
10825 if (var
->dreg
< orig_next_vreg
) {
10826 live_range_end
[var
->dreg
] = use_ins
;
10827 live_range_end_bb
[var
->dreg
] = bb
;
10831 mono_inst_set_src_registers (ins
, sregs
);
10833 if (dest_has_lvreg
) {
10834 g_assert (ins
->dreg
!= -1);
10835 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
10836 g_assert (lvregs_len
< 1024);
10837 lvregs
[lvregs_len
++] = prev_dreg
;
10838 dest_has_lvreg
= FALSE
;
10842 tmp_reg
= ins
->dreg
;
10843 ins
->dreg
= ins
->sreg2
;
10844 ins
->sreg2
= tmp_reg
;
10847 if (MONO_IS_CALL (ins
)) {
10848 /* Clear vreg_to_lvreg array */
10849 for (i
= 0; i
< lvregs_len
; i
++)
10850 vreg_to_lvreg
[lvregs
[i
]] = 0;
10852 } else if (ins
->opcode
== OP_NOP
) {
10854 MONO_INST_NULLIFY_SREGS (ins
);
10857 if (cfg
->verbose_level
> 2)
10858 mono_print_ins_index (1, ins
);
10862 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10864 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10865 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10867 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
10868 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
10871 if (live_range_start
[vreg
]) {
10872 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
10874 ins
->inst_c1
= vreg
;
10875 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
10877 if (live_range_end
[vreg
]) {
10878 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
10880 ins
->inst_c1
= vreg
;
10881 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
10886 g_free (live_range_start
);
10887 g_free (live_range_end
);
10888 g_free (live_range_start_bb
);
10889 g_free (live_range_end_bb
);
10894 * - use 'iadd' instead of 'int_add'
10895 * - handling ovf opcodes: decompose in method_to_ir.
10896 * - unify iregs/fregs
10897 * -> partly done, the missing parts are:
10898 * - a more complete unification would involve unifying the hregs as well, so
10899 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10900 * would no longer map to the machine hregs, so the code generators would need to
10901 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10902 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10903 * fp/non-fp branches speeds it up by about 15%.
10904 * - use sext/zext opcodes instead of shifts
10906 * - get rid of TEMPLOADs if possible and use vregs instead
10907 * - clean up usage of OP_P/OP_ opcodes
10908 * - cleanup usage of DUMMY_USE
10909 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10911 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10912 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10913 * - make sure handle_stack_args () is called before the branch is emitted
10914 * - when the new IR is done, get rid of all unused stuff
10915 * - COMPARE/BEQ as separate instructions or unify them ?
10916 * - keeping them separate allows specialized compare instructions like
10917 * compare_imm, compare_membase
10918 * - most back ends unify fp compare+branch, fp compare+ceq
10919 * - integrate mono_save_args into inline_method
10920 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10921 * - handle long shift opts on 32 bit platforms somehow: they require
10922 * 3 sregs (2 for arg1 and 1 for arg2)
10923 * - make byref a 'normal' type.
10924 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10925 * variable if needed.
10926 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10927 * like inline_method.
10928 * - remove inlining restrictions
10929 * - fix LNEG and enable cfold of INEG
10930 * - generalize x86 optimizations like ldelema as a peephole optimization
10931 * - add store_mem_imm for amd64
10932 * - optimize the loading of the interruption flag in the managed->native wrappers
10933 * - avoid special handling of OP_NOP in passes
10934 * - move code inserting instructions into one function/macro.
10935 * - try a coalescing phase after liveness analysis
10936 * - add float -> vreg conversion + local optimizations on !x86
10937 * - figure out how to handle decomposed branches during optimizations, ie.
10938 * compare+branch, op_jump_table+op_br etc.
10939 * - promote RuntimeXHandles to vregs
10940 * - vtype cleanups:
10941 * - add a NEW_VARLOADA_VREG macro
10942 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10943 * accessing vtype fields.
10944 * - get rid of I8CONST on 64 bit platforms
10945 * - dealing with the increase in code size due to branches created during opcode
10947 * - use extended basic blocks
10948 * - all parts of the JIT
10949 * - handle_global_vregs () && local regalloc
10950 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10951 * - sources of increase in code size:
10954 * - isinst and castclass
10955 * - lvregs not allocated to global registers even if used multiple times
10956 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10958 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10959 * - add all micro optimizations from the old JIT
10960 * - put tree optimizations into the deadce pass
10961 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10962 * specific function.
10963 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10964 * fcompare + branchCC.
10965 * - create a helper function for allocating a stack slot, taking into account
10966 * MONO_CFG_HAS_SPILLUP.
10968 * - merge the ia64 switch changes.
10969 * - optimize mono_regstate2_alloc_int/float.
10970 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10971 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10972 * parts of the tree could be separated by other instructions, killing the tree
10973 * arguments, or stores killing loads etc. Also, should we fold loads into other
10974 * instructions if the result of the load is used multiple times ?
10975 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10976 * - LAST MERGE: 108395.
10977 * - when returning vtypes in registers, generate IR and append it to the end of the
10978 * last bb instead of doing it in the epilog.
10979 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10987 - When to decompose opcodes:
10988 - earlier: this makes some optimizations hard to implement, since the low level IR
10989 no longer contains the necessary information. But it is easier to do.
10990 - later: harder to implement, enables more optimizations.
10991 - Branches inside bblocks:
10992 - created when decomposing complex opcodes.
10993 - branches to another bblock: harmless, but not tracked by the branch
10994 optimizations, so need to branch to a label at the start of the bblock.
10995 - branches to inside the same bblock: very problematic, trips up the local
10996 reg allocator. Can be fixed by splitting the current bblock, but that is a
10997 complex operation, since some local vregs can become global vregs etc.
10998 - Local/global vregs:
10999 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11000 local register allocator.
11001 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11002 structure, created by mono_create_var (). Assigned to hregs or the stack by
11003 the global register allocator.
11004 - When to do optimizations like alu->alu_imm:
11005 - earlier -> saves work later on since the IR will be smaller/simpler
11006 - later -> can work on more instructions
11007 - Handling of valuetypes:
11008 - When a vtype is pushed on the stack, a new temporary is created, an
11009 instruction computing its address (LDADDR) is emitted and pushed on
11010 the stack. Need to optimize cases when the vtype is used immediately as in
11011 argument passing, stloc etc.
11012 - Instead of the to_end stuff in the old JIT, simply call the function handling
11013 the values on the stack before emitting the last instruction of the bb.
11016 #endif /* DISABLE_JIT */