/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <mono/utils/memcheck.h>

#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/utils/mono-compiler.h>
#include <mono/metadata/mono-basic-block.h>

#include "jit-icalls.h"

#include "debugger-agent.h"

#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do {	\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit;	\
		}	\
	} while (0)
#define OUT_OF_MEMORY_FAILURE do {	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY);	\
		goto exception_exit;	\
	} while (0)
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* helper methods signatures */
static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
static MonoMethodSignature *helper_sig_domain_get = NULL;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
/*
 * Instruction metadata
 */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8
#define LREG IREG
#else
#define LREG 'l'
#endif
/* keep in sync with the enum in mini.h */
const char
ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3

#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
const gint8 ins_sreg_counts [] = {
#include "mini-ops.h"
};
#undef MINI_OP
#undef MINI_OP3
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
void
mono_inst_set_src_registers (MonoInst *ins, int *regs)
{
	ins->sreg1 = regs [0];
	ins->sreg2 = regs [1];
	ins->sreg3 = regs [2];
}
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	return alloc_ireg (cfg);
}

guint32
mono_alloc_freg (MonoCompile *cfg)
{
	return alloc_freg (cfg);
}

guint32
mono_alloc_preg (MonoCompile *cfg)
{
	return alloc_preg (cfg);
}

guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	return alloc_dreg (cfg, stack_type);
}

/*
 * mono_alloc_ireg_ref:
 *
 *   Allocate an IREG, and mark it as holding a GC ref.
 */
guint32
mono_alloc_ireg_ref (MonoCompile *cfg)
{
	return alloc_ireg_ref (cfg);
}

/*
 * mono_alloc_ireg_mp:
 *
 *   Allocate an IREG, and mark it as holding a managed pointer.
 */
guint32
mono_alloc_ireg_mp (MonoCompile *cfg)
{
	return alloc_ireg_mp (cfg);
}

/*
 * mono_alloc_ireg_copy:
 *
 *   Allocate an IREG with the same GC type as VREG.
 */
guint32
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
{
	if (vreg_is_ref (cfg, vreg))
		return alloc_ireg_ref (cfg);
	else if (vreg_is_mp (cfg, vreg))
		return alloc_ireg_mp (cfg);
	else
		return alloc_ireg (cfg);
}
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_MOVE;

handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_MOVE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
		return OP_MOVE;
#else
		return OP_LMOVE;
#endif
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		}
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
			return OP_XMOVE;
		return OP_VMOVE;
	case MONO_TYPE_TYPEDBYREF:
		return OP_VMOVE;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->generic_sharing_context);
		return OP_MOVE;
	default:
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
	}
	return -1;
}
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
	int i;
	MonoInst *tree;

	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	printf ("], [OUT: ");
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	printf (" ]\n");
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
}
void
mono_create_helper_signatures (void)
{
	helper_sig_domain_get = mono_create_icall_signature ("ptr");
	helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
	helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
	helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
	helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
	helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
}
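/*
 * Editor's note (a sketch, not part of the original source): the format
 * strings above appear to list the return type first, followed by one
 * token per parameter, so "ptr ptr" describes a trampoline taking one
 * pointer and returning a pointer, and "void object" one that takes an
 * object and returns nothing. A hypothetical extra helper would be
 * registered the same way:
 *
 *   static MonoMethodSignature *helper_sig_example;
 *   helper_sig_example = mono_create_icall_signature ("ptr ptr object");
 */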
/*
 * Can't put this at the beginning, since other files reference stuff from this
 * file as well.
 */
#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)

#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)

#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) {	\
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock));	\
			(tblock)->cil_code = (ip);	\
			ADD_BBLOCK (cfg, (tblock));	\
		} \
	} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif

#if SIZEOF_REGISTER == 8
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */	\
		ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)

#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);	\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);	\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {	\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
/*
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * that control flow reaches after 'from'.
 */
static void
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	MonoBasicBlock **newa;
	int i, found;

#if 0
	if (from->cil_code) {
		if (to->cil_code)
			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
		else
			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
	} else {
		if (to->cil_code)
			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
		else
			printf ("edge from entry to exit\n");
	}
#endif

	found = FALSE;
	for (i = 0; i < from->out_count; ++i) {
		if (to == from->out_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
		for (i = 0; i < from->out_count; ++i) {
			newa [i] = from->out_bb [i];
		}
		newa [i] = to;
		from->out_count++;
		from->out_bb = newa;
	}

	found = FALSE;
	for (i = 0; i < to->in_count; ++i) {
		if (from == to->in_bb [i]) {
			found = TRUE;
			break;
		}
	}
	if (!found) {
		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
		for (i = 0; i < to->in_count; ++i) {
			newa [i] = to->in_bb [i];
		}
		newa [i] = from;
		to->in_count++;
		to->in_bb = newa;
	}
}

void
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
	link_bblock (cfg, from, to);
}
/**
 * mono_find_block_region:
 *
 *   We mark each basic block with a region ID. We use that to avoid BB
 *   optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 *   about the clause owner for this block.
 *
 *   The region encodes the try/catch/filter clause that owns this block
 *   as well as the type. -1 is a special value that represents a block
 *   that is in none of try/catch/filter.
 */
static int
mono_find_block_region (MonoCompile *cfg, int offset)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
		    (offset < (clause->handler_offset)))
			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;

		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
			else
				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
		}

		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
			return ((i + 1) << 8) | clause->flags;
	}

	return -1;
}
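/*
 * Editor's sketch (not part of the original source): decoding a region
 * token produced by mono_find_block_region (). The helper name is
 * hypothetical.
 */
static G_GNUC_UNUSED int
region_token_to_clause_index (int region)
{
	/* clause indexes are stored biased by +1 in bits 8 and up */
	return (region >> 8) - 1;
}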
static GList*
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
{
	MonoMethodHeader *header = cfg->header;
	MonoExceptionClause *clause;
	int i;
	GList *res = NULL;

	for (i = 0; i < header->num_clauses; ++i) {
		clause = &header->clauses [i];
		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
			if (clause->flags == type)
				res = g_list_append (res, clause);
		}
	}
	return res;
}
static void
mono_create_spvar_for_region (MonoCompile *cfg, int region)
{
	MonoInst *var;

	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
	if (var)
		return;

	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
}

MonoInst *
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
{
	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
}

static MonoInst*
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
{
	MonoInst *var;

	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
	if (var)
		return var;

	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
	/* prevent it from being register allocated */
	var->flags |= MONO_INST_INDIRECT;

	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);

	return var;
}
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 */
void
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
{
	MonoClass *klass;

	inst->klass = klass = mono_class_from_mono_type (type);
	if (type->byref) {
		inst->type = STACK_MP;
		return;
	}

handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		inst->type = STACK_INV;
		return;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		inst->type = STACK_I4;
		return;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		inst->type = STACK_PTR;
		return;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		inst->type = STACK_OBJ;
		return;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		inst->type = STACK_I8;
		return;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		inst->type = STACK_R8;
		return;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else {
			inst->klass = klass;
			inst->type = STACK_VTYPE;
			return;
		}
	case MONO_TYPE_TYPEDBYREF:
		inst->klass = mono_defaults.typed_reference_class;
		inst->type = STACK_VTYPE;
		return;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		inst->type = STACK_OBJ;
		return;
	default:
		g_error ("unknown type 0x%02x in eval stack type", type->type);
	}
}
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
static const char
bin_num_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP,  STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8,  STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_MP,  STACK_INV, STACK_MP,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};

static const char
neg_table [] = {
	STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
};

/* reduce the size of this table */
static const char
bin_int_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};

static const char
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/*	Inv i  L  p  F  &  O  vt */
	{0},
	{0, 1, 0, 1, 0, 0, 0, 0},	/* i, int32 */
	{0, 0, 1, 0, 0, 0, 0, 0},	/* L, int64 */
	{0, 1, 0, 1, 0, 2, 4, 0},	/* p, ptr */
	{0, 0, 0, 0, 1, 0, 0, 0},	/* F, R8 */
	{0, 0, 0, 2, 0, 1, 0, 0},	/* &, managed pointer */
	{0, 0, 0, 4, 0, 0, 3, 0},	/* O, reference */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* vt value type */
};
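/*
 * Editor's sketch (not part of the original source) of how this table is
 * consulted in type_from_op () below: a zero entry marks an IL comparison
 * as invalid, e.g. comparing int32 against int64.
 */
static G_GNUC_UNUSED gboolean
bin_comp_is_valid_example (MonoInst *src1, MonoInst *src2)
{
	return bin_comp_table [src1->type] [src2->type] != 0;
}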
/* reduce the size of this table */
static const char
shift_table [STACK_MAX] [STACK_MAX] = {
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I4,  STACK_INV, STACK_I4,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_I8,  STACK_INV, STACK_I8,  STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
	{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
};
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
 */
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
static const guint16
binops_op_map [STACK_MAX] = {
	0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
};

/* handles from CEE_NEG to CEE_CONV_U8 */
static const guint16
unops_op_map [STACK_MAX] = {
	0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
};

/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
static const guint16
ovfops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
};

/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
static const guint16
ovf2ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
};

/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
static const guint16
ovf3ops_op_map [STACK_MAX] = {
	0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
};

/* handles from CEE_BEQ to CEE_BLT_UN */
static const guint16
beqops_op_map [STACK_MAX] = {
	0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
};

/* handles from CEE_CEQ to CEE_CLT_UN */
static const guint16
ceqops_op_map [STACK_MAX] = {
	0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
};
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 */
static void
type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {

	switch (ins->opcode) {
	/* binops */
	case CEE_ADD:
	case CEE_SUB:
	case CEE_MUL:
	case CEE_DIV:
	case CEE_REM:
		/* FIXME: check unverifiable args for STACK_MP */
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case CEE_DIV_UN:
	case CEE_REM_UN:
	case CEE_AND:
	case CEE_OR:
	case CEE_XOR:
		ins->type = bin_int_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case CEE_SHL:
	case CEE_SHR:
	case CEE_SHR_UN:
		ins->type = shift_table [src1->type] [src2->type];
		ins->opcode += binops_op_map [ins->type];
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
	case OP_ICOMPARE:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE;
		else if (src1->type == STACK_R8)
			ins->opcode = OP_FCOMPARE;
		else
			ins->opcode = OP_ICOMPARE;
		break;
	case OP_ICOMPARE_IMM:
		ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
		if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
			ins->opcode = OP_LCOMPARE_IMM;
		break;
	case CEE_BEQ:
	case CEE_BGE:
	case CEE_BGT:
	case CEE_BLE:
	case CEE_BLT:
	case CEE_BNE_UN:
	case CEE_BGE_UN:
	case CEE_BGT_UN:
	case CEE_BLE_UN:
	case CEE_BLT_UN:
		ins->opcode += beqops_op_map [src1->type];
		break;
	case OP_CEQ:
		ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4 : STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	case OP_CGT:
	case OP_CGT_UN:
	case OP_CLT:
	case OP_CLT_UN:
		ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4 : STACK_INV;
		ins->opcode += ceqops_op_map [src1->type];
		break;
	/* unops */
	case CEE_NEG:
		ins->type = neg_table [src1->type];
		ins->opcode += unops_op_map [ins->type];
		break;
	case CEE_NOT:
		if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
			ins->type = src1->type;
		else
			ins->type = STACK_INV;
		ins->opcode += unops_op_map [ins->type];
		break;
	case CEE_CONV_I1:
	case CEE_CONV_I2:
	case CEE_CONV_I4:
	case CEE_CONV_U4:
		ins->type = STACK_I4;
		ins->opcode += unops_op_map [src1->type];
		break;
	case CEE_CONV_R_UN:
		ins->type = STACK_R8;
		switch (src1->type) {
		case STACK_I4:
		case STACK_PTR:
			ins->opcode = OP_ICONV_TO_R_UN;
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_R_UN;
			break;
		}
		break;
	case CEE_CONV_OVF_I1:
	case CEE_CONV_OVF_U1:
	case CEE_CONV_OVF_I2:
	case CEE_CONV_OVF_U2:
	case CEE_CONV_OVF_I4:
	case CEE_CONV_OVF_U4:
		ins->type = STACK_I4;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case CEE_CONV_OVF_I_UN:
	case CEE_CONV_OVF_U_UN:
		ins->type = STACK_PTR;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case CEE_CONV_OVF_I1_UN:
	case CEE_CONV_OVF_I2_UN:
	case CEE_CONV_OVF_I4_UN:
	case CEE_CONV_OVF_U1_UN:
	case CEE_CONV_OVF_U2_UN:
	case CEE_CONV_OVF_U4_UN:
		ins->type = STACK_I4;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case CEE_CONV_U:
		ins->type = STACK_PTR;
		switch (src1->type) {
		case STACK_I4:
			ins->opcode = OP_ICONV_TO_U;
			break;
		case STACK_PTR:
		case STACK_MP:
#if SIZEOF_VOID_P == 8
			ins->opcode = OP_LCONV_TO_U;
#else
			ins->opcode = OP_MOVE;
#endif
			break;
		case STACK_I8:
			ins->opcode = OP_LCONV_TO_U;
			break;
		case STACK_R8:
			ins->opcode = OP_FCONV_TO_U;
			break;
		}
		break;
	case CEE_CONV_I8:
	case CEE_CONV_U8:
		ins->type = STACK_I8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case CEE_CONV_OVF_I8:
	case CEE_CONV_OVF_U8:
		ins->type = STACK_I8;
		ins->opcode += ovf3ops_op_map [src1->type];
		break;
	case CEE_CONV_OVF_U8_UN:
	case CEE_CONV_OVF_I8_UN:
		ins->type = STACK_I8;
		ins->opcode += ovf2ops_op_map [src1->type];
		break;
	case CEE_CONV_R4:
	case CEE_CONV_R8:
		ins->type = STACK_R8;
		ins->opcode += unops_op_map [src1->type];
		break;
	case OP_CKFINITE:
		ins->type = STACK_R8;
		break;
	case CEE_CONV_U2:
	case CEE_CONV_U1:
		ins->type = STACK_I4;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case CEE_CONV_I:
	case CEE_CONV_OVF_I:
	case CEE_CONV_OVF_U:
		ins->type = STACK_PTR;
		ins->opcode += ovfops_op_map [src1->type];
		break;
	case CEE_ADD_OVF:
	case CEE_ADD_OVF_UN:
	case CEE_MUL_OVF:
	case CEE_MUL_OVF_UN:
	case CEE_SUB_OVF:
	case CEE_SUB_OVF_UN:
		ins->type = bin_num_table [src1->type] [src2->type];
		ins->opcode += ovfops_op_map [src1->type];
		if (ins->type == STACK_R8)
			ins->type = STACK_INV;
		break;
	case OP_LOAD_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI1_MEMBASE:
	case OP_LOADU1_MEMBASE:
	case OP_LOADI2_MEMBASE:
	case OP_LOADU2_MEMBASE:
	case OP_LOADI4_MEMBASE:
	case OP_LOADU4_MEMBASE:
		ins->type = STACK_PTR;
		break;
	case OP_LOADI8_MEMBASE:
		ins->type = STACK_I8;
		break;
	case OP_LOADR4_MEMBASE:
	case OP_LOADR8_MEMBASE:
		ins->type = STACK_R8;
		break;
	default:
		g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
		break;
	}

	if (ins->type == STACK_MP)
		ins->klass = mono_defaults.object_class;
}
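/*
 * Worked example (editor's sketch, not in the original source): for
 * CEE_ADD with two STACK_I4 operands, bin_num_table yields STACK_I4,
 * and "ins->opcode += binops_op_map [STACK_I4]" rewrites CEE_ADD into
 * the type-specific OP_IADD, since the map stores the distance
 * OP_IADD-CEE_ADD.
 */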
static const char
ldind_type [] = {
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
};

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
{
	if (!cfg->domainvar)
		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	return cfg->domainvar;
}

/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling.
 */
MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	if (!cfg->compile_aot)
		return NULL;
	if (!cfg->got_var) {
		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
	}
	return cfg->got_var;
#else
	return NULL;
#endif
}

static MonoInst *
mono_get_vtable_var (MonoCompile *cfg)
{
	g_assert (cfg->generic_sharing_context);

	if (!cfg->rgctx_var) {
		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* force the var to be stack allocated */
		cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
	}

	return cfg->rgctx_var;
}
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
	case STACK_MP:
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
	default:
		g_error ("stack type %d to monotype not handled\n", ins->type);
	}
	return NULL;
}
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	t = mono_type_get_underlying_type (t);
	switch (t->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case CEE_LDELEM_I:
	case CEE_STELEM_I:
		return mono_defaults.int_class;
	case CEE_LDELEM_I1:
	case CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case CEE_LDELEM_I2:
	case CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case CEE_LDELEM_I4:
	case CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case CEE_LDELEM_I8:
	case CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case CEE_LDELEM_R4:
	case CEE_STELEM_R4:
		return mono_defaults.single_class;
	case CEE_LDELEM_R8:
	case CEE_STELEM_R8:
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the join point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	locals = bb->out_stack;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex++;
		}
	}
}
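/*
 * Worked example (editor's sketch, not in the original source): if a
 * bblock ends with two values on the eval stack, the code above stores
 * them into two shared locals (bb->out_stack) before the branch; each
 * successor bblock finds the same locals in its in_stack and reloads
 * them on entry, so the stack state survives the basic block boundary.
 */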
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
static void
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
	} else {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
	}
}
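/*
 * Editor's note (a sketch, not in the original source): in the non-AOT
 * path above, the interface offset table sits immediately before the
 * vtable in memory, so entry i is read from
 * vtable_reg - (i + 1) * SIZEOF_VOID_P; hence the negative offset.
 */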
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
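/*
 * Editor's sketch (not in the original source) of the bit test emitted
 * above, written as plain C, with 'bitmap' standing for the interface
 * bitmap loaded from base_reg + offset:
 *
 *   guint8 byte = bitmap [iid >> 3];              // one bit per interface id
 *   gboolean implements = byte & (1 << (iid & 7));
 */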
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}

/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
/*
 * Emit code which checks whether the interface id of @klass is smaller than
 * the value given by max_iid_reg.
 */
static void
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	if (cfg->compile_aot) {
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
	}
	else
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	if (false_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
}

/* Same as above, but obtains max_iid from a vtable */
static void
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}

/* Same as above, but obtains max_iid from a klass */
static void
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
{
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
}
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}

static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}
static void
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}

/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
static void
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_bit_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
static void
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
{
	if (klass_inst) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}

static void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}

static void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);

static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}

static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if necessary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
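/*
 * Usage sketch (added by the editor, not in the original source):
 * zeroing a 12-byte, 4-byte-aligned region at [destreg + 0] emits three
 * 4-byte immediate stores, while an unaligned buffer (align < 4) falls
 * back to byte-sized stores:
 *
 *   mini_emit_memset (cfg, destreg, 0, 12, 0, 4);
 */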
static void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if necessary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

	type = mini_get_basic_type_from_generic (gsctx, type);
handle_enum:
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
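/*
 * Editor's sketch (not in the original source): a direct call returning
 * int32 maps to OP_CALL, an indirect (calli) one to OP_CALL_REG, and a
 * virtual call returning a value type to OP_VCALLVIRT, e.g.
 *
 *   ret_type_to_call_opcode (sig->ret, FALSE, TRUE, NULL)
 *
 * for a virtual call compiled without generic sharing.
 */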
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca return 'transient pointers', which can be stored
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
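/*
 * Editor's sketch (not in the original source): storing a STACK_I8 value
 * into an int32 local makes target_type_is_incompatible () return
 * nonzero, which callers treat as invalid IL; an explicit conv.i4 in the
 * IL stream would be required first.
 */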
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 */
static int
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
{
	MonoType *simple_type;
	int i;

	if (sig->hasthis) {
		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
			return 1;
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
				return 1;
			continue;
		}
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
handle_enum:
		switch (simple_type->type) {
		case MONO_TYPE_VOID:
			return 1;
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
				return 1;
			continue;
		case MONO_TYPE_I:
		case MONO_TYPE_U:
		case MONO_TYPE_PTR:
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
				return 1;
			continue;
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
				return 1;
			continue;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			if (args [i]->type != STACK_I8)
				return 1;
			continue;
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
			if (args [i]->type != STACK_R8)
				return 1;
			continue;
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
				goto handle_enum;
			}
			if (args [i]->type != STACK_VTYPE)
				return 1;
			continue;
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
				return 1;
			continue;
		case MONO_TYPE_GENERICINST:
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
			goto handle_enum;

		default:
			g_error ("unknown type 0x%02x in check_call_signature",
				 simple_type->type);
		}
	}
	return 0;
}
static int
callvirt_to_call (int opcode)
{
	switch (opcode) {
	case OP_CALLVIRT:
		return OP_CALL;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL;
	case OP_FCALLVIRT:
		return OP_FCALL;
	case OP_VCALLVIRT:
		return OP_VCALL;
	case OP_LCALLVIRT:
		return OP_LCALL;
	default:
		g_assert_not_reached ();
	}
	return -1;
}

static int
callvirt_to_call_membase (int opcode)
{
	switch (opcode) {
	case OP_CALLVIRT:
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
	case OP_FCALLVIRT:
		return OP_FCALL_MEMBASE;
	case OP_LCALLVIRT:
		return OP_LCALL_MEMBASE;
	case OP_VCALLVIRT:
		return OP_VCALL_MEMBASE;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
#ifdef MONO_ARCH_HAVE_IMT
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;

			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		call->imt_arg_reg = method_reg;

#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
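/*
 * Worked example (added for clarity, not original text): IMT slots live
 * *before* the vtable entries, so callers computing
 * (imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P get a negative offset.
 * Assuming MONO_IMT_SIZE is 19 and 8 byte pointers, a method hashed to
 * imt_slot 3 yields (3 - 19) * 8 = -128, i.e. the call loads its target
 * from 128 bytes before the vtable pointer.
 */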
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
{
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	ji->ip.i = ip;
	ji->type = type;
	ji->data.target = target;

	return ji;
}
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
	call->rgctx_arg_reg = rgctx_reg;
#endif
#else
	NOT_IMPLEMENTED;
#endif
}
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
	MonoCallInst *call;
	int rgctx_reg = -1;

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);

	call->inst.sreg1 = addr->dreg;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;
	int rgctx_reg = 0;

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	context_used = mono_method_check_context_used (method);

	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);

	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr, NULL);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MonoInst *dummy_use;

			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			/* We must emit a dummy use here because the delegate trampoline will
			replace the 'this' argument with the delegate target making this activation
			no longer a root for the delegate.
			This is an issue for delegates that target collectible code such as dynamic
			methods of GC'able assemblies.

			For a test case look into #667921.

			FIXME: a dummy use is not the best way to do it as the local register allocator
			will put it on a caller save register and spill it around the call.
			Ideally, we would either put it on a callee save register or only do the store part.
			 */
			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else {
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

			vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
			if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
				slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
				if (mono_use_imt) {
					guint32 imt_slot = mono_method_get_imt_slot (method);
					emit_imt_argument (cfg, call, imt_arg);
					slot_reg = vtable_reg;
					call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
				}
#endif
				if (slot_reg == -1) {
					slot_reg = alloc_preg (cfg);
					mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
					call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
				}
			} else {
				slot_reg = vtable_reg;
				call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
				if (imt_arg) {
					g_assert (mono_method_signature (method)->generic_param_count);
					emit_imt_argument (cfg, call, imt_arg);
				}
#endif
			}

			call->inst.sreg1 = slot_reg;
			call->virtual = TRUE;
		}
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
static MonoInst*
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
{
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
}
MonoInst*
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
					   MonoInst **args)
{
	MonoCallInst *call;

	g_assert (sig);

	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
	call->fptr = func;

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
inline static MonoInst*
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
{
	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);

	g_assert (info);

	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
}
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
{
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	MonoInst *ins;

	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
	return ins;
}
static MonoInst*
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
{
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
			int widen_op = -1;

			/*
			 * Native code might return non register sized integers
			 * without initializing the upper bits.
			 */
			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
			case OP_LOADI1_MEMBASE:
				widen_op = OP_ICONV_TO_I1;
				break;
			case OP_LOADU1_MEMBASE:
				widen_op = OP_ICONV_TO_U1;
				break;
			case OP_LOADI2_MEMBASE:
				widen_op = OP_ICONV_TO_I2;
				break;
			case OP_LOADU2_MEMBASE:
				widen_op = OP_ICONV_TO_U2;
				break;
			default:
				break;
			}

			if (widen_op != -1) {
				int dreg = alloc_preg (cfg);
				MonoInst *widen;

				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
				widen->type = ins->type;
				ins = widen;
			}
		}
	}

	return ins;
}
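/*
 * Illustrative example (added note): a P/Invoke returning 'unsigned char' on
 * amd64 may leave bits 8-63 of the result register undefined, so the
 * OP_ICONV_TO_U1 selected above zero-extends the value before managed code
 * compares or stores it; the signed variants sign-extend instead.
 */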
static MonoMethod*
get_memcpy_method (void)
{
	static MonoMethod *memcpy_method = NULL;
	if (!memcpy_method) {
		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
		if (!memcpy_method)
			g_error ("Old corlib found. Install a new one");
	}
	return memcpy_method;
}
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
static void
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
{
	int card_table_shift_bits;
	gpointer card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;
	gboolean has_card_table_wb = FALSE;

	if (!cfg->gen_write_barriers)
		return;

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
	has_card_table_wb = TRUE;
#endif

	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
		MonoInst *wbarrier;

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		if (value)
			wbarrier->sreg2 = value->dreg;
		else
			wbarrier->sreg2 = value_reg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		int offset_reg = alloc_preg (cfg);
		int card_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
		} else {
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = card_table;
			ins->dreg = card_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	if (value) {
		EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
	} else {
		MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
		dummy_use->sreg1 = value_reg;
		MONO_ADD_INS (cfg->cbb, dummy_use);
	}
}
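/*
 * Sketch of the card marking emitted above (explanatory note, not original
 * text):
 *
 *   card = card_table + ((ptr >> card_table_shift_bits) & card_table_mask);
 *   *card = 1;
 *
 * i.e. one byte in the card table covers 2^card_table_shift_bits bytes of
 * heap, and the collector later scans dirtied cards for cross-generation
 * references.
 */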
static gboolean
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs [4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)
		return FALSE;

	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);

		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
		return TRUE;
	}

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	while (size >= SIZEOF_VOID_P) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);

		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
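/*
 * Illustrative example (added, not original text): for a valuetype laid out
 * as { MonoObject *a; int b; MonoObject *c; } on a 64 bit target,
 * create_write_barrier_bitmap () produces need_wb = 0x5 (bits 0 and 2 set),
 * so only the first and third pointer-sized stores in the unrolled copy
 * above are followed by emit_write_barrier () calls.
 */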
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [4];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

	/* if native is true there should be no references in the struct */
	if (cfg->gen_write_barriers && klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
				return;
			} else if (context_used) {
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					mono_class_compute_gc_descriptor (klass);
				}
			}

			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
			return;
		}
	}

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
static MonoMethod*
get_memset_method (void)
{
	static MonoMethod *memset_method = NULL;
	if (!memset_method) {
		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
		if (!memset_method)
			g_error ("Old corlib found. Install a new one");
	}
	return memset_method;
}
void
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
{
	MonoInst *iargs [3];
	int n;
	guint32 align;
	MonoMethod *memset_method;

	/* FIXME: Optimize this for the case when dest is an LDADDR */

	mono_class_init (klass);
	n = mono_class_value_size (klass, &align);

	if (n <= sizeof (gpointer) * 5) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
	} else {
		memset_method = get_memset_method ();
		iargs [0] = dest;
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
	}
}
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this = NULL;

	g_assert (cfg->generic_sharing_context);

	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_ARGLOAD (cfg, this, 0);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg;

		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
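/*
 * Summary (added note): the runtime generic context is reached differently
 * per method kind: shared generic methods load the mrgctx from a dedicated
 * variable, static and valuetype methods go through the vtable variable
 * (optionally indirecting through MonoMethodRuntimeGenericContext), and
 * instance methods on reference types simply load this->vtable.
 */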
static MonoJumpInfoRgctxEntry *
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
{
	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
	res->method = method;
	res->in_mrgctx = in_mrgctx;
	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
	res->data->type = patch_type;
	res->data->data.target = patch_data;
	res->info_type = info_type;

	return res;
}
static inline MonoInst*
emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
{
	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
}
static MonoInst*
emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
					  MonoClass *klass, int rgctx_type)
{
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
 * normal constants, else emit a load from the rgctx.
 */
static MonoInst*
emit_get_rgctx_method (MonoCompile *cfg, int context_used,
					   MonoMethod *cmethod, int rgctx_type)
{
	if (!context_used) {
		MonoInst *ins;

		switch (rgctx_type) {
		case MONO_RGCTX_INFO_METHOD:
			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
			return ins;
		case MONO_RGCTX_INFO_METHOD_RGCTX:
			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
			return ins;
		default:
			g_assert_not_reached ();
		}
	} else {
		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return emit_rgctx_fetch (cfg, rgctx, entry);
	}
}
static MonoInst*
emit_get_rgctx_field (MonoCompile *cfg, int context_used,
					  MonoClassField *field, int rgctx_type)
{
	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

	return emit_rgctx_fetch (cfg, rgctx, entry);
}
/*
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	if (COMPILE_LLVM (cfg))
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
	else
		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
static void
save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
{
	if (mini_get_debug_options ()->better_cast_details) {
		int to_klass_reg = alloc_preg (cfg);
		int vtable_reg = alloc_preg (cfg);
		int klass_reg = alloc_preg (cfg);
		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);

		if (!tls_get) {
			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
			exit (1);
		}

		MONO_ADD_INS (cfg->cbb, tls_get);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
	}
}
static void
reset_cast_details (MonoCompile *cfg)
{
	/* Reset the variables holding the cast details */
	if (mini_get_debug_options ()->better_cast_details) {
		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);

		MONO_ADD_INS (cfg->cbb, tls_get);
		/* It is enough to reset the from field */
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
	}
}
/*
 * On return the caller must check @array_class for load errors
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	save_cast_details (cfg, array_class, obj->dreg);

	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");

	reset_cast_details (cfg);
}
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		return mono_emit_method_call (cfg, method, &val, NULL);
	}
}
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	/* Check that the object is not an array */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
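/*
 * Added note: the OP_ADD_IMM above computes the classic unbox result, a
 * managed pointer to the value payload that sits right after the object
 * header:  value_addr = obj + sizeof (MonoObject).
 */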
/*
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (context_used) {
		MonoInst *data;
		int rgctx_info;
		MonoInst *iargs [2];

		/*
		  FIXME: we cannot get managed_alloc here because we can't get
		  the class's vtable (because it's not a closed class)

		  MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		  MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
		*/

		if (cfg->opt & MONO_OPT_SHARED)
			rgctx_info = MONO_RGCTX_INFO_KLASS;
		else
			rgctx_info = MONO_RGCTX_INFO_VTABLE;
		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);

		if (cfg->opt & MONO_OPT_SHARED) {
			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
			iargs [1] = data;
			alloc_ftn = mono_object_new;
		} else {
			iargs [0] = data;
			alloc_ftn = mono_object_new_specific;
		}

		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
	}

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
			cfg->exception_ptr = klass;
			return NULL;
		}

#ifndef MONO_CROSS_COMPILE
		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif

		if (managed_alloc) {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		} else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
{
	MonoInst *alloc, *ins;

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);

		if (context_used) {
			/* FIXME: What if the class is shared?  We might not
			   have to get the method address from the RGCTX. */
			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
		} else {
			return mono_emit_method_call (cfg, method, &val, NULL);
		}
	}

	alloc = handle_alloc (cfg, klass, TRUE, context_used);
	if (!alloc)
		return NULL;

	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);

	return alloc;
}
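/*
 * Added note: boxing is the dual of the unbox above: allocate an object of
 * KLASS, then copy the value into it at offset sizeof (MonoObject), which is
 * what the EMIT_NEW_STORE_MEMBASE_TYPE () performs.
 */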
static gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
	int i;
	MonoGenericContainer *container;
	MonoGenericInst *ginst;

	if (klass->generic_class) {
		container = klass->generic_class->container_class->generic_container;
		ginst = klass->generic_class->context.class_inst;
	} else if (klass->generic_container && context_used) {
		container = klass->generic_container;
		ginst = container->context.class_inst;
	} else {
		return FALSE;
	}

	for (i = 0; i < container->type_argc; ++i) {
		MonoType *type;
		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
			continue;
		type = ginst->type_argv [i];
		if (mini_type_is_reference (cfg, type))
			return TRUE;
	}
	return FALSE;
}
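/*
 * Example (added note): IEnumerable<string> matches this predicate because
 * its T is covariant and instantiated with a reference type, so a runtime
 * cast to IEnumerable<object> can succeed; the castclass/isinst paths below
 * must therefore use the cache-based marshal helpers instead of a simple
 * vtable compare.
 */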
// FIXME: This doesn't work yet (class libs tests fail?)
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
			MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return mono_emit_method_call (cfg, mono_castclass, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
			MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return mono_emit_method_call (cfg, mono_isinst, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	MonoInst *ptr;
	int dreg;
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	gpointer *code_slot;

	obj = handle_alloc (cfg, klass, FALSE, 0);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target, 0);
		}
	}

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
	if (cfg->gen_write_barriers) {
		dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
		emit_write_barrier (cfg, ptr, method_ins, 0);
	}
	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
static MonoInst*
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
{
	MonoJitICallInfo *info;

	/* Need to register the icall so it gets an icall wrapper */
	info = mono_get_array_new_va_icall (rank);

	cfg->flags |= MONO_CFG_HAS_VARARGS;

	/* mono_array_new_va () needs a vararg calling convention */
	cfg->disable_llvm = TRUE;

	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
}
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	} else {
		MONO_ADD_INS (cfg->bb_entry, getaddr);
	}

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
static int inline_limit;
static gboolean inline_limit_inited;

static gboolean
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
{
	MonoMethodHeaderSummary header;
	MonoVTable *vtable;
#ifdef MONO_ARCH_SOFT_FLOAT
	MonoMethodSignature *sig = mono_method_signature (method);
	int i;
#endif

	if (cfg->generic_sharing_context)
		return FALSE;

	if (cfg->inline_depth > 10)
		return FALSE;

#ifdef MONO_ARCH_HAVE_LMF_OPS
	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
		 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
	    !MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret) && !mini_class_is_system_array (method->klass))
		return TRUE;
#endif

	if (!mono_method_get_header_summary (method, &header))
		return FALSE;

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
	    (method->klass->marshalbyref) ||
	    header.has_clauses)
		return FALSE;

	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (!inline_limit_inited) {
		if (getenv ("MONO_INLINELIMIT"))
			inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
		else
			inline_limit = INLINE_LENGTH_LIMIT;
		inline_limit_inited = TRUE;
	}
	if (header.code_size >= inline_limit)
		return FALSE;
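	/*
	 * Example (illustrative): the IL size cap defaults to
	 * INLINE_LENGTH_LIMIT bytes of IL and can be tuned at run time
	 * without rebuilding the runtime:
	 *
	 *   MONO_INLINELIMIT=100 mono app.exe
	 *
	 * Larger values make bigger methods eligible for inlining, trading
	 * JIT time and code size for call overhead.
	 */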
	/*
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	 */
	if (!(cfg->opt & MONO_OPT_SHARED)) {
		if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				/*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
				if (!method->klass->runtime_info)
					/* No vtable created yet */
					return FALSE;
				vtable = mono_class_vtable (cfg->domain, method->klass);
				if (!vtable)
					return FALSE;
				/* This ensures inlining cannot trigger .cctors: */
				/* too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
					return FALSE;
				mono_runtime_class_init (vtable);
			}
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
				return FALSE;
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable)
				return FALSE;
			if (!vtable->initialized)
				return FALSE;
		}
	} else {
		/*
		 * If we're compiling for shared code
		 * the cctor will need to be run at aot method load time, for example,
		 * or at the end of the compilation of the inlining method.
		 */
		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
			return FALSE;
	}

	/*
	 * CAS - do not inline methods with declarative security
	 * Note: this has to be before any possible return TRUE;
	 */
	if (mono_method_has_declsec (method))
		return FALSE;

#ifdef MONO_ARCH_SOFT_FLOAT
	if (sig->ret && sig->ret->type == MONO_TYPE_R4)
		return FALSE;
	for (i = 0; i < sig->param_count; ++i)
		if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
			return FALSE;
#endif

	return TRUE;
}
static gboolean
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
{
	if (vtable->initialized && !cfg->compile_aot)
		return FALSE;

	if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
		return FALSE;

	if (!mono_class_needs_cctor_run (vtable->klass, method))
		return FALSE;

	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
		/* The initialization is already done before the method is called */
		return FALSE;

	return TRUE;
}
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	add_reg = alloc_ireg_mp (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
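/*
 * The address computed by mini_emit_ldelema_1_ins () is &arr->vector [index],
 * i.e.
 *
 *   arr + G_STRUCT_OFFSET (MonoArray, vector) + index * size
 *
 * On x86/amd64, for element sizes 1/2/4/8 the whole computation folds into a
 * single LEA: fast_log2 maps the element size to the LEA shift amount
 * (1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3); the -1 entries mark sizes that are
 * excluded by the size check and never indexed.
 */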
#ifndef MONO_ARCH_EMULATE_MUL_DIV
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = klass;
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
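/*
 * For rank-2 arrays the code above computes, after the lower-bound
 * adjustment (realidx_i = index_i - lower_bound_i) and the unsigned bounds
 * checks:
 *
 *   addr = arr + G_STRUCT_OFFSET (MonoArray, vector)
 *        + ((realidx1 * length2) + realidx2) * size
 *
 * i.e. the usual row-major element offset.
 */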
static MonoInst*
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
{
	int rank;
	int element_size;
	MonoInst *addr;
	MonoMethod *addr_method;

	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

	if (rank == 1)
		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);

#ifndef MONO_ARCH_EMULATE_MUL_DIV
	/* emit_ldelema_2 depends on OP_LMUL */
	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
	}
#endif

	element_size = mono_class_array_element_size (cmethod->klass->element_class);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);

	return addr;
}
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}

static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;

/**
 * mono_set_break_policy:
 * policy_callback: the new callback function
 *
 * Allow embedders to decide whether to actually obey breakpoint instructions
 * (both break IL instructions and Debugger.Break () method calls), for example
 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * untrusted or semi-trusted code.
 *
 * @policy_callback will be called every time a break point instruction needs to
 * be inserted with the method argument being the method that calls Debugger.Break()
 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * if it wants the breakpoint to not be effective in the given method.
 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
void
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
{
	if (policy_callback)
		break_policy_func = policy_callback;
	else
		break_policy_func = always_insert_breakpoint;
}

static gboolean
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
		return TRUE;
	case MONO_BREAK_POLICY_NEVER:
		return FALSE;
	case MONO_BREAK_POLICY_ON_DBG:
		return mono_debug_using_mono_debugger ();
	default:
		g_warning ("Incorrect value returned from break policy callback");
		return FALSE;
	}
}
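/*
 * Example (illustrative embedder code, not part of this file): a host that
 * wants Debugger.Break () to be a no-op in untrusted assemblies could
 * install a policy like the following, where is_untrusted () stands in for
 * a hypothetical embedder-provided predicate:
 *
 *   static MonoBreakPolicy
 *   my_break_policy (MonoMethod *method)
 *   {
 *           if (is_untrusted (method->klass->image->assembly))
 *                   return MONO_BREAK_POLICY_NEVER;
 *           return MONO_BREAK_POLICY_ALWAYS;
 *   }
 *
 *   mono_set_break_policy (my_break_policy);
 */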
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
	}
#endif

	return ins;
}
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
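	/*
	 * Note on the String intrinsics above: they rely on the fixed layout of
	 * MonoString. get_Chars becomes a length check plus a 16-bit load from
	 * chars + 2 * index (a single LEA + load on x86/amd64), while get_Length
	 * is kept as the coarse OP_STRLEN so later passes can still optimize or
	 * eliminate it.
	 */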
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;
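			/*
			 * 2654435761 is Knuth's multiplicative hash constant (about
			 * 2^32 / golden ratio); combined with the shift it scrambles
			 * the object address into a usable hash code. This is only
			 * valid while object addresses are stable, hence the
			 * !mono_gc_is_moving () guard on this fast path.
			 */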

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {

		/* FIXME this should be integrated to the check below once we support the trampoline version */
#if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
			MonoMethod *fast_method = NULL;

			/*FIXME fix LLVM and AOT support*/
			if (COMPILE_LLVM (cfg) || cfg->compile_aot)
				return NULL;

			/* Avoid infinite recursion */
			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
				return NULL;

			fast_method = mono_monitor_get_fast_path (cmethod);
			if (!fast_method)
				return NULL;

			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
		}
#endif

#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
						NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
						NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
		}
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
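		/*
		 * Note: the OP_ATOMIC_ADD_NEW_* opcodes used above yield the
		 * updated value of the memory location, matching the semantics of
		 * Interlocked.Increment/Decrement/Add, which all return the new
		 * value rather than the original one.
		 */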
#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}

			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
		}
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branchless code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
/*
 * This entry point could be used later for arbitrary method
 * redirection.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
{
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /*Should not fail since it is System.String*/
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
#endif
			if (!managed_alloc)
				return NULL;
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
		}
	}
	return NULL;
}
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
{
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
	}
}
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;
		else
			limit = (char *) "";
	}

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif

#if (MONO_INLINE_CALLER_LIMITED_METHODS)
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = (char *) "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
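/*
 * Example (illustrative): both filters do a prefix match against the full
 * method name, so inlining decisions can be bisected from the command line,
 * e.g.
 *
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT="System.String:Concat" mono app.exe
 *
 * only allows inlining of callees whose full name starts with that prefix.
 */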
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}

		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as a bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed in simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with a value bigger than OP_LAST.
 * At this point the IR can be handed over to an interpreter, a dumb code generator
 * or to the optimizing code generator that will translate it to SSA form.
 *
 * Profiling directed optimizations.
 * We may compile by default with few or no optimizations and instrument the code
 * or the user may indicate what methods to optimize the most either in a config file
 * or through repeated runs where the compiler applies offline the optimizations to
 * each method and then decides if it was worth it.
 */

#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
static gboolean
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
{
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];

	return b == NULL || b == bb;
}
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
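/*
 * Note: get_basic_blocks () is a pre-pass over the raw IL stream. It builds
 * no IR; it only registers every branch target (and the fall-through
 * address after each branch) as a basic block start via GET_BBLOCK, so the
 * main decode loop in mono_method_to_ir () already knows where blocks begin.
 * Blocks that contain a throw are flagged out_of_line so the block layout
 * pass can move these cold paths away from the hot code.
 */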
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method;

	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);

	method = mono_get_method_full (m->klass->image, token, klass, context);

	return method;
}

static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
{
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);

	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
		method = NULL;

	return method;
}

static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
{
	MonoClass *klass;

	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
	else
		klass = mono_class_get_full (method->klass->image, token, context);
	if (klass)
		mono_class_init (klass);
	return klass;
}
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
static gboolean
check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
static MonoMethod*
throw_exception (void)
{
	static MonoMethod *method = NULL;

	if (!method) {
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
	}
	g_assert (method);
	return method;
}

static void
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
{
	MonoMethod *thrower = throw_exception ();
	MonoInst *args [1];

	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
}
/*
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
static MonoMethod*
get_original_method (MonoMethod *method)
{
	if (method->wrapper_type == MONO_WRAPPER_NONE)
		return method;

	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
		return NULL;

	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
}

static void
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
	if (ex)
		emit_throw_exception (cfg, ex);
}

static void
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
{
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
	if (ex)
		emit_throw_exception (cfg, ex);
}
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1;
			break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2;
			break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4;
			break;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8;
			break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#else
			size = 8;
			break;
#endif
#endif
		default:
			return NULL;
		}
		size *= len;
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
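/*
 * Illustrative origin of this pattern (C# source, not from this file): an
 * array initializer such as
 *
 *   static readonly int[] primes = { 2, 3, 5, 7 };
 *
 * compiles to newarr / dup / ldtoken <PrivateImplementationDetails> field /
 * call RuntimeHelpers::InitializeArray, which is exactly the shape matched
 * above, so the caller can copy the data blob straight out of the image
 * instead of going through the InitializeArray icall.
 */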
static void
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
{
	char *method_fname = mono_method_full_name (method, TRUE);
	char *method_code;
	MonoMethodHeader *header = mono_method_get_header (method);

	if (header->code_size == 0)
		method_code = g_strdup ("method body is empty.");
	else
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
	return mini_type_is_reference (cfg, &klass->byval_arg);
}

static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (mini_type_is_reference (cfg, &klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
static gboolean
is_exception_class (MonoClass *class)
{
	while (class) {
		if (class == mono_defaults.exception_class)
			return TRUE;
		class = class->parent;
	}
	return FALSE;
}
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
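/*
 * Example (illustrative): assemblies built with "csc /debug", or explicitly
 * decorated with [assembly: Debuggable (true, true)] (whose second ctor
 * argument is IsJITOptimizerDisabled), take the TRUE path here, which among
 * other things disables inlining for their methods.
 */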
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		static int count = 0;

		count ++;
		if (getenv ("COUNT")) {
			if (count == atoi (getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
			if (count > atoi (getenv ("COUNT")))
				supported_tail_call = FALSE;
		}
	}
#endif

	return supported_tail_call;
}
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
 */
static gboolean
is_magic_tls_access (MonoClassField *field)
{
	if (strcmp (field->name, "tlsdata"))
		return FALSE;
	if (strcmp (field->parent->name, "ThreadLocal`1"))
		return FALSE;
	return field->parent->image == mono_defaults.corlib;
}
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
static MonoInst*
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
{
	MonoInst *addr;
	int static_data_reg, array_reg, dreg;
	int offset2_reg, idx_reg;
	// inlined access to the tls data
	// idx = (offset >> 24) - 1;
	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
	static_data_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
	idx_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
	array_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	offset2_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
	dreg = alloc_ireg (cfg);
	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);

	return addr;
}
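/*
 * Worked example: for a packed offset of 0x02000010 the code above computes
 * idx = (0x02000010 >> 24) - 1 = 1 and a byte offset of
 * 0x02000010 & 0xffffff = 0x10, so the returned address is
 * ((char *) thread->static_data [1]) + 0x10.
 */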
/*
 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
 * this address is cached per-method in cached_tls_addr.
 */
static MonoInst*
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
{
	MonoInst *load, *addr, *temp, *store, *thread_ins;
	MonoClassField *offset_field;

	if (*cached_tls_addr) {
		EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
		return addr;
	}
	thread_ins = mono_get_thread_intrinsic (cfg);
	offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");

	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
	if (thread_ins) {
		MONO_ADD_INS (cfg->cbb, thread_ins);
	} else {
		MonoMethod *thread_method;
		thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
		thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
	}
	addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
	addr->klass = mono_class_from_mono_type (tls_field->type);
	addr->type = STACK_MP;
	*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
	EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);

	EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
	return addr;
}
/*
 * mono_method_to_ir:
 *
 *   Translate the .net IL into linear IR.
 */
int
mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
		   MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
		   guint inline_offset, gboolean is_virtual_call)
{
	MonoInst *ins, **sp, **stack_start;
	MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
	MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
	MonoMethod *cmethod, *method_definition;
	MonoInst **arg_array;
	MonoMethodHeader *header;
	MonoImage *image;
	guint32 token, ins_flag;
	MonoClass *klass;
	MonoClass *constrained_call = NULL;
	unsigned char *ip, *end, *target, *err_pos;
	static double r8_0 = 0.0;
	MonoMethodSignature *sig;
	MonoGenericContext *generic_context = NULL;
	MonoGenericContainer *generic_container = NULL;
	MonoType **param_types;
	int i, n, start_new_bblock, dreg;
	int num_calls = 0, inline_costs = 0;
	int breakpoint_id = 0;
	guint num_args;
	MonoBoolean security, pinvoke;
	MonoSecurityManager* secman = NULL;
	MonoDeclSecurityActions actions;
	GSList *class_inits = NULL;
	gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
	int context_used;
	gboolean init_locals, seq_points, skip_dead_blocks;
	gboolean disable_inline;
	MonoInst *cached_tls_addr = NULL;

	disable_inline = is_jit_optimizer_disabled (method);

	/* serialization and xdomain stuff may need access to private fields and methods */
	dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
	dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
	dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
	dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
	dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
	dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;

	dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;

	/* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
	dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
	dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;

	image = method->klass->image;
	header = mono_method_get_header (method);
	if (!header) {
		MonoLoaderError *error;

		if ((error = mono_loader_get_last_error ())) {
			mono_cfg_set_exception (cfg, error->exception_type);
		} else {
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
			cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
		}
		goto exception_exit;
	}
	generic_container = mono_method_get_generic_container (method);
	sig = mono_method_signature (method);
	num_args = sig->hasthis + sig->param_count;
	ip = (unsigned char*)header->code;
	cfg->cil_start = ip;
	end = ip + header->code_size;
	cfg->stat_cil_code_size += header->code_size;
	init_locals = header->init_locals;

	seq_points = cfg->gen_seq_points && cfg->method == method;

	/*
	 * Methods without init_locals set could cause asserts in various passes
	 * (#497220).
	 */
	init_locals = TRUE;

	method_definition = method;
	while (method_definition->is_inflated) {
		MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
		method_definition = imethod->declaring;
	}

	/* SkipVerification is not allowed if core-clr is enabled */
	if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
		dont_verify = TRUE;
		dont_verify_stloc = TRUE;
	}

	if (mono_debug_using_mono_debugger ())
		cfg->keep_cil_nops = TRUE;

	if (sig->is_inflated)
		generic_context = mono_method_get_context (method);
	else if (generic_container)
		generic_context = &generic_container->context;
	cfg->generic_context = generic_context;

	if (!cfg->generic_sharing_context)
		g_assert (!sig->has_type_parameters);

	if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
		g_assert (method->is_inflated);
		g_assert (mono_method_get_context (method)->method_inst);
	}
	if (method->is_inflated && mono_method_get_context (method)->method_inst)
		g_assert (sig->generic_param_count);

	if (cfg->method == method) {
		cfg->real_offset = 0;
	} else {
		cfg->real_offset = inline_offset;
	}

	cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
	cfg->cil_offset_to_bb_len = header->code_size;

	cfg->current_method = method;

	if (cfg->verbose_level > 2)
		printf ("method to IR %s\n", mono_method_full_name (method, TRUE));

	param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
	if (sig->hasthis)
		param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
	for (n = 0; n < sig->param_count; ++n)
		param_types [n + sig->hasthis] = sig->params [n];
	cfg->arg_types = param_types;

	dont_inline = g_list_prepend (dont_inline, method);
	if (cfg->method == method) {

		if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
			cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);

		/* ENTRY BLOCK */
		NEW_BBLOCK (cfg, start_bblock);
		cfg->bb_entry = start_bblock;
		start_bblock->cil_code = NULL;
		start_bblock->cil_length = 0;
#if defined(__native_client_codegen__)
5843 MONO_INST_NEW (cfg
, ins
, OP_NACL_GC_SAFE_POINT
);
5844 ins
->dreg
= alloc_dreg (cfg
, STACK_I4
);
5845 MONO_ADD_INS (start_bblock
, ins
);
5849 NEW_BBLOCK (cfg
, end_bblock
);
5850 cfg
->bb_exit
= end_bblock
;
5851 end_bblock
->cil_code
= NULL
;
5852 end_bblock
->cil_length
= 0;
5853 end_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
5854 g_assert (cfg
->num_bblocks
== 2);
5856 arg_array
= cfg
->args
;
5858 if (header
->num_clauses
) {
5859 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
5860 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
5862 /* handle exception clauses */
5863 for (i
= 0; i
< header
->num_clauses
; ++i
) {
5864 MonoBasicBlock
*try_bb
;
5865 MonoExceptionClause
*clause
= &header
->clauses
[i
];
5866 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
5867 try_bb
->real_offset
= clause
->try_offset
;
5868 try_bb
->try_start
= TRUE
;
5869 try_bb
->region
= ((i
+ 1) << 8) | clause
->flags
;
5870 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
5871 tblock
->real_offset
= clause
->handler_offset
;
5872 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5874 link_bblock (cfg
, try_bb
, tblock
);
5876 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
5877 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
5879 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
5880 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
5881 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
5882 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5883 MONO_ADD_INS (tblock
, ins
);
5885 if (seq_points
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
) {
5886 /* finally clauses already have a seq point */
5887 NEW_SEQ_POINT (cfg
, ins
, clause
->handler_offset
, TRUE
);
5888 MONO_ADD_INS (tblock
, ins
);
5891 /* todo: is a fault block unsafe to optimize? */
5892 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
5893 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
5897 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5899 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5901 /* catch and filter blocks get the exception object on the stack */
5902 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
5903 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5904 MonoInst
*dummy_use
;
5906 /* mostly like handle_stack_args (), but just sets the input args */
5907 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5908 tblock
->in_scount
= 1;
5909 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5910 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5913 * Add a dummy use for the exvar so its liveness info will be
5917 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
5919 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
5920 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
5921 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
5922 tblock
->real_offset
= clause
->data
.filter_offset
;
5923 tblock
->in_scount
= 1;
5924 tblock
->in_stack
= mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
5925 /* The filter block shares the exvar with the handler block */
5926 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
5927 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
5928 MONO_ADD_INS (tblock
, ins
);
5932 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
5933 clause
->data
.catch_class
&&
5934 cfg
->generic_sharing_context
&&
5935 mono_class_check_context_used (clause
->data
.catch_class
)) {
5937 * In shared generic code with catch
5938 * clauses containing type variables
5939 * the exception handling code has to
5940 * be able to get to the rgctx.
5941 * Therefore we have to make sure that
5942 * the vtable/mrgctx argument (for
5943 * static or generic methods) or the
5944 * "this" argument (for non-static
5945 * methods) are live.
5947 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
5948 mini_method_get_context (method
)->method_inst
||
5949 method
->klass
->valuetype
) {
5950 mono_get_vtable_var (cfg
);
5952 MonoInst
*dummy_use
;
5954 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
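					/*
					 * Illustrative example (not from the original source): in shared code for
					 *
					 *     void M<T> () { try { ... } catch (MyException<T>) { ... } }
					 *
					 * the handler must resolve MyException<T> at run time to match the
					 * exception, so the rgctx (reached through the vtable/mrgctx argument
					 * or through "this") has to stay live even if the body never uses it.
					 */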
				}
			}
		}
	} else {
		arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
		cfg->cbb = start_bblock;
		cfg->args = arg_array;
		mono_save_args (cfg, sig, inline_args);
	}

	/* FIRST CODE BLOCK */
	NEW_BBLOCK (cfg, bblock);
	bblock->cil_code = ip;
	cfg->cbb = bblock;
	cfg->ip = ip;

	ADD_BBLOCK (cfg, bblock);

	if (cfg->method == method) {
		breakpoint_id = mono_debugger_method_has_breakpoint (method);
		if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
			MONO_INST_NEW (cfg, ins, OP_BREAK);
			MONO_ADD_INS (bblock, ins);
		}
	}

	if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
		secman = mono_security_manager_get_methods ();

	security = (secman && mono_method_has_declsec (method));
	/* at this point having security doesn't mean we have any code to generate */
	if (security && (cfg->method == method)) {
		/* Only Demand, NonCasDemand and DemandChoice require code generation.
		 * And we do not want to enter the next section (with allocation) if we
		 * have nothing to generate */
		security = mono_declsec_get_demands (method, &actions);
	}

	/* we must Demand SecurityPermission.Unmanaged before P/Invoking */
	pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
	if (pinvoke) {
		MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
		if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
			MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);

			/* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
			if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
				pinvoke = FALSE;
			}
			if (custom)
				mono_custom_attrs_free (custom);

			if (pinvoke) {
				custom = mono_custom_attrs_from_class (wrapped->klass);
				if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
					pinvoke = FALSE;
				}
				if (custom)
					mono_custom_attrs_free (custom);
			}
		} else {
			/* not a P/Invoke after all */
			pinvoke = FALSE;
		}
	}

	if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
		/* we use a separate basic block for the initialization code */
		NEW_BBLOCK (cfg, init_localsbb);
		cfg->bb_init = init_localsbb;
		init_localsbb->real_offset = cfg->real_offset;
		start_bblock->next_bb = init_localsbb;
		init_localsbb->next_bb = bblock;
		link_bblock (cfg, start_bblock, init_localsbb);
		link_bblock (cfg, init_localsbb, bblock);

		cfg->cbb = init_localsbb;
	} else {
		start_bblock->next_bb = bblock;
		link_bblock (cfg, start_bblock, bblock);
	}

	/* at this point we know, if security is TRUE, that some code needs to be generated */
	if (security && (cfg->method == method)) {
		MonoInst *args [2];

		cfg->stat_cas_demand_generation++;

		if (actions.demand.blob) {
			/* Add code for SecurityAction.Demand */
			EMIT_NEW_DECLSECCONST (cfg, args [0], image, actions.demand);
			EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
			/* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
			mono_emit_method_call (cfg, secman->demand, args, NULL);
		}
		if (actions.noncasdemand.blob) {
			/* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
			/* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
			EMIT_NEW_DECLSECCONST (cfg, args [0], image, actions.noncasdemand);
			EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
			/* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
			mono_emit_method_call (cfg, secman->demand, args, NULL);
		}
		if (actions.demandchoice.blob) {
			/* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
			EMIT_NEW_DECLSECCONST (cfg, args [0], image, actions.demandchoice);
			EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
			/* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
			mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
		}
	}

	/* we must Demand SecurityPermission.Unmanaged before p/invoking */
	if (pinvoke) {
		mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
	}
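	/*
	 * For reference (illustrative, not from the original source): the demand
	 * blobs consumed above come from declarative security metadata, e.g. a
	 * method annotated with
	 *
	 *     [SecurityPermission (SecurityAction.Demand, UnmanagedCode = true)]
	 *
	 * gets a SecurityManager.InternalDemand () call injected at its entry.
	 */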
	if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
		/* check if this is native code, e.g. an icall or a p/invoke */
		if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
			MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
			if (wrapped) {
				gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
				gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);

				/* if this is a native call then it can only be JITted from platform code */
				if ((icall || pinvk) && method->klass && method->klass->image) {
					if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
						MonoException *ex = icall ? mono_get_exception_security () : 
							mono_get_exception_method_access ();
						emit_throw_exception (cfg, ex);
						goto exception_exit;
					}
				}
			}
		}
	}

	if (header->code_size == 0)
		UNVERIFIED;

	if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
		ip = err_pos;
		UNVERIFIED;
	}

	if (cfg->method == method)
		mono_debug_init_method (cfg, bblock, breakpoint_id);

	for (n = 0; n < header->num_locals; ++n) {
		if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
			UNVERIFIED;
	}

	/* We force the vtable variable here for all shared methods
	   for the possibility that they might show up in a stack
	   trace where their exact instantiation is needed. */
	if (cfg->generic_sharing_context && method == cfg->method) {
		if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
				mini_method_get_context (method)->method_inst ||
				method->klass->valuetype) {
			mono_get_vtable_var (cfg);
		} else {
			/* FIXME: Is there a better way to do this?
			   We need the variable live for the duration
			   of the whole method. */
			cfg->args [0]->flags |= MONO_INST_INDIRECT;
		}
	}

	/* add a check for this != NULL to inlined methods */
	if (is_virtual_call) {
		MonoInst *arg_ins;

		NEW_ARGLOAD (cfg, arg_ins, 0);
		MONO_ADD_INS (cfg->cbb, arg_ins);
		MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
	}

	skip_dead_blocks = !dont_verify;
	if (skip_dead_blocks) {
		original_bb = bb = mono_basic_block_split (method, &error);
		if (!mono_error_ok (&error)) {
			mono_error_cleanup (&error);
			UNVERIFIED;
		}
		g_assert (bb);
	}

	/* we use a spare stack slot in SWITCH and NEWOBJ and others */
	stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));

	ins_flag = 0;
	start_new_bblock = 0;
	cfg->cbb = bblock;
	while (ip < end) {
		if (cfg->method == method)
			cfg->real_offset = ip - header->code;
		else
			cfg->real_offset = inline_offset;
		cfg->ip = ip;

		context_used = 0;

		if (start_new_bblock) {
			bblock->cil_length = ip - bblock->cil_code;
			if (start_new_bblock == 2) {
				g_assert (ip == tblock->cil_code);
			} else {
				GET_BBLOCK (cfg, tblock, ip);
			}
			bblock->next_bb = tblock;
			bblock = tblock;
			cfg->cbb = bblock;
			start_new_bblock = 0;
			for (i = 0; i < bblock->in_scount; ++i) {
				if (cfg->verbose_level > 3)
					printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
				EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
				*sp++ = ins;
			}
			if (class_inits)
				g_slist_free (class_inits);
			class_inits = NULL;
		} else {
			if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
				link_bblock (cfg, bblock, tblock);
				if (sp != stack_start) {
					handle_stack_args (cfg, stack_start, sp - stack_start);
					sp = stack_start;
					CHECK_UNVERIFIABLE (cfg);
				}
				bblock->next_bb = tblock;
				bblock = tblock;
				cfg->cbb = bblock;
				for (i = 0; i < bblock->in_scount; ++i) {
					if (cfg->verbose_level > 3)
						printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
					EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
					*sp++ = ins;
				}
				if (class_inits)
					g_slist_free (class_inits);
				class_inits = NULL;
			}
		}

		if (skip_dead_blocks) {
			int ip_offset = ip - header->code;

			if (ip_offset == bb->end)
				bb = bb->next;

			if (bb->dead) {
				int op_size = mono_opcode_size (ip, end);
				g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/

				if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);

				if (ip_offset + op_size == bb->end) {
					MONO_INST_NEW (cfg, ins, OP_NOP);
					MONO_ADD_INS (bblock, ins);
					start_new_bblock = 1;
				}

				ip += op_size;
				continue;
			}
		}
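		/*
		 * Illustrative example (not from the original source): IL such as
		 *
		 *     ret
		 *     ldc.i4.0   // unreachable: nothing branches here
		 *     ret
		 *
		 * can occur in practice; the basic-block pass marks the unreachable
		 * range dead and it is skipped here rather than converted, since dead
		 * code is not even guaranteed to verify.
		 */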
		/*
		 * Sequence points are points where the debugger can place a breakpoint.
		 * Currently, we generate these automatically at points where the IL
		 * stack is empty.
		 */
		if (seq_points && sp == stack_start) {
			NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
			MONO_ADD_INS (cfg->cbb, ins);
		}
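		/*
		 * Illustrative note (not from the original source): for a C# statement
		 * like "x = Foo (y);" the evaluation stack is empty exactly at the
		 * statement boundary, so the sequence point lands there, which is what
		 * line-by-line stepping in a debugger needs.
		 */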
		bblock->real_offset = cfg->real_offset;

		if ((cfg->method == method) && cfg->coverage_info) {
			guint32 cil_offset = ip - header->code;
			cfg->coverage_info->data [cil_offset].cil_code = ip;

			/* TODO: Use an increment here */
#if defined(TARGET_X86)
			MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
			ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
			ins->inst_imm = 1;
			MONO_ADD_INS (cfg->cbb, ins);
#else
			EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
#endif
		}
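		/*
		 * Note: both arms above amount to the run-time store
		 *
		 *     cfg->coverage_info->data [cil_offset].count = 1;
		 *
		 * i.e. a "was this IL offset ever reached" flag rather than a real
		 * execution counter (hence the TODO).
		 */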
		if (cfg->verbose_level > 3)
			printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));

		switch (*ip) {
		case CEE_NOP:
			if (cfg->keep_cil_nops)
				MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			ip++;
			MONO_ADD_INS (bblock, ins);
			break;
		case CEE_BREAK:
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			ip++;
			MONO_ADD_INS (bblock, ins);
			break;
		case CEE_LDARG_0:
		case CEE_LDARG_1:
		case CEE_LDARG_2:
		case CEE_LDARG_3:
			CHECK_STACK_OVF (1);
			n = (*ip)-CEE_LDARG_0;
			CHECK_ARG (n);
			EMIT_NEW_ARGLOAD (cfg, ins, n);
			ip++;
			*sp++ = ins;
			break;
		case CEE_LDLOC_0:
		case CEE_LDLOC_1:
		case CEE_LDLOC_2:
		case CEE_LDLOC_3:
			CHECK_STACK_OVF (1);
			n = (*ip)-CEE_LDLOC_0;
			CHECK_LOCAL (n);
			EMIT_NEW_LOCLOAD (cfg, ins, n);
			ip++;
			*sp++ = ins;
			break;
		case CEE_STLOC_0:
		case CEE_STLOC_1:
		case CEE_STLOC_2:
		case CEE_STLOC_3:
			CHECK_STACK (1);
			n = (*ip)-CEE_STLOC_0;
			CHECK_LOCAL (n);
			--sp;
			if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
				UNVERIFIED;
			emit_stloc_ir (cfg, sp, header, n);
			++ip;
			inline_costs += 1;
			break;
		case CEE_LDARG_S:
			CHECK_OPSIZE (2);
			CHECK_STACK_OVF (1);
			n = ip [1];
			CHECK_ARG (n);
			EMIT_NEW_ARGLOAD (cfg, ins, n);
			*sp++ = ins;
			ip += 2;
			break;
		case CEE_LDARGA_S:
			CHECK_OPSIZE (2);
			CHECK_STACK_OVF (1);
			n = ip [1];
			CHECK_ARG (n);
			NEW_ARGLOADA (cfg, ins, n);
			MONO_ADD_INS (cfg->cbb, ins);
			*sp++ = ins;
			ip += 2;
			break;
		case CEE_STARG_S:
			CHECK_OPSIZE (2);
			CHECK_STACK (1);
			--sp;
			n = ip [1];
			CHECK_ARG (n);
			if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
				UNVERIFIED;
			EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
			ip += 2;
			break;
		case CEE_LDLOC_S:
			CHECK_OPSIZE (2);
			CHECK_STACK_OVF (1);
			n = ip [1];
			CHECK_LOCAL (n);
			EMIT_NEW_LOCLOAD (cfg, ins, n);
			*sp++ = ins;
			ip += 2;
			break;
		case CEE_LDLOCA_S: {
			unsigned char *tmp_ip;
			CHECK_OPSIZE (2);
			CHECK_STACK_OVF (1);
			CHECK_LOCAL (ip [1]);

			if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
				ip = tmp_ip;
				inline_costs += 1;
				break;
			}

			EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
			*sp++ = ins;
			ip += 2;
			break;
		}
		case CEE_STLOC_S:
			CHECK_OPSIZE (2);
			CHECK_STACK (1);
			--sp;
			CHECK_LOCAL (ip [1]);
			if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
				UNVERIFIED;
			emit_stloc_ir (cfg, sp, header, ip [1]);
			ip += 2;
			inline_costs += 1;
			break;
		case CEE_LDNULL:
			CHECK_STACK_OVF (1);
			EMIT_NEW_PCONST (cfg, ins, NULL);
			ins->type = STACK_OBJ;
			ip++;
			*sp++ = ins;
			break;
		case CEE_LDC_I4_M1:
			CHECK_STACK_OVF (1);
			EMIT_NEW_ICONST (cfg, ins, -1);
			ip++;
			*sp++ = ins;
			break;
		case CEE_LDC_I4_0:
		case CEE_LDC_I4_1:
		case CEE_LDC_I4_2:
		case CEE_LDC_I4_3:
		case CEE_LDC_I4_4:
		case CEE_LDC_I4_5:
		case CEE_LDC_I4_6:
		case CEE_LDC_I4_7:
		case CEE_LDC_I4_8:
			CHECK_STACK_OVF (1);
			EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
			ip++;
			*sp++ = ins;
			break;
		case CEE_LDC_I4_S:
			CHECK_OPSIZE (2);
			CHECK_STACK_OVF (1);
			++ip;
			EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
			++ip;
			*sp++ = ins;
			break;
		case CEE_LDC_I4:
			CHECK_OPSIZE (5);
			CHECK_STACK_OVF (1);
			EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
			ip += 5;
			*sp++ = ins;
			break;
		case CEE_LDC_I8:
			CHECK_OPSIZE (9);
			CHECK_STACK_OVF (1);
			MONO_INST_NEW (cfg, ins, OP_I8CONST);
			ins->type = STACK_I8;
			ins->dreg = alloc_dreg (cfg, STACK_I8);
			++ip;
			ins->inst_l = (gint64)read64 (ip);
			MONO_ADD_INS (bblock, ins);
			ip += 8;
			*sp++ = ins;
			break;
		case CEE_LDC_R4: {
			float *f;
			gboolean use_aotconst = FALSE;

#ifdef TARGET_POWERPC
			/* FIXME: Clean this up */
			if (cfg->compile_aot)
				use_aotconst = TRUE;
#endif

			/* FIXME: we should really allocate this only late in the compilation process */
			f = mono_domain_alloc (cfg->domain, sizeof (float));
			CHECK_OPSIZE (5);
			CHECK_STACK_OVF (1);

			if (use_aotconst) {
				MonoInst *cons;
				int dreg;

				EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);

				dreg = alloc_freg (cfg);
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
				ins->type = STACK_R8;
			} else {
				MONO_INST_NEW (cfg, ins, OP_R4CONST);
				ins->type = STACK_R8;
				ins->dreg = alloc_dreg (cfg, STACK_R8);
				ins->inst_p0 = f;
				MONO_ADD_INS (bblock, ins);
			}
			++ip;
			readr4 (ip, f);
			ip += 4;
			*sp++ = ins;
			break;
		}
		case CEE_LDC_R8: {
			double *d;
			gboolean use_aotconst = FALSE;

#ifdef TARGET_POWERPC
			/* FIXME: Clean this up */
			if (cfg->compile_aot)
				use_aotconst = TRUE;
#endif

			/* FIXME: we should really allocate this only late in the compilation process */
			d = mono_domain_alloc (cfg->domain, sizeof (double));
			CHECK_OPSIZE (9);
			CHECK_STACK_OVF (1);

			if (use_aotconst) {
				MonoInst *cons;
				int dreg;

				EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);

				dreg = alloc_freg (cfg);
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
				ins->type = STACK_R8;
			} else {
				MONO_INST_NEW (cfg, ins, OP_R8CONST);
				ins->type = STACK_R8;
				ins->dreg = alloc_dreg (cfg, STACK_R8);
				ins->inst_p0 = d;
				MONO_ADD_INS (bblock, ins);
			}
			++ip;
			readr8 (ip, d);
			ip += 8;
			*sp++ = ins;
			break;
		}
		case CEE_DUP: {
			MonoInst *temp, *store;
			CHECK_STACK (1);
			CHECK_STACK_OVF (1);
			sp--;
			ins = *sp;

			temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
			EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);

			EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
			*sp++ = ins;

			EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
			*sp++ = ins;

			++ip;
			inline_costs += 2;
			break;
		}
		case CEE_POP:
			CHECK_STACK (1);
			ip++;
			--sp;

#ifdef TARGET_X86
			if (sp [0]->type == STACK_R8)
				/* we need to pop the value from the x86 FP stack */
				MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
#endif
			break;
		case CEE_JMP: {
			MonoCallInst *call;

			INLINE_FAILURE;

			CHECK_OPSIZE (5);
			if (stack_start != sp)
				UNVERIFIED;
			token = read32 (ip + 1);
			/* FIXME: check the signature matches */
			cmethod = mini_get_method (cfg, method, token, NULL, generic_context);

			if (!cmethod || mono_loader_get_last_error ())
				goto load_error;

			if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
				GENERIC_SHARING_FAILURE (CEE_JMP);

			if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
				CHECK_CFG_EXCEPTION;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
			{
				MonoMethodSignature *fsig = mono_method_signature (cmethod);

				/* Handle tail calls similarly to calls */
				n = fsig->param_count + fsig->hasthis;

				MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
				call->method = cmethod;
				call->tail_call = TRUE;
				call->signature = mono_method_signature (cmethod);
				call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
				call->inst.inst_p0 = cmethod;
				for (i = 0; i < n; ++i)
					EMIT_NEW_ARGLOAD (cfg, call->args [i], i);

				mono_arch_emit_call (cfg, call);
				MONO_ADD_INS (bblock, (MonoInst*)call);
			}
#else
			for (i = 0; i < num_args; ++i)
				/* Prevent arguments from being optimized away */
				arg_array [i]->flags |= MONO_INST_VOLATILE;

			MONO_INST_NEW_CALL (cfg, call, OP_JMP);
			ins = (MonoInst*)call;
			ins->inst_p0 = cmethod;
			MONO_ADD_INS (bblock, ins);
#endif

			ip += 5;
			start_new_bblock = 1;
			break;
		}
		case CEE_CALLI:
		case CEE_CALL:
		case CEE_CALLVIRT: {
			MonoInst *addr = NULL;
			MonoMethodSignature *fsig = NULL;
			int array_rank = 0;
			int virtual = *ip == CEE_CALLVIRT;
			int calli = *ip == CEE_CALLI;
			gboolean pass_imt_from_rgctx = FALSE;
			MonoInst *imt_arg = NULL;
			gboolean pass_vtable = FALSE;
			gboolean pass_mrgctx = FALSE;
			MonoInst *vtable_arg = NULL;
			gboolean check_this = FALSE;
			gboolean supported_tail_call = FALSE;

			CHECK_OPSIZE (5);
			token = read32 (ip + 1);

			if (calli) {
				cmethod = NULL;
				CHECK_STACK (1);
				--sp;
				addr = *sp;
				if (method->wrapper_type != MONO_WRAPPER_NONE)
					fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
				else
					fsig = mono_metadata_parse_signature (image, token);

				n = fsig->param_count + fsig->hasthis;

				if (method->dynamic && fsig->pinvoke) {
					MonoInst *args [3];

					/*
					 * This is a call through a function pointer using a pinvoke
					 * signature. Have to create a wrapper and call that instead.
					 * FIXME: This is very slow, need to create a wrapper at JIT time
					 * instead based on the signature.
					 */
					EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
					EMIT_NEW_PCONST (cfg, args [1], fsig);
					args [2] = addr;
					addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
				}
			} else {
				MonoMethod *cil_method;

				if (method->wrapper_type != MONO_WRAPPER_NONE) {
					if (constrained_call && cfg->verbose_level > 2)
						printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
					cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
					cil_method = cmethod;
					if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
							constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
							cfg->generic_sharing_context)) {
						cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
					}
				} else if (constrained_call) {
					if (cfg->verbose_level > 2)
						printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));

					if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
						/*
						 * This is needed since get_method_constrained can't find
						 * the method in klass representing a type var.
						 * The type var is guaranteed to be a reference type in this
						 * case.
						 */
						cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
						cil_method = cmethod;
						g_assert (!cmethod->klass->valuetype);
					} else {
						cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
					}
				} else {
					cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
					cil_method = cmethod;
				}

				if (!cmethod || mono_loader_get_last_error ())
					goto load_error;
				if (!dont_verify && !cfg->skip_visibility) {
					MonoMethod *target_method = cil_method;
					if (method->is_inflated) {
						target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
					}
					if (!mono_method_can_access_method (method_definition, target_method) &&
						!mono_method_can_access_method (method, cil_method))
						METHOD_ACCESS_FAILURE;
				}

				if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
					ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);

				if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
					/* MS.NET seems to silently convert this to a callvirt */
					virtual = 1;

				{
					/*
					 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
					 * converts to a callvirt.
					 *
					 * tests/bug-515884.il is an example of this behavior
					 */
					const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
					const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
					if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
						virtual = 1;
				}

				if (!cmethod->klass->inited)
					if (!mono_class_init (cmethod->klass))
						goto load_error;

				if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
				    mini_class_is_system_array (cmethod->klass)) {
					array_rank = cmethod->klass->rank;
					fsig = mono_method_signature (cmethod);
				} else {
					fsig = mono_method_signature (cmethod);
				}

				if (fsig->pinvoke) {
					MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
						check_for_pending_exc, FALSE);
					fsig = mono_method_signature (wrapper);
				} else if (constrained_call) {
					fsig = mono_method_signature (cmethod);
				} else {
					fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
				}

				mono_save_token_info (cfg, image, token, cil_method);

				n = fsig->param_count + fsig->hasthis;

				if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
					if (check_linkdemand (cfg, method, cmethod))
						INLINE_FAILURE;
					CHECK_CFG_EXCEPTION;
				}

				if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
					g_assert_not_reached ();
			}

			if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
				UNVERIFIED;

			if (!cfg->generic_sharing_context && cmethod)
				g_assert (!mono_method_check_context_used (cmethod));

			CHECK_STACK (n);

			//g_assert (!virtual || fsig->hasthis);

			sp -= n;

			if (constrained_call) {
				/*
				 * We have the `constrained.' prefix opcode.
				 */
				if (constrained_call->valuetype && !cmethod->klass->valuetype) {
					/*
					 * The type parameter is instantiated as a valuetype,
					 * but that type doesn't override the method we're
					 * calling, so we need to box `this'.
					 */
					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
					ins->klass = constrained_call;
					sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
					CHECK_CFG_EXCEPTION;
				} else if (!constrained_call->valuetype) {
					int dreg = alloc_ireg_ref (cfg);

					/*
					 * The type parameter is instantiated as a reference
					 * type. We have a managed pointer on the stack, so
					 * we need to dereference it here.
					 */
					EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
					ins->type = STACK_OBJ;
					sp [0] = ins;
				} else if (cmethod->klass->valuetype)
					virtual = 0;
				constrained_call = NULL;
			}
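			/*
			 * Illustrative example (not from the original source): for
			 *
			 *     string F<T> (T t) { return t.ToString (); }
			 *
			 * csc emits "constrained. !!T  callvirt instance string object::ToString ()".
			 * The branches above correspond to T instantiated as a value type
			 * without its own override (box "this"), as a reference type
			 * (dereference the managed pointer), or as a value type that does
			 * override the method (direct non-virtual call).
			 */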
			if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
				UNVERIFIED;

			/*
			 * If the callee is a shared method, then its static cctor
			 * might not get called after the call was patched.
			 */
			if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
				emit_generic_class_init (cfg, cmethod->klass);
				CHECK_TYPELOAD (cmethod->klass);
			}

			if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
					(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
				gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
				MonoGenericContext *context = mini_class_get_context (cmethod->klass);
				gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

				/*
				 * Pass vtable iff target method might
				 * be shared, which means that sharing
				 * is enabled for its class and its
				 * context is sharable (and it's not a
				 * generic method).
				 */
				if (sharing_enabled && context_sharable &&
						!(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
					pass_vtable = TRUE;
			}

			if (cmethod && mini_method_get_context (cmethod) &&
					mini_method_get_context (cmethod)->method_inst) {
				gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
				MonoGenericContext *context = mini_method_get_context (cmethod);
				gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

				g_assert (!pass_vtable);

				if (sharing_enabled && context_sharable)
					pass_mrgctx = TRUE;
			}

			if (cfg->generic_sharing_context && cmethod) {
				MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);

				context_used = mono_method_check_context_used (cmethod);

				if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
					/* Generic method interface
					   calls are resolved via a
					   helper function and don't
					   need an imt. */
					if (!cmethod_context || !cmethod_context->method_inst)
						pass_imt_from_rgctx = TRUE;
				}

				/*
				 * If a shared method calls another
				 * shared method then the caller must
				 * have a generic sharing context
				 * because the magic trampoline
				 * requires it.  FIXME: We shouldn't
				 * have to force the vtable/mrgctx
				 * variable here.  Instead there
				 * should be a flag in the cfg to
				 * request a generic sharing context.
				 */
				if (context_used &&
						((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
					mono_get_vtable_var (cfg);
			}

			if (pass_vtable) {
				if (context_used) {
					vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
				} else {
					MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);

					CHECK_TYPELOAD (cmethod->klass);
					EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
				}
			}

			if (pass_mrgctx) {
				g_assert (!vtable_arg);

				if (!cfg->compile_aot) {
					/*
					 * emit_get_rgctx_method () calls mono_class_vtable () so check
					 * for type load errors before.
					 */
					mono_class_setup_vtable (cmethod->klass);
					CHECK_TYPELOAD (cmethod->klass);
				}

				vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);

				/* !marshalbyref is needed to properly handle generic methods + remoting */
				if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
						MONO_METHOD_IS_FINAL (cmethod)) &&
						!cmethod->klass->marshalbyref) {
					if (virtual)
						check_this = TRUE;
					virtual = 0;
				}
			}

			if (pass_imt_from_rgctx) {
				g_assert (!pass_vtable);

				imt_arg = emit_get_rgctx_method (cfg, context_used,
					cmethod, MONO_RGCTX_INFO_METHOD);
			}

			if (check_this)
				MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);

			/* Calling virtual generic methods */
			if (cmethod && virtual &&
			    (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
			    !(MONO_METHOD_IS_FINAL (cmethod) &&
			      cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
			    mono_method_signature (cmethod)->generic_param_count) {
				MonoInst *this_temp, *this_arg_temp, *store;
				MonoInst *iargs [4];

				g_assert (mono_method_signature (cmethod)->is_inflated);

				/* Prevent inlining of methods that contain indirect calls */
				INLINE_FAILURE;

#if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
				if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
					g_assert (!imt_arg);
					if (!context_used)
						g_assert (cmethod->is_inflated);
					imt_arg = emit_get_rgctx_method (cfg, context_used,
									 cmethod, MONO_RGCTX_INFO_METHOD);
					ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
				} else
#endif
				{
					this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
					NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
					MONO_ADD_INS (bblock, store);

					/* FIXME: This should be a managed pointer */
					this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

					EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
					iargs [1] = emit_get_rgctx_method (cfg, context_used,
									   cmethod, MONO_RGCTX_INFO_METHOD);
					EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
					addr = mono_emit_jit_icall (cfg,
								    mono_helper_compile_generic_method, iargs);

					EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);

					ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
				}

				if (!MONO_TYPE_IS_VOID (fsig->ret))
					*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;
				break;
			}

			/*
			 * Implement a workaround for the inherent races involved in locking:
			 * Monitor.Enter ()
			 * try {
			 * } finally {
			 *    Monitor.Exit ()
			 * }
			 * If a thread abort happens between the call to Monitor.Enter () and the start of the
			 * try block, the Exit () won't be executed, see:
			 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
			 * To work around this, we extend such try blocks to include the last x bytes
			 * of the Monitor.Enter () call.
			 */
			if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
				MonoBasicBlock *tbb;

				GET_BBLOCK (cfg, tbb, ip + 5);
				/*
				 * Only extend try blocks with a finally, to avoid catching exceptions thrown
				 * from Monitor.Enter like ArgumentNullException.
				 */
				if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
					/* Mark this bblock as needing to be extended */
					tbb->extend_try_block = TRUE;
				}
			}
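			/*
			 * Illustrative example (not from the original source): C#
			 *
			 *     lock (obj) { ... }
			 *
			 * compiles to Monitor.Enter (obj) followed by a try/finally that
			 * calls Monitor.Exit (obj). Extending the finally-protected region
			 * backwards over the tail of the Enter () call closes the window
			 * in which an asynchronous ThreadAbortException could leak the lock.
			 */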
			/* Conversion to a JIT intrinsic */
			if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
				bblock = cfg->cbb;
				if (!MONO_TYPE_IS_VOID (fsig->ret)) {
					type_to_eval_stack_type ((cfg), fsig->ret, ins);
					*sp = ins;
					sp++;
				}

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;
				break;
			}

			/* Inlining */
			if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
				(!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
			    !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
				 !g_list_find (dont_inline, cmethod)) {
				int costs;
				gboolean always = FALSE;

				if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
					(cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
					/* Prevent inlining of methods that call wrappers */
					INLINE_FAILURE;
					cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
					always = TRUE;
				}

				if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
					ip += 5;
					cfg->real_offset += 5;
					bblock = cfg->cbb;

					if (!MONO_TYPE_IS_VOID (fsig->ret))
						/* *sp is already set by inline_method */
						sp++;

					inline_costs += costs;
					ins_flag = 0;
					break;
				}
			}

			inline_costs += 10 * num_calls++;

			/* Tail recursion elimination */
			if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
				gboolean has_vtargs = FALSE;
				int i;

				/* Prevent inlining of methods with tail calls (the call stack would be altered) */
				INLINE_FAILURE;

				/* keep it simple */
				for (i = fsig->param_count - 1; i >= 0; i--) {
					if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
						has_vtargs = TRUE;
				}

				if (!has_vtargs) {
					for (i = 0; i < n; ++i)
						EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
					MONO_INST_NEW (cfg, ins, OP_BR);
					MONO_ADD_INS (bblock, ins);
					tblock = start_bblock->out_bb [0];
					link_bblock (cfg, bblock, tblock);
					ins->inst_target_bb = tblock;
					start_new_bblock = 1;

					/* skip the CEE_RET, too */
					if (ip_in_bb (cfg, bblock, ip + 5))
						ip += 6;
					else
						ip += 5;
					ins_flag = 0;
					break;
				}
			}
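			/*
			 * Illustrative example (not from the original source): with tail
			 * call optimization enabled,
			 *
			 *     static int Sum (int n, int acc) {
			 *         if (n == 0) return acc;
			 *         return Sum (n - 1, acc + n);
			 *     }
			 *
			 * has its recursive "call Sum; ret" rewritten into argument stores
			 * plus a branch back to the method entry, turning the recursion
			 * into a loop that does not grow the stack.
			 */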
			/* Generic sharing */
			/* FIXME: only do this for generic methods if
			   they are not shared! */
			if (context_used && !imt_arg && !array_rank &&
					(!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
						!mono_class_generic_sharing_enabled (cmethod->klass)) &&
					(!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
						!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
				INLINE_FAILURE;

				g_assert (cfg->generic_sharing_context && cmethod);
				g_assert (!addr);

				/*
				 * We are compiling a call to a
				 * generic method from shared code,
				 * which means that we have to look up
				 * the method in the rgctx and do an
				 * indirect call.
				 */
				addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
			}

			/* Indirect calls */
			if (addr) {
				g_assert (!imt_arg);

				if (*ip == CEE_CALL)
					g_assert (context_used);
				else if (*ip == CEE_CALLI)
					g_assert (!vtable_arg);
				else
					/* FIXME: what the hell is this??? */
					g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
							!(cmethod->flags & METHOD_ATTRIBUTE_FINAL));

				/* Prevent inlining of methods with indirect calls */
				INLINE_FAILURE;

				if (vtable_arg) {
					ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
				} else if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
					/*
					 * Instead of emitting an indirect call, emit a direct call
					 * with the contents of the aotconst as the patch info.
					 */
					ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
				} else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
					ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
				} else {
					ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
				}
				if (!MONO_TYPE_IS_VOID (fsig->ret))
					*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;
				break;
			}

			/* Array methods */
			if (array_rank) {
				MonoInst *addr;

				if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
					MonoInst *val = sp [fsig->param_count];

					if (val->type == STACK_OBJ) {
						MonoInst *iargs [2];

						iargs [0] = sp [0];
						iargs [1] = val;

						mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
					}

					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
					EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
					if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
						emit_write_barrier (cfg, addr, val, 0);
				} else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);

					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);

					*sp++ = ins;
				} else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
					if (!cmethod->klass->element_class->valuetype && !readonly)
						mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
					CHECK_TYPELOAD (cmethod->klass);

					readonly = FALSE;
					addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
					*sp++ = addr;
				} else {
					g_assert_not_reached ();
				}

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;
				break;
			}

			ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
			if (ins) {
				if (!MONO_TYPE_IS_VOID (fsig->ret))
					*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;
				break;
			}

			/* Tail prefix / tail call optimization */

			/* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
			/* FIXME: runtime generic context pointer for jumps? */
			/* FIXME: handle this for generic sharing eventually */
			supported_tail_call = cmethod &&
				((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
				))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
				&& !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);

			if (supported_tail_call) {
				MonoCallInst *call;

				/* Prevent inlining of methods with tail calls (the call stack would be altered) */
				INLINE_FAILURE;

				//printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
				/* Handle tail calls similarly to calls */
				call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
#else
				MONO_INST_NEW_CALL (cfg, call, OP_JMP);
				call->tail_call = TRUE;
				call->method = cmethod;
				call->signature = mono_method_signature (cmethod);

				/*
				 * We implement tail calls by storing the actual arguments into the
				 * argument variables, then emitting a CEE_JMP.
				 */
				for (i = 0; i < n; ++i) {
					/* Prevent argument from being register allocated */
					arg_array [i]->flags |= MONO_INST_VOLATILE;
					EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
				}
#endif

				ins = (MonoInst*)call;
				ins->inst_p0 = cmethod;
				ins->inst_p1 = arg_array [0];
				MONO_ADD_INS (bblock, ins);
				link_bblock (cfg, bblock, end_bblock);
				start_new_bblock = 1;

				CHECK_CFG_EXCEPTION;

				ip += 5;
				ins_flag = 0;

				// FIXME: Eliminate unreachable epilogs

				/*
				 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
				 * only reachable from this call.
				 */
				GET_BBLOCK (cfg, tblock, ip);
				if (tblock == bblock || tblock->in_count == 0)
					ip += 1;
				break;
			}

			/* Common call */
			INLINE_FAILURE;

			ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
							  imt_arg, vtable_arg);

			if (!MONO_TYPE_IS_VOID (fsig->ret))
				*sp++ = mono_emit_widen_call_res (cfg, ins, fsig);

			CHECK_CFG_EXCEPTION;

			ip += 5;
			ins_flag = 0;
			break;
		}
		case CEE_RET:
			if (cfg->method != method) {
				/* return from inlined method */
				/*
				 * If in_count == 0, that means the ret is unreachable due to
				 * being preceded by a throw. In that case, inline_method () will
				 * handle setting the return value
				 * (test case: test_0_inline_throw ()).
				 */
				if (return_var && cfg->cbb->in_count) {
					MonoInst *store;
					CHECK_STACK (1);
					--sp;
					//g_assert (returnvar != -1);
					EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
					cfg->ret_var_set = TRUE;
				}
			} else {
				if (cfg->ret) {
					MonoType *ret_type = mono_method_signature (method)->ret;

					if (seq_points) {
						/*
						 * Place a seq point here too even though the IL stack is not
						 * empty, so a step over on
						 * call <FOO>
						 * ret
						 * will work correctly.
						 */
						NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
						MONO_ADD_INS (cfg->cbb, ins);
					}

					g_assert (!return_var);
					CHECK_STACK (1);
					--sp;

					if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
						UNVERIFIED;

					if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
						MonoInst *ret_addr;

						if (!cfg->vret_addr) {
							MonoInst *ins;

							EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
						} else {
							EMIT_NEW_RETLOADA (cfg, ret_addr);

							EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
							ins->klass = mono_class_from_mono_type (ret_type);
						}
					} else {
#ifdef MONO_ARCH_SOFT_FLOAT
						if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
							MonoInst *iargs [1];
							MonoInst *conv;

							iargs [0] = *sp;
							conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
							mono_arch_emit_setret (cfg, method, conv);
						} else {
							mono_arch_emit_setret (cfg, method, *sp);
						}
#else
						mono_arch_emit_setret (cfg, method, *sp);
#endif
					}
				}
			}
			if (sp != stack_start)
				UNVERIFIED;
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;
			ins->inst_target_bb = end_bblock;
			MONO_ADD_INS (bblock, ins);
			link_bblock (cfg, bblock, end_bblock);
			start_new_bblock = 1;
			break;
		case CEE_BR_S:
			CHECK_OPSIZE (2);
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;
			target = ip + 1 + (signed char)(*ip);
			++ip;
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, bblock, tblock);
			ins->inst_target_bb = tblock;
			if (sp != stack_start) {
				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);
			}
			MONO_ADD_INS (bblock, ins);
			start_new_bblock = 1;
			inline_costs += BRANCH_COST;
			break;
		case CEE_BEQ_S:
		case CEE_BGE_S:
		case CEE_BGT_S:
		case CEE_BLE_S:
		case CEE_BLT_S:
		case CEE_BNE_UN_S:
		case CEE_BGE_UN_S:
		case CEE_BGT_UN_S:
		case CEE_BLE_UN_S:
		case CEE_BLT_UN_S:
			CHECK_OPSIZE (2);
			CHECK_STACK (2);
			MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
			ip++;
			target = ip + 1 + *(signed char*)ip;
			ip++;

			ADD_BINCOND (NULL);

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		case CEE_BR:
			CHECK_OPSIZE (5);
			MONO_INST_NEW (cfg, ins, OP_BR);
			ip++;

			target = ip + 4 + (gint32)read32(ip);
			ip += 4;

			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, bblock, tblock);
			ins->inst_target_bb = tblock;
			if (sp != stack_start) {
				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);
			}

			MONO_ADD_INS (bblock, ins);

			start_new_bblock = 1;
			inline_costs += BRANCH_COST;
			break;
		case CEE_BRFALSE_S:
		case CEE_BRTRUE_S:
		case CEE_BRFALSE:
		case CEE_BRTRUE: {
			MonoInst *cmp;
			gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
			gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
			guint32 opsize = is_short ? 1 : 4;

			CHECK_OPSIZE (opsize);
			CHECK_STACK (1);
			if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
				UNVERIFIED;
			ip++;
			target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
			ip += opsize;

			sp--;

			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, bblock, tblock);
			GET_BBLOCK (cfg, tblock, ip);
			link_bblock (cfg, bblock, tblock);

			if (sp != stack_start) {
				handle_stack_args (cfg, stack_start, sp - stack_start);
				CHECK_UNVERIFIABLE (cfg);
			}

			MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
			cmp->sreg1 = sp [0]->dreg;
			type_from_op (cmp, sp [0], NULL);
			CHECK_TYPE (cmp);

#if SIZEOF_REGISTER == 4
			if (cmp->opcode == OP_LCOMPARE_IMM) {
				/* Convert it to OP_LCOMPARE */
				MONO_INST_NEW (cfg, ins, OP_I8CONST);
				ins->type = STACK_I8;
				ins->dreg = alloc_dreg (cfg, STACK_I8);
				ins->inst_l = 0;
				MONO_ADD_INS (bblock, ins);
				cmp->opcode = OP_LCOMPARE;
				cmp->sreg2 = ins->dreg;
			}
#endif
			MONO_ADD_INS (bblock, cmp);

			MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
			type_from_op (ins, sp [0], NULL);
			MONO_ADD_INS (bblock, ins);
			ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
			GET_BBLOCK (cfg, tblock, target);
			ins->inst_true_bb = tblock;
			GET_BBLOCK (cfg, tblock, ip);
			ins->inst_false_bb = tblock;
			start_new_bblock = 2;

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		}
		case CEE_BEQ:
		case CEE_BGE:
		case CEE_BGT:
		case CEE_BLE:
		case CEE_BLT:
		case CEE_BNE_UN:
		case CEE_BGE_UN:
		case CEE_BGT_UN:
		case CEE_BLE_UN:
		case CEE_BLT_UN:
			CHECK_OPSIZE (5);
			CHECK_STACK (2);
			MONO_INST_NEW (cfg, ins, *ip);
			ip++;
			target = ip + 4 + (gint32)read32(ip);
			ip += 4;

			ADD_BINCOND (NULL);

			sp = stack_start;
			inline_costs += BRANCH_COST;
			break;
		case CEE_SWITCH: {
			MonoInst *src1;
			MonoBasicBlock **targets;
			MonoBasicBlock *default_bblock;
			MonoJumpInfoBBTable *table;
			int offset_reg = alloc_preg (cfg);
			int target_reg = alloc_preg (cfg);
			int table_reg = alloc_preg (cfg);
			int sum_reg = alloc_preg (cfg);
			gboolean use_op_switch;

			CHECK_OPSIZE (5);
			CHECK_STACK (1);
			n = read32 (ip + 1);
			--sp;
			src1 = sp [0];
			if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
				UNVERIFIED;

			ip += 5;
			CHECK_OPSIZE (n * sizeof (guint32));
			target = ip + n * sizeof (guint32);

			GET_BBLOCK (cfg, default_bblock, target);
			default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;

			targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
			for (i = 0; i < n; ++i) {
				GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
				targets [i] = tblock;
				targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
				ip += 4;
			}

			if (sp != stack_start) {
				/*
				 * Link the current bb with the targets as well, so handle_stack_args
				 * will set their in_stack correctly.
				 */
				link_bblock (cfg, bblock, default_bblock);
				for (i = 0; i < n; ++i)
					link_bblock (cfg, bblock, targets [i]);

				handle_stack_args (cfg, stack_start, sp - stack_start);
				sp = stack_start;
				CHECK_UNVERIFIABLE (cfg);
			}

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
			bblock = cfg->cbb;

			for (i = 0; i < n; ++i)
				link_bblock (cfg, bblock, targets [i]);

			table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
			table->table = targets;
			table->table_size = n;

			use_op_switch = FALSE;
#ifdef TARGET_ARM
			/* ARM implements SWITCH statements differently */
			/* FIXME: Make it use the generic implementation */
			if (!cfg->compile_aot)
				use_op_switch = TRUE;
#endif

			if (COMPILE_LLVM (cfg))
				use_op_switch = TRUE;

			cfg->cbb->has_jump_table = 1;

			if (use_op_switch) {
				MONO_INST_NEW (cfg, ins, OP_SWITCH);
				ins->sreg1 = src1->dreg;
				ins->inst_p0 = table;
				ins->inst_many_bb = targets;
				ins->klass = GUINT_TO_POINTER (n);
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				if (sizeof (gpointer) == 8)
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
				else
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);

#if SIZEOF_REGISTER == 8
				/* The upper word might not be zero, and we add it to a 64 bit address later */
				MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
#endif

				if (cfg->compile_aot) {
					MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
				} else {
					MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
					ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
					ins->inst_p0 = table;
					ins->dreg = table_reg;
					MONO_ADD_INS (cfg->cbb, ins);
				}

				/* FIXME: Use load_memindex */
				MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
				MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
			}
			start_new_bblock = 1;
			inline_costs += (BRANCH_COST * 2);
			break;
		}
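		/*
		 * Illustrative note (not from the original source): the jump-table
		 * path above lowers the switch to a computed jump, roughly
		 *
		 *     target = *(table + index * sizeof (gpointer));
		 *     goto *target;
		 *
		 * after the preceding bounds check has already routed index >= n to
		 * the default block, so the indirect branch needs no range test.
		 */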
		case CEE_LDIND_I1:
		case CEE_LDIND_U1:
		case CEE_LDIND_I2:
		case CEE_LDIND_U2:
		case CEE_LDIND_I4:
		case CEE_LDIND_U4:
		case CEE_LDIND_I8:
		case CEE_LDIND_I:
		case CEE_LDIND_R4:
		case CEE_LDIND_R8:
		case CEE_LDIND_REF:
			CHECK_STACK (1);
			--sp;

			switch (*ip) {
			case CEE_LDIND_R4:
			case CEE_LDIND_R8:
				dreg = alloc_freg (cfg);
				break;
			case CEE_LDIND_I8:
				dreg = alloc_lreg (cfg);
				break;
			case CEE_LDIND_REF:
				dreg = alloc_ireg_ref (cfg);
				break;
			default:
				dreg = alloc_preg (cfg);
			}

			NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
			ins->type = ldind_type [*ip - CEE_LDIND_I1];
			ins->flags |= ins_flag;
			ins_flag = 0;
			MONO_ADD_INS (bblock, ins);
			*sp++ = ins;
			++ip;
			break;
		case CEE_STIND_REF:
		case CEE_STIND_I1:
		case CEE_STIND_I2:
		case CEE_STIND_I4:
		case CEE_STIND_I8:
		case CEE_STIND_R4:
		case CEE_STIND_R8:
		case CEE_STIND_I:
			CHECK_STACK (2);
			sp -= 2;

			NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
			ins->flags |= ins_flag;
			ins_flag = 0;
			MONO_ADD_INS (bblock, ins);

			if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
				emit_write_barrier (cfg, sp [0], sp [1], -1);

			inline_costs += 1;
			++ip;
			break;
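		/*
		 * Background note (illustrative, not from the original source): with a
		 * generational GC, storing a reference through a pointer may create an
		 * old-to-young pointer the collector would otherwise miss, so stind.ref
		 * gets a write barrier; stores of NULL (OP_PCONST 0) can never create
		 * such a pointer and are exempt.
		 */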
7662 MONO_INST_NEW (cfg
, ins
, (*ip
));
7664 ins
->sreg1
= sp
[0]->dreg
;
7665 ins
->sreg2
= sp
[1]->dreg
;
7666 type_from_op (ins
, sp
[0], sp
[1]);
7668 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7670 /* Use the immediate opcodes if possible */
7671 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (sp
[1]->inst_c0
)) {
7672 int imm_opcode
= mono_op_to_op_imm (ins
->opcode
);
7673 if (imm_opcode
!= -1) {
7674 ins
->opcode
= imm_opcode
;
7675 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
7678 sp
[1]->opcode
= OP_NOP
;
7682 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7684 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7701 MONO_INST_NEW (cfg
, ins
, (*ip
));
7703 ins
->sreg1
= sp
[0]->dreg
;
7704 ins
->sreg2
= sp
[1]->dreg
;
7705 type_from_op (ins
, sp
[0], sp
[1]);
7707 ADD_WIDEN_OP (ins
, sp
[0], sp
[1]);
7708 ins
->dreg
= alloc_dreg ((cfg
), (ins
)->type
);
7710 /* FIXME: Pass opcode to is_inst_imm */
7712 /* Use the immediate opcodes if possible */
7713 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) && mono_arch_is_inst_imm (sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
7716 imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
7717 if (imm_opcode
!= -1) {
7718 ins
->opcode
= imm_opcode
;
7719 if (sp
[1]->opcode
== OP_I8CONST
) {
7720 #if SIZEOF_REGISTER == 8
7721 ins
->inst_imm
= sp
[1]->inst_l
;
7723 ins
->inst_ls_word
= sp
[1]->inst_ls_word
;
7724 ins
->inst_ms_word
= sp
[1]->inst_ms_word
;
7728 ins
->inst_imm
= (gssize
)(sp
[1]->inst_c0
);
7731 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7732 if (sp
[1]->next
== NULL
)
7733 sp
[1]->opcode
= OP_NOP
;
7736 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
7738 *sp
++ = mono_decompose_opcode (cfg
, ins
);
7751 case CEE_CONV_OVF_I8
:
7752 case CEE_CONV_OVF_U8
:
7756 /* Special case this earlier so we have long constants in the IR */
7757 if ((((*ip
) == CEE_CONV_I8
) || ((*ip
) == CEE_CONV_U8
)) && (sp
[-1]->opcode
== OP_ICONST
)) {
7758 int data
= sp
[-1]->inst_c0
;
7759 sp
[-1]->opcode
= OP_I8CONST
;
7760 sp
[-1]->type
= STACK_I8
;
7761 #if SIZEOF_REGISTER == 8
7762 if ((*ip
) == CEE_CONV_U8
)
7763 sp
[-1]->inst_c0
= (guint32
)data
;
7765 sp
[-1]->inst_c0
= data
;
7767 sp
[-1]->inst_ls_word
= data
;
7768 if ((*ip
) == CEE_CONV_U8
)
7769 sp
[-1]->inst_ms_word
= 0;
7771 sp
[-1]->inst_ms_word
= (data
< 0) ? -1 : 0;
7773 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
7780 case CEE_CONV_OVF_I4
:
7781 case CEE_CONV_OVF_I1
:
7782 case CEE_CONV_OVF_I2
:
7783 case CEE_CONV_OVF_I
:
7784 case CEE_CONV_OVF_U
:
7787 if (sp
[-1]->type
== STACK_R8
) {
7788 ADD_UNOP (CEE_CONV_OVF_I8
);
7795 case CEE_CONV_OVF_U1
:
7796 case CEE_CONV_OVF_U2
:
7797 case CEE_CONV_OVF_U4
:
7800 if (sp
[-1]->type
== STACK_R8
) {
7801 ADD_UNOP (CEE_CONV_OVF_U8
);
7808 case CEE_CONV_OVF_I1_UN
:
7809 case CEE_CONV_OVF_I2_UN
:
7810 case CEE_CONV_OVF_I4_UN
:
7811 case CEE_CONV_OVF_I8_UN
:
7812 case CEE_CONV_OVF_U1_UN
:
7813 case CEE_CONV_OVF_U2_UN
:
7814 case CEE_CONV_OVF_U4_UN
:
7815 case CEE_CONV_OVF_U8_UN
:
7816 case CEE_CONV_OVF_I_UN
:
7817 case CEE_CONV_OVF_U_UN
:
7824 CHECK_CFG_EXCEPTION
;
7828 case CEE_ADD_OVF_UN
:
7830 case CEE_MUL_OVF_UN
:
7832 case CEE_SUB_OVF_UN
:
7840 token
= read32 (ip
+ 1);
7841 klass
= mini_get_class (method
, token
, generic_context
);
7842 CHECK_TYPELOAD (klass
);
7844 if (generic_class_is_reference_type (cfg
, klass
)) {
7845 MonoInst
*store
, *load
;
7846 int dreg
= alloc_ireg_ref (cfg
);
7848 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, dreg
, sp
[1]->dreg
, 0);
7849 load
->flags
|= ins_flag
;
7850 MONO_ADD_INS (cfg
->cbb
, load
);
7852 NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, sp
[0]->dreg
, 0, dreg
);
7853 store
->flags
|= ins_flag
;
7854 MONO_ADD_INS (cfg
->cbb
, store
);
7856 if (cfg
->gen_write_barriers
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
)
7857 emit_write_barrier (cfg
, sp
[0], sp
[1], -1);
7859 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7871 token
= read32 (ip
+ 1);
7872 klass
= mini_get_class (method
, token
, generic_context
);
7873 CHECK_TYPELOAD (klass
);
7875 /* Optimize the common ldobj+stloc combination */
7885 loc_index
= ip
[5] - CEE_STLOC_0
;
7892 if ((loc_index
!= -1) && ip_in_bb (cfg
, bblock
, ip
+ 5)) {
7893 CHECK_LOCAL (loc_index
);
7895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
7896 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
7902 /* Optimize the ldobj+stobj combination */
7903 /* The reference case ends up being a load+store anyway */
7904 if (((ip
[5] == CEE_STOBJ
) && ip_in_bb (cfg
, bblock
, ip
+ 5) && read32 (ip
+ 6) == token
) && !generic_class_is_reference_type (cfg
, klass
)) {
7909 mini_emit_stobj (cfg
, sp
[0], sp
[1], klass
, FALSE
);
7916 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, &klass
->byval_arg
, sp
[0]->dreg
, 0);
			CHECK_STACK_OVF (1);

			n = read32 (ip + 1);

			if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
				EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
				ins->type = STACK_OBJ;
				*sp = ins;
			}
			else if (method->wrapper_type != MONO_WRAPPER_NONE) {
				MonoInst *iargs [1];

				EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
				*sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
			} else {
				if (cfg->opt & MONO_OPT_SHARED) {
					MonoInst *iargs [3];

					if (cfg->compile_aot) {
						cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
					}
					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
					EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
					*sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
					mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
				} else {
					if (bblock->out_of_line) {
						MonoInst *iargs [2];

						if (image == mono_defaults.corlib) {
							/*
							 * Avoid relocations in AOT and save some space by using a
							 * version of helper_ldstr specialized to mscorlib.
							 */
							EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
							*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
						} else {
							/* Avoid creating the string object */
							EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
							EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
							*sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
						}
					}
					else if (cfg->compile_aot) {
						NEW_LDSTRCONST (cfg, ins, image, n);
						*sp = ins;
						MONO_ADD_INS (bblock, ins);
					}
					else {
						NEW_PCONST (cfg, ins, NULL);
						ins->type = STACK_OBJ;
						ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
						if (!ins->inst_p0)
							OUT_OF_MEMORY_FAILURE;

						*sp = ins;
						MONO_ADD_INS (bblock, ins);
					}
				}
			}
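			/*
			 * To summarize the LDSTR cases: wrappers fetch the literal from
			 * their wrapper data, domain-shared code calls the
			 * mono_ldstr/helper_ldstr icalls at run time, AOT code emits a
			 * patchable constant, and plain JIT code can intern the string
			 * right now and embed the object pointer directly into the
			 * instruction stream.
			 */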
			MonoInst *iargs [2];
			MonoMethodSignature *fsig;
			MonoInst *vtable_arg = NULL;

			token = read32 (ip + 1);
			cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
			if (!cmethod || mono_loader_get_last_error ())
				LOAD_ERROR;
			fsig = mono_method_get_signature (cmethod, image, token);
			if (!fsig)
				LOAD_ERROR;

			mono_save_token_info (cfg, image, token, cmethod);

			if (!mono_class_init (cmethod->klass))
				LOAD_ERROR;

			if (cfg->generic_sharing_context)
				context_used = mono_method_check_context_used (cmethod);

			if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
				if (check_linkdemand (cfg, method, cmethod))
					INLINE_FAILURE;
				CHECK_CFG_EXCEPTION;
			} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
				ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
			}

			if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
				emit_generic_class_init (cfg, cmethod->klass);
				CHECK_TYPELOAD (cmethod->klass);
			}

			if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
					mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
				if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
					mono_class_vtable (cfg->domain, cmethod->klass);
					CHECK_TYPELOAD (cmethod->klass);

					vtable_arg = emit_get_rgctx_method (cfg, context_used,
						cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
				} else if (context_used) {
					vtable_arg = emit_get_rgctx_klass (cfg, context_used,
						cmethod->klass, MONO_RGCTX_INFO_VTABLE);
				} else {
					MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);

					CHECK_TYPELOAD (cmethod->klass);
					EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
				}
			}
			n = fsig->param_count;

			/*
			 * Generate smaller code for the common newobj <exception> instruction in
			 * argument checking code.
			 */
			if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
				is_exception_class (cmethod->klass) && n <= 2 &&
				((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
				((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
				MonoInst *iargs [3];

				g_assert (!vtable_arg);

				sp -= n;

				EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
				switch (n) {
				case 0:
					*sp++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
					break;
				case 1:
					iargs [1] = sp [0];
					*sp++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
					break;
				case 2:
					iargs [1] = sp [0];
					iargs [2] = sp [1];
					*sp++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
					break;
				default:
					g_assert_not_reached ();
				}
				break;
			}
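			/*
			 * The fast path above targets argument-checking code such as
			 *
			 *   if (arg == null)
			 *       throw new ArgumentNullException ("arg");
			 *
			 * Instead of a full newobj (allocation plus a ctor call), it
			 * passes the exception class token and up to two string
			 * arguments to one of the mono_create_corlib_exception_*
			 * icalls, keeping this out-of-line code small.
			 */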
			/* move the args to allow room for 'this' in the first position */

			/* check_call_signature () requires sp[0] to be set */
			this_ins.type = STACK_OBJ;
			sp [0] = &this_ins;
			if (check_call_signature (cfg, fsig, sp))
				UNVERIFIED;

			if (mini_class_is_system_array (cmethod->klass)) {
				g_assert (!vtable_arg);

				*sp = emit_get_rgctx_method (cfg, context_used,
					cmethod, MONO_RGCTX_INFO_METHOD);

				/* Avoid varargs in the common case */
				if (fsig->param_count == 1)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
				else if (fsig->param_count == 2)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
				else if (fsig->param_count == 3)
					alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
				else
					alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
			} else if (cmethod->string_ctor) {
				g_assert (!context_used);
				g_assert (!vtable_arg);
				/* we simply pass a null pointer */
				EMIT_NEW_PCONST (cfg, *sp, NULL);
				/* now call the string ctor */
				alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
			} else {
				MonoInst *callvirt_this_arg = NULL;

				if (cmethod->klass->valuetype) {
					iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
					MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
					EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);

					alloc = NULL;

					/*
					 * The code generated by mini_emit_virtual_call () expects
					 * iargs [0] to be a boxed instance, but luckily the vcall
					 * will be transformed into a normal call there.
					 */
				} else if (context_used) {
					alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
					*sp = alloc;
				} else {
					MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);

					CHECK_TYPELOAD (cmethod->klass);

					/*
					 * TypeInitializationExceptions thrown from the mono_runtime_class_init
					 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
					 * As a workaround, we call class cctors before allocating objects.
					 */
					if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
						mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
						if (cfg->verbose_level > 2)
							printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
						class_inits = g_slist_prepend (class_inits, vtable);
					}

					alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
					*sp = alloc;
				}
				CHECK_CFG_EXCEPTION; /*for handle_alloc*/

				if (alloc)
					MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);

				/* Now call the actual ctor */
				/* Avoid virtual calls to ctors if possible */
				if (cmethod->klass->marshalbyref)
					callvirt_this_arg = sp [0];

				if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
					if (!MONO_TYPE_IS_VOID (fsig->ret)) {
						type_to_eval_stack_type ((cfg), fsig->ret, ins);
						*sp = ins;
					}

					CHECK_CFG_EXCEPTION;
				} else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
					!disable_inline && mono_method_check_inlining (cfg, cmethod) &&
					!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
					!g_list_find (dont_inline, cmethod)) {
					int costs;

					if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
						cfg->real_offset += 5;
						bblock = cfg->cbb;

						inline_costs += costs - 5;
					} else {
						INLINE_FAILURE;
						mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
					}
				} else if (context_used &&
						(!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
							!mono_class_generic_sharing_enabled (cmethod->klass))) {
					MonoInst *cmethod_addr;

					cmethod_addr = emit_get_rgctx_method (cfg, context_used,
						cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

					mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
				} else {
					INLINE_FAILURE;
					ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
						callvirt_this_arg, NULL, vtable_arg);
				}
			}

			if (alloc == NULL) {
				/* Valuetype */
				EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
				type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
				*sp++ = ins;
			} else {
				*sp++ = alloc;
			}
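			/*
			 * In outline, the ctor call above is emitted one of four ways:
			 * as an intrinsic (mini_emit_inst_for_ctor), inlined into the
			 * caller, as an indirect call through an rgctx-provided address
			 * for shared generic code, or as a plain direct call. Valuetype
			 * ctors construct into a stack temporary (alloc == NULL), which
			 * is then loaded back as the result.
			 */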
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
				MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
				MonoInst *args [3];

				args [0] = *sp;
				EMIT_NEW_CLASSCONST (cfg, args [1], klass);

				if (cfg->compile_aot)
					EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
				else
					EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));

				/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
				*sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
			} else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
				MonoMethod *mono_castclass;
				MonoInst *iargs [1];
				int costs;

				mono_castclass = mono_marshal_get_castclass (klass);
				iargs [0] = sp [0];

				costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
					iargs, ip, cfg->real_offset, dont_inline, TRUE);
				CHECK_CFG_EXCEPTION;
				g_assert (costs > 0);

				cfg->real_offset += 5;

				inline_costs += costs;
			} else {
				ins = handle_castclass (cfg, klass, *sp, context_used);
				CHECK_CFG_EXCEPTION;
			}
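			/*
			 * The castclass-with-cache wrapper takes (obj, klass, cache):
			 * the cache is one pointer-sized slot remembering the last
			 * vtable that passed the check, so the common monomorphic case
			 * is roughly a single compare. Variant generic interfaces need
			 * it because the check cannot be resolved statically, e.g. an
			 * IEnumerable<string> is also an IEnumerable<object>.
			 */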
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
				MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
				MonoInst *args [3];

				args [0] = *sp;
				EMIT_NEW_CLASSCONST (cfg, args [1], klass);

				if (cfg->compile_aot)
					EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
				else
					EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));

				*sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
			} else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
				MonoMethod *mono_isinst;
				MonoInst *iargs [1];
				int costs;

				mono_isinst = mono_marshal_get_isinst (klass);
				iargs [0] = sp [0];

				costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
					iargs, ip, cfg->real_offset, dont_inline, TRUE);
				CHECK_CFG_EXCEPTION;
				g_assert (costs > 0);

				cfg->real_offset += 5;

				inline_costs += costs;
			} else {
				ins = handle_isinst (cfg, klass, *sp, context_used);
				CHECK_CFG_EXCEPTION;
			}
		case CEE_UNBOX_ANY: {
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (generic_class_is_reference_type (cfg, klass)) {
				/* CASTCLASS FIXME kill this huge slice of duplicated code*/
				if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
					MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
					MonoInst *args [3];

					args [0] = *sp;
					EMIT_NEW_CLASSCONST (cfg, args [1], klass);

					/*FIXME AOT support*/
					if (cfg->compile_aot)
						EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
					else
						EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));

					/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
					*sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
				} else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
					MonoMethod *mono_castclass;
					MonoInst *iargs [1];
					int costs;

					mono_castclass = mono_marshal_get_castclass (klass);
					iargs [0] = sp [0];

					costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
						iargs, ip, cfg->real_offset, dont_inline, TRUE);
					CHECK_CFG_EXCEPTION;
					g_assert (costs > 0);

					cfg->real_offset += 5;

					inline_costs += costs;
				} else {
					ins = handle_castclass (cfg, klass, *sp, context_used);
					CHECK_CFG_EXCEPTION;
				}
				ip += 5;
				break;
			}

			if (mono_class_is_nullable (klass)) {
				ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
				*sp++ = ins;
				ip += 5;
				break;
			}

			ins = handle_unbox (cfg, klass, sp, context_used);
			*sp = ins;

			EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
			MonoInst *val;

			CHECK_STACK (1);
			--sp;
			val = *sp;

			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (generic_class_is_reference_type (cfg, klass)) {
				*sp++ = val;
				ip += 5;
				break;
			}

			if (klass == mono_defaults.void_class)
				UNVERIFIED;
			if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
				UNVERIFIED;
			/* frequent check in generic code: box (struct), brtrue */

			// FIXME: LLVM can't handle the inconsistent bb linking
			if (!mono_class_is_nullable (klass) &&
				ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
				(ip [5] == CEE_BRTRUE ||
				 ip [5] == CEE_BRTRUE_S ||
				 ip [5] == CEE_BRFALSE ||
				 ip [5] == CEE_BRFALSE_S)) {
				gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
				int dreg;
				MonoBasicBlock *true_bb, *false_bb;

				ip += 5;

				if (cfg->verbose_level > 3) {
					printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
					printf ("<box+brtrue opt>\n");
				}

				switch (*ip) {
				case CEE_BRTRUE_S:
				case CEE_BRFALSE_S:
					ip++;
					target = ip + 1 + (signed char)(*ip);
					break;
				case CEE_BRTRUE:
				case CEE_BRFALSE:
					ip++;
					target = ip + 4 + (gint)(read32 (ip));
					break;
				default:
					g_assert_not_reached ();
				}

				/*
				 * We need to link both bblocks, since it is needed for handling stack
				 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
				 * Branching to only one of them would lead to inconsistencies, so
				 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
				 */
				GET_BBLOCK (cfg, true_bb, target);
				GET_BBLOCK (cfg, false_bb, ip);

				mono_link_bblock (cfg, cfg->cbb, true_bb);
				mono_link_bblock (cfg, cfg->cbb, false_bb);

				if (sp != stack_start) {
					handle_stack_args (cfg, stack_start, sp - stack_start);
					sp = stack_start;
					CHECK_UNVERIFIABLE (cfg);
				}

				if (COMPILE_LLVM (cfg)) {
					dreg = alloc_ireg (cfg);
					MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
					MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
				} else {
					/* The JIT can't eliminate the iconst+compare */
					MONO_INST_NEW (cfg, ins, OP_BR);
					ins->inst_target_bb = is_true ? true_bb : false_bb;
					MONO_ADD_INS (cfg->cbb, ins);
				}

				start_new_bblock = 1;
				break;
			}

			*sp++ = handle_box (cfg, val, klass, context_used);

			CHECK_CFG_EXCEPTION;
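			/*
			 * The transformation above fires for generic patterns like
			 *
			 *   if (default (T) == null) ...      // C#
			 *
			 * which compile to "box !T; brtrue/brfalse". Boxing a
			 * non-nullable valuetype never yields null, so the branch
			 * direction is known at JIT time and the box can be dropped
			 * entirely; only the bblock linking has to stay consistent.
			 */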
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			mono_save_token_info (cfg, image, token, klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (mono_class_is_nullable (klass)) {
				MonoInst *val;

				val = handle_unbox_nullable (cfg, *sp, klass, context_used);
				EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);

				*sp++ = ins;
			} else {
				ins = handle_unbox (cfg, klass, sp, context_used);
				*sp++ = ins;
			}
			MonoClassField *field;
			guint foffset;

			if (*ip == CEE_STFLD) {
				CHECK_STACK (2);
				sp -= 2;
			} else {
				CHECK_STACK (1);
				--sp;
			}
			if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
				UNVERIFIED;
			if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
				UNVERIFIED;
			token = read32 (ip + 1);
			if (method->wrapper_type != MONO_WRAPPER_NONE) {
				field = mono_method_get_wrapper_data (method, token);
				klass = field->parent;
			}
			else {
				field = mono_field_from_token (image, token, &klass, generic_context);
			}
			if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
				FIELD_ACCESS_FAILURE;
			mono_class_init (klass);

			if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
				UNVERIFIED;
			/* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
			   any visible *instance* field (in fact there's a single case for a static field in Marshal)	XXX
			if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
				ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
			*/

			foffset = klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset;
			if (*ip == CEE_STFLD) {
				if (target_type_is_incompatible (cfg, field->type, sp [1]))
					UNVERIFIED;
				if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
					MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
					MonoInst *iargs [5];

					iargs [0] = sp [0];
					EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
					EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
					EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
						field->offset);
					iargs [4] = sp [1];

					if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
						costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
							iargs, ip, cfg->real_offset, dont_inline, TRUE);
						CHECK_CFG_EXCEPTION;
						g_assert (costs > 0);

						cfg->real_offset += 5;

						inline_costs += costs;
					} else {
						mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
					}
				} else {
					MonoInst *store;

					MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);

					EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
					if (sp [0]->opcode != OP_LDADDR)
						store->flags |= MONO_INST_FAULT;

					if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
						/* insert call to write barrier */
						MonoInst *ptr;
						int dreg;

						dreg = alloc_ireg_mp (cfg);
						EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
						emit_write_barrier (cfg, ptr, sp [1], -1);
					}

					store->flags |= ins_flag;
				}
				ip += 5;
				break;
			}
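			/*
			 * For a reference-typed field the store above is paired with a
			 * write barrier: OP_PADD_IMM computes the interior pointer
			 * &obj->field and emit_write_barrier () records that slot, so a
			 * generational collector can find old-to-young references
			 * without scanning the whole heap. Stores of a known null skip
			 * the barrier.
			 */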
			if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
				MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
				MonoInst *iargs [4];

				iargs [0] = sp [0];
				EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
				EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
				EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
				if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
					costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
						iargs, ip, cfg->real_offset, dont_inline, TRUE);
					CHECK_CFG_EXCEPTION;

					g_assert (costs > 0);

					cfg->real_offset += 5;

					inline_costs += costs;
				} else {
					ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
					*sp++ = ins;
				}
			} else {
				if (sp [0]->type == STACK_VTYPE) {
					MonoInst *var;

					/* Have to compute the address of the variable */

					var = get_vreg_to_inst (cfg, sp [0]->dreg);
					if (!var)
						var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
					else
						g_assert (var->klass == klass);
					EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
					sp [0] = ins;
				}

				if (*ip == CEE_LDFLDA) {
					if (is_magic_tls_access (field)) {
						*sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
					} else {
						if (sp [0]->type == STACK_OBJ) {
							MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
							MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
						}

						dreg = alloc_ireg_mp (cfg);

						EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
						ins->klass = mono_class_from_mono_type (field->type);
						ins->type = STACK_MP;
						*sp++ = ins;
					}
				} else {
					MonoInst *load;

					MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);

					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
					load->flags |= ins_flag;
					if (sp [0]->opcode != OP_LDADDR)
						load->flags |= MONO_INST_FAULT;
					*sp++ = load;
				}
			}
			MonoClassField *field;
			gpointer addr = NULL;
			gboolean is_special_static;
			MonoType *ftype;

			token = read32 (ip + 1);

			if (method->wrapper_type != MONO_WRAPPER_NONE) {
				field = mono_method_get_wrapper_data (method, token);
				klass = field->parent;
			}
			else {
				field = mono_field_from_token (image, token, &klass, generic_context);
			}
			mono_class_init (klass);
			if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
				FIELD_ACCESS_FAILURE;

			/* if the class is Critical then transparent code cannot access its fields */
			if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
				ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);

			/*
			 * We can only support shared generic static
			 * field access on architectures where the
			 * trampoline code has been extended to handle
			 * the generic class init.
			 */
#ifndef MONO_ARCH_VTABLE_REG
			GENERIC_SHARING_FAILURE (*ip);
#endif

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			ftype = mono_field_get_type (field);

			g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));

			/* The special_static_fields field is init'd in mono_class_vtable, so it needs
			 * to be called here.
			 */
			if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
				mono_class_vtable (cfg->domain, klass);
				CHECK_TYPELOAD (klass);
			}
			mono_domain_lock (cfg->domain);
			if (cfg->domain->special_static_fields)
				addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
			mono_domain_unlock (cfg->domain);

			is_special_static = mono_class_field_is_special_static (field);
			/* Generate IR to compute the field address */
			if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
				/*
				 * Fast access to TLS data
				 * Inline version of get_thread_static_data () in
				 * threads.c.
				 */
				guint32 offset;
				int idx, static_data_reg, array_reg, dreg;
				MonoInst *thread_ins;

				// offset &= 0x7fffffff;
				// idx = (offset >> 24) - 1;
				// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);

				thread_ins = mono_get_thread_intrinsic (cfg);
				MONO_ADD_INS (cfg->cbb, thread_ins);
				static_data_reg = alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));

				if (cfg->compile_aot) {
					int offset_reg, offset2_reg, idx_reg;

					/* For TLS variables, this will return the TLS offset */
					EMIT_NEW_SFLDACONST (cfg, ins, field);
					offset_reg = ins->dreg;
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
					idx_reg = alloc_ireg (cfg);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
					MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
					array_reg = alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
					offset2_reg = alloc_ireg (cfg);
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
					dreg = alloc_ireg (cfg);
					EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
				} else {
					offset = (gsize)addr & 0x7fffffff;
					idx = (offset >> 24) - 1;

					array_reg = alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
					dreg = alloc_ireg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
				}
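				/*
				 * Layout assumed by the fast path above: the looked-up value
				 * packs an index and an offset, roughly
				 *
				 *   idx  = (offset >> 24) - 1;          // which static_data chunk
				 *   addr = thread->static_data [idx] + (offset & 0xffffff);
				 *
				 * with bit 31 acting as a flag that forces the slow path.
				 * The AOT branch performs the same arithmetic in emitted IR
				 * because the numeric offset is only known at run time.
				 */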
			} else if ((cfg->opt & MONO_OPT_SHARED) ||
					(cfg->compile_aot && is_special_static) ||
					(context_used && is_special_static)) {
				MonoInst *iargs [2];

				g_assert (field->parent);
				EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
				if (context_used) {
					iargs [1] = emit_get_rgctx_field (cfg, context_used,
						field, MONO_RGCTX_INFO_CLASS_FIELD);
				} else {
					EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
				}
				ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
			} else if (context_used) {
				MonoInst *static_data;

				/*
				g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
					method->klass->name_space, method->klass->name, method->name,
					depth, field->offset);
				*/

				if (mono_class_needs_cctor_run (klass, method))
					emit_generic_class_init (cfg, klass);

				/*
				 * The pointer we're computing here is
				 *
				 *   super_info.static_data + field->offset
				 */
				static_data = emit_get_rgctx_klass (cfg, context_used,
					klass, MONO_RGCTX_INFO_STATIC_DATA);

				if (field->offset == 0) {
					ins = static_data;
				} else {
					int addr_reg = mono_alloc_preg (cfg);
					EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
				}
			} else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
				MonoInst *iargs [2];

				g_assert (field->parent);
				EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
				EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
				ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
			} else {
				MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

				CHECK_TYPELOAD (klass);
				if (!addr) {
					if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
						if (!(g_slist_find (class_inits, vtable))) {
							mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
							if (cfg->verbose_level > 2)
								printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
							class_inits = g_slist_prepend (class_inits, vtable);
						}
					} else {
						if (cfg->run_cctors) {
							MonoException *ex;
							/* This makes so that inline cannot trigger */
							/* .cctors: too many apps depend on them */
							/* running with a specific order... */
							if (! vtable->initialized)
								INLINE_FAILURE;
							ex = mono_runtime_class_init_full (vtable, FALSE);
							if (ex) {
								set_exception_object (cfg, ex);
								goto exception_exit;
							}
						}
					}
					addr = (char*)vtable->data + field->offset;

					if (cfg->compile_aot)
						EMIT_NEW_SFLDACONST (cfg, ins, field);
					else
						EMIT_NEW_PCONST (cfg, ins, addr);
				} else {
					MonoInst *iargs [1];
					EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
					ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
				}
			}
			/* Generate IR to do the actual load/store operation */

			if (*ip == CEE_LDSFLDA) {
				ins->klass = mono_class_from_mono_type (ftype);
				ins->type = STACK_PTR;
				*sp++ = ins;
			} else if (*ip == CEE_STSFLD) {
				MonoInst *store;

				EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
				store->flags |= ins_flag;
			} else {
				gboolean is_const = FALSE;
				MonoVTable *vtable = NULL;

				if (!context_used) {
					vtable = mono_class_vtable (cfg->domain, klass);
					CHECK_TYPELOAD (klass);
				}
				if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
					vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
					gpointer addr = (char*)vtable->data + field->offset;
					int ro_type = ftype->type;
					if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
						ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
					}
					/* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
					is_const = TRUE;
					switch (ro_type) {
					case MONO_TYPE_BOOLEAN:
					case MONO_TYPE_U1:
						EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
						sp++;
						break;
					case MONO_TYPE_I1:
						EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
						sp++;
						break;
					case MONO_TYPE_CHAR:
					case MONO_TYPE_U2:
						EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
						sp++;
						break;
					case MONO_TYPE_I2:
						EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
						sp++;
						break;
					case MONO_TYPE_I4:
						EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
						sp++;
						break;
					case MONO_TYPE_U4:
						EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
						sp++;
						break;
					case MONO_TYPE_PTR:
					case MONO_TYPE_FNPTR:
						EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
						type_to_eval_stack_type ((cfg), field->type, *sp);
						sp++;
						break;
					case MONO_TYPE_STRING:
					case MONO_TYPE_OBJECT:
					case MONO_TYPE_CLASS:
					case MONO_TYPE_SZARRAY:
					case MONO_TYPE_ARRAY:
						if (!mono_gc_is_moving ()) {
							EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
							type_to_eval_stack_type ((cfg), field->type, *sp);
							sp++;
						} else {
							is_const = FALSE;
						}
						break;
					case MONO_TYPE_I8:
					case MONO_TYPE_U8:
						EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
						sp++;
						break;
					case MONO_TYPE_R4:
					case MONO_TYPE_R8:
					case MONO_TYPE_VALUETYPE:
					default:
						is_const = FALSE;
						break;
					}
				}

				if (!is_const) {
					MonoInst *load;

					CHECK_STACK_OVF (1);

					EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
					load->flags |= ins_flag;
					*sp++ = load;
				}
			}
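			/*
			 * Folding an initonly static into a constant is only safe for
			 * object references when the GC never moves objects: with a
			 * moving collector the embedded pointer would go stale, hence
			 * the mono_gc_is_moving () check above. Primitives and enums
			 * can always be read out of the vtable data and baked in.
			 */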
			token = read32 (ip + 1);
			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);
			/* FIXME: should check item at sp [1] is compatible with the type of the store. */
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
			if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
					generic_class_is_reference_type (cfg, klass)) {
				/* insert call to write barrier */
				emit_write_barrier (cfg, sp [0], sp [1], -1);
			}
			const char *data_ptr;
			int data_size;
			guint32 field_token;

			token = read32 (ip + 1);

			klass = mini_get_class (method, token, generic_context);
			CHECK_TYPELOAD (klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
				MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
				ins->sreg1 = sp [0]->dreg;
				ins->type = STACK_I4;
				ins->dreg = alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins);
				*sp = mono_decompose_opcode (cfg, ins);
			}

			if (context_used) {
				MonoInst *args [3];
				MonoClass *array_class = mono_array_class_get (klass, 1);
				/* FIXME: we cannot get a managed
				   allocator because we can't get the
				   open generic class's vtable. We
				   have the same problem in
				   handle_alloc(). This
				   needs to be solved so that we can
				   have managed allocs of shared
				   generic classes. */
				/*
				MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
				MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
				*/
				MonoMethod *managed_alloc = NULL;

				/* FIXME: Decompose later to help abcrem */

				args [0] = emit_get_rgctx_klass (cfg, context_used,
					array_class, MONO_RGCTX_INFO_VTABLE);
				args [1] = sp [0];

				if (managed_alloc)
					ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
				else
					ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
			} else {
				if (cfg->opt & MONO_OPT_SHARED) {
					/* Decompose now to avoid problems with references to the domainvar */
					MonoInst *iargs [3];

					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
					iargs [2] = sp [0];

					ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
				} else {
					/* Decompose later since it is needed by abcrem */
					MonoClass *array_type = mono_array_class_get (klass, 1);
					mono_class_vtable (cfg->domain, array_type);
					CHECK_TYPELOAD (array_type);

					MONO_INST_NEW (cfg, ins, OP_NEWARR);
					ins->dreg = alloc_ireg_ref (cfg);
					ins->sreg1 = sp [0]->dreg;
					ins->inst_newa_class = klass;
					ins->type = STACK_OBJ;
					MONO_ADD_INS (cfg->cbb, ins);
					cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
					cfg->cbb->has_array_access = TRUE;

					/* Needed so mono_emit_load_get_addr () gets called */
					mono_get_got_var (cfg);
				}
			}
			/*
			 * we inline/optimize the initialization sequence if possible.
			 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
			 * for small sizes open code the memcpy
			 * ensure the rva field is big enough
			 */
			if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
				MonoMethod *memcpy_method = get_memcpy_method ();
				MonoInst *iargs [3];
				int add_reg = alloc_ireg_mp (cfg);

				EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
				if (cfg->compile_aot) {
					EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
				}
				EMIT_NEW_ICONST (cfg, iargs [2], data_size);
				mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
			}
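			/*
			 * This pairs with C# array initializers, e.g.
			 *
			 *   int[] a = new int[] { 1, 2, 3 };
			 *
			 * which the compiler emits as newarr + ldtoken of an RVA data
			 * field + a call to RuntimeHelpers.InitializeArray. When the
			 * length is constant, the whole sequence collapses into one
			 * memcpy from the image data into the array vector.
			 */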
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			MONO_INST_NEW (cfg, ins, OP_LDLEN);
			ins->dreg = alloc_preg (cfg);
			ins->sreg1 = sp [0]->dreg;
			ins->type = STACK_I4;
			/* This flag will be inherited by the decomposition */
			ins->flags |= MONO_INST_FAULT;
			MONO_ADD_INS (cfg->cbb, ins);
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
			cfg->cbb->has_array_access = TRUE;
			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			cfg->flags |= MONO_CFG_HAS_LDELEMA;

			klass = mini_get_class (method, read32 (ip + 1), generic_context);
			CHECK_TYPELOAD (klass);
			/* we need to make sure that this array is exactly the type it needs
			 * to be for correctness. the wrappers are lax with their usage
			 * so we need to ignore them here
			 */
			if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
				MonoClass *array_class = mono_array_class_get (klass, 1);
				mini_emit_check_array_type (cfg, sp [0], array_class);
				CHECK_TYPELOAD (array_class);
			}

			ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
		case CEE_LDELEM_REF: {
			MonoInst *addr;

			if (*ip == CEE_LDELEM) {
				token = read32 (ip + 1);
				klass = mini_get_class (method, token, generic_context);
				CHECK_TYPELOAD (klass);
				mono_class_init (klass);
			}
			else
				klass = array_access_to_klass (*ip);

			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			cfg->flags |= MONO_CFG_HAS_LDELEMA;

			if (sp [1]->opcode == OP_ICONST) {
				int array_reg = sp [0]->dreg;
				int index_reg = sp [1]->dreg;
				int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);

				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
			} else {
				addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
			}
			*sp++ = ins;
			if (*ip == CEE_LDELEM)
				ip += 5;
			else
				++ip;
			break;
		}
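		/*
		 * For a constant index the element address folds to
		 *
		 *   array + G_STRUCT_OFFSET (MonoArray, vector) + index * element_size
		 *
		 * so only the bounds check against max_length remains; the general
		 * path goes through mini_emit_ldelema_1_ins () to compute the
		 * address at run time.
		 */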
		case CEE_STELEM_REF: {
			MonoInst *addr;

			cfg->flags |= MONO_CFG_HAS_LDELEMA;

			if (*ip == CEE_STELEM) {
				token = read32 (ip + 1);
				klass = mini_get_class (method, token, generic_context);
				CHECK_TYPELOAD (klass);
				mono_class_init (klass);
			}
			else
				klass = array_access_to_klass (*ip);

			if (sp [0]->type != STACK_OBJ)
				UNVERIFIED;

			/* storing a NULL doesn't need any of the complex checks in stelemref */
			if (generic_class_is_reference_type (cfg, klass) &&
				!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
				MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
				MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
				MonoInst *iargs [3];

				mono_class_setup_vtable (obj_array);
				g_assert (helper->slot);

				if (sp [0]->type != STACK_OBJ)
					UNVERIFIED;
				if (sp [2]->type != STACK_OBJ)
					UNVERIFIED;

				iargs [2] = sp [2];
				iargs [1] = sp [1];
				iargs [0] = sp [0];

				mono_emit_method_call (cfg, helper, iargs, sp [0]);
			} else {
				if (sp [1]->opcode == OP_ICONST) {
					int array_reg = sp [0]->dreg;
					int index_reg = sp [1]->dreg;
					int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);

					MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
					EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
				} else {
					addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
					EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
				}
			}

			if (*ip == CEE_STELEM)
				ip += 5;
			else
				++ip;
			break;
		}
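		/*
		 * Reference stores need the covariance check mandated by the CLI:
		 * e.g. storing an arbitrary object into a string[] viewed as
		 * object[] must throw ArrayTypeMismatchException. That is what the
		 * virtual stelemref helper performs; null stores and valuetype
		 * elements can use the plain store path above.
		 */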
		case CEE_CKFINITE: {
			MONO_INST_NEW (cfg, ins, OP_CKFINITE);
			ins->sreg1 = sp [0]->dreg;
			ins->dreg = alloc_freg (cfg);
			ins->type = STACK_R8;
			MONO_ADD_INS (bblock, ins);

			*sp++ = mono_decompose_opcode (cfg, ins);
		case CEE_REFANYVAL: {
			MonoInst *src_var, *src;

			int klass_reg = alloc_preg (cfg);
			int dreg = alloc_preg (cfg);

			MONO_INST_NEW (cfg, ins, *ip);
			--sp;
			klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
			CHECK_TYPELOAD (klass);
			mono_class_init (klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
			if (!src_var)
				src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
			EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));

			if (context_used) {
				MonoInst *klass_ins;

				klass_ins = emit_get_rgctx_klass (cfg, context_used,
					klass, MONO_RGCTX_INFO_KLASS);

				MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
				MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
			} else {
				mini_emit_class_check (cfg, klass_reg, klass);
			}
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
			ins->type = STACK_MP;
		case CEE_MKREFANY: {
			MonoInst *loc, *addr;

			MONO_INST_NEW (cfg, ins, *ip);
			--sp;
			klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
			CHECK_TYPELOAD (klass);
			mono_class_init (klass);

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);

			loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
			EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);

			if (context_used) {
				MonoInst *const_ins;
				int type_reg = alloc_preg (cfg);

				const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
			} else if (cfg->compile_aot) {
				int const_reg = alloc_preg (cfg);
				int type_reg = alloc_preg (cfg);

				MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
			} else {
				MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
				MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
			}
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);

			EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
			ins->type = STACK_VTYPE;
			ins->klass = mono_defaults.typed_reference_class;
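		/*
		 * A MonoTypedRef is roughly a three-word struct { klass, type,
		 * value }: mkrefany above fills the three slots, while refanyval
		 * checks the klass slot and extracts the value pointer. This backs
		 * the rarely used C# __makeref/__refvalue (TypedReference) feature.
		 */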
			gpointer handle;
			MonoClass *handle_class;

			CHECK_STACK_OVF (1);

			n = read32 (ip + 1);

			if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
					method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
				handle = mono_method_get_wrapper_data (method, n);
				handle_class = mono_method_get_wrapper_data (method, n + 1);
				if (handle_class == mono_defaults.typehandle_class)
					handle = &((MonoClass*)handle)->byval_arg;
			}
			else {
				handle = mono_ldtoken (image, n, &handle_class, generic_context);
			}
			mono_class_init (handle_class);
			if (cfg->generic_sharing_context) {
				if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
						mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
					/* This case handles ldtoken
					   of an open type, like for
					   typeof(Gen<>). */
					context_used = 0;
				} else if (handle_class == mono_defaults.typehandle_class) {
					/* If we get a MONO_TYPE_CLASS
					   then we need to provide the
					   open type, not an
					   instantiation of it. */
					if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
						context_used = 0;
					else
						context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
				} else if (handle_class == mono_defaults.fieldhandle_class)
					context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
				else if (handle_class == mono_defaults.methodhandle_class)
					context_used = mono_method_check_context_used (handle);
				else
					g_assert_not_reached ();
			}
			if ((cfg->opt & MONO_OPT_SHARED) &&
					method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
					method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
				MonoInst *addr, *vtvar, *iargs [3];
				int method_context_used;

				if (cfg->generic_sharing_context)
					method_context_used = mono_method_check_context_used (method);
				else
					method_context_used = 0;

				vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);

				EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
				EMIT_NEW_ICONST (cfg, iargs [1], n);
				if (method_context_used) {
					iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
						method, MONO_RGCTX_INFO_METHOD);
					ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
					ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
				}
				EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);

				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);

				EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
			} else {
				if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
					((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
					(cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
					(cmethod->klass == mono_defaults.monotype_class->parent) &&
					(strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
					MonoClass *tclass = mono_class_from_mono_type (handle);

					mono_class_init (tclass);
					if (context_used) {
						ins = emit_get_rgctx_klass (cfg, context_used,
							tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
					} else if (cfg->compile_aot) {
						if (method->wrapper_type) {
							if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
								/* Special case for static synchronized wrappers */
								EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
							} else {
								/* FIXME: n is not a normal token */
								cfg->disable_aot = TRUE;
								EMIT_NEW_PCONST (cfg, ins, NULL);
							}
						} else {
							EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
						}
					} else {
						EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
					}
					ins->type = STACK_OBJ;
					ins->klass = cmethod->klass;
					ip += 5;
				} else {
					MonoInst *addr, *vtvar;

					vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);

					if (context_used) {
						if (handle_class == mono_defaults.typehandle_class) {
							ins = emit_get_rgctx_klass (cfg, context_used,
								mono_class_from_mono_type (handle),
								MONO_RGCTX_INFO_TYPE);
						} else if (handle_class == mono_defaults.methodhandle_class) {
							ins = emit_get_rgctx_method (cfg, context_used,
								handle, MONO_RGCTX_INFO_METHOD);
						} else if (handle_class == mono_defaults.fieldhandle_class) {
							ins = emit_get_rgctx_field (cfg, context_used,
								handle, MONO_RGCTX_INFO_CLASS_FIELD);
						} else {
							g_assert_not_reached ();
						}
					} else if (cfg->compile_aot) {
						EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
					} else {
						EMIT_NEW_PCONST (cfg, ins, handle);
					}
					EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
					EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
				}
			}
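			/*
			 * The GetTypeFromHandle special case above is what makes
			 *
			 *   Type t = typeof (Foo);      // ldtoken Foo
			 *                               // call Type.GetTypeFromHandle
			 *
			 * cheap: the call pair is recognized and replaced by a direct
			 * reference to the System.Type object, or by an rgctx/AOT
			 * lookup when the actual type is only known later.
			 */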
			MONO_INST_NEW (cfg, ins, OP_THROW);
			--sp;
			ins->sreg1 = sp [0]->dreg;
			bblock->out_of_line = TRUE;
			MONO_ADD_INS (bblock, ins);
			MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
			MONO_ADD_INS (bblock, ins);

			link_bblock (cfg, bblock, end_bblock);
			start_new_bblock = 1;
			break;
		case CEE_ENDFINALLY:
			MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
			MONO_ADD_INS (bblock, ins);
			start_new_bblock = 1;

			/*
			 * Control will leave the method so empty the stack, otherwise
			 * the next basic block will start with a nonempty stack.
			 */
			while (sp != stack_start) {
				sp--;
			}
			break;
			if (*ip == CEE_LEAVE) {
				target = ip + 5 + (gint32)read32(ip + 1);
			} else {
				target = ip + 2 + (signed char)(ip [1]);
			}

			/* empty the stack */
			while (sp != stack_start) {
				sp--;
			}

			/*
			 * If this leave statement is in a catch block, check for a
			 * pending exception, and rethrow it if necessary.
			 * We avoid doing this in runtime invoke wrappers, since those are called
			 * by native code which expects the wrapper to catch all exceptions.
			 */
			for (i = 0; i < header->num_clauses; ++i) {
				MonoExceptionClause *clause = &header->clauses [i];

				/*
				 * Use <= in the final comparison to handle clauses with multiple
				 * leave statements, like in bug #78024.
				 * The ordering of the exception clauses guarantees that we find the
				 * innermost clause.
				 */
				if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
					MonoInst *exc_ins;
					MonoBasicBlock *dont_throw;

					/*
					  MonoInst *load;

					  NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
					*/

					exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);

					NEW_BBLOCK (cfg, dont_throw);

					/*
					 * Currently, we always rethrow the abort exception, despite the
					 * fact that this is not correct. See thread6.cs for an example.
					 * But propagating the abort exception is more important than
					 * getting the semantics right.
					 */
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
					MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);

					MONO_START_BB (cfg, dont_throw);
				}
			}

			if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
				GList *tmp;
				MonoExceptionClause *clause;

				for (tmp = handlers; tmp; tmp = tmp->next) {
					clause = tmp->data;
					tblock = cfg->cil_offset_to_bb [clause->handler_offset];
					g_assert (tblock);
					link_bblock (cfg, bblock, tblock);
					MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
					ins->inst_target_bb = tblock;
					ins->inst_eh_block = clause;
					MONO_ADD_INS (bblock, ins);
					bblock->has_call_handler = 1;
					if (COMPILE_LLVM (cfg)) {
						MonoBasicBlock *target_bb;

						/*
						 * Link the finally bblock with the target, since it will
						 * conceptually branch there.
						 * FIXME: Have to link the bblock containing the endfinally.
						 */
						GET_BBLOCK (cfg, target_bb, target);
						link_bblock (cfg, tblock, target_bb);
					}
				}
				g_list_free (handlers);
			}

			MONO_INST_NEW (cfg, ins, OP_BR);
			MONO_ADD_INS (bblock, ins);
			GET_BBLOCK (cfg, tblock, target);
			link_bblock (cfg, bblock, tblock);
			ins->inst_target_bb = tblock;
			start_new_bblock = 1;

			if (*ip == CEE_LEAVE)
				ip += 5;
			else
				ip += 2;
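			/*
			 * A LEAVE is not a plain branch: it first rethrows any pending
			 * thread-abort exception when leaving a catch handler, then runs
			 * every enclosing finally clause between here and the target via
			 * OP_CALL_HANDLER, and only then branches. This mirrors the CLI
			 * rule that finally handlers run on every non-exceptional exit.
			 */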
			/*
			 * Mono specific opcodes
			 */
		case MONO_CUSTOM_PREFIX: {

			g_assert (method->wrapper_type != MONO_WRAPPER_NONE);

			switch (ip [1]) {
			case CEE_MONO_ICALL: {
				gpointer func;
				MonoJitICallInfo *info;

				token = read32 (ip + 2);
				func = mono_method_get_wrapper_data (method, token);
				info = mono_find_jit_icall_by_addr (func);
				g_assert (info);

				CHECK_STACK (info->sig->param_count);
				sp -= info->sig->param_count;

				ins = mono_emit_jit_icall (cfg, info->func, sp);
				if (!MONO_TYPE_IS_VOID (info->sig->ret))
					*sp++ = ins;

				ip += 6;
				inline_costs += 10 * num_calls++;

				break;
			}
			case CEE_MONO_LDPTR: {
				gpointer ptr;

				CHECK_STACK_OVF (1);
				token = read32 (ip + 2);

				ptr = mono_method_get_wrapper_data (method, token);
				if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
					MonoJitICallInfo *callinfo;
					const char *icall_name;

					icall_name = method->name + strlen ("__icall_wrapper_");
					g_assert (icall_name);
					callinfo = mono_find_jit_icall_by_name (icall_name);
					g_assert (callinfo);

					if (ptr == callinfo->func) {
						/* Will be transformed into an AOTCONST later */
						EMIT_NEW_PCONST (cfg, ins, ptr);
						*sp++ = ins;
						ip += 6;
						break;
					}
				}
				/* FIXME: Generalize this */
				if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
					EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
					*sp++ = ins;
					ip += 6;
					break;
				}
				EMIT_NEW_PCONST (cfg, ins, ptr);
				*sp++ = ins;
				ip += 6;
				inline_costs += 10 * num_calls++;
				/* Can't embed random pointers into AOT code */
				cfg->disable_aot = 1;
				break;
			}
			case CEE_MONO_ICALL_ADDR: {
				MonoMethod *cmethod;
				gpointer ptr;

				CHECK_STACK_OVF (1);
				token = read32 (ip + 2);

				cmethod = mono_method_get_wrapper_data (method, token);

				if (cfg->compile_aot) {
					EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
				} else {
					ptr = mono_lookup_internal_call (cmethod);
					EMIT_NEW_PCONST (cfg, ins, ptr);
				}
				*sp++ = ins;
				ip += 6;
				break;
			}
			case CEE_MONO_VTADDR: {
				MonoInst *src_var, *src;

				CHECK_STACK (1);
				--sp;

				src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
				EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
				*sp++ = src;
				ip += 2;
				break;
			}
			case CEE_MONO_NEWOBJ: {
				MonoInst *iargs [2];

				CHECK_STACK_OVF (1);
				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
				mono_class_init (klass);
				NEW_DOMAINCONST (cfg, iargs [0]);
				MONO_ADD_INS (cfg->cbb, iargs [0]);
				NEW_CLASSCONST (cfg, iargs [1], klass);
				MONO_ADD_INS (cfg->cbb, iargs [1]);
				*sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_MONO_OBJADDR:
				CHECK_STACK (1);
				--sp;
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = alloc_ireg_mp (cfg);
				ins->sreg1 = sp [0]->dreg;
				ins->type = STACK_MP;
				MONO_ADD_INS (cfg->cbb, ins);
				*sp++ = ins;
				ip += 2;
				break;
			case CEE_MONO_LDNATIVEOBJ:
				/*
				 * Similar to LDOBJ, but instead load the unmanaged
				 * representation of the vtype to the stack.
				 */
				CHECK_STACK (1);
				--sp;
				token = read32 (ip + 2);
				klass = mono_method_get_wrapper_data (method, token);
				g_assert (klass->valuetype);
				mono_class_init (klass);

				{
					MonoInst *src, *dest, *temp;

					src = sp [0];
					temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
					temp->backend.is_pinvoke = 1;
					EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
					mini_emit_stobj (cfg, dest, src, klass, TRUE);

					EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
					dest->type = STACK_VTYPE;
					dest->klass = klass;
				}
			case CEE_MONO_RETOBJ: {
				/*
				 * Same as RET, but return the native representation of a vtype
				 * to the caller.
				 */
				g_assert (cfg->ret);
				g_assert (mono_method_signature (method)->pinvoke);
				CHECK_STACK (1);
				--sp;

				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);

				if (!cfg->vret_addr) {
					g_assert (cfg->ret_var_is_local);

					EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
				} else {
					EMIT_NEW_RETLOADA (cfg, ins);
				}
				mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);

				if (sp != stack_start)
					UNVERIFIED;

				MONO_INST_NEW (cfg, ins, OP_BR);
				ins->inst_target_bb = end_bblock;
				MONO_ADD_INS (bblock, ins);
				link_bblock (cfg, bblock, end_bblock);
				start_new_bblock = 1;
			case CEE_MONO_CISINST:
			case CEE_MONO_CCASTCLASS: {
				CHECK_STACK (1);
				--sp;
				token = read32 (ip + 2);
				klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
				if (ip [1] == CEE_MONO_CISINST)
					ins = handle_cisinst (cfg, klass, sp [0]);
				else
					ins = handle_ccastclass (cfg, klass, sp [0]);
				*sp++ = ins;
				ip += 6;
				break;
			}
			case CEE_MONO_SAVE_LMF:
			case CEE_MONO_RESTORE_LMF:
#ifdef MONO_ARCH_HAVE_LMF_OPS
				MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
				MONO_ADD_INS (bblock, ins);
				cfg->need_lmf_area = TRUE;
#endif
			case CEE_MONO_CLASSCONST:
				CHECK_STACK_OVF (1);
				token = read32 (ip + 2);
				EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
				*sp++ = ins;
				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			case CEE_MONO_NOT_TAKEN:
				bblock->out_of_line = TRUE;
				ip += 2;
				break;
			case CEE_MONO_TLS:
				CHECK_STACK_OVF (1);
				MONO_INST_NEW (cfg, ins, OP_TLS_GET);
				ins->dreg = alloc_preg (cfg);
				ins->inst_offset = (gint32)read32 (ip + 2);
				ins->type = STACK_PTR;
				MONO_ADD_INS (bblock, ins);
			case CEE_MONO_DYN_CALL: {
				MonoCallInst *call;

				/* It would be easier to call a trampoline, but that would put an
				 * extra frame on the stack, confusing exception handling. So
				 * implement it inline using an opcode for now.
				 */

				if (!cfg->dyn_call_var) {
					cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
					/* prevent it from being register allocated */
					cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
				}

				/* Has to use a call inst since the local regalloc expects it */
				MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
				ins = (MonoInst*)call;
				sp -= 2;
				ins->sreg1 = sp [0]->dreg;
				ins->sreg2 = sp [1]->dreg;
				MONO_ADD_INS (bblock, ins);

#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
				cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
#endif

				ip += 2;
				inline_costs += 10 * num_calls++;

				break;
			}
			default:
				g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
				break;
		case CEE_PREFIX1: {
			switch (ip [1]) {
			case CEE_ARGLIST: {
				/* somewhat similar to LDTOKEN */
				MonoInst *addr, *vtvar;
				CHECK_STACK_OVF (1);
				vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);

				EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
				EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);

				EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
				ins->type = STACK_VTYPE;
				ins->klass = mono_defaults.argumenthandle_class;
			case CEE_CEQ:
			case CEE_CGT:
			case CEE_CGT_UN:
			case CEE_CLT:
			case CEE_CLT_UN: {
				MonoInst *cmp;

				/*
				 * The following transforms:
				 *    CEE_CEQ    into OP_CEQ
				 *    CEE_CGT    into OP_CGT
				 *    CEE_CGT_UN into OP_CGT_UN
				 *    CEE_CLT    into OP_CLT
				 *    CEE_CLT_UN into OP_CLT_UN
				 */
				MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);

				MONO_INST_NEW (cfg, ins, cmp->opcode);
				sp -= 2;
				cmp->sreg1 = sp [0]->dreg;
				cmp->sreg2 = sp [1]->dreg;
				type_from_op (cmp, sp [0], sp [1]);
				if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
					cmp->opcode = OP_LCOMPARE;
				else if (sp [0]->type == STACK_R8)
					cmp->opcode = OP_FCOMPARE;
				else
					cmp->opcode = OP_ICOMPARE;
				MONO_ADD_INS (bblock, cmp);
				ins->type = STACK_I4;
				ins->dreg = alloc_dreg (cfg, ins->type);
				type_from_op (ins, sp [0], sp [1]);

				if (cmp->opcode == OP_FCOMPARE) {
					/*
					 * The backends expect the fceq opcodes to do the
					 * comparison too.
					 */
					cmp->opcode = OP_NOP;
					ins->sreg1 = cmp->sreg1;
					ins->sreg2 = cmp->sreg2;
				}
				MONO_ADD_INS (bblock, ins);
			case CEE_LDFTN: {
				MonoInst *argconst;
				MonoMethod *cil_method;

				CHECK_STACK_OVF (1);
				n = read32 (ip + 2);
				cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
				if (!cmethod || mono_loader_get_last_error ())
					LOAD_ERROR;
				mono_class_init (cmethod->klass);

				mono_save_token_info (cfg, image, n, cmethod);

				if (cfg->generic_sharing_context)
					context_used = mono_method_check_context_used (cmethod);

				cil_method = cmethod;
				if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
					METHOD_ACCESS_FAILURE;

				if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
					if (check_linkdemand (cfg, method, cmethod))
						INLINE_FAILURE;
					CHECK_CFG_EXCEPTION;
				} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
					ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
				}
				/*
				 * Optimize the common case of ldftn+delegate creation
				 */
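				/* The IL pattern being matched is 'ldftn <method>' (6 bytes,
				 * hence the ip [6] check) immediately followed by
				 * 'newobj <delegate ctor>' (5 bytes, ctor token read at ip + 7). */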
				if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
					MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);

					if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
						MonoInst *target_ins;
						MonoMethod *invoke;
						int invoke_context_used = 0;

						invoke = mono_get_delegate_invoke (ctor_method->klass);
						if (!invoke || !mono_method_signature (invoke))
							LOAD_ERROR;

						if (cfg->generic_sharing_context)
							invoke_context_used = mono_method_check_context_used (invoke);

						target_ins = sp [-1];

						if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
							ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);

						if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
							/*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
							if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
								MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
								MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
							}
						}

#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
						/* FIXME: SGEN support */
						if (invoke_context_used == 0) {
							ip += 6;
							if (cfg->verbose_level > 3)
								g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
							*sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
							CHECK_CFG_EXCEPTION;
							ip += 5;
							sp ++;
							break;
						}
#endif
					}
				}

				argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
				ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
				*sp++ = ins;

				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_LDVIRTFTN: {
				MonoInst *args [2];

				CHECK_STACK (1);
				n = read32 (ip + 2);
				cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
				if (!cmethod || mono_loader_get_last_error ())
					LOAD_ERROR;
				mono_class_init (cmethod->klass);

				if (cfg->generic_sharing_context)
					context_used = mono_method_check_context_used (cmethod);

				if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
					if (check_linkdemand (cfg, method, cmethod))
						INLINE_FAILURE;
					CHECK_CFG_EXCEPTION;
				} else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
					ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
				}

				--sp;
				args [0] = *sp;

				args [1] = emit_get_rgctx_method (cfg, context_used,
												  cmethod, MONO_RGCTX_INFO_METHOD);

				if (context_used)
					*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
				else
					*sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);

				ip += 6;
				inline_costs += 10 * num_calls++;
				break;
			}
			case CEE_LDARG:
				CHECK_STACK_OVF (1);
				n = read16 (ip + 2);
				CHECK_ARG (n);
				EMIT_NEW_ARGLOAD (cfg, ins, n);
				*sp++ = ins;
				ip += 4;
				break;
			case CEE_LDARGA:
				CHECK_STACK_OVF (1);
				n = read16 (ip + 2);
				CHECK_ARG (n);
				NEW_ARGLOADA (cfg, ins, n);
				MONO_ADD_INS (cfg->cbb, ins);
				*sp++ = ins;
				ip += 4;
				break;
			case CEE_STARG:
				CHECK_STACK (1);
				--sp;
				n = read16 (ip + 2);
				CHECK_ARG (n);
				if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
					UNVERIFIED;
				EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
				ip += 4;
				break;
			case CEE_LDLOC:
				CHECK_STACK_OVF (1);
				n = read16 (ip + 2);
				CHECK_LOCAL (n);
				EMIT_NEW_LOCLOAD (cfg, ins, n);
				*sp++ = ins;
				ip += 4;
				break;
			case CEE_LDLOCA: {
				unsigned char *tmp_ip;
				CHECK_STACK_OVF (1);
				n = read16 (ip + 2);
				CHECK_LOCAL (n);

				if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
					ip = tmp_ip;
					inline_costs += 1;
					break;
				}

				EMIT_NEW_LOCLOADA (cfg, ins, n);
				*sp++ = ins;
				ip += 4;
				break;
			}
			case CEE_STLOC:
				CHECK_STACK (1);
				--sp;
				n = read16 (ip + 2);
				CHECK_LOCAL (n);
				if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
					UNVERIFIED;
				emit_stloc_ir (cfg, sp, header, n);
				ip += 4;
				inline_costs += 1;
				break;
			case CEE_LOCALLOC:
				CHECK_STACK (1);
				--sp;
				if (sp != stack_start)
					UNVERIFIED;
				if (cfg->method != method)
					/*
					 * Inlining this into a loop in a parent could lead to
					 * stack overflows which is different behavior than the
					 * non-inlined case, thus disable inlining in this case.
					 */
					goto inline_failure;

				MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
				ins->dreg = alloc_preg (cfg);
				ins->sreg1 = sp [0]->dreg;
				ins->type = STACK_PTR;
				MONO_ADD_INS (cfg->cbb, ins);

				cfg->flags |= MONO_CFG_HAS_ALLOCA;
				if (init_locals)
					ins->flags |= MONO_INST_INIT;

				*sp++ = ins;
				ip += 2;
				break;
			case CEE_ENDFILTER: {
				MonoExceptionClause *clause, *nearest;
				int cc, nearest_num;

				CHECK_STACK (1);
				--sp;
				if ((sp != stack_start) || (sp [0]->type != STACK_I4))
					UNVERIFIED;
				MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
				ins->sreg1 = (*sp)->dreg;
				MONO_ADD_INS (bblock, ins);
				start_new_bblock = 1;
				ip += 2;

				nearest = NULL;
				nearest_num = 0;
				for (cc = 0; cc < header->num_clauses; ++cc) {
					clause = &header->clauses [cc];
					if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
						((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
						(!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
						nearest = clause;
						nearest_num = cc;
					}
				}
				g_assert (nearest);
				if ((ip - header->code) != nearest->handler_offset)
					UNVERIFIED;

				break;
			}
			case CEE_UNALIGNED_:
				ins_flag |= MONO_INST_UNALIGNED;
				/* FIXME: record alignment? we can assume 1 for now */
				ip += 3;
				break;
			case CEE_VOLATILE_:
				ins_flag |= MONO_INST_VOLATILE;
				ip += 2;
				break;
			case CEE_TAIL_:
				ins_flag |= MONO_INST_TAILCALL;
				cfg->flags |= MONO_CFG_HAS_TAIL;
				/* Can't inline tail calls at this time */
				inline_costs += 100000;
				ip += 2;
				break;
			case CEE_INITOBJ:
				CHECK_STACK (1);
				--sp;
				token = read32 (ip + 2);
				klass = mini_get_class (method, token, generic_context);
				CHECK_TYPELOAD (klass);
				if (generic_class_is_reference_type (cfg, klass))
					MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
				else
					mini_emit_initobj (cfg, *sp, NULL, klass);
				ip += 6;
				inline_costs += 1;
				break;
			case CEE_CONSTRAINED_:
				token = read32 (ip + 2);
				if (method->wrapper_type != MONO_WRAPPER_NONE)
					constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
				else
					constrained_call = mono_class_get_full (image, token, generic_context);
				CHECK_TYPELOAD (constrained_call);
				ip += 6;
				break;
			case CEE_CPBLK:
			case CEE_INITBLK: {
				MonoInst *iargs [3];

				CHECK_STACK (3);
				sp -= 3;

				if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
					mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
				} else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
					/* emit_memset only works when val == 0 */
					mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
				} else {
					iargs [0] = sp [0];
					iargs [1] = sp [1];
					iargs [2] = sp [2];
					if (ip [1] == CEE_CPBLK) {
						MonoMethod *memcpy_method = get_memcpy_method ();
						mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
					} else {
						MonoMethod *memset_method = get_memset_method ();
						mono_emit_method_call (cfg, memset_method, iargs, NULL);
					}
				}
				ip += 2;
				inline_costs += 1;
				break;
			}
			case CEE_NO_:
				if (ip [2] & 0x1)
					ins_flag |= MONO_INST_NOTYPECHECK;
				if (ip [2] & 0x2)
					ins_flag |= MONO_INST_NORANGECHECK;
				/* we ignore the no-nullcheck for now since we
				 * really do it explicitly only when doing callvirt->call
				 */
				ip += 3;
				break;
			case CEE_RETHROW: {
				MonoInst *load;
				int handler_offset = -1;

				for (i = 0; i < header->num_clauses; ++i) {
					MonoExceptionClause *clause = &header->clauses [i];
					if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
						handler_offset = clause->handler_offset;
						break;
					}
				}

				bblock->flags |= BB_EXCEPTION_UNSAFE;

				g_assert (handler_offset != -1);

				EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
				MONO_INST_NEW (cfg, ins, OP_RETHROW);
				ins->sreg1 = load->dreg;
				MONO_ADD_INS (bblock, ins);

				MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
				MONO_ADD_INS (bblock, ins);

				link_bblock (cfg, bblock, end_bblock);
				start_new_bblock = 1;
				ip += 2;
				break;
			}
			case CEE_SIZEOF: {
				guint32 align;
				int ialign;

				CHECK_STACK_OVF (1);
				token = read32 (ip + 2);
				if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
					MonoType *type = mono_type_create_from_typespec (image, token);
					token = mono_type_size (type, &ialign);
				} else {
					MonoClass *klass = mono_class_get_full (image, token, generic_context);
					CHECK_TYPELOAD (klass);
					mono_class_init (klass);
					token = mono_class_value_size (klass, &align);
				}
				EMIT_NEW_ICONST (cfg, ins, token);
				*sp++ = ins;
				ip += 6;
				break;
			}
			case CEE_REFANYTYPE: {
				MonoInst *src_var, *src;

				CHECK_STACK (1);
				--sp;

				src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
				if (!src_var)
					src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
				EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
				EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
				*sp++ = ins;
				ip += 2;
				break;
			}
			case CEE_READONLY_:
				readonly = TRUE;
				ip += 2;
				break;
			default:
				g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
				UNVERIFIED;
			}
			break;
		}
		default:
			g_warning ("opcode 0x%02x not handled", *ip);
			UNVERIFIED;
		}
	}
	if (start_new_bblock != 1)
		UNVERIFIED;

	bblock->cil_length = ip - bblock->cil_code;
	bblock->next_bb = end_bblock;
	if (cfg->method == method && cfg->domainvar) {
		MonoInst *store;
		MonoInst *get_domain;

		cfg->cbb = init_localsbb;

		if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
			get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
		} else {
			get_domain->dreg = alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, get_domain);
		}
		NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
		MONO_ADD_INS (cfg->cbb, store);
	}
#if defined(TARGET_POWERPC) || defined(TARGET_X86)
	if (cfg->compile_aot)
		/* FIXME: The plt slots require a GOT var even if the method doesn't use it */
		mono_get_got_var (cfg);
#endif

	if (cfg->method == method && cfg->got_var)
		mono_emit_load_got_addr (cfg);
	if (init_locals) {
		MonoInst *store;

		cfg->cbb = init_localsbb;
		for (i = 0; i < header->num_locals; ++i) {
			MonoType *ptype = header->locals [i];
			int t = ptype->type;
			dreg = cfg->locals [i]->dreg;

			if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
				t = mono_class_enum_basetype (ptype->data.klass)->type;
			if (ptype->byref) {
				MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
			} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
				MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
			} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
				MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
			} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
				MONO_INST_NEW (cfg, ins, OP_R8CONST);
				ins->type = STACK_R8;
				ins->inst_p0 = (void*)&r8_0;
				ins->dreg = alloc_dreg (cfg, STACK_R8);
				MONO_ADD_INS (init_localsbb, ins);
				EMIT_NEW_LOCSTORE (cfg, store, i, ins);
			} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
				   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
				MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
			} else {
				MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
			}
		}
	}
	if (cfg->init_ref_vars && cfg->method == method) {
		/* Emit initialization for ref vars */
		// FIXME: Avoid duplicate initialization for IL locals.
		for (i = 0; i < cfg->num_varinfo; ++i) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
				MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
		}
	}
	/* Add a sequence point for method entry/exit events */
	if (seq_points) {
		NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
		MONO_ADD_INS (init_localsbb, ins);
		NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
		MONO_ADD_INS (cfg->bb_exit, ins);
	}
	if (cfg->method == method) {
		MonoBasicBlock *bb;

		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			bb->region = mono_find_block_region (cfg, bb->real_offset);
			if (cfg->spvars)
				mono_create_spvar_for_region (cfg, bb->region);
			if (cfg->verbose_level > 2)
				printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
		}
	}
	g_slist_free (class_inits);
	dont_inline = g_list_remove (dont_inline, method);

	if (inline_costs < 0) {
		char *mname;

		/* Method is too large */
		mname = mono_method_full_name (method, TRUE);
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
		cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
		g_free (mname);
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
		mono_basic_block_free (original_bb);
		return -1;
	}

	if ((cfg->verbose_level > 2) && (cfg->method == method))
		mono_print_code (cfg, "AFTER METHOD-TO-IR");

	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
	mono_basic_block_free (original_bb);

	return inline_costs;
 exception_exit:
	g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
	goto cleanup;

 inline_failure:
	goto cleanup;

 load_error:
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
	goto cleanup;

 unverified:
	set_exception_type_from_invalid_il (cfg, method, ip);
	goto cleanup;

 cleanup:
	g_slist_free (class_inits);
	mono_basic_block_free (original_bb);
	dont_inline = g_list_remove (dont_inline, method);
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
	return -1;
}
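/*
 * Maps a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM form. Used by
 * mono_spill_global_vars () below to fuse a constant definition into the
 * spill store itself, e.g. (hypothetical vregs, R5 spilled to [basereg + 0x10]):
 *     OP_ICONST R5 <- 42  +  OP_STOREI4_MEMBASE_REG [basereg + 0x10] <- R5
 * becomes the single instruction:
 *     OP_STOREI4_MEMBASE_IMM [basereg + 0x10] <- 42
 */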
static int
store_membase_reg_to_store_membase_imm (int opcode)
{
	switch (opcode) {
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	case OP_STOREI8_MEMBASE_REG:
		return OP_STOREI8_MEMBASE_IMM;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
10653 #endif /* DISABLE_JIT */
int
mono_op_to_op_imm (int opcode)
{
	switch (opcode) {
	case OP_IADD:
		return OP_IADD_IMM;
	case OP_ISUB:
		return OP_ISUB_IMM;
	case OP_IDIV:
		return OP_IDIV_IMM;
	case OP_IDIV_UN:
		return OP_IDIV_UN_IMM;
	case OP_IREM:
		return OP_IREM_IMM;
	case OP_IREM_UN:
		return OP_IREM_UN_IMM;
	case OP_IMUL:
		return OP_IMUL_IMM;
	case OP_IAND:
		return OP_IAND_IMM;
	case OP_IOR:
		return OP_IOR_IMM;
	case OP_IXOR:
		return OP_IXOR_IMM;
	case OP_ISHL:
		return OP_ISHL_IMM;
	case OP_ISHR:
		return OP_ISHR_IMM;
	case OP_ISHR_UN:
		return OP_ISHR_UN_IMM;

	case OP_LADD:
		return OP_LADD_IMM;
	case OP_LSUB:
		return OP_LSUB_IMM;
	case OP_LAND:
		return OP_LAND_IMM;
	case OP_LOR:
		return OP_LOR_IMM;
	case OP_LXOR:
		return OP_LXOR_IMM;
	case OP_LSHL:
		return OP_LSHL_IMM;
	case OP_LSHR:
		return OP_LSHR_IMM;
	case OP_LSHR_UN:
		return OP_LSHR_UN_IMM;

	case OP_COMPARE:
		return OP_COMPARE_IMM;
	case OP_ICOMPARE:
		return OP_ICOMPARE_IMM;
	case OP_LCOMPARE:
		return OP_LCOMPARE_IMM;

	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;

#if defined(TARGET_X86) || defined (TARGET_AMD64)
	case OP_X86_PUSH:
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#endif
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#endif
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
	case OP_LOCALLOC:
		return OP_LOCALLOC_IMM;
	}

	return -1;
}
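/*
 * E.g. when the second operand of an integer add is a constant, the local
 * optimization passes can rewrite (hypothetical vregs):
 *     OP_ICONST R7 <- 16  +  OP_IADD R8 <- R6 R7
 * into:
 *     OP_IADD_IMM R8 <- R6 [16]
 * using the mapping above; opcodes missing from the table return -1 and are
 * left untouched.
 */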
static int
ldind_to_load_membase (int opcode)
{
	switch (opcode) {
	case CEE_LDIND_I1:
		return OP_LOADI1_MEMBASE;
	case CEE_LDIND_U1:
		return OP_LOADU1_MEMBASE;
	case CEE_LDIND_I2:
		return OP_LOADI2_MEMBASE;
	case CEE_LDIND_U2:
		return OP_LOADU2_MEMBASE;
	case CEE_LDIND_I4:
		return OP_LOADI4_MEMBASE;
	case CEE_LDIND_U4:
		return OP_LOADU4_MEMBASE;
	case CEE_LDIND_I:
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_REF:
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_I8:
		return OP_LOADI8_MEMBASE;
	case CEE_LDIND_R4:
		return OP_LOADR4_MEMBASE;
	case CEE_LDIND_R8:
		return OP_LOADR8_MEMBASE;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
static int
stind_to_store_membase (int opcode)
{
	switch (opcode) {
	case CEE_STIND_I1:
		return OP_STOREI1_MEMBASE_REG;
	case CEE_STIND_I2:
		return OP_STOREI2_MEMBASE_REG;
	case CEE_STIND_I4:
		return OP_STOREI4_MEMBASE_REG;
	case CEE_STIND_I:
	case CEE_STIND_REF:
		return OP_STORE_MEMBASE_REG;
	case CEE_STIND_I8:
		return OP_STOREI8_MEMBASE_REG;
	case CEE_STIND_R4:
		return OP_STORER4_MEMBASE_REG;
	case CEE_STIND_R8:
		return OP_STORER8_MEMBASE_REG;
	default:
		g_assert_not_reached ();
	}

	return -1;
}
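/*
 * The two helpers above lower the CIL indirect access opcodes to explicit
 * membase operations on the pointer popped from the evaluation stack, e.g.
 * 'ldind.i4' becomes OP_LOADI4_MEMBASE and 'stind.ref' becomes the
 * pointer-sized OP_STORE_MEMBASE_REG.
 */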
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
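/*
 * The *_MEM forms load from an absolute address rather than base + offset;
 * the conversion is only offered on x86/amd64, which have such addressing
 * modes, for the case where the base register is known to be a constant.
 */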
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	}
#endif

	return -1;
}
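/*
 * Used when an instruction reads and writes the same spilled variable:
 * instead of emitting load + op + store, a single x86/amd64 read-modify-write
 * instruction is used, e.g. (hypothetical vregs, R5 spilled to [basereg + 0x10]):
 *     OP_IADD R5 <- R5 R9
 * becomes:
 *     OP_X86_ADD_MEMBASE_REG [basereg + 0x10] <- R9
 */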
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
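/*
 * Fuses a load of the first source register into the instruction itself,
 * e.g. (hypothetical vregs, R6 spilled to [basereg + 0x8]):
 *     OP_LOADI4_MEMBASE R6 <- [basereg + 0x8]  +  OP_ICOMPARE -- R6 R7
 * becomes:
 *     OP_X86_COMPARE_MEMBASE_REG [basereg + 0x8] R7
 * op_to_op_src2_membase () below does the same for the second source.
 */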
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
#ifndef DISABLE_JIT

/**
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
{
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
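	/*
	 * vreg_to_bb encoding, as used below: 0 means the vreg has not been
	 * seen yet, block_num + 1 means it has so far only been seen in that
	 * bblock, and -1 means it has been seen in more than one bblock, i.e.
	 * it has been made global.
	 */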
#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;
			int vreg = -1;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			g_assert (ins->opcode >= MONO_CEE_LAST);

			for (regindex = 0; regindex < 4; regindex ++) {
				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;
				}

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);
					}

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
				}
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						switch (regtype) {
						case 'i':
							if (vreg_is_ref (cfg, vreg))
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
							else
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();
						}
					}

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;
				}
			}
		}
	}
	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
				/*
				 * Make sure that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;
							}
						}

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;
					}

					if (spilled)
						break;
				}
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			}
			break;
		}
	}
	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
					var1->inst_c0 = pos;
				}
#endif
			}
			pos ++;
		}
	}
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
}
/**
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
{
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 *lvregs;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;
			}
			default:
				break;
			}
		}
	}
#endif
	if (cfg->compute_gc_maps) {
		/* registers need liveness info even for non-ref variables */
		for (i = 0; i < cfg->num_varinfo; i++) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_REGVAR)
				ins->flags |= MONO_INST_GC_TRACK;
		}
	}
	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;
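	/*
	 * For instance (hypothetical vregs, with the variable spilled to
	 * [basereg + 0x10]), two back-to-back reads need only one load:
	 *
	 *     OP_LOADI4_MEMBASE R30 <- [basereg + 0x10]
	 *     OP_IADD R10 <- R30 R21
	 *     OP_IADD R11 <- R30 R22    ; reuses the cached lvreg R30
	 *
	 * The cache is invalidated whenever the variable is stored to, and
	 * cleared entirely at calls.
	 */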
	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variable's location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					}
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;
				}

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);
			}

			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();
			}

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME: Handle this in the input too.
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;
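			/*
			 * E.g. for OP_STOREI4_MEMBASE_REG the dreg slot actually holds
			 * the base register, so it is swapped with sreg2 above and spec2
			 * marks both operands as sources; the swap is undone after the
			 * sregs have been processed (see the 'if (store)' block below).
			 */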
			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");
			}
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;
					}
					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;
					}

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					}
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (ins->opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;
						}
					}
				}

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;
				}

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MonoInst *tmp;

					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);
				}
			}
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MonoInst *tmp;

							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);
						}

						continue;
					}

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;
					}

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;
								}
								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;
							}
						}

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						}
					}

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;
					}

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MonoInst *tmp;

						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);
					}
				}
			}
			mono_inst_set_src_registers (ins, sregs);
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;
			}

			if (store) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
			}

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);
			}

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);
		}
		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;
				}

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;
				}
			}
		}
	}
#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			}
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
			}
		}
	}
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
}
/*
 * FIXME:
 * - use 'iadd' instead of 'int_add'
 * - handling ovf opcodes: decompose in method_to_ir.
 * - unify iregs/fregs
 *   -> partly done, the missing parts are:
 *   - a more complete unification would involve unifying the hregs as well, so
 *     code wouldn't need if (fp) all over the place. but that would mean the hregs
 *     would no longer map to the machine hregs, so the code generators would need to
 *     be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
 *     wouldn't work any more. Duplicating the code in mono_local_regalloc () into
 *     fp/non-fp branches speeds it up by about 15%.
 * - use sext/zext opcodes instead of shifts
 * - get rid of TEMPLOADs if possible and use vregs instead
 * - clean up usage of OP_P/OP_ opcodes
 * - cleanup usage of DUMMY_USE
 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
 *   stack
 * - set the stack type and allocate a dreg in the EMIT_NEW macros
 * - get rid of all the <foo>2 stuff when the new JIT is ready.
 * - make sure handle_stack_args () is called before the branch is emitted
 * - when the new IR is done, get rid of all unused stuff
 * - COMPARE/BEQ as separate instructions or unify them ?
 *   - keeping them separate allows specialized compare instructions like
 *     compare_imm, compare_membase
 *   - most back ends unify fp compare+branch, fp compare+ceq
 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
 * - handle long shift opts on 32 bit platforms somehow: they require
 *   3 sregs (2 for arg1 and 1 for arg2)
 * - make byref a 'normal' type.
 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
 *   variable if needed.
 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
 *   like inline_method.
 * - remove inlining restrictions
 * - fix LNEG and enable cfold of INEG
 * - generalize x86 optimizations like ldelema as a peephole optimization
 * - add store_mem_imm for amd64
 * - optimize the loading of the interruption flag in the managed->native wrappers
 * - avoid special handling of OP_NOP in passes
 * - move code inserting instructions into one function/macro.
 * - try a coalescing phase after liveness analysis
 * - add float -> vreg conversion + local optimizations on !x86
 * - figure out how to handle decomposed branches during optimizations, ie.
 *   compare+branch, op_jump_table+op_br etc.
 * - promote RuntimeXHandles to vregs
 * - vtype cleanups:
 *   - add a NEW_VARLOADA_VREG macro
 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
 *   accessing vtype fields.
 * - get rid of I8CONST on 64 bit platforms
 * - dealing with the increase in code size due to branches created during opcode
 *   decomposition:
 *   - use extended basic blocks
 *     - all parts of the JIT
 *     - handle_global_vregs () && local regalloc
 *   - avoid introducing global vregs during decomposition, like 'vtable' in isinst
 * - sources of increase in code size:
 *   - isinst and castclass
 *   - lvregs not allocated to global registers even if used multiple times
 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
 *   meaningful.
 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
 * - add all micro optimizations from the old JIT
 * - put tree optimizations into the deadce pass
 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
 *   specific function.
 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
 *   fcompare + branchCC.
 * - create a helper function for allocating a stack slot, taking into account
 *   MONO_CFG_HAS_SPILLUP.
 * - merge the ia64 switch changes.
 * - optimize mono_regstate2_alloc_int/float.
 * - fix the pessimistic handling of variables accessed in exception handler blocks.
 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
 *   parts of the tree could be separated by other instructions, killing the tree
 *   arguments, or stores killing loads etc. Also, should we fold loads into other
 *   instructions if the result of the load is used multiple times ?
 * - make the REM_IMM optimization in mini-x86.c arch-independent.
 * - LAST MERGE: 108395.
 * - when returning vtypes in registers, generate IR and append it to the end of the
 *   last bb instead of doing it in the epilog.
 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
 */
/*
 NOTES
 -----

 - When to decompose opcodes:
   - earlier: this makes some optimizations hard to implement, since the low-level IR
     no longer contains the necessary information. But it is easier to do.
   - later: harder to implement, enables more optimizations.
 - Branches inside bblocks:
   - created when decomposing complex opcodes.
     - branches to another bblock: harmless, but not tracked by the branch
       optimizations, so need to branch to a label at the start of the bblock.
     - branches to inside the same bblock: very problematic, trips up the local
       reg allocator. Can be fixed by splitting the current bblock, but that is a
       complex operation, since some local vregs can become global vregs etc.
 - Local/global vregs:
   - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
     local register allocator.
   - global vregs: used in more than one bblock. Have an associated MonoMethodVar
     structure, created by mono_create_var (). Assigned to hregs or the stack by
     the global register allocator.
 - When to do optimizations like alu->alu_imm:
   - earlier -> saves work later on since the IR will be smaller/simpler
   - later -> can work on more instructions
 - Handling of valuetypes:
   - When a vtype is pushed on the stack, a new temporary is created, an
     instruction computing its address (LDADDR) is emitted and pushed on
     the stack. Need to optimize cases when the vtype is used immediately as in
     argument passing, stloc etc.
 - Instead of the to_end stuff in the old JIT, simply call the function handling
   the values on the stack before emitting the last instruction of the bb.
*/
12006 #endif /* DISABLE_JIT */