3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/utils/mono-compiler.h>
32 #ifdef HAVE_SYS_TIME_H
40 #include <mono/utils/memcheck.h>
41 #include <mono/metadata/abi-details.h>
42 #include <mono/metadata/assembly.h>
43 #include <mono/metadata/attrdefs.h>
44 #include <mono/metadata/loader.h>
45 #include <mono/metadata/tabledefs.h>
46 #include <mono/metadata/class.h>
47 #include <mono/metadata/class-abi-details.h>
48 #include <mono/metadata/object.h>
49 #include <mono/metadata/exception.h>
50 #include <mono/metadata/exception-internals.h>
51 #include <mono/metadata/opcodes.h>
52 #include <mono/metadata/mono-endian.h>
53 #include <mono/metadata/tokentype.h>
54 #include <mono/metadata/tabledefs.h>
55 #include <mono/metadata/marshal.h>
56 #include <mono/metadata/debug-helpers.h>
57 #include <mono/metadata/debug-internals.h>
58 #include <mono/metadata/gc-internals.h>
59 #include <mono/metadata/security-manager.h>
60 #include <mono/metadata/threads-types.h>
61 #include <mono/metadata/security-core-clr.h>
62 #include <mono/metadata/profiler-private.h>
63 #include <mono/metadata/profiler.h>
64 #include <mono/metadata/monitor.h>
65 #include <mono/utils/mono-memory-model.h>
66 #include <mono/utils/mono-error-internals.h>
67 #include <mono/metadata/mono-basic-block.h>
68 #include <mono/metadata/reflection-internals.h>
69 #include <mono/utils/mono-threads-coop.h>
70 #include <mono/utils/mono-utils-debug.h>
71 #include <mono/utils/mono-logger-internals.h>
72 #include <mono/metadata/verify-internals.h>
73 #include <mono/metadata/icall-decl.h>
74 #include "mono/metadata/icall-signatures.h"
80 #include "jit-icalls.h"
82 #include "debugger-agent.h"
83 #include "seq-points.h"
84 #include "aot-compiler.h"
85 #include "mini-llvm.h"
86 #include "mini-runtime.h"
87 #include "llvmonly-runtime.h"
89 #define BRANCH_COST 10
91 /* Used for the JIT */
92 #define INLINE_LENGTH_LIMIT 20
94 * The aot and jit inline limits should be different,
95 * since aot sees the whole program so we can let opt inline methods for us,
96 * while the jit only sees one method, so we have to inline things ourselves.
98 /* Used by LLVM AOT */
99 #define LLVM_AOT_INLINE_LENGTH_LIMIT 30
100 /* Used by LLVM JIT */
101 #define LLVM_JIT_INLINE_LENGTH_LIMIT 100
103 static const gboolean debug_tailcall
= FALSE
; // logging
104 static const gboolean debug_tailcall_try_all
= FALSE
; // consider any call followed by ret
107 mono_tailcall_print_enabled (void)
109 return debug_tailcall
|| MONO_TRACE_IS_TRACED (G_LOG_LEVEL_DEBUG
, MONO_TRACE_TAILCALL
);
113 mono_tailcall_print (const char *format
, ...)
115 if (!mono_tailcall_print_enabled ())
118 va_start (args
, format
);
119 g_printv (format
, args
);
123 /* These have 'cfg' as an implicit argument */
124 #define INLINE_FAILURE(msg) do { \
125 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
126 inline_failure (cfg, msg); \
127 goto exception_exit; \
130 #define CHECK_CFG_EXCEPTION do {\
131 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
132 goto exception_exit; \
134 #define FIELD_ACCESS_FAILURE(method, field) do { \
135 field_access_failure ((cfg), (method), (field)); \
136 goto exception_exit; \
138 #define GENERIC_SHARING_FAILURE(opcode) do { \
139 if (cfg->gshared) { \
140 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
141 goto exception_exit; \
144 #define GSHAREDVT_FAILURE(opcode) do { \
145 if (cfg->gsharedvt) { \
146 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
147 goto exception_exit; \
150 #define OUT_OF_MEMORY_FAILURE do { \
151 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
152 mono_error_set_out_of_memory (cfg->error, ""); \
153 goto exception_exit; \
155 #define DISABLE_AOT(cfg) do { \
156 if ((cfg)->verbose_level >= 2) \
157 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
158 (cfg)->disable_aot = TRUE; \
160 #define LOAD_ERROR do { \
161 break_on_unverified (); \
162 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
163 goto exception_exit; \
166 #define TYPE_LOAD_ERROR(klass) do { \
167 cfg->exception_ptr = klass; \
171 #define CHECK_CFG_ERROR do {\
172 if (!is_ok (cfg->error)) { \
173 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
174 goto mono_error_exit; \
178 static int stind_to_store_membase (int opcode
);
180 int mono_op_to_op_imm (int opcode
);
181 int mono_op_to_op_imm_noemul (int opcode
);
183 static int inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
184 guchar
*ip
, guint real_offset
, gboolean inline_always
);
186 convert_value (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*ins
);
188 /* helper methods signatures */
190 /* type loading helpers */
191 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute
, "System.Diagnostics", "DebuggableAttribute")
192 static GENERATE_GET_CLASS_WITH_CACHE (iequatable
, "System", "IEquatable`1")
193 static GENERATE_GET_CLASS_WITH_CACHE (geqcomparer
, "System.Collections.Generic", "GenericEqualityComparer`1");
196 * Instruction metadata
204 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
205 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
211 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == TARGET_SIZEOF_VOID_P
216 /* keep in sync with the enum in mini.h */
219 #include "mini-ops.h"
224 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
225 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
227 * This should contain the index of the last sreg + 1. This is not the same
228 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
230 const gint8 mini_ins_sreg_counts
[] = {
231 #include "mini-ops.h"
237 mono_alloc_ireg (MonoCompile
*cfg
)
239 return alloc_ireg (cfg
);
243 mono_alloc_lreg (MonoCompile
*cfg
)
245 return alloc_lreg (cfg
);
249 mono_alloc_freg (MonoCompile
*cfg
)
251 return alloc_freg (cfg
);
255 mono_alloc_preg (MonoCompile
*cfg
)
257 return alloc_preg (cfg
);
261 mono_alloc_dreg (MonoCompile
*cfg
, MonoStackType stack_type
)
263 return alloc_dreg (cfg
, stack_type
);
267 * mono_alloc_ireg_ref:
269 * Allocate an IREG, and mark it as holding a GC ref.
272 mono_alloc_ireg_ref (MonoCompile
*cfg
)
274 return alloc_ireg_ref (cfg
);
278 * mono_alloc_ireg_mp:
280 * Allocate an IREG, and mark it as holding a managed pointer.
283 mono_alloc_ireg_mp (MonoCompile
*cfg
)
285 return alloc_ireg_mp (cfg
);
289 * mono_alloc_ireg_copy:
291 * Allocate an IREG with the same GC type as VREG.
294 mono_alloc_ireg_copy (MonoCompile
*cfg
, guint32 vreg
)
296 if (vreg_is_ref (cfg
, vreg
))
297 return alloc_ireg_ref (cfg
);
298 else if (vreg_is_mp (cfg
, vreg
))
299 return alloc_ireg_mp (cfg
);
301 return alloc_ireg (cfg
);
305 mono_type_to_regmove (MonoCompile
*cfg
, MonoType
*type
)
310 type
= mini_get_underlying_type (type
);
312 switch (type
->type
) {
325 case MONO_TYPE_FNPTR
:
327 case MONO_TYPE_CLASS
:
328 case MONO_TYPE_STRING
:
329 case MONO_TYPE_OBJECT
:
330 case MONO_TYPE_SZARRAY
:
331 case MONO_TYPE_ARRAY
:
335 #if SIZEOF_REGISTER == 8
341 return cfg
->r4fp
? OP_RMOVE
: OP_FMOVE
;
344 case MONO_TYPE_VALUETYPE
:
345 if (m_class_is_enumtype (type
->data
.klass
)) {
346 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
349 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
352 case MONO_TYPE_TYPEDBYREF
:
354 case MONO_TYPE_GENERICINST
:
355 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
357 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
361 g_assert (cfg
->gshared
);
362 if (mini_type_var_is_vt (type
))
365 return mono_type_to_regmove (cfg
, mini_get_underlying_type (type
));
367 g_error ("unknown type 0x%02x in type_to_regstore", type
->type
);
373 mono_print_bb (MonoBasicBlock
*bb
, const char *msg
)
377 GString
*str
= g_string_new ("");
379 g_string_append_printf (str
, "%s %d: [IN: ", msg
, bb
->block_num
);
380 for (i
= 0; i
< bb
->in_count
; ++i
)
381 g_string_append_printf (str
, " BB%d(%d)", bb
->in_bb
[i
]->block_num
, bb
->in_bb
[i
]->dfn
);
382 g_string_append_printf (str
, ", OUT: ");
383 for (i
= 0; i
< bb
->out_count
; ++i
)
384 g_string_append_printf (str
, " BB%d(%d)", bb
->out_bb
[i
]->block_num
, bb
->out_bb
[i
]->dfn
);
385 g_string_append_printf (str
, " ]\n");
387 g_print ("%s", str
->str
);
388 g_string_free (str
, TRUE
);
390 for (tree
= bb
->code
; tree
; tree
= tree
->next
)
391 mono_print_ins_index (-1, tree
);
394 static MONO_NEVER_INLINE gboolean
395 break_on_unverified (void)
397 if (mini_debug_options
.break_on_unverified
) {
405 clear_cfg_error (MonoCompile
*cfg
)
407 mono_error_cleanup (cfg
->error
);
408 error_init (cfg
->error
);
411 static MONO_NEVER_INLINE
void
412 field_access_failure (MonoCompile
*cfg
, MonoMethod
*method
, MonoClassField
*field
)
414 char *method_fname
= mono_method_full_name (method
, TRUE
);
415 char *field_fname
= mono_field_full_name (field
);
416 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
417 mono_error_set_generic_error (cfg
->error
, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname
, method_fname
);
418 g_free (method_fname
);
419 g_free (field_fname
);
422 static MONO_NEVER_INLINE
void
423 inline_failure (MonoCompile
*cfg
, const char *msg
)
425 if (cfg
->verbose_level
>= 2)
426 printf ("inline failed: %s\n", msg
);
427 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INLINE_FAILED
);
430 static MONO_NEVER_INLINE
void
431 gshared_failure (MonoCompile
*cfg
, int opcode
, const char *file
, int line
)
433 if (cfg
->verbose_level
> 2)
434 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", m_class_get_name_space (cfg
->current_method
->klass
), m_class_get_name (cfg
->current_method
->klass
), cfg
->current_method
->name
, cfg
->current_method
->signature
->param_count
, mono_opcode_name (opcode
), line
);
435 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_GENERIC_SHARING_FAILED
);
438 static MONO_NEVER_INLINE
void
439 gsharedvt_failure (MonoCompile
*cfg
, int opcode
, const char *file
, int line
)
441 cfg
->exception_message
= g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", m_class_get_name_space (cfg
->current_method
->klass
), m_class_get_name (cfg
->current_method
->klass
), cfg
->current_method
->name
, cfg
->current_method
->signature
->param_count
, mono_opcode_name ((opcode
)), file
, line
);
442 if (cfg
->verbose_level
>= 2)
443 printf ("%s\n", cfg
->exception_message
);
444 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_GENERIC_SHARING_FAILED
);
448 mini_set_inline_failure (MonoCompile
*cfg
, const char *msg
)
450 if (cfg
->verbose_level
>= 2)
451 printf ("inline failed: %s\n", msg
);
452 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_INLINE_FAILED
);
456 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
457 * foo<T> (int i) { ldarg.0; box T; }
459 #define UNVERIFIED do { \
460 if (cfg->gsharedvt) { \
461 if (cfg->verbose_level > 2) \
462 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
463 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
464 goto exception_exit; \
466 break_on_unverified (); \
470 #define GET_BBLOCK(cfg,tblock,ip) do { \
471 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
473 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
474 NEW_BBLOCK (cfg, (tblock)); \
475 (tblock)->cil_code = (ip); \
476 ADD_BBLOCK (cfg, (tblock)); \
480 /* Emit conversions so both operands of a binary opcode are of the same type */
482 add_widen_op (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
**arg1_ref
, MonoInst
**arg2_ref
)
484 MonoInst
*arg1
= *arg1_ref
;
485 MonoInst
*arg2
= *arg2_ref
;
488 ((arg1
->type
== STACK_R4
&& arg2
->type
== STACK_R8
) ||
489 (arg1
->type
== STACK_R8
&& arg2
->type
== STACK_R4
))) {
492 /* Mixing r4/r8 is allowed by the spec */
493 if (arg1
->type
== STACK_R4
) {
494 int dreg
= alloc_freg (cfg
);
496 EMIT_NEW_UNALU (cfg
, conv
, OP_RCONV_TO_R8
, dreg
, arg1
->dreg
);
497 conv
->type
= STACK_R8
;
501 if (arg2
->type
== STACK_R4
) {
502 int dreg
= alloc_freg (cfg
);
504 EMIT_NEW_UNALU (cfg
, conv
, OP_RCONV_TO_R8
, dreg
, arg2
->dreg
);
505 conv
->type
= STACK_R8
;
511 #if SIZEOF_REGISTER == 8
512 /* FIXME: Need to add many more cases */
513 if ((arg1
)->type
== STACK_PTR
&& (arg2
)->type
== STACK_I4
) {
516 int dr
= alloc_preg (cfg
);
517 EMIT_NEW_UNALU (cfg
, widen
, OP_SEXT_I4
, dr
, (arg2
)->dreg
);
518 (ins
)->sreg2
= widen
->dreg
;
523 #define ADD_BINOP(op) do { \
524 MONO_INST_NEW (cfg, ins, (op)); \
526 ins->sreg1 = sp [0]->dreg; \
527 ins->sreg2 = sp [1]->dreg; \
528 type_from_op (cfg, ins, sp [0], sp [1]); \
530 /* Have to insert a widening op */ \
531 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
532 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
533 MONO_ADD_INS ((cfg)->cbb, (ins)); \
534 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
537 #define ADD_UNOP(op) do { \
538 MONO_INST_NEW (cfg, ins, (op)); \
540 ins->sreg1 = sp [0]->dreg; \
541 type_from_op (cfg, ins, sp [0], NULL); \
543 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
544 MONO_ADD_INS ((cfg)->cbb, (ins)); \
545 *sp++ = mono_decompose_opcode (cfg, ins); \
548 #define ADD_BINCOND(next_block) do { \
551 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
552 cmp->sreg1 = sp [0]->dreg; \
553 cmp->sreg2 = sp [1]->dreg; \
554 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
555 type_from_op (cfg, cmp, sp [0], sp [1]); \
557 type_from_op (cfg, ins, sp [0], sp [1]); \
558 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
559 GET_BBLOCK (cfg, tblock, target); \
560 link_bblock (cfg, cfg->cbb, tblock); \
561 ins->inst_true_bb = tblock; \
562 if ((next_block)) { \
563 link_bblock (cfg, cfg->cbb, (next_block)); \
564 ins->inst_false_bb = (next_block); \
565 start_new_bblock = 1; \
567 GET_BBLOCK (cfg, tblock, next_ip); \
568 link_bblock (cfg, cfg->cbb, tblock); \
569 ins->inst_false_bb = tblock; \
570 start_new_bblock = 2; \
572 if (sp != stack_start) { \
573 handle_stack_args (cfg, stack_start, sp - stack_start); \
574 CHECK_UNVERIFIABLE (cfg); \
576 MONO_ADD_INS (cfg->cbb, cmp); \
577 MONO_ADD_INS (cfg->cbb, ins); \
581 * link_bblock: Links two basic blocks
583 * links two basic blocks in the control flow graph, the 'from'
584 * argument is the starting block and the 'to' argument is the block
585 * the control flow ends to after 'from'.
588 link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
590 MonoBasicBlock
**newa
;
594 if (from
->cil_code
) {
596 printf ("edge from IL%04x to IL_%04x\n", from
->cil_code
- cfg
->cil_code
, to
->cil_code
- cfg
->cil_code
);
598 printf ("edge from IL%04x to exit\n", from
->cil_code
- cfg
->cil_code
);
601 printf ("edge from entry to IL_%04x\n", to
->cil_code
- cfg
->cil_code
);
603 printf ("edge from entry to exit\n");
608 for (i
= 0; i
< from
->out_count
; ++i
) {
609 if (to
== from
->out_bb
[i
]) {
615 newa
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (from
->out_count
+ 1));
616 for (i
= 0; i
< from
->out_count
; ++i
) {
617 newa
[i
] = from
->out_bb
[i
];
625 for (i
= 0; i
< to
->in_count
; ++i
) {
626 if (from
== to
->in_bb
[i
]) {
632 newa
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * (to
->in_count
+ 1));
633 for (i
= 0; i
< to
->in_count
; ++i
) {
634 newa
[i
] = to
->in_bb
[i
];
643 mono_link_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
645 link_bblock (cfg
, from
, to
);
649 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
);
652 mark_bb_in_region (MonoCompile
*cfg
, guint region
, uint32_t start
, uint32_t end
)
654 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[start
];
656 //start must exist in cil_offset_to_bb as those are il offsets used by EH which should have GET_BBLOCK early.
659 if (cfg
->verbose_level
> 1)
660 g_print ("FIRST BB for %d is BB_%d\n", start
, bb
->block_num
);
661 for (; bb
&& bb
->real_offset
< end
; bb
= bb
->next_bb
) {
662 //no one claimed this bb, take it.
663 if (bb
->region
== -1) {
668 //current region is an early handler, bail
669 if ((bb
->region
& (0xf << 4)) != MONO_REGION_TRY
) {
673 //current region is a try, only overwrite if new region is a handler
674 if ((region
& (0xf << 4)) != MONO_REGION_TRY
) {
680 mono_create_spvar_for_region (cfg
, region
);
684 compute_bb_regions (MonoCompile
*cfg
)
687 MonoMethodHeader
*header
= cfg
->header
;
690 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
693 for (i
= 0; i
< header
->num_clauses
; ++i
) {
694 MonoExceptionClause
*clause
= &header
->clauses
[i
];
696 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
)
697 mark_bb_in_region (cfg
, ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
, clause
->data
.filter_offset
, clause
->handler_offset
);
699 guint handler_region
;
700 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
701 handler_region
= ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
702 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
703 handler_region
= ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
705 handler_region
= ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
707 mark_bb_in_region (cfg
, handler_region
, clause
->handler_offset
, clause
->handler_offset
+ clause
->handler_len
);
708 mark_bb_in_region (cfg
, ((i
+ 1) << 8) | clause
->flags
, clause
->try_offset
, clause
->try_offset
+ clause
->try_len
);
711 if (cfg
->verbose_level
> 2) {
713 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
714 g_print ("REGION BB%d IL_%04x ID_%08X\n", bb
->block_num
, bb
->real_offset
, bb
->region
);
721 ip_in_finally_clause (MonoCompile
*cfg
, int offset
)
723 MonoMethodHeader
*header
= cfg
->header
;
724 MonoExceptionClause
*clause
;
727 for (i
= 0; i
< header
->num_clauses
; ++i
) {
728 clause
= &header
->clauses
[i
];
729 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FAULT
)
732 if (MONO_OFFSET_IN_HANDLER (clause
, offset
))
738 /* Find clauses between ip and target, from inner to outer */
740 mono_find_leave_clauses (MonoCompile
*cfg
, guchar
*ip
, guchar
*target
)
742 MonoMethodHeader
*header
= cfg
->header
;
743 MonoExceptionClause
*clause
;
747 for (i
= 0; i
< header
->num_clauses
; ++i
) {
748 clause
= &header
->clauses
[i
];
749 if (MONO_OFFSET_IN_CLAUSE (clause
, (ip
- header
->code
)) &&
750 (!MONO_OFFSET_IN_CLAUSE (clause
, (target
- header
->code
)))) {
751 MonoLeaveClause
*leave
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoLeaveClause
));
753 leave
->clause
= clause
;
755 res
= g_list_append_mempool (cfg
->mempool
, res
, leave
);
762 mono_create_spvar_for_region (MonoCompile
*cfg
, int region
)
766 var
= (MonoInst
*)g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
770 var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
771 /* prevent it from being register allocated */
772 var
->flags
|= MONO_INST_VOLATILE
;
774 g_hash_table_insert (cfg
->spvars
, GINT_TO_POINTER (region
), var
);
778 mono_find_exvar_for_offset (MonoCompile
*cfg
, int offset
)
780 return (MonoInst
*)g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
784 mono_create_exvar_for_offset (MonoCompile
*cfg
, int offset
)
788 var
= (MonoInst
*)g_hash_table_lookup (cfg
->exvars
, GINT_TO_POINTER (offset
));
792 var
= mono_compile_create_var (cfg
, mono_get_object_type (), OP_LOCAL
);
793 /* prevent it from being register allocated */
794 var
->flags
|= MONO_INST_VOLATILE
;
796 g_hash_table_insert (cfg
->exvars
, GINT_TO_POINTER (offset
), var
);
802 * Returns the type used in the eval stack when @type is loaded.
803 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
806 mini_type_to_eval_stack_type (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*inst
)
810 type
= mini_get_underlying_type (type
);
811 inst
->klass
= klass
= mono_class_from_mono_type_internal (type
);
813 inst
->type
= STACK_MP
;
818 switch (type
->type
) {
820 inst
->type
= STACK_INV
;
828 inst
->type
= STACK_I4
;
833 case MONO_TYPE_FNPTR
:
834 inst
->type
= STACK_PTR
;
836 case MONO_TYPE_CLASS
:
837 case MONO_TYPE_STRING
:
838 case MONO_TYPE_OBJECT
:
839 case MONO_TYPE_SZARRAY
:
840 case MONO_TYPE_ARRAY
:
841 inst
->type
= STACK_OBJ
;
845 inst
->type
= STACK_I8
;
848 inst
->type
= cfg
->r4_stack_type
;
851 inst
->type
= STACK_R8
;
853 case MONO_TYPE_VALUETYPE
:
854 if (m_class_is_enumtype (type
->data
.klass
)) {
855 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
859 inst
->type
= STACK_VTYPE
;
862 case MONO_TYPE_TYPEDBYREF
:
863 inst
->klass
= mono_defaults
.typed_reference_class
;
864 inst
->type
= STACK_VTYPE
;
866 case MONO_TYPE_GENERICINST
:
867 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
871 g_assert (cfg
->gshared
);
872 if (mini_is_gsharedvt_type (type
)) {
873 g_assert (cfg
->gsharedvt
);
874 inst
->type
= STACK_VTYPE
;
876 mini_type_to_eval_stack_type (cfg
, mini_get_underlying_type (type
), inst
);
880 g_error ("unknown type 0x%02x in eval stack type", type
->type
);
885 * The following tables are used to quickly validate the IL code in type_from_op ().
888 bin_num_table
[STACK_MAX
] [STACK_MAX
] = {
889 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
890 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
891 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
892 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_INV
},
893 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
},
894 {STACK_INV
, STACK_MP
, STACK_INV
, STACK_MP
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
},
895 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
896 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
897 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R4
}
902 STACK_INV
, STACK_I4
, STACK_I8
, STACK_PTR
, STACK_R8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_R4
905 /* reduce the size of this table */
907 bin_int_table
[STACK_MAX
] [STACK_MAX
] = {
908 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
909 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
910 {STACK_INV
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
911 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
912 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
913 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
914 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
915 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
919 bin_comp_table
[STACK_MAX
] [STACK_MAX
] = {
920 /* Inv i L p F & O vt r4 */
922 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
923 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
924 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
925 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
926 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
927 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
928 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
929 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
932 /* reduce the size of this table */
934 shift_table
[STACK_MAX
] [STACK_MAX
] = {
935 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
936 {STACK_INV
, STACK_I4
, STACK_INV
, STACK_I4
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
937 {STACK_INV
, STACK_I8
, STACK_INV
, STACK_I8
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
938 {STACK_INV
, STACK_PTR
, STACK_INV
, STACK_PTR
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
939 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
940 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
941 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
},
942 {STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
, STACK_INV
}
946 * Tables to map from the non-specific opcode to the matching
947 * type-specific opcode.
949 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
951 binops_op_map
[STACK_MAX
] = {
952 0, OP_IADD
-CEE_ADD
, OP_LADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, OP_FADD
-CEE_ADD
, OP_PADD
-CEE_ADD
, 0, 0, OP_RADD
-CEE_ADD
955 /* handles from CEE_NEG to CEE_CONV_U8 */
957 unops_op_map
[STACK_MAX
] = {
958 0, OP_INEG
-CEE_NEG
, OP_LNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, OP_FNEG
-CEE_NEG
, OP_PNEG
-CEE_NEG
, 0, 0, OP_RNEG
-CEE_NEG
961 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
963 ovfops_op_map
[STACK_MAX
] = {
964 0, OP_ICONV_TO_U2
-CEE_CONV_U2
, OP_LCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_FCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, OP_PCONV_TO_U2
-CEE_CONV_U2
, 0, OP_RCONV_TO_U2
-CEE_CONV_U2
967 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
969 ovf2ops_op_map
[STACK_MAX
] = {
970 0, OP_ICONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_LCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_FCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, OP_PCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
, 0, 0, OP_RCONV_TO_OVF_I1_UN
-CEE_CONV_OVF_I1_UN
973 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
975 ovf3ops_op_map
[STACK_MAX
] = {
976 0, OP_ICONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_LCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_FCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, OP_PCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
, 0, 0, OP_RCONV_TO_OVF_I1
-CEE_CONV_OVF_I1
979 /* handles from CEE_BEQ to CEE_BLT_UN */
981 beqops_op_map
[STACK_MAX
] = {
982 0, OP_IBEQ
-CEE_BEQ
, OP_LBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_FBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, OP_PBEQ
-CEE_BEQ
, 0, OP_FBEQ
-CEE_BEQ
985 /* handles from CEE_CEQ to CEE_CLT_UN */
987 ceqops_op_map
[STACK_MAX
] = {
988 0, OP_ICEQ
-OP_CEQ
, OP_LCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_FCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, OP_PCEQ
-OP_CEQ
, 0, OP_RCEQ
-OP_CEQ
992 * Sets ins->type (the type on the eval stack) according to the
993 * type of the opcode and the arguments to it.
994 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
996 * FIXME: this function sets ins->type unconditionally in some cases, but
997 * it should set it to invalid for some types (a conv.x on an object)
1000 type_from_op (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
)
1002 switch (ins
->opcode
) {
1009 /* FIXME: check unverifiable args for STACK_MP */
1010 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
1011 ins
->opcode
+= binops_op_map
[ins
->type
];
1013 case MONO_CEE_DIV_UN
:
1014 case MONO_CEE_REM_UN
:
1018 ins
->type
= bin_int_table
[src1
->type
] [src2
->type
];
1019 ins
->opcode
+= binops_op_map
[ins
->type
];
1023 case MONO_CEE_SHR_UN
:
1024 ins
->type
= shift_table
[src1
->type
] [src2
->type
];
1025 ins
->opcode
+= binops_op_map
[ins
->type
];
1030 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
1031 if ((src1
->type
== STACK_I8
) || ((TARGET_SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
1032 ins
->opcode
= OP_LCOMPARE
;
1033 else if (src1
->type
== STACK_R4
)
1034 ins
->opcode
= OP_RCOMPARE
;
1035 else if (src1
->type
== STACK_R8
)
1036 ins
->opcode
= OP_FCOMPARE
;
1038 ins
->opcode
= OP_ICOMPARE
;
1040 case OP_ICOMPARE_IMM
:
1041 ins
->type
= bin_comp_table
[src1
->type
] [src1
->type
] ? STACK_I4
: STACK_INV
;
1042 if ((src1
->type
== STACK_I8
) || ((TARGET_SIZEOF_VOID_P
== 8) && ((src1
->type
== STACK_PTR
) || (src1
->type
== STACK_OBJ
) || (src1
->type
== STACK_MP
))))
1043 ins
->opcode
= OP_LCOMPARE_IMM
;
1050 case MONO_CEE_BNE_UN
:
1051 case MONO_CEE_BGE_UN
:
1052 case MONO_CEE_BGT_UN
:
1053 case MONO_CEE_BLE_UN
:
1054 case MONO_CEE_BLT_UN
:
1055 ins
->opcode
+= beqops_op_map
[src1
->type
];
1058 ins
->type
= bin_comp_table
[src1
->type
] [src2
->type
] ? STACK_I4
: STACK_INV
;
1059 ins
->opcode
+= ceqops_op_map
[src1
->type
];
1065 ins
->type
= (bin_comp_table
[src1
->type
] [src2
->type
] & 1) ? STACK_I4
: STACK_INV
;
1066 ins
->opcode
+= ceqops_op_map
[src1
->type
];
1070 ins
->type
= neg_table
[src1
->type
];
1071 ins
->opcode
+= unops_op_map
[ins
->type
];
1074 if (src1
->type
>= STACK_I4
&& src1
->type
<= STACK_PTR
)
1075 ins
->type
= src1
->type
;
1077 ins
->type
= STACK_INV
;
1078 ins
->opcode
+= unops_op_map
[ins
->type
];
1080 case MONO_CEE_CONV_I1
:
1081 case MONO_CEE_CONV_I2
:
1082 case MONO_CEE_CONV_I4
:
1083 case MONO_CEE_CONV_U4
:
1084 ins
->type
= STACK_I4
;
1085 ins
->opcode
+= unops_op_map
[src1
->type
];
1087 case MONO_CEE_CONV_R_UN
:
1088 ins
->type
= STACK_R8
;
1089 switch (src1
->type
) {
1092 ins
->opcode
= OP_ICONV_TO_R_UN
;
1095 ins
->opcode
= OP_LCONV_TO_R_UN
;
1098 ins
->opcode
= OP_FMOVE
;
1102 case MONO_CEE_CONV_OVF_I1
:
1103 case MONO_CEE_CONV_OVF_U1
:
1104 case MONO_CEE_CONV_OVF_I2
:
1105 case MONO_CEE_CONV_OVF_U2
:
1106 case MONO_CEE_CONV_OVF_I4
:
1107 case MONO_CEE_CONV_OVF_U4
:
1108 ins
->type
= STACK_I4
;
1109 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
1111 case MONO_CEE_CONV_OVF_I_UN
:
1112 case MONO_CEE_CONV_OVF_U_UN
:
1113 ins
->type
= STACK_PTR
;
1114 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
1116 case MONO_CEE_CONV_OVF_I1_UN
:
1117 case MONO_CEE_CONV_OVF_I2_UN
:
1118 case MONO_CEE_CONV_OVF_I4_UN
:
1119 case MONO_CEE_CONV_OVF_U1_UN
:
1120 case MONO_CEE_CONV_OVF_U2_UN
:
1121 case MONO_CEE_CONV_OVF_U4_UN
:
1122 ins
->type
= STACK_I4
;
1123 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
1125 case MONO_CEE_CONV_U
:
1126 ins
->type
= STACK_PTR
;
1127 switch (src1
->type
) {
1129 ins
->opcode
= OP_ICONV_TO_U
;
1134 #if TARGET_SIZEOF_VOID_P == 8
1135 ins
->opcode
= OP_LCONV_TO_U
;
1137 ins
->opcode
= OP_MOVE
;
1141 ins
->opcode
= OP_LCONV_TO_U
;
1144 ins
->opcode
= OP_FCONV_TO_U
;
1147 if (TARGET_SIZEOF_VOID_P
== 8)
1148 ins
->opcode
= OP_RCONV_TO_U8
;
1150 ins
->opcode
= OP_RCONV_TO_U4
;
1154 case MONO_CEE_CONV_I8
:
1155 case MONO_CEE_CONV_U8
:
1156 ins
->type
= STACK_I8
;
1157 ins
->opcode
+= unops_op_map
[src1
->type
];
1159 case MONO_CEE_CONV_OVF_I8
:
1160 case MONO_CEE_CONV_OVF_U8
:
1161 ins
->type
= STACK_I8
;
1162 ins
->opcode
+= ovf3ops_op_map
[src1
->type
];
1164 case MONO_CEE_CONV_OVF_U8_UN
:
1165 case MONO_CEE_CONV_OVF_I8_UN
:
1166 ins
->type
= STACK_I8
;
1167 ins
->opcode
+= ovf2ops_op_map
[src1
->type
];
1169 case MONO_CEE_CONV_R4
:
1170 ins
->type
= cfg
->r4_stack_type
;
1171 ins
->opcode
+= unops_op_map
[src1
->type
];
1173 case MONO_CEE_CONV_R8
:
1174 ins
->type
= STACK_R8
;
1175 ins
->opcode
+= unops_op_map
[src1
->type
];
1178 ins
->type
= STACK_R8
;
1180 case MONO_CEE_CONV_U2
:
1181 case MONO_CEE_CONV_U1
:
1182 ins
->type
= STACK_I4
;
1183 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1185 case MONO_CEE_CONV_I
:
1186 case MONO_CEE_CONV_OVF_I
:
1187 case MONO_CEE_CONV_OVF_U
:
1188 ins
->type
= STACK_PTR
;
1189 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1191 case MONO_CEE_ADD_OVF
:
1192 case MONO_CEE_ADD_OVF_UN
:
1193 case MONO_CEE_MUL_OVF
:
1194 case MONO_CEE_MUL_OVF_UN
:
1195 case MONO_CEE_SUB_OVF
:
1196 case MONO_CEE_SUB_OVF_UN
:
1197 ins
->type
= bin_num_table
[src1
->type
] [src2
->type
];
1198 ins
->opcode
+= ovfops_op_map
[src1
->type
];
1199 if (ins
->type
== STACK_R8
)
1200 ins
->type
= STACK_INV
;
1202 case OP_LOAD_MEMBASE
:
1203 ins
->type
= STACK_PTR
;
1205 case OP_LOADI1_MEMBASE
:
1206 case OP_LOADU1_MEMBASE
:
1207 case OP_LOADI2_MEMBASE
:
1208 case OP_LOADU2_MEMBASE
:
1209 case OP_LOADI4_MEMBASE
:
1210 case OP_LOADU4_MEMBASE
:
1211 ins
->type
= STACK_PTR
;
1213 case OP_LOADI8_MEMBASE
:
1214 ins
->type
= STACK_I8
;
1216 case OP_LOADR4_MEMBASE
:
1217 ins
->type
= cfg
->r4_stack_type
;
1219 case OP_LOADR8_MEMBASE
:
1220 ins
->type
= STACK_R8
;
1223 g_error ("opcode 0x%04x not handled in type from op", ins
->opcode
);
1227 if (ins
->type
== STACK_MP
) {
1228 if (src1
->type
== STACK_MP
)
1229 ins
->klass
= src1
->klass
;
1231 ins
->klass
= mono_defaults
.object_class
;
1236 mini_type_from_op (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src1
, MonoInst
*src2
)
1238 type_from_op (cfg
, ins
, src1
, src2
);
1242 ldind_to_type (int op
)
1245 case MONO_CEE_LDIND_I1
: return mono_defaults
.sbyte_class
;
1246 case MONO_CEE_LDIND_U1
: return mono_defaults
.byte_class
;
1247 case MONO_CEE_LDIND_I2
: return mono_defaults
.int16_class
;
1248 case MONO_CEE_LDIND_U2
: return mono_defaults
.uint16_class
;
1249 case MONO_CEE_LDIND_I4
: return mono_defaults
.int32_class
;
1250 case MONO_CEE_LDIND_U4
: return mono_defaults
.uint32_class
;
1251 case MONO_CEE_LDIND_I8
: return mono_defaults
.int64_class
;
1252 case MONO_CEE_LDIND_I
: return mono_defaults
.int_class
;
1253 case MONO_CEE_LDIND_R4
: return mono_defaults
.single_class
;
1254 case MONO_CEE_LDIND_R8
: return mono_defaults
.double_class
;
1255 case MONO_CEE_LDIND_REF
:return mono_defaults
.object_class
; //FIXME we should try to return a more specific type
1256 default: g_error ("Unknown ldind type %d", op
);
1263 param_table
[STACK_MAX
] [STACK_MAX
] = {
/*
 * check_values_to_signature:
 *
 *   NOTE(review): this region is heavily elided in the extracted source —
 * several statements (case labels, returns, braces) between the visible
 * fragments are missing, so the logic below is preserved byte-for-byte
 * rather than reconstructed. From what is visible, it appears to validate
 * that the stack types of ARGS (and THIS_INS) are compatible with SIG:
 * it switches on args->type, then per-parameter checks byref-ness and
 * rejects reference-type/float mismatches. Confirm against the upstream
 * file before relying on this description.
 */
1268 check_values_to_signature (MonoInst
*args
, MonoType
*this_ins
, MonoMethodSignature
*sig
)
1273 switch (args
->type
) {
1283 for (i
= 0; i
< sig
->param_count
; ++i
) {
1284 switch (args
[i
].type
) {
/* byref parameters require an address-like value on the stack */
1288 if (!sig
->params
[i
]->byref
)
1292 if (sig
->params
[i
]->byref
)
1294 switch (sig
->params
[i
]->type
) {
1295 case MONO_TYPE_CLASS
:
1296 case MONO_TYPE_STRING
:
1297 case MONO_TYPE_OBJECT
:
1298 case MONO_TYPE_SZARRAY
:
1299 case MONO_TYPE_ARRAY
:
1306 if (sig
->params
[i
]->byref
)
/* floating point stack entries must match an R4/R8 parameter */
1308 if (sig
->params
[i
]->type
!= MONO_TYPE_R4
&& sig
->params
[i
]->type
!= MONO_TYPE_R8
)
1317 /*if (!param_table [args [i].type] [sig->params [i]->type])
1325 * When we need a pointer to the current domain many times in a method, we
1326 * call mono_domain_get() once and we store the result in a local variable.
1327 * This function returns the variable that represents the MonoDomain*.
1329 inline static MonoInst
*
1330 mono_get_domainvar (MonoCompile
*cfg
)
1332 if (!cfg
->domainvar
) {
1333 /* Make sure we don't generate references after checking whenever to init this */
1334 g_assert (!cfg
->domainvar_inited
);
1335 cfg
->domainvar
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
1336 /* Avoid optimizing it away */
1337 cfg
->domainvar
->flags
|= MONO_INST_VOLATILE
;
1339 return cfg
->domainvar
;
1343 * The got_var contains the address of the Global Offset Table when AOT
1347 mono_get_got_var (MonoCompile
*cfg
)
1349 if (!cfg
->compile_aot
|| !cfg
->backend
->need_got_var
|| cfg
->llvm_only
)
1351 if (!cfg
->got_var
) {
1352 cfg
->got_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
1354 return cfg
->got_var
;
1358 mono_create_rgctx_var (MonoCompile
*cfg
)
1360 if (!cfg
->rgctx_var
) {
1361 cfg
->rgctx_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
1362 /* force the var to be stack allocated */
1363 cfg
->rgctx_var
->flags
|= MONO_INST_VOLATILE
;
1368 mono_get_vtable_var (MonoCompile
*cfg
)
1370 g_assert (cfg
->gshared
);
1372 mono_create_rgctx_var (cfg
);
1374 return cfg
->rgctx_var
;
1378 type_from_stack_type (MonoInst
*ins
) {
1379 switch (ins
->type
) {
1380 case STACK_I4
: return mono_get_int32_type ();
1381 case STACK_I8
: return m_class_get_byval_arg (mono_defaults
.int64_class
);
1382 case STACK_PTR
: return mono_get_int_type ();
1383 case STACK_R4
: return m_class_get_byval_arg (mono_defaults
.single_class
);
1384 case STACK_R8
: return m_class_get_byval_arg (mono_defaults
.double_class
);
1386 return m_class_get_this_arg (ins
->klass
);
1387 case STACK_OBJ
: return mono_get_object_type ();
1388 case STACK_VTYPE
: return m_class_get_byval_arg (ins
->klass
);
1390 g_error ("stack type %d to monotype not handled\n", ins
->type
);
1395 static G_GNUC_UNUSED
int
1396 type_to_stack_type (MonoCompile
*cfg
, MonoType
*t
)
1398 t
= mono_type_get_underlying_type (t
);
1410 case MONO_TYPE_FNPTR
:
1412 case MONO_TYPE_CLASS
:
1413 case MONO_TYPE_STRING
:
1414 case MONO_TYPE_OBJECT
:
1415 case MONO_TYPE_SZARRAY
:
1416 case MONO_TYPE_ARRAY
:
1422 return cfg
->r4_stack_type
;
1425 case MONO_TYPE_VALUETYPE
:
1426 case MONO_TYPE_TYPEDBYREF
:
1428 case MONO_TYPE_GENERICINST
:
1429 if (mono_type_generic_inst_is_valuetype (t
))
1435 g_assert_not_reached ();
1442 array_access_to_klass (int opcode
)
1445 case MONO_CEE_LDELEM_U1
:
1446 return mono_defaults
.byte_class
;
1447 case MONO_CEE_LDELEM_U2
:
1448 return mono_defaults
.uint16_class
;
1449 case MONO_CEE_LDELEM_I
:
1450 case MONO_CEE_STELEM_I
:
1451 return mono_defaults
.int_class
;
1452 case MONO_CEE_LDELEM_I1
:
1453 case MONO_CEE_STELEM_I1
:
1454 return mono_defaults
.sbyte_class
;
1455 case MONO_CEE_LDELEM_I2
:
1456 case MONO_CEE_STELEM_I2
:
1457 return mono_defaults
.int16_class
;
1458 case MONO_CEE_LDELEM_I4
:
1459 case MONO_CEE_STELEM_I4
:
1460 return mono_defaults
.int32_class
;
1461 case MONO_CEE_LDELEM_U4
:
1462 return mono_defaults
.uint32_class
;
1463 case MONO_CEE_LDELEM_I8
:
1464 case MONO_CEE_STELEM_I8
:
1465 return mono_defaults
.int64_class
;
1466 case MONO_CEE_LDELEM_R4
:
1467 case MONO_CEE_STELEM_R4
:
1468 return mono_defaults
.single_class
;
1469 case MONO_CEE_LDELEM_R8
:
1470 case MONO_CEE_STELEM_R8
:
1471 return mono_defaults
.double_class
;
1472 case MONO_CEE_LDELEM_REF
:
1473 case MONO_CEE_STELEM_REF
:
1474 return mono_defaults
.object_class
;
1476 g_assert_not_reached ();
1482 * We try to share variables when possible
1485 mono_compile_get_interface_var (MonoCompile
*cfg
, int slot
, MonoInst
*ins
)
1491 type
= type_from_stack_type (ins
);
1493 /* inlining can result in deeper stacks */
1494 if (cfg
->inline_depth
|| slot
>= cfg
->header
->max_stack
)
1495 return mono_compile_create_var (cfg
, type
, OP_LOCAL
);
1497 pos
= ins
->type
- 1 + slot
* STACK_MAX
;
1499 switch (ins
->type
) {
1506 if ((vnum
= cfg
->intvars
[pos
]))
1507 return cfg
->varinfo
[vnum
];
1508 res
= mono_compile_create_var (cfg
, type
, OP_LOCAL
);
1509 cfg
->intvars
[pos
] = res
->inst_c0
;
1512 res
= mono_compile_create_var (cfg
, type
, OP_LOCAL
);
1518 mono_save_token_info (MonoCompile
*cfg
, MonoImage
*image
, guint32 token
, gpointer key
)
1521 * Don't use this if a generic_context is set, since that means AOT can't
1522 * look up the method using just the image+token.
1523 * table == 0 means this is a reference made from a wrapper.
1525 if (cfg
->compile_aot
&& !cfg
->generic_context
&& (mono_metadata_token_table (token
) > 0)) {
1526 MonoJumpInfoToken
*jump_info_token
= (MonoJumpInfoToken
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoToken
));
1527 jump_info_token
->image
= image
;
1528 jump_info_token
->token
= token
;
1529 g_hash_table_insert (cfg
->token_info_hash
, key
, jump_info_token
);
1534 * This function is called to handle items that are left on the evaluation stack
1535 * at basic block boundaries. What happens is that we save the values to local variables
1536 * and we reload them later when first entering the target basic block (with the
1537 * handle_loaded_temps () function).
1538 * A single joint point will use the same variables (stored in the array bb->out_stack or
1539 * bb->in_stack, if the basic block is before or after the joint point).
1541 * This function needs to be called _before_ emitting the last instruction of
1542 * the bb (i.e. before emitting a branch).
1543 * If the stack merge fails at a join point, cfg->unverifiable is set.
1546 handle_stack_args (MonoCompile
*cfg
, MonoInst
**sp
, int count
)
1549 MonoBasicBlock
*bb
= cfg
->cbb
;
1550 MonoBasicBlock
*outb
;
1551 MonoInst
*inst
, **locals
;
1556 if (cfg
->verbose_level
> 3)
1557 printf ("%d item(s) on exit from B%d\n", count
, bb
->block_num
);
1558 if (!bb
->out_scount
) {
1559 bb
->out_scount
= count
;
1560 //printf ("bblock %d has out:", bb->block_num);
1562 for (i
= 0; i
< bb
->out_count
; ++i
) {
1563 outb
= bb
->out_bb
[i
];
1564 /* exception handlers are linked, but they should not be considered for stack args */
1565 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1567 //printf (" %d", outb->block_num);
1568 if (outb
->in_stack
) {
1570 bb
->out_stack
= outb
->in_stack
;
1576 bb
->out_stack
= (MonoInst
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * count
);
1577 for (i
= 0; i
< count
; ++i
) {
1579 * try to reuse temps already allocated for this purpouse, if they occupy the same
1580 * stack slot and if they are of the same type.
1581 * This won't cause conflicts since if 'local' is used to
1582 * store one of the values in the in_stack of a bblock, then
1583 * the same variable will be used for the same outgoing stack
1585 * This doesn't work when inlining methods, since the bblocks
1586 * in the inlined methods do not inherit their in_stack from
1587 * the bblock they are inlined to. See bug #58863 for an
1590 bb
->out_stack
[i
] = mono_compile_get_interface_var (cfg
, i
, sp
[i
]);
1595 for (i
= 0; i
< bb
->out_count
; ++i
) {
1596 outb
= bb
->out_bb
[i
];
1597 /* exception handlers are linked, but they should not be considered for stack args */
1598 if (outb
->flags
& BB_EXCEPTION_HANDLER
)
1600 if (outb
->in_scount
) {
1601 if (outb
->in_scount
!= bb
->out_scount
) {
1602 cfg
->unverifiable
= TRUE
;
1605 continue; /* check they are the same locals */
1607 outb
->in_scount
= count
;
1608 outb
->in_stack
= bb
->out_stack
;
1611 locals
= bb
->out_stack
;
1613 for (i
= 0; i
< count
; ++i
) {
1614 sp
[i
] = convert_value (cfg
, locals
[i
]->inst_vtype
, sp
[i
]);
1615 EMIT_NEW_TEMPSTORE (cfg
, inst
, locals
[i
]->inst_c0
, sp
[i
]);
1616 inst
->cil_code
= sp
[i
]->cil_code
;
1617 sp
[i
] = locals
[i
];
1618 if (cfg
->verbose_level
> 3)
1619 printf ("storing %d to temp %d\n", i
, (int)locals
[i
]->inst_c0
);
1623 * It is possible that the out bblocks already have in_stack assigned, and
1624 * the in_stacks differ. In this case, we will store to all the different
1631 /* Find a bblock which has a different in_stack */
1633 while (bindex
< bb
->out_count
) {
1634 outb
= bb
->out_bb
[bindex
];
1635 /* exception handlers are linked, but they should not be considered for stack args */
1636 if (outb
->flags
& BB_EXCEPTION_HANDLER
) {
1640 if (outb
->in_stack
!= locals
) {
1641 for (i
= 0; i
< count
; ++i
) {
1642 sp
[i
] = convert_value (cfg
, outb
->in_stack
[i
]->inst_vtype
, sp
[i
]);
1643 EMIT_NEW_TEMPSTORE (cfg
, inst
, outb
->in_stack
[i
]->inst_c0
, sp
[i
]);
1644 inst
->cil_code
= sp
[i
]->cil_code
;
1645 sp
[i
] = locals
[i
];
1646 if (cfg
->verbose_level
> 3)
1647 printf ("storing %d to temp %d\n", i
, (int)outb
->in_stack
[i
]->inst_c0
);
1649 locals
= outb
->in_stack
;
1659 mini_emit_runtime_constant (MonoCompile
*cfg
, MonoJumpInfoType patch_type
, gpointer data
)
1663 if (cfg
->compile_aot
) {
1664 MONO_DISABLE_WARNING (4306) // 'type cast': conversion from 'MonoJumpInfoType' to 'MonoInst *' of greater size
1665 EMIT_NEW_AOTCONST (cfg
, ins
, patch_type
, data
);
1666 MONO_RESTORE_WARNING
1672 ji
.type
= patch_type
;
1673 ji
.data
.target
= data
;
1674 target
= mono_resolve_patch_target (NULL
, cfg
->domain
, NULL
, &ji
, FALSE
, error
);
1675 mono_error_assert_ok (error
);
1677 EMIT_NEW_PCONST (cfg
, ins
, target
);
1683 mono_create_fast_tls_getter (MonoCompile
*cfg
, MonoTlsKey key
)
1685 int tls_offset
= mono_tls_get_tls_offset (key
);
1687 if (cfg
->compile_aot
)
1690 if (tls_offset
!= -1 && mono_arch_have_fast_tls ()) {
1692 MONO_INST_NEW (cfg
, ins
, OP_TLS_GET
);
1693 ins
->dreg
= mono_alloc_preg (cfg
);
1694 ins
->inst_offset
= tls_offset
;
1701 mono_create_tls_get (MonoCompile
*cfg
, MonoTlsKey key
)
1703 MonoInst
*fast_tls
= NULL
;
1705 if (!mini_debug_options
.use_fallback_tls
)
1706 fast_tls
= mono_create_fast_tls_getter (cfg
, key
);
1709 MONO_ADD_INS (cfg
->cbb
, fast_tls
);
1713 const MonoJitICallId jit_icall_id
= mono_get_tls_key_to_jit_icall_id (key
);
1715 if (cfg
->compile_aot
) {
1718 * tls getters are critical pieces of code and we don't want to resolve them
1719 * through the standard plt/tramp mechanism since we might expose ourselves
1720 * to crashes and infinite recursions.
1721 * Therefore the NOCALL part of MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL, FALSE in is_plt_patch.
1723 EMIT_NEW_AOTCONST (cfg
, addr
, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL
, GUINT_TO_POINTER (jit_icall_id
));
1724 return mini_emit_calli (cfg
, mono_icall_sig_ptr
, NULL
, addr
, NULL
, NULL
);
1726 return mono_emit_jit_icall_id (cfg
, jit_icall_id
, NULL
);
1733 * Emit IR to push the current LMF onto the LMF stack.
1736 emit_push_lmf (MonoCompile
*cfg
)
1739 * Emit IR to push the LMF:
1740 * lmf_addr = <lmf_addr from tls>
1741 * lmf->lmf_addr = lmf_addr
1742 * lmf->prev_lmf = *lmf_addr
1745 MonoInst
*ins
, *lmf_ins
;
1750 int lmf_reg
, prev_lmf_reg
;
1752 * Store lmf_addr in a variable, so it can be allocated to a global register.
1754 if (!cfg
->lmf_addr_var
)
1755 cfg
->lmf_addr_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
1757 lmf_ins
= mono_create_tls_get (cfg
, TLS_KEY_LMF_ADDR
);
1760 lmf_ins
->dreg
= cfg
->lmf_addr_var
->dreg
;
1762 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->lmf_var
, NULL
);
1763 lmf_reg
= ins
->dreg
;
1765 prev_lmf_reg
= alloc_preg (cfg
);
1766 /* Save previous_lmf */
1767 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, prev_lmf_reg
, cfg
->lmf_addr_var
->dreg
, 0);
1768 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, lmf_reg
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
), prev_lmf_reg
);
1770 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, cfg
->lmf_addr_var
->dreg
, 0, lmf_reg
);
1776 * Emit IR to pop the current LMF from the LMF stack.
1779 emit_pop_lmf (MonoCompile
*cfg
)
1781 int lmf_reg
, lmf_addr_reg
;
1787 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->lmf_var
, NULL
);
1788 lmf_reg
= ins
->dreg
;
1792 * Emit IR to pop the LMF:
1793 * *(lmf->lmf_addr) = lmf->prev_lmf
1795 /* This could be called before emit_push_lmf () */
1796 if (!cfg
->lmf_addr_var
)
1797 cfg
->lmf_addr_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
1798 lmf_addr_reg
= cfg
->lmf_addr_var
->dreg
;
1800 prev_lmf_reg
= alloc_preg (cfg
);
1801 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, prev_lmf_reg
, lmf_reg
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
1802 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, lmf_addr_reg
, 0, prev_lmf_reg
);
1806 * target_type_is_incompatible:
1807 * @cfg: MonoCompile context
1809 * Check that the item @arg on the evaluation stack can be stored
1810 * in the target type (can be a local, or field, etc).
1811 * The cfg arg can be used to check if we need verification or just
1814 * Returns: non-0 value if arg can't be stored on a target.
1817 target_type_is_incompatible (MonoCompile
*cfg
, MonoType
*target
, MonoInst
*arg
)
1819 MonoType
*simple_type
;
1822 if (target
->byref
) {
1823 /* FIXME: check that the pointed to types match */
1824 if (arg
->type
== STACK_MP
) {
1825 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1826 MonoClass
*target_class_lowered
= mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (mono_class_from_mono_type_internal (target
))));
1827 MonoClass
*source_class_lowered
= mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg
->klass
)));
1829 /* if the target is native int& or X* or same type */
1830 if (target
->type
== MONO_TYPE_I
|| target
->type
== MONO_TYPE_PTR
|| target_class_lowered
== source_class_lowered
)
1833 /* Both are primitive type byrefs and the source points to a larger type that the destination */
1834 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (target_class_lowered
)) && MONO_TYPE_IS_PRIMITIVE_SCALAR (m_class_get_byval_arg (source_class_lowered
)) &&
1835 mono_class_instance_size (target_class_lowered
) <= mono_class_instance_size (source_class_lowered
))
1839 if (arg
->type
== STACK_PTR
)
1844 simple_type
= mini_get_underlying_type (target
);
1845 switch (simple_type
->type
) {
1846 case MONO_TYPE_VOID
:
1854 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
)
1858 /* STACK_MP is needed when setting pinned locals */
1859 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1864 case MONO_TYPE_FNPTR
:
1866 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1867 * in native int. (#688008).
1869 if (arg
->type
!= STACK_I4
&& arg
->type
!= STACK_PTR
&& arg
->type
!= STACK_MP
)
1872 case MONO_TYPE_CLASS
:
1873 case MONO_TYPE_STRING
:
1874 case MONO_TYPE_OBJECT
:
1875 case MONO_TYPE_SZARRAY
:
1876 case MONO_TYPE_ARRAY
:
1877 if (arg
->type
!= STACK_OBJ
)
1879 /* FIXME: check type compatibility */
1883 if (arg
->type
!= STACK_I8
)
1887 if (arg
->type
!= cfg
->r4_stack_type
)
1891 if (arg
->type
!= STACK_R8
)
1894 case MONO_TYPE_VALUETYPE
:
1895 if (arg
->type
!= STACK_VTYPE
)
1897 klass
= mono_class_from_mono_type_internal (simple_type
);
1898 if (klass
!= arg
->klass
)
1901 case MONO_TYPE_TYPEDBYREF
:
1902 if (arg
->type
!= STACK_VTYPE
)
1904 klass
= mono_class_from_mono_type_internal (simple_type
);
1905 if (klass
!= arg
->klass
)
1908 case MONO_TYPE_GENERICINST
:
1909 if (mono_type_generic_inst_is_valuetype (simple_type
)) {
1910 MonoClass
*target_class
;
1911 if (arg
->type
!= STACK_VTYPE
)
1913 klass
= mono_class_from_mono_type_internal (simple_type
);
1914 target_class
= mono_class_from_mono_type_internal (target
);
1915 /* The second cases is needed when doing partial sharing */
1916 if (klass
!= arg
->klass
&& target_class
!= arg
->klass
&& target_class
!= mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (arg
->klass
))))
1920 if (arg
->type
!= STACK_OBJ
)
1922 /* FIXME: check type compatibility */
1926 case MONO_TYPE_MVAR
:
1927 g_assert (cfg
->gshared
);
1928 if (mini_type_var_is_vt (simple_type
)) {
1929 if (arg
->type
!= STACK_VTYPE
)
1932 if (arg
->type
!= STACK_OBJ
)
1937 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type
->type
);
1945 * Emit some implicit conversions which are not part of the .net spec, but are allowed by MS.NET.
1948 convert_value (MonoCompile
*cfg
, MonoType
*type
, MonoInst
*ins
)
1952 type
= mini_get_underlying_type (type
);
1953 switch (type
->type
) {
1955 if (ins
->type
== STACK_R8
) {
1956 int dreg
= alloc_freg (cfg
);
1958 EMIT_NEW_UNALU (cfg
, conv
, OP_FCONV_TO_R4
, dreg
, ins
->dreg
);
1959 conv
->type
= STACK_R4
;
1964 if (ins
->type
== STACK_R4
) {
1965 int dreg
= alloc_freg (cfg
);
1967 EMIT_NEW_UNALU (cfg
, conv
, OP_RCONV_TO_R8
, dreg
, ins
->dreg
);
1968 conv
->type
= STACK_R8
;
1979 * Prepare arguments for passing to a function call.
1980 * Return a non-zero value if the arguments can't be passed to the given
1982 * The type checks are not yet complete and some conversions may need
1983 * casts on 32 or 64 bit architectures.
1985 * FIXME: implement this using target_type_is_incompatible ()
1988 check_call_signature (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**args
)
1990 MonoType
*simple_type
;
1994 if (args
[0]->type
!= STACK_OBJ
&& args
[0]->type
!= STACK_MP
&& args
[0]->type
!= STACK_PTR
)
1998 for (i
= 0; i
< sig
->param_count
; ++i
) {
1999 if (sig
->params
[i
]->byref
) {
2000 if (args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_PTR
)
2004 simple_type
= mini_get_underlying_type (sig
->params
[i
]);
2006 switch (simple_type
->type
) {
2007 case MONO_TYPE_VOID
:
2015 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
)
2021 case MONO_TYPE_FNPTR
:
2022 if (args
[i
]->type
!= STACK_I4
&& args
[i
]->type
!= STACK_PTR
&& args
[i
]->type
!= STACK_MP
&& args
[i
]->type
!= STACK_OBJ
)
2025 case MONO_TYPE_CLASS
:
2026 case MONO_TYPE_STRING
:
2027 case MONO_TYPE_OBJECT
:
2028 case MONO_TYPE_SZARRAY
:
2029 case MONO_TYPE_ARRAY
:
2030 if (args
[i
]->type
!= STACK_OBJ
)
2035 if (args
[i
]->type
!= STACK_I8
)
2039 if (args
[i
]->type
!= cfg
->r4_stack_type
)
2043 if (args
[i
]->type
!= STACK_R8
)
2046 case MONO_TYPE_VALUETYPE
:
2047 if (m_class_is_enumtype (simple_type
->data
.klass
)) {
2048 simple_type
= mono_class_enum_basetype_internal (simple_type
->data
.klass
);
2051 if (args
[i
]->type
!= STACK_VTYPE
)
2054 case MONO_TYPE_TYPEDBYREF
:
2055 if (args
[i
]->type
!= STACK_VTYPE
)
2058 case MONO_TYPE_GENERICINST
:
2059 simple_type
= m_class_get_byval_arg (simple_type
->data
.generic_class
->container_class
);
2062 case MONO_TYPE_MVAR
:
2064 if (args
[i
]->type
!= STACK_VTYPE
)
2068 g_error ("unknown type 0x%02x in check_call_signature",
2076 mono_patch_info_new (MonoMemPool
*mp
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
2078 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc (mp
, sizeof (MonoJumpInfo
));
2082 ji
->data
.target
= target
;
2088 mini_class_check_context_used (MonoCompile
*cfg
, MonoClass
*klass
)
2091 return mono_class_check_context_used (klass
);
2097 mini_method_check_context_used (MonoCompile
*cfg
, MonoMethod
*method
)
2100 return mono_method_check_context_used (method
);
2106 * check_method_sharing:
2108 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2111 check_method_sharing (MonoCompile
*cfg
, MonoMethod
*cmethod
, gboolean
*out_pass_vtable
, gboolean
*out_pass_mrgctx
)
2113 gboolean pass_vtable
= FALSE
;
2114 gboolean pass_mrgctx
= FALSE
;
2116 if (((cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
) || m_class_is_valuetype (cmethod
->klass
)) &&
2117 (mono_class_is_ginst (cmethod
->klass
) || mono_class_is_gtd (cmethod
->klass
))) {
2118 gboolean sharable
= FALSE
;
2120 if (mono_method_is_generic_sharable_full (cmethod
, TRUE
, TRUE
, TRUE
))
2124 * Pass vtable iff target method might
2125 * be shared, which means that sharing
2126 * is enabled for its class and its
2127 * context is sharable (and it's not a
2130 if (sharable
&& !(mini_method_get_context (cmethod
) && mini_method_get_context (cmethod
)->method_inst
))
2134 if (mini_method_needs_mrgctx (cmethod
)) {
2135 if (mini_method_is_default_method (cmethod
))
2136 pass_vtable
= FALSE
;
2138 g_assert (!pass_vtable
);
2140 if (mono_method_is_generic_sharable_full (cmethod
, TRUE
, TRUE
, TRUE
)) {
2143 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (mono_method_signature_internal (cmethod
)))
2148 if (out_pass_vtable
)
2149 *out_pass_vtable
= pass_vtable
;
2150 if (out_pass_mrgctx
)
2151 *out_pass_mrgctx
= pass_mrgctx
;
2155 direct_icalls_enabled (MonoCompile
*cfg
, MonoMethod
*method
)
2157 if (cfg
->gen_sdb_seq_points
|| cfg
->disable_direct_icalls
)
2160 if (method
&& mono_aot_direct_icalls_enabled_for_method (cfg
, method
))
2163 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2165 if (cfg
->compile_llvm
&& !cfg
->llvm_only
)
2173 mono_emit_jit_icall_by_info (MonoCompile
*cfg
, int il_offset
, MonoJitICallInfo
*info
, MonoInst
**args
)
2176 * Call the jit icall without a wrapper if possible.
2177 * The wrapper is needed to be able to do stack walks for asynchronously suspended
2178 * threads when debugging.
2180 if (direct_icalls_enabled (cfg
, NULL
)) {
2183 if (!info
->wrapper_method
) {
2184 info
->wrapper_method
= mono_marshal_get_icall_wrapper (info
, TRUE
);
2185 mono_memory_barrier ();
2189 * Inline the wrapper method, which is basically a call to the C icall, and
2190 * an exception check.
2192 costs
= inline_method (cfg
, info
->wrapper_method
, NULL
,
2193 args
, NULL
, il_offset
, TRUE
);
2194 g_assert (costs
> 0);
2195 g_assert (!MONO_TYPE_IS_VOID (info
->sig
->ret
));
2199 return mono_emit_jit_icall_id (cfg
, mono_jit_icall_info_id (info
), args
);
2203 mono_emit_widen_call_res (MonoCompile
*cfg
, MonoInst
*ins
, MonoMethodSignature
*fsig
)
2205 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
2206 if ((fsig
->pinvoke
|| LLVM_ENABLED
) && !fsig
->ret
->byref
) {
2210 * Native code might return non register sized integers
2211 * without initializing the upper bits.
2213 switch (mono_type_to_load_membase (cfg
, fsig
->ret
)) {
2214 case OP_LOADI1_MEMBASE
:
2215 widen_op
= OP_ICONV_TO_I1
;
2217 case OP_LOADU1_MEMBASE
:
2218 widen_op
= OP_ICONV_TO_U1
;
2220 case OP_LOADI2_MEMBASE
:
2221 widen_op
= OP_ICONV_TO_I2
;
2223 case OP_LOADU2_MEMBASE
:
2224 widen_op
= OP_ICONV_TO_U2
;
2230 if (widen_op
!= -1) {
2231 int dreg
= alloc_preg (cfg
);
2234 EMIT_NEW_UNALU (cfg
, widen
, widen_op
, dreg
, ins
->dreg
);
2235 widen
->type
= ins
->type
;
2245 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2246 MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
);
2249 emit_method_access_failure (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
2252 args
[0] = emit_get_rgctx_method (cfg
, mono_method_check_context_used (caller
), caller
, MONO_RGCTX_INFO_METHOD
);
2253 args
[1] = emit_get_rgctx_method (cfg
, mono_method_check_context_used (callee
), callee
, MONO_RGCTX_INFO_METHOD
);
2254 mono_emit_jit_icall (cfg
, mono_throw_method_access
, args
);
2258 emit_bad_image_failure (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
2260 mono_emit_jit_icall (cfg
, mono_throw_bad_image
, NULL
);
2264 get_method_nofail (MonoClass
*klass
, const char *method_name
, int num_params
, int flags
)
2268 method
= mono_class_get_method_from_name_checked (klass
, method_name
, num_params
, flags
, error
);
2269 mono_error_assert_ok (error
);
2270 g_assertf (method
, "Could not lookup method %s in %s", method_name
, m_class_get_name (klass
));
2275 mini_get_memcpy_method (void)
2277 static MonoMethod
*memcpy_method
= NULL
;
2278 if (!memcpy_method
) {
2279 memcpy_method
= get_method_nofail (mono_defaults
.string_class
, "memcpy", 3, 0);
2281 g_error ("Old corlib found. Install a new one");
2283 return memcpy_method
;
2287 mini_emit_storing_write_barrier (MonoCompile
*cfg
, MonoInst
*ptr
, MonoInst
*value
)
2292 * Add a release memory barrier so the object contents are flushed
2293 * to memory before storing the reference into another object.
2295 if (mini_debug_options
.clr_memory_model
)
2296 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_REL
);
2298 EMIT_NEW_STORE_MEMBASE (cfg
, store
, OP_STORE_MEMBASE_REG
, ptr
->dreg
, 0, value
->dreg
);
2300 mini_emit_write_barrier (cfg
, ptr
, value
);
2305 mini_emit_write_barrier (MonoCompile
*cfg
, MonoInst
*ptr
, MonoInst
*value
)
2307 int card_table_shift_bits
;
2308 target_mgreg_t card_table_mask
;
2310 MonoInst
*dummy_use
;
2311 int nursery_shift_bits
;
2312 size_t nursery_size
;
2314 if (!cfg
->gen_write_barriers
)
2317 //method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1])
2319 card_table
= mono_gc_get_target_card_table (&card_table_shift_bits
, &card_table_mask
);
2321 mono_gc_get_nursery (&nursery_shift_bits
, &nursery_size
);
2323 if (cfg
->backend
->have_card_table_wb
&& !cfg
->compile_aot
&& card_table
&& nursery_shift_bits
> 0 && !COMPILE_LLVM (cfg
)) {
2326 MONO_INST_NEW (cfg
, wbarrier
, OP_CARD_TABLE_WBARRIER
);
2327 wbarrier
->sreg1
= ptr
->dreg
;
2328 wbarrier
->sreg2
= value
->dreg
;
2329 MONO_ADD_INS (cfg
->cbb
, wbarrier
);
2330 } else if (card_table
) {
2331 int offset_reg
= alloc_preg (cfg
);
2336 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2337 * collector case, so, for the serial collector, it might slightly slow down nursery
2338 * collections. We also expect that the host system and the target system have the same card
2339 * table configuration, which is the case if they have the same pointer size.
2342 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHR_UN_IMM
, offset_reg
, ptr
->dreg
, card_table_shift_bits
);
2343 if (card_table_mask
)
2344 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PAND_IMM
, offset_reg
, offset_reg
, card_table_mask
);
2346 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2347 * IMM's larger than 32bits.
2349 ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
, NULL
);
2350 card_reg
= ins
->dreg
;
2352 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, offset_reg
, offset_reg
, card_reg
);
2353 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STOREI1_MEMBASE_IMM
, offset_reg
, 0, 1);
2355 MonoMethod
*write_barrier
= mono_gc_get_write_barrier ();
2356 mono_emit_method_call (cfg
, write_barrier
, &ptr
, NULL
);
2359 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, value
);
2363 mini_get_memset_method (void)
2365 static MonoMethod
*memset_method
= NULL
;
2366 if (!memset_method
) {
2367 memset_method
= get_method_nofail (mono_defaults
.string_class
, "memset", 3, 0);
2369 g_error ("Old corlib found. Install a new one");
2371 return memset_method
;
2375 mini_emit_initobj (MonoCompile
*cfg
, MonoInst
*dest
, const guchar
*ip
, MonoClass
*klass
)
2377 MonoInst
*iargs
[3];
2380 MonoMethod
*memset_method
;
2381 MonoInst
*size_ins
= NULL
;
2382 MonoInst
*bzero_ins
= NULL
;
2383 static MonoMethod
*bzero_method
;
2385 /* FIXME: Optimize this for the case when dest is an LDADDR */
2386 mono_class_init_internal (klass
);
2387 if (mini_is_gsharedvt_klass (klass
)) {
2388 size_ins
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_VALUE_SIZE
);
2389 bzero_ins
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_BZERO
);
2391 bzero_method
= get_method_nofail (mono_defaults
.string_class
, "bzero_aligned_1", 2, 0);
2392 g_assert (bzero_method
);
2394 iargs
[1] = size_ins
;
2395 mini_emit_calli (cfg
, mono_method_signature_internal (bzero_method
), iargs
, bzero_ins
, NULL
, NULL
);
2399 klass
= mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass
)));
2401 n
= mono_class_value_size (klass
, &align
);
2403 if (n
<= TARGET_SIZEOF_VOID_P
* 8) {
2404 mini_emit_memset (cfg
, dest
->dreg
, 0, n
, 0, align
);
2407 memset_method
= mini_get_memset_method ();
2409 EMIT_NEW_ICONST (cfg
, iargs
[1], 0);
2410 EMIT_NEW_ICONST (cfg
, iargs
[2], n
);
2411 mono_emit_method_call (cfg
, memset_method
, iargs
, NULL
);
2416 context_used_is_mrgctx (MonoCompile
*cfg
, int context_used
)
2418 /* gshared dim methods use an mrgctx */
2419 if (mini_method_is_default_method (cfg
->method
))
2420 return context_used
!= 0;
2421 return context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
;
2427 * Emit IR to return either the this pointer for instance method,
2428 * or the mrgctx for static methods.
/*
 * emit_get_rgctx:
 *   CONTEXT_USED tells which parts of the generic context the caller needs.
 *   Only valid for gshared methods (asserted below). Depending on the method
 *   kind, the rgctx is reached via the mrgctx variable, the vtable variable,
 *   or the receiver's vtable.
 */
2431 emit_get_rgctx (MonoCompile
*cfg
, int context_used
)
2433 MonoInst
*this_ins
= NULL
;
2434 MonoMethod
*method
= cfg
->method
;
2436 g_assert (cfg
->gshared
);
/* Non-static reference-type instance methods can reach the context through 'this'. */
2438 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
) &&
2439 !(context_used
& MONO_GENERIC_CONTEXT_USED_METHOD
) &&
2440 !m_class_is_valuetype (method
->klass
))
2441 EMIT_NEW_VARLOAD (cfg
, this_ins
, cfg
->this_arg
, mono_get_object_type ());
/* mrgctx case: load the mrgctx from its dedicated variable. */
2443 if (context_used_is_mrgctx (cfg
, context_used
)) {
2444 MonoInst
*mrgctx_loc
, *mrgctx_var
;
2446 if (!mini_method_is_default_method (method
)) {
2447 g_assert (!this_ins
);
2448 g_assert (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
);
2451 mrgctx_loc
= mono_get_vtable_var (cfg
);
2452 EMIT_NEW_TEMPLOAD (cfg
, mrgctx_var
, mrgctx_loc
->inst_c0
);
/* Static or valuetype methods: the vtable variable holds the context. */
2455 } else if (method
->flags
& METHOD_ATTRIBUTE_STATIC
|| m_class_is_valuetype (method
->klass
)) {
2456 MonoInst
*vtable_loc
, *vtable_var
;
2458 g_assert (!this_ins
);
2460 vtable_loc
= mono_get_vtable_var (cfg
);
2461 EMIT_NEW_TEMPLOAD (cfg
, vtable_var
, vtable_loc
->inst_c0
);
/* If the variable actually holds an mrgctx, dereference its class_vtable field. */
2463 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
) {
2464 MonoInst
*mrgctx_var
= vtable_var
;
2467 vtable_reg
= alloc_preg (cfg
);
2468 EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_var
, OP_LOAD_MEMBASE
, vtable_reg
, mrgctx_var
->dreg
, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext
, class_vtable
));
2469 vtable_var
->type
= STACK_PTR
;
/* Instance method on a reference type: load the vtable out of 'this'. */
2477 vtable_reg
= alloc_preg (cfg
);
2478 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, vtable_reg
, this_ins
->dreg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP, zero-initialized) a MonoJumpInfoRgctxEntry describing
 *   an rgctx slot: owning METHOD/klass, whether the lookup goes through an
 *   mrgctx (IN_MRGCTX), the patch (PATCH_TYPE/PATCH_DATA) identifying the
 *   looked-up item, and the kind of info to fetch (INFO_TYPE).
 *   Both the entry and its embedded MonoJumpInfo live in the mempool; the
 *   caller must not free them.
 */
2483 static MonoJumpInfoRgctxEntry
*
2484 mono_patch_info_rgctx_entry_new (MonoMemPool
*mp
, MonoMethod
*method
, gboolean in_mrgctx
, MonoJumpInfoType patch_type
, gconstpointer patch_data
, MonoRgctxInfoType info_type
)
2486 MonoJumpInfoRgctxEntry
*res
= (MonoJumpInfoRgctxEntry
*)mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfoRgctxEntry
));
2488 res
->d
.method
= method
;
2490 res
->d
.klass
= method
->klass
;
2491 res
->in_mrgctx
= in_mrgctx
;
2492 res
->data
= (MonoJumpInfo
*)mono_mempool_alloc0 (mp
, sizeof (MonoJumpInfo
));
2493 res
->data
->type
= patch_type
;
2494 res
->data
->data
.target
= patch_data
;
2495 res
->info_type
= info_type
;
/*
 * emit_rgctx_fetch_inline:
 *   Emit IR to fetch the value of ENTRY from the runtime generic context
 *   RGCTX, inlining the fast path: walk the rgctx table arrays, null-checking
 *   each level, and fall back to the mono_fill_method_rgctx /
 *   mono_fill_class_rgctx icalls when a table or slot is still unallocated.
 *   NOTE(review): several intermediate lines of this function are missing
 *   from this chunk (gaps in the embedded numbering), including the AOT/
 *   dynamic-slot branch structure — confirm against the full file.
 */
2501 emit_rgctx_fetch_inline (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2503 MonoInst
*args
[16];
/* Slow path only: the slot index is resolved at runtime via an AOT constant. */
2506 // FIXME: No fastpath since the slot is not a compile time constant
2508 EMIT_NEW_AOTCONST (cfg
, args
[1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX
, entry
);
2509 if (entry
->in_mrgctx
)
2510 call
= mono_emit_jit_icall (cfg
, mono_fill_method_rgctx
, args
);
2512 call
= mono_emit_jit_icall (cfg
, mono_fill_class_rgctx
, args
);
2516 * FIXME: This can be called during decompose, which is a problem since it creates
2518 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
2520 int i
, slot
, depth
, index
, rgctx_reg
, val_reg
, res_reg
;
2522 MonoBasicBlock
*is_null_bb
, *end_bb
;
2523 MonoInst
*res
, *ins
, *call
;
/* Translate the entry into (mrgctx?, depth, index) coordinates in the table. */
2526 slot
= mini_get_rgctx_entry_slot (entry
);
2528 mrgctx
= MONO_RGCTX_SLOT_IS_MRGCTX (slot
);
2529 index
= MONO_RGCTX_SLOT_INDEX (slot
);
2531 index
+= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT
/ TARGET_SIZEOF_VOID_P
;
/* Find the nesting depth of the array holding this index. */
2532 for (depth
= 0; ; ++depth
) {
2533 int size
= mono_class_rgctx_get_array_size (depth
, mrgctx
);
2535 if (index
< size
- 1)
2540 NEW_BBLOCK (cfg
, end_bb
);
2541 NEW_BBLOCK (cfg
, is_null_bb
);
2544 rgctx_reg
= rgctx
->dreg
;
2546 rgctx_reg
= alloc_preg (cfg
);
2548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, rgctx_reg
, rgctx
->dreg
, MONO_STRUCT_OFFSET (MonoVTable
, runtime_generic_context
));
2549 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
2550 NEW_BBLOCK (cfg
, is_null_bb
);
/* Context not allocated yet -> take the slow path. */
2552 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rgctx_reg
, 0);
2553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
/* Walk down 'depth' levels of nested arrays, null-checking each. */
2556 for (i
= 0; i
< depth
; ++i
) {
2557 int array_reg
= alloc_preg (cfg
);
2559 /* load ptr to next array */
2560 if (mrgctx
&& i
== 0)
2561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, rgctx_reg
, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT
);
2563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, rgctx_reg
, 0);
2564 rgctx_reg
= array_reg
;
2565 /* is the ptr null? */
2566 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rgctx_reg
, 0);
2567 /* if yes, jump to actual trampoline */
2568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
/* Load the slot itself (+1 skips the "next array" pointer at offset 0). */
2572 val_reg
= alloc_preg (cfg
);
2573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, val_reg
, rgctx_reg
, (index
+ 1) * TARGET_SIZEOF_VOID_P
);
2574 /* is the slot null? */
2575 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, val_reg
, 0);
2576 /* if yes, jump to actual trampoline */
2577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
/* Fast path hit: move the cached value into the result register. */
2580 res_reg
= alloc_preg (cfg
);
2581 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2582 ins
->dreg
= res_reg
;
2583 ins
->sreg1
= val_reg
;
2584 MONO_ADD_INS (cfg
->cbb
, ins
);
2586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
/* Slow path: call the filler icall to compute and cache the value. */
2589 MONO_START_BB (cfg
, is_null_bb
);
2591 EMIT_NEW_ICONST (cfg
, args
[1], index
);
2593 call
= mono_emit_jit_icall (cfg
, mono_fill_method_rgctx
, args
);
2595 call
= mono_emit_jit_icall (cfg
, mono_fill_class_rgctx
, args
);
2596 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2597 ins
->dreg
= res_reg
;
2598 ins
->sreg1
= call
->dreg
;
2599 MONO_ADD_INS (cfg
->cbb
, ins
);
2600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
2602 MONO_START_BB (cfg
, end_bb
);
2611 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * emit_rgctx_fetch:
 *   Dispatch between the inlined fetch sequence and an out-of-line
 *   MONO_PATCH_INFO_RGCTX_FETCH trampoline call (which branch is taken
 *   depends on a condition not visible in this chunk — presumably llvm_only;
 *   confirm against the full file).
 */
2615 emit_rgctx_fetch (MonoCompile
*cfg
, MonoInst
*rgctx
, MonoJumpInfoRgctxEntry
*entry
)
2618 return emit_rgctx_fetch_inline (cfg
, rgctx
, entry
);
2620 return mini_emit_abs_call (cfg
, MONO_PATCH_INFO_RGCTX_FETCH
, entry
, mono_icall_sig_ptr_ptr
, &rgctx
);
2624 * mini_emit_get_rgctx_klass:
2626 * Emit IR to load the property RGCTX_TYPE of KLASS. If context_used is 0, emit
2627 * normal constants, else emit a load from the rgctx.
2630 mini_emit_get_rgctx_klass (MonoCompile
*cfg
, int context_used
,
2631 MonoClass
*klass
, MonoRgctxInfoType rgctx_type
)
/* No shared context: the class is known at JIT time, emit a constant. */
2633 if (!context_used
) {
2636 switch (rgctx_type
) {
2637 case MONO_RGCTX_INFO_KLASS
:
2638 EMIT_NEW_CLASSCONST (cfg
, ins
, klass
);
/* Other rgctx_type values are not expected on the constant path. */
2641 g_assert_not_reached ();
/* Shared context: fetch the value through the rgctx at run time. */
2645 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_CLASS
, klass
, rgctx_type
);
2646 MonoInst
*rgctx
= emit_get_rgctx (cfg
, context_used
);
2648 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
/*
 * emit_get_rgctx_sig:
 *   Emit IR to fetch the RGCTX_TYPE property of signature SIG through the
 *   rgctx (MONO_PATCH_INFO_SIGNATURE entry).
 */
2652 emit_get_rgctx_sig (MonoCompile
*cfg
, int context_used
,
2653 MonoMethodSignature
*sig
, MonoRgctxInfoType rgctx_type
)
2655 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_SIGNATURE
, sig
, rgctx_type
);
2656 MonoInst
*rgctx
= emit_get_rgctx (cfg
, context_used
);
2658 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR to fetch gsharedvt call info for (SIG, CMETHOD) through the
 *   rgctx. The MonoJumpInfoGSharedVtCall descriptor is mempool-allocated.
 */
2662 emit_get_rgctx_gsharedvt_call (MonoCompile
*cfg
, int context_used
,
2663 MonoMethodSignature
*sig
, MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
)
2665 MonoJumpInfoGSharedVtCall
*call_info
;
2666 MonoJumpInfoRgctxEntry
*entry
;
2669 call_info
= (MonoJumpInfoGSharedVtCall
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoGSharedVtCall
));
2670 call_info
->sig
= sig
;
2671 call_info
->method
= cmethod
;
2673 entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_GSHAREDVT_CALL
, call_info
, rgctx_type
);
2674 rgctx
= emit_get_rgctx (cfg
, context_used
);
2676 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2680 * emit_get_rgctx_virt_method:
2682 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/*
 * Emits an rgctx fetch of a MONO_PATCH_INFO_VIRT_METHOD entry; the
 * (klass, method) pair descriptor is mempool-allocated.
 */
2685 emit_get_rgctx_virt_method (MonoCompile
*cfg
, int context_used
,
2686 MonoClass
*klass
, MonoMethod
*virt_method
, MonoRgctxInfoType rgctx_type
)
2688 MonoJumpInfoVirtMethod
*info
;
2689 MonoJumpInfoRgctxEntry
*entry
;
2692 info
= (MonoJumpInfoVirtMethod
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfoVirtMethod
));
2693 info
->klass
= klass
;
2694 info
->method
= virt_method
;
2696 entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_VIRT_METHOD
, info
, rgctx_type
);
2697 rgctx
= emit_get_rgctx (cfg
, context_used
);
2699 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR to fetch the gsharedvt info (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO)
 *   of CMETHOD through the rgctx, using the supplied INFO descriptor.
 */
2703 emit_get_rgctx_gsharedvt_method (MonoCompile
*cfg
, int context_used
,
2704 MonoMethod
*cmethod
, MonoGSharedVtMethodInfo
*info
)
2706 MonoJumpInfoRgctxEntry
*entry
;
2709 entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_GSHAREDVT_METHOD
, info
, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO
);
2710 rgctx
= emit_get_rgctx (cfg
, context_used
);
2712 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
2716 * emit_get_rgctx_method:
2718 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2719 * normal constants, else emit a load from the rgctx.
/* A context_used of -1 means "compute it here from CMETHOD". */
2722 emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2723 MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
)
2725 if (context_used
== -1)
2726 context_used
= mono_method_check_context_used (cmethod
);
/* Method known at JIT time: emit the matching constant form. */
2728 if (!context_used
) {
2731 switch (rgctx_type
) {
2732 case MONO_RGCTX_INFO_METHOD
:
2733 EMIT_NEW_METHODCONST (cfg
, ins
, cmethod
);
2735 case MONO_RGCTX_INFO_METHOD_RGCTX
:
2736 EMIT_NEW_METHOD_RGCTX_CONST (cfg
, ins
, cmethod
);
2738 case MONO_RGCTX_INFO_METHOD_FTNDESC
:
2739 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_METHOD_FTNDESC
, cmethod
);
2742 g_assert_not_reached ();
/* Shared: fetch through the rgctx via a METHODCONST patch entry. */
2745 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_METHODCONST
, cmethod
, rgctx_type
);
2746 MonoInst
*rgctx
= emit_get_rgctx (cfg
, context_used
);
2748 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
/*
 * emit_get_rgctx_field:
 *   Emit IR to fetch the RGCTX_TYPE property of FIELD through the rgctx
 *   (MONO_PATCH_INFO_FIELD entry).
 */
2753 emit_get_rgctx_field (MonoCompile
*cfg
, int context_used
,
2754 MonoClassField
*field
, MonoRgctxInfoType rgctx_type
)
2756 MonoJumpInfoRgctxEntry
*entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_FIELD
, field
, rgctx_type
);
2757 MonoInst
*rgctx
= emit_get_rgctx (cfg
, context_used
);
2759 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
/*
 * mini_emit_get_rgctx_method:
 *   Public wrapper around the static emit_get_rgctx_method ().
 */
2763 mini_emit_get_rgctx_method (MonoCompile
*cfg
, int context_used
,
2764 MonoMethod
*cmethod
, MonoRgctxInfoType rgctx_type
)
2766 return emit_get_rgctx_method (cfg
, context_used
, cmethod
, rgctx_type
);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the entry describing (DATA, RGCTX_TYPE) in
 *   cfg->gsharedvt_info->entries, reusing an existing matching entry when
 *   possible (except for MONO_RGCTX_INFO_LOCAL_OFFSET, which is never
 *   deduplicated) and appending a new one otherwise. The entries array is
 *   grown geometrically (x2, starting at 16) in the cfg mempool; the old
 *   array is intentionally leaked into the mempool.
 */
2770 get_gsharedvt_info_slot (MonoCompile
*cfg
, gpointer data
, MonoRgctxInfoType rgctx_type
)
2772 MonoGSharedVtMethodInfo
*info
= cfg
->gsharedvt_info
;
2773 MonoRuntimeGenericContextInfoTemplate
*template_
;
/* Linear scan for an existing entry with the same (type, data). */
2778 for (i
= 0; i
< info
->num_entries
; ++i
) {
2779 MonoRuntimeGenericContextInfoTemplate
*otemplate
= &info
->entries
[i
];
2781 if (otemplate
->info_type
== rgctx_type
&& otemplate
->data
== data
&& rgctx_type
!= MONO_RGCTX_INFO_LOCAL_OFFSET
)
/* Grow the array when full. */
2785 if (info
->num_entries
== info
->count_entries
) {
2786 MonoRuntimeGenericContextInfoTemplate
*new_entries
;
2787 int new_count_entries
= info
->count_entries
? info
->count_entries
* 2 : 16;
2789 new_entries
= (MonoRuntimeGenericContextInfoTemplate
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoRuntimeGenericContextInfoTemplate
) * new_count_entries
);
2791 memcpy (new_entries
, info
->entries
, sizeof (MonoRuntimeGenericContextInfoTemplate
) * info
->count_entries
);
2792 info
->entries
= new_entries
;
2793 info
->count_entries
= new_count_entries
;
/* Append the new template at the end. */
2796 idx
= info
->num_entries
;
2797 template_
= &info
->entries
[idx
];
2798 template_
->info_type
= rgctx_type
;
2799 template_
->data
= data
;
2801 info
->num_entries
++;
2807 * emit_get_gsharedvt_info:
2809 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/* Reserves a slot for (DATA, RGCTX_TYPE) and emits a single pointer-sized
 * load of entries [idx] from the runtime info pointed to by
 * cfg->gsharedvt_info_var. */
2812 emit_get_gsharedvt_info (MonoCompile
*cfg
, gpointer data
, MonoRgctxInfoType rgctx_type
)
2817 idx
= get_gsharedvt_info_slot (cfg
, data
, rgctx_type
);
2818 /* Load info->entries [idx] */
2819 dreg
= alloc_preg (cfg
);
2820 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, cfg
->gsharedvt_info_var
->dreg
, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo
, entries
) + (idx
* TARGET_SIZEOF_VOID_P
));
/*
 * mini_emit_get_gsharedvt_info_klass:
 *   Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type.
 */
2826 mini_emit_get_gsharedvt_info_klass (MonoCompile
*cfg
, MonoClass
*klass
, MonoRgctxInfoType rgctx_type
)
2828 return emit_get_gsharedvt_info (cfg
, m_class_get_byval_arg (klass
), rgctx_type
);
2832 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *   Emit IR that ensures KLASS's static constructor has run. The vtable is
 *   obtained either through the rgctx (shared code) or as a constant. The
 *   check itself is either a single OP_GENERIC_CLASS_INIT opcode (when the
 *   backend supports it and we are not compiling with LLVM) or an explicit
 *   inline test of vtable->initialized with a fallback icall.
 */
2835 emit_class_init (MonoCompile
*cfg
, MonoClass
*klass
)
2837 MonoInst
*vtable_arg
;
2840 context_used
= mini_class_check_context_used (cfg
, klass
);
2843 vtable_arg
= mini_emit_get_rgctx_klass (cfg
, context_used
,
2844 klass
, MONO_RGCTX_INFO_VTABLE
);
2846 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, klass
, cfg
->error
);
/* Vtable creation can fail; surface it as a compile exception. */
2847 if (!is_ok (cfg
->error
)) {
2848 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2852 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
2855 if (!COMPILE_LLVM (cfg
) && cfg
->backend
->have_op_generic_class_init
) {
2859 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
2860 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
2862 MONO_INST_NEW (cfg
, ins
, OP_GENERIC_CLASS_INIT
);
2863 ins
->sreg1
= vtable_arg
->dreg
;
2864 MONO_ADD_INS (cfg
->cbb
, ins
);
/* Inline check: skip the icall when vtable->initialized is already set. */
2867 MonoBasicBlock
*inited_bb
;
2869 inited_reg
= alloc_ireg (cfg
);
2871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, inited_reg
, vtable_arg
->dreg
, MONO_STRUCT_OFFSET (MonoVTable
, initialized
));
2873 NEW_BBLOCK (cfg
, inited_bb
);
2875 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, inited_reg
, 0);
2876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBNE_UN
, inited_bb
);
2878 mono_emit_jit_icall (cfg
, mono_generic_class_init
, &vtable_arg
);
2880 MONO_START_BB (cfg
, inited_bb
);
/*
 * emit_seq_point:
 *   Append a sequence point (for the debugger) at IL offset IP for METHOD,
 *   but only when sequence-point generation is enabled and METHOD is the
 *   method actually being compiled (i.e. not for inlined callees).
 *   NONEMPTY_STACK marks seq points where the IL stack is not empty.
 */
2885 emit_seq_point (MonoCompile
*cfg
, MonoMethod
*method
, guint8
* ip
, gboolean intr_loc
, gboolean nonempty_stack
)
2889 if (cfg
->gen_seq_points
&& cfg
->method
== method
) {
2890 NEW_SEQ_POINT (cfg
, ins
, ip
- cfg
->header
->code
, intr_loc
);
2892 ins
->flags
|= MONO_INST_NONEMPTY_STACK
;
2893 MONO_ADD_INS (cfg
->cbb
, ins
);
/* Remember the latest seq point for later passes. */
2894 cfg
->last_seq_point
= ins
;
/*
 * mini_save_cast_details:
 *   When --debug=casts is enabled, emit IR that records the runtime source
 *   class (read from OBJ_REG's vtable) and the target KLASS into the JIT TLS
 *   area, so a failing cast can produce a detailed error message.
 *   NULL_CHECK requests skipping the bookkeeping for null objects.
 *   No-op unless mini_debug_options.better_cast_details is set.
 */
2899 mini_save_cast_details (MonoCompile
*cfg
, MonoClass
*klass
, int obj_reg
, gboolean null_check
)
2901 if (mini_debug_options
.better_cast_details
) {
2902 int vtable_reg
= alloc_preg (cfg
);
2903 int klass_reg
= alloc_preg (cfg
);
2904 MonoBasicBlock
*is_null_bb
= NULL
;
2908 NEW_BBLOCK (cfg
, is_null_bb
);
/* Null objects carry no vtable to record; branch around the stores. */
2910 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, obj_reg
, 0);
2911 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, is_null_bb
);
2914 tls_get
= mono_create_tls_get (cfg
, TLS_KEY_JIT_TLS
);
/* Fixed: dropped the stray '.' that used to follow the newline. */
2916 fprintf (stderr
, "error: --debug=casts not supported on this platform.\n");
/* obj->vtable->klass is the actual source class of the cast. */
2920 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, vtable_reg
, obj_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
2921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, MONO_STRUCT_OFFSET (MonoVTable
, klass
));
2923 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, MONO_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), klass_reg
);
/* Target class may itself need an rgctx lookup in shared code. */
2925 MonoInst
*class_ins
= mini_emit_get_rgctx_klass (cfg
, mini_class_check_context_used (cfg
, klass
), klass
, MONO_RGCTX_INFO_KLASS
);
2926 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, tls_get
->dreg
, MONO_STRUCT_OFFSET (MonoJitTlsData
, class_cast_to
), class_ins
->dreg
);
2929 MONO_START_BB (cfg
, is_null_bb
);
/*
 * mini_reset_cast_details:
 *   Clear the cast-details record in the JIT TLS area after a cast
 *   succeeded. No-op unless --debug=casts is enabled.
 */
2934 mini_reset_cast_details (MonoCompile
*cfg
)
2936 /* Reset the variables holding the cast details */
2937 if (mini_debug_options
.better_cast_details
) {
2938 MonoInst
*tls_get
= mono_create_tls_get (cfg
, TLS_KEY_JIT_TLS
);
2939 /* It is enough to reset the from field */
2940 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, tls_get
->dreg
, MONO_STRUCT_OFFSET (MonoJitTlsData
, class_cast_from
), 0);
2945 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit IR that verifies OBJ is an instance of exactly ARRAY_CLASS (used for
 *   stelem-style covariance checks), throwing ArrayTypeMismatchException on
 *   mismatch. The comparison strategy depends on compilation mode:
 *   MONO_OPT_SHARED compares klass pointers via a runtime constant, shared
 *   generic code compares vtables via an rgctx fetch, AOT compares against a
 *   vtable constant, and plain JIT compares against the raw vtable address.
 */
2948 mini_emit_check_array_type (MonoCompile
*cfg
, MonoInst
*obj
, MonoClass
*array_class
)
2950 int vtable_reg
= alloc_preg (cfg
);
2953 context_used
= mini_class_check_context_used (cfg
, array_class
);
/* Record cast details first so a failure can be diagnosed. */
2955 mini_save_cast_details (cfg
, array_class
, obj
->dreg
, FALSE
);
/* Faulting load: also performs the null check on OBJ. */
2957 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
2959 if (cfg
->opt
& MONO_OPT_SHARED
) {
2960 int class_reg
= alloc_preg (cfg
);
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, class_reg
, vtable_reg
, MONO_STRUCT_OFFSET (MonoVTable
, klass
));
2964 ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_CLASS
, array_class
);
2965 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, class_reg
, ins
->dreg
);
2966 } else if (context_used
) {
2967 MonoInst
*vtable_ins
;
2969 vtable_ins
= mini_emit_get_rgctx_klass (cfg
, context_used
, array_class
, MONO_RGCTX_INFO_VTABLE
);
2970 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vtable_ins
->dreg
);
2972 if (cfg
->compile_aot
) {
2976 if (!(vtable
= mono_class_vtable_checked (cfg
->domain
, array_class
, cfg
->error
))) {
2977 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2980 vt_reg
= alloc_preg (cfg
);
2981 MONO_EMIT_NEW_VTABLECONST (cfg
, vt_reg
, vtable
);
2982 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, vtable_reg
, vt_reg
);
2985 if (!(vtable
= mono_class_vtable_checked (cfg
->domain
, array_class
, cfg
->error
))) {
2986 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
/* JIT: embed the vtable address directly as an immediate. */
2989 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, vtable_reg
, (gssize
)vtable
);
2993 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "ArrayTypeMismatchException");
2995 mini_reset_cast_details (cfg
);
2999 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3000 * generic code is generated.
/*
 * handle_unbox_nullable:
 *   Emit a call to Nullable<T>.Unbox (or UnboxExact for enum T) on VAL.
 *   Shared code fetches the method's code address (ftndesc for llvm-only,
 *   generic method code otherwise) through the rgctx and emits an indirect
 *   call; non-shared code emits a direct call, passing the vtable when
 *   method sharing requires it.
 */
3003 handle_unbox_nullable (MonoCompile
* cfg
, MonoInst
* val
, MonoClass
* klass
, int context_used
)
3007 if (m_class_is_enumtype (mono_class_get_nullable_param_internal (klass
)))
3008 method
= get_method_nofail (klass
, "UnboxExact", 1, 0);
3010 method
= get_method_nofail (klass
, "Unbox", 1, 0);
/* Shared-code path: indirect call through an rgctx-fetched address. */
3014 MonoInst
*rgctx
, *addr
;
3016 /* FIXME: What if the class is shared? We might not
3017 have to get the address of the method from the
3019 if (cfg
->llvm_only
) {
3020 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3021 MONO_RGCTX_INFO_METHOD_FTNDESC
);
3022 cfg
->signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->signatures
, mono_method_signature_internal (method
));
3023 return mini_emit_llvmonly_calli (cfg
, mono_method_signature_internal (method
), &val
, addr
);
3025 addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3026 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3027 rgctx
= emit_get_rgctx (cfg
, context_used
);
3029 return mini_emit_calli (cfg
, mono_method_signature_internal (method
), &val
, addr
, NULL
, rgctx
);
/* Non-shared path: direct call, with vtable argument if required. */
3032 gboolean pass_vtable
, pass_mrgctx
;
3033 MonoInst
*rgctx_arg
= NULL
;
3035 check_method_sharing (cfg
, method
, &pass_vtable
, &pass_mrgctx
);
3036 g_assert (!pass_mrgctx
);
3039 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, cfg
->error
);
3041 mono_error_assert_ok (cfg
->error
);
3042 EMIT_NEW_VTABLECONST (cfg
, rgctx_arg
, vtable
);
3045 return mini_emit_method_call_full (cfg
, method
, NULL
, FALSE
, &val
, NULL
, NULL
, rgctx_arg
);
/*
 * handle_unbox:
 *   Emit IR for the 'unbox' opcode on sp [0]: verify the object is a boxed
 *   instance of KLASS (rank must be 0; element class compared either against
 *   an rgctx-fetched class in shared code or via mini_emit_class_check), then
 *   produce the address of the value payload (obj + sizeof (MonoObject)).
 */
3050 handle_unbox (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, int context_used
)
3054 int vtable_reg
= alloc_dreg (cfg
,STACK_PTR
);
3055 int klass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3056 int eclass_reg
= alloc_dreg (cfg
,STACK_PTR
);
3057 int rank_reg
= alloc_dreg (cfg
,STACK_I4
);
3059 obj_reg
= sp
[0]->dreg
;
/* Faulting load doubles as the null check. */
3060 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg
, vtable_reg
, obj_reg
, MONO_STRUCT_OFFSET (MonoObject
, vtable
));
3061 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, rank_reg
, vtable_reg
, MONO_STRUCT_OFFSET (MonoVTable
, rank
));
3063 /* FIXME: generics */
3064 g_assert (m_class_get_rank (klass
) == 0);
/* Arrays can never be unboxed to a non-array klass. */
3067 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, rank_reg
, 0);
3068 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, vtable_reg
, MONO_STRUCT_OFFSET (MonoVTable
, klass
));
3071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, eclass_reg
, klass_reg
, m_class_offsetof_element_class ());
/* Shared code: fetch the expected element class through the rgctx. */
3074 MonoInst
*element_class
;
3076 /* This assertion is from the unboxcast insn */
3077 g_assert (m_class_get_rank (klass
) == 0);
3079 element_class
= mini_emit_get_rgctx_klass (cfg
, context_used
,
3080 klass
, MONO_RGCTX_INFO_ELEMENT_KLASS
);
3082 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, eclass_reg
, element_class
->dreg
);
3083 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
3085 mini_save_cast_details (cfg
, m_class_get_element_class (klass
), obj_reg
, FALSE
);
3086 mini_emit_class_check (cfg
, eclass_reg
, m_class_get_element_class (klass
));
3087 mini_reset_cast_details (cfg
);
/* The unboxed value lives right after the object header. */
3090 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_MP
), obj_reg
, MONO_ABI_SIZEOF (MonoObject
));
3091 MONO_ADD_INS (cfg
->cbb
, add
);
3092 add
->type
= STACK_MP
;
/*
 * handle_unbox_gsharedvt:
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt type whose concrete
 *   representation is only known at run time. The generated code branches on
 *   the runtime box type of KLASS (fetched via gsharedvt info):
 *   - vtype: address is obj + sizeof (MonoObject);
 *   - reference type: the reference itself is spilled to a temporary and the
 *     address of that temporary is used;
 *   - nullable: the runtime Nullable unbox helper is called indirectly.
 *   All paths converge and the value is loaded from the resulting address.
 */
3099 handle_unbox_gsharedvt (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*obj
)
3101 MonoInst
*addr
, *klass_inst
, *is_ref
, *args
[16];
3102 MonoBasicBlock
*is_ref_bb
, *is_nullable_bb
, *end_bb
;
3106 klass_inst
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_KLASS
);
/* Cast check against the runtime class before unboxing. */
3112 args
[1] = klass_inst
;
3115 obj
= mono_emit_jit_icall (cfg
, mono_object_castclass_unbox
, args
);
3117 NEW_BBLOCK (cfg
, is_ref_bb
);
3118 NEW_BBLOCK (cfg
, is_nullable_bb
);
3119 NEW_BBLOCK (cfg
, end_bb
);
3120 is_ref
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_CLASS_BOX_TYPE
);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, is_ref
->dreg
, MONO_GSHAREDVT_BOX_TYPE_REF
);
3122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_ref_bb
);
3124 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, is_ref
->dreg
, MONO_GSHAREDVT_BOX_TYPE_NULLABLE
);
3125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_nullable_bb
);
3127 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3128 addr_reg
= alloc_dreg (cfg
, STACK_MP
);
/* Vtype (fall-through) case: payload follows the object header. */
3132 NEW_BIALU_IMM (cfg
, addr
, OP_ADD_IMM
, addr_reg
, obj
->dreg
, MONO_ABI_SIZEOF (MonoObject
));
3133 MONO_ADD_INS (cfg
->cbb
, addr
);
3135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3138 MONO_START_BB (cfg
, is_ref_bb
);
3140 /* Save the ref to a temporary */
3141 dreg
= alloc_ireg (cfg
);
3142 EMIT_NEW_VARLOADA_VREG (cfg
, addr
, dreg
, m_class_get_byval_arg (klass
));
3143 addr
->dreg
= addr_reg
;
3144 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, obj
->dreg
);
3145 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
/* Nullable case: call the runtime-provided unbox helper indirectly. */
3148 MONO_START_BB (cfg
, is_nullable_bb
);
3151 MonoInst
*addr
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX
);
3152 MonoInst
*unbox_call
;
3153 MonoMethodSignature
*unbox_sig
;
/* Hand-built signature: KLASS unbox (object), since the concrete
 * Nullable method cannot be constructed at JIT time. */
3155 unbox_sig
= (MonoMethodSignature
*)mono_mempool_alloc0 (cfg
->mempool
, MONO_SIZEOF_METHOD_SIGNATURE
+ (1 * sizeof (MonoType
*)));
3156 unbox_sig
->ret
= m_class_get_byval_arg (klass
);
3157 unbox_sig
->param_count
= 1;
3158 unbox_sig
->params
[0] = mono_get_object_type ();
3161 unbox_call
= mini_emit_llvmonly_calli (cfg
, unbox_sig
, &obj
, addr
);
3163 unbox_call
= mini_emit_calli (cfg
, unbox_sig
, &obj
, addr
, NULL
, NULL
);
3165 EMIT_NEW_VARLOADA_VREG (cfg
, addr
, unbox_call
->dreg
, m_class_get_byval_arg (klass
));
3166 addr
->dreg
= addr_reg
;
3169 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3172 MONO_START_BB (cfg
, end_bb
);
/* All paths merged: load the value through the computed address. */
3175 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr_reg
, 0);
3181 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR to allocate an instance of KLASS (FOR_BOX distinguishes box
 *   allocations for the managed allocator). Strategy depends on mode:
 *   shared generic code fetches klass/vtable via the rgctx and calls either
 *   a managed allocator (when the instance size is known) or a JIT icall;
 *   MONO_OPT_SHARED uses ves_icall_object_new with an explicit domain;
 *   AOT out-of-line corlib allocations use a token-based helper; otherwise
 *   the vtable constant plus managed allocator or
 *   ves_icall_object_new_specific is used. Abstract classes are rejected
 *   with a MemberAccessException.
 */
3184 handle_alloc (MonoCompile
*cfg
, MonoClass
*klass
, gboolean for_box
, int context_used
)
3186 MonoInst
*iargs
[2];
3187 MonoJitICallId alloc_ftn
;
3189 if (mono_class_get_flags (klass
) & TYPE_ATTRIBUTE_ABSTRACT
) {
3190 char* full_name
= mono_type_get_full_name (klass
);
3191 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3192 mono_error_set_member_access (cfg
->error
, "Cannot create an abstract class: %s", full_name
);
/* Shared generic code path. */
3199 MonoRgctxInfoType rgctx_info
;
3200 MonoInst
*iargs
[2];
3201 gboolean known_instance_size
= !mini_is_gsharedvt_klass (klass
);
3203 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (klass
, for_box
, known_instance_size
);
3205 if (cfg
->opt
& MONO_OPT_SHARED
)
3206 rgctx_info
= MONO_RGCTX_INFO_KLASS
;
3208 rgctx_info
= MONO_RGCTX_INFO_VTABLE
;
3209 data
= mini_emit_get_rgctx_klass (cfg
, context_used
, klass
, rgctx_info
);
3211 if (cfg
->opt
& MONO_OPT_SHARED
) {
3212 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3214 alloc_ftn
= MONO_JIT_ICALL_ves_icall_object_new
;
3217 alloc_ftn
= MONO_JIT_ICALL_ves_icall_object_new_specific
;
/* Managed (inline) allocator needs the exact instance size. */
3220 if (managed_alloc
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
3221 if (known_instance_size
) {
3222 int size
= mono_class_instance_size (klass
);
3223 if (size
< MONO_ABI_SIZEOF (MonoObject
))
3224 g_error ("Invalid size %d for class %s", size
, mono_type_get_full_name (klass
));
3226 EMIT_NEW_ICONST (cfg
, iargs
[1], size
);
3228 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
3231 return mono_emit_jit_icall_id (cfg
, alloc_ftn
, iargs
);
/* Non-shared paths. */
3234 if (cfg
->opt
& MONO_OPT_SHARED
) {
3235 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
3236 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
3238 alloc_ftn
= MONO_JIT_ICALL_ves_icall_object_new
;
3239 } else if (cfg
->compile_aot
&& cfg
->cbb
->out_of_line
&& m_class_get_type_token (klass
) && m_class_get_image (klass
) == mono_defaults
.corlib
&& !mono_class_is_ginst (klass
)) {
3240 /* This happens often in argument checking code, eg. throw new FooException... */
3241 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3242 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (m_class_get_type_token (klass
)));
3243 alloc_ftn
= MONO_JIT_ICALL_mono_helper_newobj_mscorlib
;
3245 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, klass
, cfg
->error
);
3247 if (!is_ok (cfg
->error
)) {
3248 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3252 MonoMethod
*managed_alloc
= mono_gc_get_managed_allocator (klass
, for_box
, TRUE
);
3254 if (managed_alloc
) {
3255 int size
= mono_class_instance_size (klass
);
3256 if (size
< MONO_ABI_SIZEOF (MonoObject
))
3257 g_error ("Invalid size %d for class %s", size
, mono_type_get_full_name (klass
));
3259 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3260 EMIT_NEW_ICONST (cfg
, iargs
[1], size
);
3261 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, NULL
);
3263 alloc_ftn
= MONO_JIT_ICALL_ves_icall_object_new_specific
;
3264 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
3267 return mono_emit_jit_icall_id (cfg
, alloc_ftn
, iargs
);
3271 * Returns NULL and set the cfg exception on error.
/*
 * mini_emit_box:
 *   Emit IR to box VAL as an instance of KLASS. Rejects IsByRefLike types.
 *   Nullable<T> is boxed by calling Nullable<T>.Box (directly, or indirectly
 *   through an rgctx-fetched address in shared code). gsharedvt classes get
 *   a runtime three-way branch (vtype / reference / nullable) analogous to
 *   handle_unbox_gsharedvt. The plain case allocates and stores the value
 *   after the object header.
 */
3274 mini_emit_box (MonoCompile
*cfg
, MonoInst
*val
, MonoClass
*klass
, int context_used
)
3276 MonoInst
*alloc
, *ins
;
3278 if (G_UNLIKELY (m_class_is_byreflike (klass
))) {
3279 mono_error_set_bad_image (cfg
->error
, m_class_get_image (cfg
->method
->klass
), "Cannot box IsByRefLike type '%s.%s'", m_class_get_name_space (klass
), m_class_get_name (klass
));
3280 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
/* Nullable<T>: boxing is delegated to the managed Box method. */
3284 if (mono_class_is_nullable (klass
)) {
3285 MonoMethod
* method
= get_method_nofail (klass
, "Box", 1, 0);
3288 if (cfg
->llvm_only
&& cfg
->gsharedvt
) {
3289 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3290 MONO_RGCTX_INFO_METHOD_FTNDESC
);
3291 return mini_emit_llvmonly_calli (cfg
, mono_method_signature_internal (method
), &val
, addr
);
3293 /* FIXME: What if the class is shared? We might not
3294 have to get the method address from the RGCTX. */
3295 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, method
,
3296 MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
3297 MonoInst
*rgctx
= emit_get_rgctx (cfg
, context_used
);
3299 return mini_emit_calli (cfg
, mono_method_signature_internal (method
), &val
, addr
, NULL
, rgctx
);
3302 gboolean pass_vtable
, pass_mrgctx
;
3303 MonoInst
*rgctx_arg
= NULL
;
3305 check_method_sharing (cfg
, method
, &pass_vtable
, &pass_mrgctx
);
3306 g_assert (!pass_mrgctx
);
3309 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, cfg
->error
);
3311 mono_error_assert_ok (cfg
->error
);
3312 EMIT_NEW_VTABLECONST (cfg
, rgctx_arg
, vtable
);
3315 return mini_emit_method_call_full (cfg
, method
, NULL
, FALSE
, &val
, NULL
, NULL
, rgctx_arg
);
/* gsharedvt: box type only known at run time; branch on it. */
3319 if (mini_is_gsharedvt_klass (klass
)) {
3320 MonoBasicBlock
*is_ref_bb
, *is_nullable_bb
, *end_bb
;
3321 MonoInst
*res
, *is_ref
, *src_var
, *addr
;
3324 dreg
= alloc_ireg (cfg
);
3326 NEW_BBLOCK (cfg
, is_ref_bb
);
3327 NEW_BBLOCK (cfg
, is_nullable_bb
);
3328 NEW_BBLOCK (cfg
, end_bb
);
3329 is_ref
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_CLASS_BOX_TYPE
);
3330 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, is_ref
->dreg
, MONO_GSHAREDVT_BOX_TYPE_REF
);
3331 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_ref_bb
);
3333 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, is_ref
->dreg
, MONO_GSHAREDVT_BOX_TYPE_NULLABLE
);
3334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_nullable_bb
);
/* vtype (fall-through) case: allocate and copy the value in. */
3337 alloc
= handle_alloc (cfg
, klass
, TRUE
, context_used
);
3340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), alloc
->dreg
, MONO_ABI_SIZEOF (MonoObject
), val
->dreg
);
3341 ins
->opcode
= OP_STOREV_MEMBASE
;
3343 EMIT_NEW_UNALU (cfg
, res
, OP_MOVE
, dreg
, alloc
->dreg
);
3344 res
->type
= STACK_OBJ
;
3346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
/* Reference case: "boxing" is just reading the reference back. */
3349 MONO_START_BB (cfg
, is_ref_bb
);
3351 /* val is a vtype, so has to load the value manually */
3352 src_var
= get_vreg_to_inst (cfg
, val
->dreg
);
3354 src_var
= mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (klass
), OP_LOCAL
, val
->dreg
);
3355 EMIT_NEW_VARLOADA (cfg
, addr
, src_var
, src_var
->inst_vtype
);
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, addr
->dreg
, 0);
3357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
/* Nullable case: call the runtime Box helper indirectly. */
3360 MONO_START_BB (cfg
, is_nullable_bb
);
3363 MonoInst
*addr
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
,
3364 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX
);
3366 MonoMethodSignature
*box_sig
;
3369 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3370 * construct that method at JIT time, so have to do things by hand.
3372 box_sig
= (MonoMethodSignature
*)mono_mempool_alloc0 (cfg
->mempool
, MONO_SIZEOF_METHOD_SIGNATURE
+ (1 * sizeof (MonoType
*)));
3373 box_sig
->ret
= mono_get_object_type ();
3374 box_sig
->param_count
= 1;
3375 box_sig
->params
[0] = m_class_get_byval_arg (klass
);
3378 box_call
= mini_emit_llvmonly_calli (cfg
, box_sig
, &val
, addr
);
3380 box_call
= mini_emit_calli (cfg
, box_sig
, &val
, addr
, NULL
, NULL
);
3381 EMIT_NEW_UNALU (cfg
, res
, OP_MOVE
, dreg
, box_call
->dreg
);
3382 res
->type
= STACK_OBJ
;
3386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
3388 MONO_START_BB (cfg
, end_bb
);
/* Plain value type: allocate, then store VAL after the header. */
3393 alloc
= handle_alloc (cfg
, klass
, TRUE
, context_used
);
3397 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), alloc
->dreg
, MONO_ABI_SIZEOF (MonoObject
), val
->dreg
);
/*
 * method_needs_stack_walk:
 *   Whether a call to CMETHOD requires the caller to be visible to a stack
 *   walk. The visible check covers System.Type::GetType; the rest of the
 *   function is outside this chunk.
 */
3402 method_needs_stack_walk (MonoCompile
*cfg
, MonoMethod
*cmethod
)
3404 if (cmethod
->klass
== mono_defaults
.systemtype_class
) {
3405 if (!strcmp (cmethod
->name
, "GetType"))
/*
 * mini_handle_enum_has_flag:
 *   Emit an intrinsic expansion of Enum.HasFlag for KLASS: computes
 *   (enum & flag) == flag without boxing. The enum value comes either from
 *   memory via ENUM_THIS or directly from ENUM_VAL_REG (-1 means unused).
 *   Operand width (I4 vs I8) is chosen from the enum's underlying type; the
 *   composite instructions are decomposed immediately for the backend.
 */
3411 G_GNUC_UNUSED MonoInst
*
3412 mini_handle_enum_has_flag (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*enum_this
, int enum_val_reg
, MonoInst
*enum_flag
)
3414 MonoType
*enum_type
= mono_type_get_underlying_type (m_class_get_byval_arg (klass
));
3415 guint32 load_opc
= mono_type_to_load_membase (cfg
, enum_type
);
3418 switch (enum_type
->type
) {
3421 #if SIZEOF_REGISTER == 8
3433 MonoInst
*load
= NULL
, *and_
, *cmp
, *ceq
;
3434 int enum_reg
= is_i4
? alloc_ireg (cfg
) : alloc_lreg (cfg
);
3435 int and_reg
= is_i4
? alloc_ireg (cfg
) : alloc_lreg (cfg
);
3436 int dest_reg
= alloc_ireg (cfg
);
/* Load the enum value from 'this', or use the supplied vreg. */
3439 EMIT_NEW_LOAD_MEMBASE (cfg
, load
, load_opc
, enum_reg
, enum_this
->dreg
, 0);
3441 g_assert (enum_val_reg
!= -1);
3442 enum_reg
= enum_val_reg
;
/* (enum & flag) == flag */
3444 EMIT_NEW_BIALU (cfg
, and_
, is_i4
? OP_IAND
: OP_LAND
, and_reg
, enum_reg
, enum_flag
->dreg
);
3445 EMIT_NEW_BIALU (cfg
, cmp
, is_i4
? OP_ICOMPARE
: OP_LCOMPARE
, -1, and_reg
, enum_flag
->dreg
);
3446 EMIT_NEW_UNALU (cfg
, ceq
, is_i4
? OP_ICEQ
: OP_LCEQ
, dest_reg
, -1);
3448 ceq
->type
= STACK_I4
;
/* Decompose here since this can run after the decompose pass. */
3451 load
= load
? mono_decompose_opcode (cfg
, load
) : NULL
;
3452 and_
= mono_decompose_opcode (cfg
, and_
);
3453 cmp
= mono_decompose_opcode (cfg
, cmp
);
3454 ceq
= mono_decompose_opcode (cfg
, ceq
);
3462 emit_get_rgctx_dele_tramp (MonoCompile
*cfg
, int context_used
,
3463 MonoClass
*klass
, MonoMethod
*virt_method
, gboolean _virtual
, MonoRgctxInfoType rgctx_type
)
3465 MonoDelegateClassMethodPair
*info
;
3466 MonoJumpInfoRgctxEntry
*entry
;
3469 info
= (MonoDelegateClassMethodPair
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoDelegateClassMethodPair
));
3470 info
->klass
= klass
;
3471 info
->method
= virt_method
;
3472 info
->is_virtual
= _virtual
;
3474 entry
= mono_patch_info_rgctx_entry_new (cfg
->mempool
, cfg
->method
, context_used_is_mrgctx (cfg
, context_used
), MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, info
, rgctx_type
);
3475 rgctx
= emit_get_rgctx (cfg
, context_used
);
3477 return emit_rgctx_fetch (cfg
, rgctx
, entry
);
3482 * Returns NULL and set the cfg exception on error.
3484 static G_GNUC_UNUSED MonoInst
*
3485 handle_delegate_ctor (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*target
, MonoMethod
*method
, int target_method_context_used
, int invoke_context_used
, gboolean virtual_
)
3489 gpointer trampoline
;
3490 MonoInst
*obj
, *tramp_ins
;
3494 if (virtual_
&& !cfg
->llvm_only
) {
3495 MonoMethod
*invoke
= mono_get_delegate_invoke_internal (klass
);
3498 //FIXME verify & fix any issue with removing invoke_context_used restriction
3499 if (invoke_context_used
|| !mono_get_delegate_virtual_invoke_impl (mono_method_signature_internal (invoke
), target_method_context_used
? NULL
: method
))
3503 obj
= handle_alloc (cfg
, klass
, FALSE
, invoke_context_used
);
3507 /* Inline the contents of mono_delegate_ctor */
3509 /* Set target field */
3510 /* Optimize away setting of NULL target */
3511 if (!MONO_INS_IS_PCONST_NULL (target
)) {
3512 if (!(method
->flags
& METHOD_ATTRIBUTE_STATIC
)) {
3513 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, target
->dreg
, 0);
3514 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "NullReferenceException");
3516 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, target
), target
->dreg
);
3517 if (cfg
->gen_write_barriers
) {
3518 dreg
= alloc_preg (cfg
);
3519 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
3520 mini_emit_write_barrier (cfg
, ptr
, target
);
3524 /* Set method field */
3525 if (!(target_method_context_used
|| invoke_context_used
) || cfg
->llvm_only
) {
3526 //If compiling with gsharing enabled, it's faster to load method the delegate trampoline info than to use a rgctx slot
3527 MonoInst
*method_ins
= emit_get_rgctx_method (cfg
, target_method_context_used
, method
, MONO_RGCTX_INFO_METHOD
);
3528 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method
), method_ins
->dreg
);
3532 * To avoid looking up the compiled code belonging to the target method
3533 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3534 * store it, and we fill it after the method has been compiled.
3536 if (!method
->dynamic
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
3537 MonoInst
*code_slot_ins
;
3539 if (target_method_context_used
) {
3540 code_slot_ins
= emit_get_rgctx_method (cfg
, target_method_context_used
, method
, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE
);
3542 domain
= mono_domain_get ();
3543 mono_domain_lock (domain
);
3544 if (!domain_jit_info (domain
)->method_code_hash
)
3545 domain_jit_info (domain
)->method_code_hash
= g_hash_table_new (NULL
, NULL
);
3546 code_slot
= (guint8
**)g_hash_table_lookup (domain_jit_info (domain
)->method_code_hash
, method
);
3548 code_slot
= (guint8
**)mono_domain_alloc0 (domain
, sizeof (gpointer
));
3549 g_hash_table_insert (domain_jit_info (domain
)->method_code_hash
, method
, code_slot
);
3551 mono_domain_unlock (domain
);
3553 code_slot_ins
= mini_emit_runtime_constant (cfg
, MONO_PATCH_INFO_METHOD_CODE_SLOT
, method
);
3555 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method_code
), code_slot_ins
->dreg
);
3558 if (cfg
->llvm_only
) {
3560 MonoInst
*args
[ ] = {
3563 emit_get_rgctx_method (cfg
, target_method_context_used
, method
, MONO_RGCTX_INFO_METHOD
)
3565 mono_emit_jit_icall (cfg
, mini_llvmonly_init_delegate_virtual
, args
);
3567 mono_emit_jit_icall (cfg
, mini_llvmonly_init_delegate
, &obj
);
3572 if (target_method_context_used
|| invoke_context_used
) {
3573 tramp_ins
= emit_get_rgctx_dele_tramp (cfg
, target_method_context_used
| invoke_context_used
, klass
, method
, virtual_
, MONO_RGCTX_INFO_DELEGATE_TRAMP_INFO
);
3575 //This is emited as a contant store for the non-shared case.
3576 //We copy from the delegate trampoline info as it's faster than a rgctx fetch
3577 dreg
= alloc_preg (cfg
);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, tramp_ins
->dreg
, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo
, method
));
3579 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method
), dreg
);
3580 } else if (cfg
->compile_aot
) {
3581 MonoDelegateClassMethodPair
*del_tramp
;
3583 del_tramp
= (MonoDelegateClassMethodPair
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoDelegateClassMethodPair
));
3584 del_tramp
->klass
= klass
;
3585 del_tramp
->method
= method
;
3586 del_tramp
->is_virtual
= virtual_
;
3587 EMIT_NEW_AOTCONST (cfg
, tramp_ins
, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE
, del_tramp
);
3590 trampoline
= mono_create_delegate_virtual_trampoline (cfg
->domain
, klass
, method
);
3592 trampoline
= mono_create_delegate_trampoline_info (cfg
->domain
, klass
, method
);
3593 EMIT_NEW_PCONST (cfg
, tramp_ins
, trampoline
);
3596 /* Set invoke_impl field */
3598 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), tramp_ins
->dreg
);
3600 dreg
= alloc_preg (cfg
);
3601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, tramp_ins
->dreg
, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo
, invoke_impl
));
3602 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, invoke_impl
), dreg
);
3604 dreg
= alloc_preg (cfg
);
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, tramp_ins
->dreg
, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo
, method_ptr
));
3606 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
), dreg
);
3609 dreg
= alloc_preg (cfg
);
3610 MONO_EMIT_NEW_ICONST (cfg
, dreg
, virtual_
? 1 : 0);
3611 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI1_MEMBASE_REG
, obj
->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method_is_virtual
), dreg
);
3613 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3619 * handle_constrained_gsharedvt_call:
3621 * Handle constrained calls where the receiver is a gsharedvt type.
3622 * Return the instruction representing the call. Set the cfg exception on failure.
3625 handle_constrained_gsharedvt_call (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
, MonoClass
*constrained_class
,
3626 gboolean
*ref_emit_widen
)
3628 MonoInst
*ins
= NULL
;
3629 gboolean emit_widen
= *ref_emit_widen
;
3633 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
3634 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
3635 * pack the arguments into an array, and do the rest of the work in in an icall.
3637 supported
= ((cmethod
->klass
== mono_defaults
.object_class
) || mono_class_is_interface (cmethod
->klass
) || (!m_class_is_valuetype (cmethod
->klass
) && m_class_get_image (cmethod
->klass
) != mono_defaults
.corlib
));
3639 supported
= (MONO_TYPE_IS_VOID (fsig
->ret
) || MONO_TYPE_IS_PRIMITIVE (fsig
->ret
) || MONO_TYPE_IS_REFERENCE (fsig
->ret
) || MONO_TYPE_ISSTRUCT (fsig
->ret
) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig
->ret
)) || mini_is_gsharedvt_type (fsig
->ret
));
3641 if (fsig
->param_count
== 0 || (!fsig
->hasthis
&& fsig
->param_count
== 1)) {
3644 /* Allow scalar parameters and a gsharedvt first parameter */
3645 supported
= MONO_TYPE_IS_PRIMITIVE (fsig
->params
[0]) || MONO_TYPE_IS_REFERENCE (fsig
->params
[0]) || fsig
->params
[0]->byref
|| mini_is_gsharedvt_type (fsig
->params
[0]);
3647 for (int i
= 1; i
< fsig
->param_count
; ++i
) {
3648 if (!(fsig
->params
[i
]->byref
|| MONO_TYPE_IS_PRIMITIVE (fsig
->params
[i
]) || MONO_TYPE_IS_REFERENCE (fsig
->params
[i
]) || MONO_TYPE_ISSTRUCT (fsig
->params
[i
])))
3655 MonoInst
*args
[16];
3658 * This case handles calls to
3659 * - object:ToString()/Equals()/GetHashCode(),
3660 * - System.IComparable<T>:CompareTo()
3661 * - System.IEquatable<T>:Equals ()
3662 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
3666 args
[1] = emit_get_rgctx_method (cfg
, mono_method_check_context_used (cmethod
), cmethod
, MONO_RGCTX_INFO_METHOD
);
3667 args
[2] = mini_emit_get_rgctx_klass (cfg
, mono_class_check_context_used (constrained_class
), constrained_class
, MONO_RGCTX_INFO_KLASS
);
3669 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
3670 if (fsig
->hasthis
&& fsig
->param_count
) {
3671 /* Call mono_gsharedvt_constrained_call (gpointer mp, MonoMethod *cmethod, MonoClass *klass, gboolean deref_arg, gpointer *args) */
3672 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
3673 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC_IMM
);
3674 ins
->dreg
= alloc_preg (cfg
);
3675 ins
->inst_imm
= fsig
->param_count
* sizeof (target_mgreg_t
);
3676 MONO_ADD_INS (cfg
->cbb
, ins
);
3679 /* Only the first argument is allowed to be gsharedvt */
3680 /* args [3] = deref_arg */
3681 if (mini_is_gsharedvt_type (fsig
->params
[0])) {
3683 ins
= mini_emit_get_gsharedvt_info_klass (cfg
, mono_class_from_mono_type_internal (fsig
->params
[0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE
);
3684 deref_arg_reg
= alloc_preg (cfg
);
3685 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
3686 EMIT_NEW_BIALU_IMM (cfg
, args
[3], OP_ISUB_IMM
, deref_arg_reg
, ins
->dreg
, 1);
3688 EMIT_NEW_ICONST (cfg
, args
[3], 0);
3691 for (int i
= 0; i
< fsig
->param_count
; ++i
) {
3694 if (mini_is_gsharedvt_type (fsig
->params
[i
]) || MONO_TYPE_IS_PRIMITIVE (fsig
->params
[i
]) || MONO_TYPE_ISSTRUCT (fsig
->params
[i
])) {
3695 EMIT_NEW_VARLOADA_VREG (cfg
, ins
, sp
[i
+ 1]->dreg
, fsig
->params
[i
]);
3696 addr_reg
= ins
->dreg
;
3697 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, args
[4]->dreg
, i
* sizeof (target_mgreg_t
), addr_reg
);
3699 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, args
[4]->dreg
, i
* sizeof (target_mgreg_t
), sp
[i
+ 1]->dreg
);
3703 EMIT_NEW_ICONST (cfg
, args
[3], 0);
3704 EMIT_NEW_ICONST (cfg
, args
[4], 0);
3706 ins
= mono_emit_jit_icall (cfg
, mono_gsharedvt_constrained_call
, args
);
3709 if (mini_is_gsharedvt_type (fsig
->ret
)) {
3710 ins
= handle_unbox_gsharedvt (cfg
, mono_class_from_mono_type_internal (fsig
->ret
), ins
);
3711 } else if (MONO_TYPE_IS_PRIMITIVE (fsig
->ret
) || MONO_TYPE_ISSTRUCT (fsig
->ret
) || m_class_is_enumtype (mono_class_from_mono_type_internal (fsig
->ret
))) {
3715 NEW_BIALU_IMM (cfg
, add
, OP_ADD_IMM
, alloc_dreg (cfg
, STACK_MP
), ins
->dreg
, MONO_ABI_SIZEOF (MonoObject
));
3716 MONO_ADD_INS (cfg
->cbb
, add
);
3718 NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, add
->dreg
, 0);
3719 MONO_ADD_INS (cfg
->cbb
, ins
);
3720 /* ins represents the call result */
3723 GSHAREDVT_FAILURE (CEE_CALLVIRT
);
3726 *ref_emit_widen
= emit_widen
;
3735 mono_emit_load_got_addr (MonoCompile
*cfg
)
3737 MonoInst
*getaddr
, *dummy_use
;
3739 if (!cfg
->got_var
|| cfg
->got_var_allocated
)
3742 MONO_INST_NEW (cfg
, getaddr
, OP_LOAD_GOTADDR
);
3743 getaddr
->cil_code
= cfg
->header
->code
;
3744 getaddr
->dreg
= cfg
->got_var
->dreg
;
3746 /* Add it to the start of the first bblock */
3747 if (cfg
->bb_entry
->code
) {
3748 getaddr
->next
= cfg
->bb_entry
->code
;
3749 cfg
->bb_entry
->code
= getaddr
;
3752 MONO_ADD_INS (cfg
->bb_entry
, getaddr
);
3754 cfg
->got_var_allocated
= TRUE
;
3757 * Add a dummy use to keep the got_var alive, since real uses might
3758 * only be generated by the back ends.
3759 * Add it to end_bblock, so the variable's lifetime covers the whole
3761 * It would be better to make the usage of the got var explicit in all
3762 * cases when the backend needs it (i.e. calls, throw etc.), so this
3763 * wouldn't be needed.
3765 NEW_DUMMY_USE (cfg
, dummy_use
, cfg
->got_var
);
3766 MONO_ADD_INS (cfg
->bb_exit
, dummy_use
);
3770 method_does_not_return (MonoMethod
*method
)
3772 // FIXME: Under netcore, these are decorated with the [DoesNotReturn] attribute
3773 return m_class_get_image (method
->klass
) == mono_defaults
.corlib
&&
3774 !strcmp (m_class_get_name (method
->klass
), "ThrowHelper") &&
3775 strstr (method
->name
, "Throw") == method
->name
&&
3776 !method
->is_inflated
;
3779 static int inline_limit
, llvm_jit_inline_limit
, llvm_aot_inline_limit
;
3780 static gboolean inline_limit_inited
;
3783 mono_method_check_inlining (MonoCompile
*cfg
, MonoMethod
*method
)
3785 MonoMethodHeaderSummary header
;
3788 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3789 MonoMethodSignature
*sig
= mono_method_signature_internal (method
);
3793 if (cfg
->disable_inline
)
3798 if (cfg
->inline_depth
> 10)
3801 if (!mono_method_get_header_summary (method
, &header
))
3804 /*runtime, icall and pinvoke are checked by summary call*/
3805 if ((method
->iflags
& METHOD_IMPL_ATTRIBUTE_NOINLINING
) ||
3806 (method
->iflags
& METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED
) ||
3807 (mono_class_is_marshalbyref (method
->klass
)) ||
3811 if (method
->flags
& METHOD_ATTRIBUTE_REQSECOBJ
)
3812 /* Used to mark methods containing StackCrawlMark locals */
3815 /* also consider num_locals? */
3816 /* Do the size check early to avoid creating vtables */
3817 if (!inline_limit_inited
) {
3819 if ((inlinelimit
= g_getenv ("MONO_INLINELIMIT"))) {
3820 inline_limit
= atoi (inlinelimit
);
3821 llvm_jit_inline_limit
= inline_limit
;
3822 llvm_aot_inline_limit
= inline_limit
;
3823 g_free (inlinelimit
);
3825 inline_limit
= INLINE_LENGTH_LIMIT
;
3826 llvm_jit_inline_limit
= LLVM_JIT_INLINE_LENGTH_LIMIT
;
3827 llvm_aot_inline_limit
= LLVM_AOT_INLINE_LENGTH_LIMIT
;
3829 inline_limit_inited
= TRUE
;
3832 #ifdef ENABLE_NETCORE
3833 if (COMPILE_LLVM (cfg
)) {
3834 if (cfg
->compile_aot
)
3835 limit
= llvm_aot_inline_limit
;
3837 limit
= llvm_jit_inline_limit
;
3839 limit
= inline_limit
;
3842 if (COMPILE_LLVM (cfg
) && !cfg
->compile_aot
)
3843 limit
= llvm_jit_inline_limit
;
3845 limit
= inline_limit
;
3847 if (header
.code_size
>= limit
&& !(method
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
))
3851 * if we can initialize the class of the method right away, we do,
3852 * otherwise we don't allow inlining if the class needs initialization,
3853 * since it would mean inserting a call to mono_runtime_class_init()
3854 * inside the inlined code
3856 if (cfg
->gshared
&& m_class_has_cctor (method
->klass
) && mini_class_check_context_used (cfg
, method
->klass
))
3859 if (!(cfg
->opt
& MONO_OPT_SHARED
)) {
3860 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
3861 if (method
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
) {
3862 if (m_class_has_cctor (method
->klass
)) {
3864 vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, error
);
3865 if (!is_ok (error
)) {
3866 mono_error_cleanup (error
);
3869 if (!cfg
->compile_aot
) {
3870 if (!mono_runtime_class_init_full (vtable
, error
)) {
3871 mono_error_cleanup (error
);
3876 } else if (mono_class_is_before_field_init (method
->klass
)) {
3877 if (cfg
->run_cctors
&& m_class_has_cctor (method
->klass
)) {
3879 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3880 if (!m_class_get_runtime_info (method
->klass
))
3881 /* No vtable created yet */
3883 vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, error
);
3884 if (!is_ok (error
)) {
3885 mono_error_cleanup (error
);
3888 /* This makes so that inline cannot trigger */
3889 /* .cctors: too many apps depend on them */
3890 /* running with a specific order... */
3891 if (! vtable
->initialized
)
3893 if (!mono_runtime_class_init_full (vtable
, error
)) {
3894 mono_error_cleanup (error
);
3898 } else if (mono_class_needs_cctor_run (method
->klass
, NULL
)) {
3900 if (!m_class_get_runtime_info (method
->klass
))
3901 /* No vtable created yet */
3903 vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, error
);
3904 if (!is_ok (error
)) {
3905 mono_error_cleanup (error
);
3908 if (!vtable
->initialized
)
3913 * If we're compiling for shared code
3914 * the cctor will need to be run at aot method load time, for example,
3915 * or at the end of the compilation of the inlining method.
3917 if (mono_class_needs_cctor_run (method
->klass
, NULL
) && !mono_class_is_before_field_init (method
->klass
))
3921 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3922 if (mono_arch_is_soft_float ()) {
3924 if (sig
->ret
&& sig
->ret
->type
== MONO_TYPE_R4
)
3926 for (i
= 0; i
< sig
->param_count
; ++i
)
3927 if (!sig
->params
[i
]->byref
&& sig
->params
[i
]->type
== MONO_TYPE_R4
)
3932 if (g_list_find (cfg
->dont_inline
, method
))
3935 if (mono_profiler_get_call_instrumentation_flags (method
))
3938 if (mono_profiler_coverage_instrumentation_enabled (method
))
3941 if (method_does_not_return (method
))
3948 mini_field_access_needs_cctor_run (MonoCompile
*cfg
, MonoMethod
*method
, MonoClass
*klass
, MonoVTable
*vtable
)
3950 if (!cfg
->compile_aot
) {
3952 if (vtable
->initialized
)
3956 if (mono_class_is_before_field_init (klass
)) {
3957 if (cfg
->method
== method
)
3961 if (!mono_class_needs_cctor_run (klass
, method
))
3964 if (! (method
->flags
& METHOD_ATTRIBUTE_STATIC
) && (klass
== method
->klass
))
3965 /* The initialization is already done before the method is called */
3972 mini_emit_sext_index_reg (MonoCompile
*cfg
, MonoInst
*index
)
3974 int index_reg
= index
->dreg
;
3977 #if SIZEOF_REGISTER == 8
3978 /* The array reg is 64 bits but the index reg is only 32 */
3979 if (COMPILE_LLVM (cfg
)) {
3981 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
3982 * during OP_BOUNDS_CHECK decomposition, and in the implementation
3983 * of OP_X86_LEA for llvm.
3985 index2_reg
= index_reg
;
3987 index2_reg
= alloc_preg (cfg
);
3988 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, index2_reg
, index_reg
);
3991 if (index
->type
== STACK_I8
) {
3992 index2_reg
= alloc_preg (cfg
);
3993 MONO_EMIT_NEW_UNALU (cfg
, OP_LCONV_TO_I4
, index2_reg
, index_reg
);
3995 index2_reg
= index_reg
;
4003 mini_emit_ldelema_1_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index
, gboolean bcheck
)
4007 int mult_reg
, add_reg
, array_reg
, index2_reg
;
4010 if (mini_is_gsharedvt_variable_klass (klass
)) {
4013 mono_class_init_internal (klass
);
4014 size
= mono_class_array_element_size (klass
);
4017 mult_reg
= alloc_preg (cfg
);
4018 array_reg
= arr
->dreg
;
4020 index2_reg
= mini_emit_sext_index_reg (cfg
, index
);
4023 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index2_reg
);
4025 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4026 if (size
== 1 || size
== 2 || size
== 4 || size
== 8) {
4027 static const int fast_log2
[] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4029 EMIT_NEW_X86_LEA (cfg
, ins
, array_reg
, index2_reg
, fast_log2
[size
], MONO_STRUCT_OFFSET (MonoArray
, vector
));
4031 ins
->type
= STACK_MP
;
4037 add_reg
= alloc_ireg_mp (cfg
);
4040 MonoInst
*rgctx_ins
;
4043 g_assert (cfg
->gshared
);
4044 context_used
= mini_class_check_context_used (cfg
, klass
);
4045 g_assert (context_used
);
4046 rgctx_ins
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE
);
4047 MONO_EMIT_NEW_BIALU (cfg
, OP_IMUL
, mult_reg
, index2_reg
, rgctx_ins
->dreg
);
4049 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_MUL_IMM
, mult_reg
, index2_reg
, size
);
4051 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, array_reg
, mult_reg
);
4052 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, MONO_STRUCT_OFFSET (MonoArray
, vector
));
4054 ins
->type
= STACK_MP
;
4055 MONO_ADD_INS (cfg
->cbb
, ins
);
4061 mini_emit_ldelema_2_ins (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
*arr
, MonoInst
*index_ins1
, MonoInst
*index_ins2
)
4063 int bounds_reg
= alloc_preg (cfg
);
4064 int add_reg
= alloc_ireg_mp (cfg
);
4065 int mult_reg
= alloc_preg (cfg
);
4066 int mult2_reg
= alloc_preg (cfg
);
4067 int low1_reg
= alloc_preg (cfg
);
4068 int low2_reg
= alloc_preg (cfg
);
4069 int high1_reg
= alloc_preg (cfg
);
4070 int high2_reg
= alloc_preg (cfg
);
4071 int realidx1_reg
= alloc_preg (cfg
);
4072 int realidx2_reg
= alloc_preg (cfg
);
4073 int sum_reg
= alloc_preg (cfg
);
4078 mono_class_init_internal (klass
);
4079 size
= mono_class_array_element_size (klass
);
4081 index1
= index_ins1
->dreg
;
4082 index2
= index_ins2
->dreg
;
4084 #if SIZEOF_REGISTER == 8
4085 /* The array reg is 64 bits but the index reg is only 32 */
4086 if (COMPILE_LLVM (cfg
)) {
4089 int tmpreg
= alloc_preg (cfg
);
4090 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, tmpreg
, index1
);
4092 tmpreg
= alloc_preg (cfg
);
4093 MONO_EMIT_NEW_UNALU (cfg
, OP_SEXT_I4
, tmpreg
, index2
);
4097 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4100 /* range checking */
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, bounds_reg
,
4102 arr
->dreg
, MONO_STRUCT_OFFSET (MonoArray
, bounds
));
4104 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low1_reg
,
4105 bounds_reg
, MONO_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4106 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx1_reg
, index1
, low1_reg
);
4107 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high1_reg
,
4108 bounds_reg
, MONO_STRUCT_OFFSET (MonoArrayBounds
, length
));
4109 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high1_reg
, realidx1_reg
);
4110 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
4112 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, low2_reg
,
4113 bounds_reg
, sizeof (MonoArrayBounds
) + MONO_STRUCT_OFFSET (MonoArrayBounds
, lower_bound
));
4114 MONO_EMIT_NEW_BIALU (cfg
, OP_PSUB
, realidx2_reg
, index2
, low2_reg
);
4115 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, high2_reg
,
4116 bounds_reg
, sizeof (MonoArrayBounds
) + MONO_STRUCT_OFFSET (MonoArrayBounds
, length
));
4117 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, high2_reg
, realidx2_reg
);
4118 MONO_EMIT_NEW_COND_EXC (cfg
, LE_UN
, "IndexOutOfRangeException");
4120 MONO_EMIT_NEW_BIALU (cfg
, OP_PMUL
, mult_reg
, high2_reg
, realidx1_reg
);
4121 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, mult_reg
, realidx2_reg
);
4122 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PMUL_IMM
, mult2_reg
, sum_reg
, size
);
4123 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, add_reg
, mult2_reg
, arr
->dreg
);
4124 NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, add_reg
, add_reg
, MONO_STRUCT_OFFSET (MonoArray
, vector
));
4126 ins
->type
= STACK_MP
;
4128 MONO_ADD_INS (cfg
->cbb
, ins
);
4134 mini_emit_ldelema_ins (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
**sp
, guchar
*ip
, gboolean is_set
)
4138 MonoMethod
*addr_method
;
4140 MonoClass
*eclass
= m_class_get_element_class (cmethod
->klass
);
4142 rank
= mono_method_signature_internal (cmethod
)->param_count
- (is_set
? 1: 0);
4145 return mini_emit_ldelema_1_ins (cfg
, eclass
, sp
[0], sp
[1], TRUE
);
4147 /* emit_ldelema_2 depends on OP_LMUL */
4148 if (!cfg
->backend
->emulate_mul_div
&& rank
== 2 && (cfg
->opt
& MONO_OPT_INTRINS
) && !mini_is_gsharedvt_variable_klass (eclass
)) {
4149 return mini_emit_ldelema_2_ins (cfg
, eclass
, sp
[0], sp
[1], sp
[2]);
4152 if (mini_is_gsharedvt_variable_klass (eclass
))
4155 element_size
= mono_class_array_element_size (eclass
);
4156 addr_method
= mono_marshal_get_array_address (rank
, element_size
);
4157 addr
= mono_emit_method_call (cfg
, addr_method
, sp
, NULL
);
4163 mini_class_is_reference (MonoClass
*klass
)
4165 return mini_type_is_reference (m_class_get_byval_arg (klass
));
4169 mini_emit_array_store (MonoCompile
*cfg
, MonoClass
*klass
, MonoInst
**sp
, gboolean safety_checks
)
4171 if (safety_checks
&& mini_class_is_reference (klass
) &&
4172 !(MONO_INS_IS_PCONST_NULL (sp
[2]))) {
4173 MonoClass
*obj_array
= mono_array_class_get_cached (mono_defaults
.object_class
, 1);
4174 MonoMethod
*helper
= mono_marshal_get_virtual_stelemref (obj_array
);
4175 MonoInst
*iargs
[3];
4178 mono_class_setup_vtable (obj_array
);
4179 g_assert (helper
->slot
);
4181 if (sp
[0]->type
!= STACK_OBJ
)
4183 if (sp
[2]->type
!= STACK_OBJ
)
4190 return mono_emit_method_call (cfg
, helper
, iargs
, sp
[0]);
4194 if (mini_is_gsharedvt_variable_klass (klass
)) {
4197 // FIXME-VT: OP_ICONST optimization
4198 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
4199 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr
->dreg
, 0, sp
[2]->dreg
);
4200 ins
->opcode
= OP_STOREV_MEMBASE
;
4201 } else if (sp
[1]->opcode
== OP_ICONST
) {
4202 int array_reg
= sp
[0]->dreg
;
4203 int index_reg
= sp
[1]->dreg
;
4204 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + MONO_STRUCT_OFFSET (MonoArray
, vector
);
4206 if (SIZEOF_REGISTER
== 8 && COMPILE_LLVM (cfg
) && sp
[1]->inst_c0
< 0)
4207 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, index_reg
, index_reg
);
4210 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
4211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), array_reg
, offset
, sp
[2]->dreg
);
4213 MonoInst
*addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], safety_checks
);
4214 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr
->dreg
, 0, sp
[2]->dreg
);
4215 if (mini_class_is_reference (klass
))
4216 mini_emit_write_barrier (cfg
, addr
, sp
[2]);
4223 mini_emit_memory_barrier (MonoCompile
*cfg
, int kind
)
4225 MonoInst
*ins
= NULL
;
4226 MONO_INST_NEW (cfg
, ins
, OP_MEMORY_BARRIER
);
4227 MONO_ADD_INS (cfg
->cbb
, ins
);
4228 ins
->backend
.memory_barrier_kind
= kind
;
4234 * This entry point could be used later for arbitrary method
4237 inline static MonoInst
*
4238 mini_redirect_call (MonoCompile
*cfg
, MonoMethod
*method
,
4239 MonoMethodSignature
*signature
, MonoInst
**args
, MonoInst
*this_ins
)
4241 if (method
->klass
== mono_defaults
.string_class
) {
4242 /* managed string allocation support */
4243 if (strcmp (method
->name
, "InternalAllocateStr") == 0 && !(cfg
->opt
& MONO_OPT_SHARED
)) {
4244 MonoInst
*iargs
[2];
4245 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, method
->klass
, cfg
->error
);
4246 MonoMethod
*managed_alloc
= NULL
;
4248 mono_error_assert_ok (cfg
->error
); /*Should not fail since it System.String*/
4249 #ifndef MONO_CROSS_COMPILE
4250 managed_alloc
= mono_gc_get_managed_allocator (method
->klass
, FALSE
, FALSE
);
4254 EMIT_NEW_VTABLECONST (cfg
, iargs
[0], vtable
);
4255 iargs
[1] = args
[0];
4256 return mono_emit_method_call (cfg
, managed_alloc
, iargs
, this_ins
);
4263 mono_save_args (MonoCompile
*cfg
, MonoMethodSignature
*sig
, MonoInst
**sp
)
4265 MonoInst
*store
, *temp
;
4268 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4269 MonoType
*argtype
= (sig
->hasthis
&& (i
== 0)) ? type_from_stack_type (*sp
) : sig
->params
[i
- sig
->hasthis
];
4272 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4273 * would be different than the MonoInst's used to represent arguments, and
4274 * the ldelema implementation can't deal with that.
4275 * Solution: When ldelema is used on an inline argument, create a var for
4276 * it, emit ldelema on that var, and emit the saving code below in
4277 * inline_method () if needed.
4279 temp
= mono_compile_create_var (cfg
, argtype
, OP_LOCAL
);
4280 cfg
->args
[i
] = temp
;
4281 /* This uses cfg->args [i] which is set by the preceeding line */
4282 EMIT_NEW_ARGSTORE (cfg
, store
, i
, *sp
);
4283 store
->cil_code
= sp
[0]->cil_code
;
4288 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4289 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4291 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4293 check_inline_called_method_name_limit (MonoMethod
*called_method
)
4296 static const char *limit
= NULL
;
4298 if (limit
== NULL
) {
4299 const char *limit_string
= g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4301 if (limit_string
!= NULL
)
4302 limit
= limit_string
;
4307 if (limit
[0] != '\0') {
4308 char *called_method_name
= mono_method_full_name (called_method
, TRUE
);
4310 strncmp_result
= strncmp (called_method_name
, limit
, strlen (limit
));
4311 g_free (called_method_name
);
4313 //return (strncmp_result <= 0);
4314 return (strncmp_result
== 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining into methods whose full name starts
 * with the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable's value.
 * NOTE(review): the 'else' branches were lost in extraction and restored;
 * diff against upstream mono/mini/method-to-ir.c.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
4352 emit_init_rvar (MonoCompile
*cfg
, int dreg
, MonoType
*rtype
)
4354 static double r8_0
= 0.0;
4355 static float r4_0
= 0.0;
4359 rtype
= mini_get_underlying_type (rtype
);
4363 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
4364 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
4365 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
4366 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
4367 MONO_EMIT_NEW_I8CONST (cfg
, dreg
, 0);
4368 } else if (cfg
->r4fp
&& t
== MONO_TYPE_R4
) {
4369 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
4370 ins
->type
= STACK_R4
;
4371 ins
->inst_p0
= (void*)&r4_0
;
4373 MONO_ADD_INS (cfg
->cbb
, ins
);
4374 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
4375 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
4376 ins
->type
= STACK_R8
;
4377 ins
->inst_p0
= (void*)&r8_0
;
4379 MONO_ADD_INS (cfg
->cbb
, ins
);
4380 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
4381 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (rtype
))) {
4382 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type_internal (rtype
));
4383 } else if (((t
== MONO_TYPE_VAR
) || (t
== MONO_TYPE_MVAR
)) && mini_type_var_is_vt (rtype
)) {
4384 MONO_EMIT_NEW_VZERO (cfg
, dreg
, mono_class_from_mono_type_internal (rtype
));
4386 MONO_EMIT_NEW_PCONST (cfg
, dreg
, NULL
);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar, but emits OP_DUMMY_* placeholder
 * initializations (no real stores) — used to keep the IR valid when a real
 * init is not wanted. Falls back to emit_init_rvar for types with no dummy form.
 * NOTE(review): some original lines (braces/first condition) were dropped by
 * the extraction; comments describe only the visible code.
 */
4391 emit_dummy_init_rvar (MonoCompile
*cfg
, int dreg
, MonoType
*rtype
)
4395 rtype
= mini_get_underlying_type (rtype
);
/* Pointer-like types. */
4399 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_PCONST
);
/* Small integer types (bool..u4). */
4400 } else if (t
>= MONO_TYPE_BOOLEAN
&& t
<= MONO_TYPE_U4
) {
4401 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_ICONST
);
4402 } else if (t
== MONO_TYPE_I8
|| t
== MONO_TYPE_U8
) {
4403 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_I8CONST
);
4404 } else if (cfg
->r4fp
&& t
== MONO_TYPE_R4
) {
4405 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_R4CONST
);
4406 } else if (t
== MONO_TYPE_R4
|| t
== MONO_TYPE_R8
) {
4407 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_R8CONST
);
/* Valuetypes, typedbyref, and generic instances over valuetypes. */
4408 } else if ((t
== MONO_TYPE_VALUETYPE
) || (t
== MONO_TYPE_TYPEDBYREF
) ||
4409 ((t
== MONO_TYPE_GENERICINST
) && mono_type_generic_inst_is_valuetype (rtype
))) {
4410 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_VZERO
);
4411 } else if (((t
== MONO_TYPE_VAR
) || (t
== MONO_TYPE_MVAR
)) && mini_type_var_is_vt (rtype
)) {
4412 MONO_EMIT_NEW_DUMMY_INIT (cfg
, dreg
, OP_DUMMY_VZERO
);
/* No dummy opcode for this type: emit a real initialization instead. */
4414 emit_init_rvar (cfg
, dreg
, rtype
);
4418 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. Under soft-float the value
 * is materialized into a fresh vreg and stored through EMIT_NEW_LOCSTORE
 * (presumably so the decompose pass sees a proper store — TODO confirm);
 * otherwise the local's dreg is initialized directly. When INIT is FALSE a
 * dummy (placeholder) initialization is emitted instead of a real one.
 */
4420 emit_init_local (MonoCompile
*cfg
, int local
, MonoType
*type
, gboolean init
)
4422 MonoInst
*var
= cfg
->locals
[local
];
4423 if (COMPILE_SOFT_FLOAT (cfg
)) {
/* Soft-float: init a temporary vreg, then store it into the local. */
4425 int reg
= alloc_dreg (cfg
, (MonoStackType
)var
->type
);
4426 emit_init_rvar (cfg
, reg
, type
);
/* The store consumes the last emitted instruction (the init just generated). */
4427 EMIT_NEW_LOCSTORE (cfg
, store
, local
, cfg
->cbb
->last_ins
);
/* Real initialization straight into the local's dreg. */
4430 emit_init_rvar (cfg
, var
->dreg
, type
);
/* init == FALSE: placeholder init only. */
4432 emit_dummy_init_rvar (cfg
, var
->dreg
, type
);
/*
 * mini_inline_method:
 *
 *   Public entry point for inlining: forwards all arguments unchanged to the
 * static inline_method () below and returns its cost result.
 */
4437 mini_inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
, guchar
*ip
, guint real_offset
, gboolean inline_always
)
4439 return inline_method (cfg
, cmethod
, fsig
, sp
, ip
, real_offset
, inline_always
);
4445 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current emission point. Saves the parts
 * of CFG that mono_method_to_ir () mutates (locals, args, cil mapping, current
 * bblock, generic context, ...), compiles CMETHOD's IL into a fresh
 * start/end bblock pair, restores the saved state, and either links/merges the
 * new bblocks into the graph (success) or discards them (abort).
 * NOTE(review): the extraction dropped many original lines (braces, returns,
 * several conditions); comments describe only the visible code.
 */
4448 inline_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**sp
,
4449 guchar
*ip
, guint real_offset
, gboolean inline_always
)
4452 MonoInst
*ins
, *rvar
= NULL
;
4453 MonoMethodHeader
*cheader
;
4454 MonoBasicBlock
*ebblock
, *sbblock
;
/* Saved copies of every cfg field that mono_method_to_ir () overwrites. */
4456 MonoInst
**prev_locals
, **prev_args
;
4457 MonoType
**prev_arg_types
;
4458 guint prev_real_offset
;
4459 GHashTable
*prev_cbb_hash
;
4460 MonoBasicBlock
**prev_cil_offset_to_bb
;
4461 MonoBasicBlock
*prev_cbb
;
4462 const guchar
*prev_ip
;
4463 guchar
*prev_cil_start
;
4464 guint32 prev_cil_offset_to_bb_len
;
4465 MonoMethod
*prev_current_method
;
4466 MonoGenericContext
*prev_generic_context
;
4467 gboolean ret_var_set
, prev_ret_var_set
, prev_disable_inline
, virtual_
= FALSE
;
/* Inlining must not start with a pending exception on the cfg. */
4469 g_assert (cfg
->exception_type
== MONO_EXCEPTION_NONE
);
/* Optional debugging filters limiting which callees/callers get inlined. */
4471 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4472 if ((! inline_always
) && ! check_inline_called_method_name_limit (cmethod
))
4475 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4476 if ((! inline_always
) && ! check_inline_caller_method_name_limit (cfg
->method
))
4481 fsig
= mono_method_signature_internal (cmethod
);
4483 if (cfg
->verbose_level
> 2)
4484 printf ("INLINE START %p %s -> %s\n", cmethod
, mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
/* First time we consider this method inlineable: count it and mark it. */
4486 if (!cmethod
->inline_info
) {
4487 cfg
->stat_inlineable_methods
++;
4488 cmethod
->inline_info
= 1;
4491 /* allocate local variables */
4492 cheader
= mono_method_get_header_checked (cmethod
, error
);
/* Header load failed: propagate the error only when inlining was mandatory. */
4494 if (inline_always
) {
4495 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
4496 mono_error_move (cfg
->error
, error
);
4498 mono_error_cleanup (error
);
4503 /*Must verify before creating locals as it can cause the JIT to assert.*/
4504 if (mono_compile_is_broken (cfg
, cmethod
, FALSE
)) {
4505 mono_metadata_free_mh (cheader
);
4509 /* allocate space to store the return value */
4510 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
4511 rvar
= mono_compile_create_var (cfg
, fsig
->ret
, OP_LOCAL
);
/* Swap in a fresh locals array for the callee's locals. */
4514 prev_locals
= cfg
->locals
;
4515 cfg
->locals
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, cheader
->num_locals
* sizeof (MonoInst
*));
4516 for (i
= 0; i
< cheader
->num_locals
; ++i
)
4517 cfg
->locals
[i
] = mono_compile_create_var (cfg
, cheader
->locals
[i
], OP_LOCAL
);
4519 /* allocate start and end blocks */
4520 /* This is needed so if the inline is aborted, we can clean up */
4521 NEW_BBLOCK (cfg
, sbblock
);
4522 sbblock
->real_offset
= real_offset
;
4524 NEW_BBLOCK (cfg
, ebblock
);
4525 ebblock
->block_num
= cfg
->num_bblocks
++;
4526 ebblock
->real_offset
= real_offset
;
/* Save all remaining cfg state the callee compilation will clobber. */
4528 prev_args
= cfg
->args
;
4529 prev_arg_types
= cfg
->arg_types
;
4530 prev_ret_var_set
= cfg
->ret_var_set
;
4531 prev_real_offset
= cfg
->real_offset
;
4532 prev_cbb_hash
= cfg
->cbb_hash
;
4533 prev_cil_offset_to_bb
= cfg
->cil_offset_to_bb
;
4534 prev_cil_offset_to_bb_len
= cfg
->cil_offset_to_bb_len
;
4535 prev_cil_start
= cfg
->cil_start
;
4537 prev_cbb
= cfg
->cbb
;
4538 prev_current_method
= cfg
->current_method
;
4539 prev_generic_context
= cfg
->generic_context
;
4540 prev_disable_inline
= cfg
->disable_inline
;
4542 cfg
->ret_var_set
= FALSE
;
4543 cfg
->inline_depth
++;
/* A non-static CALLVIRT callee needs a null check on 'this' (virtual_ flag). */
4545 if (ip
&& *ip
== CEE_CALLVIRT
&& !(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
))
/* Compile the callee's IL between sbblock and ebblock; negative cost = abort. */
4548 costs
= mono_method_to_ir (cfg
, cmethod
, sbblock
, ebblock
, rvar
, sp
, real_offset
, virtual_
);
4550 ret_var_set
= cfg
->ret_var_set
;
/* Restore everything saved above. */
4552 cfg
->real_offset
= prev_real_offset
;
4553 cfg
->cbb_hash
= prev_cbb_hash
;
4554 cfg
->cil_offset_to_bb
= prev_cil_offset_to_bb
;
4555 cfg
->cil_offset_to_bb_len
= prev_cil_offset_to_bb_len
;
4556 cfg
->cil_start
= prev_cil_start
;
4558 cfg
->locals
= prev_locals
;
4559 cfg
->args
= prev_args
;
4560 cfg
->arg_types
= prev_arg_types
;
4561 cfg
->current_method
= prev_current_method
;
4562 cfg
->generic_context
= prev_generic_context
;
4563 cfg
->ret_var_set
= prev_ret_var_set
;
4564 cfg
->disable_inline
= prev_disable_inline
;
4565 cfg
->inline_depth
--;
/* Accept the inline when the cost is under the threshold (60), it was forced,
 * or the callee is marked AggressiveInlining. */
4567 if ((costs
>= 0 && costs
< 60) || inline_always
|| (costs
>= 0 && (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
))) {
4568 if (cfg
->verbose_level
> 2)
4569 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg
->method
, TRUE
), mono_method_full_name (cmethod
, TRUE
));
4571 mono_error_assert_ok (cfg
->error
);
4573 cfg
->stat_inlined_methods
++;
4575 /* always add some code to avoid block split failures */
4576 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
4577 MONO_ADD_INS (prev_cbb
, ins
);
/* Splice the callee's entry block after the caller's current block. */
4579 prev_cbb
->next_bb
= sbblock
;
4580 link_bblock (cfg
, prev_cbb
, sbblock
);
4583 * Get rid of the begin and end bblocks if possible to aid local
4586 if (prev_cbb
->out_count
== 1)
4587 mono_merge_basic_blocks (cfg
, prev_cbb
, sbblock
);
4589 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] != ebblock
))
4590 mono_merge_basic_blocks (cfg
, prev_cbb
, prev_cbb
->out_bb
[0]);
/* Fold the end block into its single predecessor when the layout allows. */
4592 if ((ebblock
->in_count
== 1) && ebblock
->in_bb
[0]->out_count
== 1) {
4593 MonoBasicBlock
*prev
= ebblock
->in_bb
[0];
4595 if (prev
->next_bb
== ebblock
) {
4596 mono_merge_basic_blocks (cfg
, prev
, ebblock
);
4598 if ((prev_cbb
->out_count
== 1) && (prev_cbb
->out_bb
[0]->in_count
== 1) && (prev_cbb
->out_bb
[0] == prev
)) {
4599 mono_merge_basic_blocks (cfg
, prev_cbb
, prev
);
4600 cfg
->cbb
= prev_cbb
;
4603 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
4608 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessors of ebblock that never set it. */
4614 for (i
= 0; i
< ebblock
->in_count
; ++i
) {
4615 bb
= ebblock
->in_bb
[i
];
4617 if (bb
->last_ins
&& bb
->last_ins
->opcode
== OP_NOT_REACHED
) {
4620 emit_init_rvar (cfg
, rvar
->dreg
, fsig
->ret
);
4630 * If the inlined method contains only a throw, then the ret var is not
4631 * set, so set it to a dummy value.
4634 emit_init_rvar (cfg
, rvar
->dreg
, fsig
->ret
);
/* Load the return value so the caller finds it on the eval stack. */
4636 EMIT_NEW_TEMPLOAD (cfg
, ins
, rvar
->inst_c0
);
/* Header ownership passes to cfg; freed in bulk at the end of compilation. */
4639 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
/* --- Abort path: cost too high or callee compilation failed. --- */
4642 if (cfg
->verbose_level
> 2) {
4643 const char *msg
= mono_error_get_message (cfg
->error
);
4644 printf ("INLINE ABORTED %s (cost %d) %s\n", mono_method_full_name (cmethod
, TRUE
), costs
, msg
? msg
: "");
4646 cfg
->exception_type
= MONO_EXCEPTION_NONE
;
4648 clear_cfg_error (cfg
);
4650 /* This gets rid of the newly added bblocks */
4651 cfg
->cbb
= prev_cbb
;
4653 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, cheader
);
4658 * Some of these comments may well be out-of-date.
4659 * Design decisions: we do a single pass over the IL code (and we do bblock
4660 * splitting/merging in the few cases when it's required: a back jump to an IL
4661 * address that was not already seen as bblock starting point).
4662 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4663 * Complex operations are decomposed in simpler ones right away. We need to let the
4664 * arch-specific code peek and poke inside this process somehow (except when the
4665 * optimizations can take advantage of the full semantic info of coarse opcodes).
4666 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4667 * MonoInst->opcode initially is the IL opcode or some simplification of that
4668 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4669 * opcode with value bigger than OP_LAST.
4670 * At this point the IR can be handed over to an interpreter, a dumb code generator
4671 * or to the optimizing code generator that will translate it to SSA form.
4673 * Profiling directed optimizations.
4674 * We may compile by default with few or no optimizations and instrument the code
4675 * or the user may indicate what methods to optimize the most either in a config file
4676 * or through repeated runs where the compiler applies offline the optimizations to
4677 * each method and then decides if it was worth it.
4680 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4681 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4682 #define CHECK_STACK_OVF() if (((sp - stack_start) + 1) > header->max_stack) UNVERIFIED
4683 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4684 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4685 #define CHECK_OPSIZE(size) if ((size) < 1 || ip + (size) > end) UNVERIFIED
4686 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4687 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
4689 /* offset from br.s -> br like opcodes */
4690 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP still belongs to basic block BB, i.e.
 * the cil_offset_to_bb table maps it either to no block yet (NULL) or to BB
 * itself. Used to check that a peeked-ahead instruction is in the same bblock.
 */
4693 ip_in_bb (MonoCompile
*cfg
, MonoBasicBlock
*bb
, const guint8
* ip
)
4695 MonoBasicBlock
*b
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
];
4697 return b
== NULL
|| b
== bb
;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [start, end) and create a basic block (via GET_BBLOCK)
 * at every branch target and at the instruction following each branch, so the
 * CFG structure is known before the main IR emission pass. Also marks the
 * bblock containing a CEE_THROW as out-of-line (cold).
 * NOTE(review): some original lines (error check after mono_opcode_value,
 * several break statements, loop header) were dropped by the extraction.
 */
4701 get_basic_blocks (MonoCompile
*cfg
, MonoMethodHeader
* header
, guint real_offset
, guchar
*start
, guchar
*end
, guchar
**pos
)
4707 MonoBasicBlock
*bblock
;
4708 const MonoOpcode
*opcode
;
/* cli_addr is the IL offset of the current instruction. */
4711 cli_addr
= ip
- start
;
4712 i
= mono_opcode_value ((const guint8
**)&ip
, end
);
4715 opcode
= &mono_opcodes
[i
];
/* Dispatch on the operand encoding; only branch forms create bblocks. */
4716 switch (opcode
->argument
) {
4717 case MonoInlineNone
:
4720 case MonoInlineString
:
4721 case MonoInlineType
:
4722 case MonoInlineField
:
4723 case MonoInlineMethod
:
4726 case MonoShortInlineR
:
4733 case MonoShortInlineVar
:
4734 case MonoShortInlineI
:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
4737 case MonoShortInlineBrTarget
:
4738 target
= start
+ cli_addr
+ 2 + (signed char)ip
[1];
4739 GET_BBLOCK (cfg
, bblock
, target
);
/* Fall-through successor also starts a block. */
4742 GET_BBLOCK (cfg
, bblock
, ip
);
/* Long branch: 4-byte signed displacement. */
4744 case MonoInlineBrTarget
:
4745 target
= start
+ cli_addr
+ 5 + (gint32
)read32 (ip
+ 1);
4746 GET_BBLOCK (cfg
, bblock
, target
);
4749 GET_BBLOCK (cfg
, bblock
, ip
);
/* switch: n 32-bit targets, all relative to the end of the instruction. */
4751 case MonoInlineSwitch
: {
4752 guint32 n
= read32 (ip
+ 1);
4755 cli_addr
+= 5 + 4 * n
;
4756 target
= start
+ cli_addr
;
4757 GET_BBLOCK (cfg
, bblock
, target
);
4759 for (j
= 0; j
< n
; ++j
) {
4760 target
= start
+ cli_addr
+ (gint32
)read32 (ip
);
4761 GET_BBLOCK (cfg
, bblock
, target
);
4771 g_assert_not_reached ();
/* Mark the throwing bblock cold so the block layout moves it out of line. */
4774 if (i
== CEE_THROW
) {
4775 guchar
*bb_start
= ip
- 1;
4777 /* Find the start of the bblock containing the throw */
4779 while ((bb_start
>= start
) && !bblock
) {
4780 bblock
= cfg
->cil_offset_to_bb
[(bb_start
) - start
];
4784 bblock
->out_of_line
= 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN in the context of method M. For wrapper methods the
 * target is fetched from the wrapper data (and inflated with CONTEXT when
 * present); otherwise it is looked up in M's image metadata. May return a
 * method with open (uninstantiated) generic type — callers that cannot handle
 * that use mini_get_method () instead. On failure sets ERROR.
 */
4795 mini_get_method_allow_open (MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
, MonoError
*error
)
4801 if (m
->wrapper_type
!= MONO_WRAPPER_NONE
) {
/* Wrapper: the token indexes the wrapper's own data table, not metadata. */
4802 method
= (MonoMethod
*)mono_method_get_wrapper_data (m
, token
);
4804 method
= mono_class_inflate_generic_method_checked (method
, context
, error
);
/* Normal method: resolve the token through the image's metadata. */
4807 method
= mono_get_method_checked (m_class_get_image (m
->klass
), token
, klass
, context
, error
);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling without generic
 * sharing it rejects methods whose class is an open constructed type, flagging
 * the image as bad on cfg->error. CFG may be NULL (AOT/utility callers); in
 * that case a local error is used and silently cleaned up on failure.
 */
4814 mini_get_method (MonoCompile
*cfg
, MonoMethod
*m
, guint32 token
, MonoClass
*klass
, MonoGenericContext
*context
)
/* Resolve into cfg->error when a cfg is available, else into the local error. */
4817 MonoMethod
*method
= mini_get_method_allow_open (m
, token
, klass
, context
, cfg
? cfg
->error
: error
);
/* Open generic types are only legal while compiling gshared code. */
4819 if (method
&& cfg
&& !cfg
->gshared
&& mono_class_is_open_constructed_type (m_class_get_byval_arg (method
->klass
))) {
4820 mono_error_set_bad_image (cfg
->error
, m_class_get_image (cfg
->method
->klass
), "Method with open type while not compiling gshared");
4824 if (!method
&& !cfg
)
4825 mono_error_cleanup (error
); /* FIXME don't swallow the error */
/*
 * mini_get_signature:
 *
 *   Resolve a standalone-signature TOKEN relative to METHOD: from the wrapper
 * data table for wrappers, otherwise parsed from the image's metadata. The
 * result is then inflated with CONTEXT. Returns NULL and sets ERROR on failure.
 */
4830 static MonoMethodSignature
*
4831 mini_get_signature (MonoMethod
*method
, guint32 token
, MonoGenericContext
*context
, MonoError
*error
)
4833 MonoMethodSignature
*fsig
;
4836 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
/* Wrapper: token indexes the wrapper data, not image metadata. */
4837 fsig
= (MonoMethodSignature
*)mono_method_get_wrapper_data (method
, token
);
4839 fsig
= mono_metadata_parse_signature_checked (m_class_get_image (method
->klass
), token
, error
);
4840 return_val_if_nok (error
, NULL
);
/* Apply the generic context (no-op when context is NULL — TODO confirm). */
4843 fsig
= mono_inflate_generic_signature(fsig
, context
, error
);
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager::ThrowException(Exception) method,
 * looked up once and cached in a function-local static.
 */
4849 throw_exception (void)
4851 static MonoMethod
*method
= NULL
;
4854 MonoSecurityManager
*secman
= mono_security_manager_get_methods ();
/* "ThrowException", 1 parameter; trailing 0 is the lookup flags. */
4855 method
= get_method_nofail (secman
->securitymanager
, "ThrowException", 1, 0);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX at runtime, by
 * calling the managed SecurityManager::ThrowException helper with EX as a
 * pointer constant argument.
 */
4862 emit_throw_exception (MonoCompile
*cfg
, MonoException
*ex
)
4864 MonoMethod
*thrower
= throw_exception ();
4867 EMIT_NEW_PCONST (cfg
, args
[0], ex
);
4868 mono_emit_method_call (cfg
, thrower
, args
, NULL
);
4872 * Return the original method if a wrapper is specified. We can only access
4873 * the custom attributes from the original method.
4876 get_original_method (MonoMethod
*method
)
/* Not a wrapper: the method is already the original. */
4878 if (method
->wrapper_type
== MONO_WRAPPER_NONE
)
4881 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4882 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
)
4885 /* in other cases we need to find the original method */
4886 return mono_marshal_method_from_wrapper (method
);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) is
 * not allowed to access FIELD, emit IR that throws the returned security
 * exception at runtime.
 */
4890 ensure_method_is_allowed_to_access_field (MonoCompile
*cfg
, MonoMethod
*caller
, MonoClassField
*field
)
4892 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4893 MonoException
*ex
= mono_security_core_clr_is_field_access_allowed (get_original_method (caller
), field
);
4895 emit_throw_exception (cfg
, ex
);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) may
 * not call CALLEE, emit IR that throws the returned security exception at
 * runtime. Mirrors ensure_method_is_allowed_to_access_field above.
 */
4899 ensure_method_is_allowed_to_call_method (MonoCompile
*cfg
, MonoMethod
*caller
, MonoMethod
*callee
)
4901 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4902 MonoException
*ex
= mono_security_core_clr_is_call_allowed (get_original_method (caller
), callee
);
4904 emit_throw_exception (cfg
, ex
);
/*
 * il_read_op:
 *
 *   Peek at the IL stream: if the instruction at IP starts with FIRST_BYTE and
 * decodes to DESIRED_IL_OP, return the address of the following instruction,
 * otherwise NULL. Never reads past END.
 */
4908 il_read_op (guchar
*ip
, guchar
*end
, guchar first_byte
, MonoOpcodeEnum desired_il_op
)
4909 // If ip is desired_il_op, return the next ip, else NULL.
/* Cheap first-byte check before the full decode. */
4911 if (G_LIKELY (ip
< end
) && G_UNLIKELY (*ip
== first_byte
)) {
4912 MonoOpcodeEnum il_op
= MonoOpcodeEnum_Invalid
;
4913 // mono_opcode_value_and_size updates ip, but not in the expected way.
4914 const guchar
*temp_ip
= ip
;
4915 const int size
= mono_opcode_value_and_size (&temp_ip
, end
, &il_op
);
/* size > 0 means a valid decode; advance by the full instruction size. */
4916 return (G_LIKELY (size
> 0) && G_UNLIKELY (il_op
== desired_il_op
)) ? (ip
+ size
) : NULL
;
/*
 * il_read_op_and_token:
 *
 *   Like il_read_op (), but the matched instruction carries a 32-bit metadata
 * token as its operand; the token is read from the last 4 bytes of the
 * instruction and stored in *TOKEN. Returns the next ip, or NULL on mismatch.
 */
4922 il_read_op_and_token (guchar
*ip
, guchar
*end
, guchar first_byte
, MonoOpcodeEnum desired_il_op
, guint32
*token
)
4924 ip
= il_read_op (ip
, end
, first_byte
, desired_il_op
);
/* ip now points past the instruction; the token is its trailing 4 bytes. */
4926 *token
= read32 (ip
- 4); // could be +1 or +2 from start
/*
 * il_read_branch_and_target:
 *
 *   Match a branch instruction at IP (see il_read_op). SIZE selects the
 * displacement encoding: 1 byte (short form, read from ip[-1]) or 4 bytes
 * (long form, read from ip-4). On match stores the absolute branch target
 * (next ip + displacement) in *TARGET; returns the next ip or NULL.
 */
4931 il_read_branch_and_target (guchar
*ip
, guchar
*end
, guchar first_byte
, MonoOpcodeEnum desired_il_op
, int size
, guchar
**target
)
4933 ip
= il_read_op (ip
, end
, first_byte
, desired_il_op
);
/* Short form: signed 8-bit displacement. */
4938 delta
= (signed char)ip
[-1];
/* Long form: signed 32-bit displacement. */
4941 delta
= (gint32
)read32 (ip
- 4);
4944 // FIXME verify it is within the function and start of an instruction.
4945 *target
= ip
+ delta
;
4951 #define il_read_brtrue(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE, MONO_CEE_BRTRUE, 4, target))
4952 #define il_read_brtrue_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRTRUE_S, MONO_CEE_BRTRUE_S, 1, target))
4953 #define il_read_brfalse(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE, MONO_CEE_BRFALSE, 4, target))
4954 #define il_read_brfalse_s(ip, end, target) (il_read_branch_and_target (ip, end, CEE_BRFALSE_S, MONO_CEE_BRFALSE_S, 1, target))
4955 #define il_read_dup(ip, end) (il_read_op (ip, end, CEE_DUP, MONO_CEE_DUP))
4956 #define il_read_newobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_NEW_OBJ, MONO_CEE_NEWOBJ, token))
4957 #define il_read_ldtoken(ip, end, token) (il_read_op_and_token (ip, end, CEE_LDTOKEN, MONO_CEE_LDTOKEN, token))
4958 #define il_read_call(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALL, MONO_CEE_CALL, token))
4959 #define il_read_callvirt(ip, end, token) (il_read_op_and_token (ip, end, CEE_CALLVIRT, MONO_CEE_CALLVIRT, token))
4960 #define il_read_initobj(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_INITOBJ, token))
4961 #define il_read_constrained(ip, end, token) (il_read_op_and_token (ip, end, CEE_PREFIX1, MONO_CEE_CONSTRAINED_, token))
4962 #define il_read_unbox_any(ip, end, token) (il_read_op_and_token (ip, end, CEE_UNBOX_ANY, MONO_CEE_UNBOX_ANY, token))
4965 * Check that the IL instructions at ip are the array initialization
4966 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the dup / ldtoken <fielddef> / call RuntimeHelpers::
 * InitializeArray sequence following a newarr, and on success resolve the
 * static field's RVA-mapped data blob (or its RVA for AOT) so the caller can
 * replace the call with a direct memory copy. All matched instructions must
 * stay within the current bblock (ip_in_bb).
 * NOTE(review): the extraction dropped several lines here (size computation
 * per element type, the out-parameter stores, returns); comments describe
 * only the visible code.
 */
4969 initialize_array_data (MonoCompile
*cfg
, MonoMethod
*method
, gboolean aot
, guchar
*ip
,
4970 guchar
*end
, MonoClass
*klass
, guint32 len
, int *out_size
,
4971 guint32
*out_field_token
, MonoOpcodeEnum
*il_op
, guchar
**next_ip
)
4974 * newarr[System.Int32]
4976 * ldtoken field valuetype ...
4977 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4981 guint32 field_token
;
/* Match dup; ldtoken (a field def); call — all in the current bblock. */
4983 if ((ip
= il_read_dup (ip
, end
))
4984 && ip_in_bb (cfg
, cfg
->cbb
, ip
)
4985 && (ip
= il_read_ldtoken (ip
, end
, &field_token
))
4986 && IS_FIELD_DEF (field_token
)
4987 && ip_in_bb (cfg
, cfg
->cbb
, ip
)
4988 && (ip
= il_read_call (ip
, end
, &token
))) {
4991 const char *data_ptr
;
4993 MonoMethod
*cmethod
;
4994 MonoClass
*dummy_class
;
4995 MonoClassField
*field
= mono_field_from_token_checked (m_class_get_image (method
->klass
), field_token
, &dummy_class
, NULL
, error
);
4999 mono_error_cleanup (error
); /* FIXME don't swallow the error */
5003 *out_field_token
= field_token
;
5005 cmethod
= mini_get_method (NULL
, method
, token
, NULL
, NULL
);
/* Only the corlib RuntimeHelpers::InitializeArray qualifies. */
5008 if (strcmp (cmethod
->name
, "InitializeArray") || strcmp (m_class_get_name (cmethod
->klass
), "RuntimeHelpers") || m_class_get_image (cmethod
->klass
) != mono_defaults
.corlib
)
5010 switch (mini_get_underlying_type (m_class_get_byval_arg (klass
))->type
) {
5014 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5015 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* The blob must be at least as large as the computed array payload. */
5032 if (size
> mono_type_size (field
->type
, &dummy_align
))
5035 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5036 MonoImage
*method_klass_image
= m_class_get_image (method
->klass
);
5037 if (!image_is_dynamic (method_klass_image
)) {
/* Static image: map the field's RVA to get the raw initializer bytes. */
5038 guint32 field_index
= mono_metadata_token_index (field_token
);
5039 mono_metadata_field_info (method_klass_image
, field_index
- 1, NULL
, &rva
, NULL
);
5040 data_ptr
= mono_image_rva_map (method_klass_image
, rva
);
5041 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5042 /* for aot code we do the lookup on load */
5043 if (aot
&& data_ptr
)
5044 data_ptr
= (const char *)GUINT_TO_POINTER (rva
);
5046 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: the data lives in the field object itself. */
5048 data_ptr
= mono_field_get_data (field
);
/* Report the consumed final instruction to the main loop. */
5052 *il_op
= MONO_CEE_CALL
;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD: the message contains the method's full name plus either the
 * header-parse error, "method body is empty.", or a disassembly of the
 * offending instruction. The header is queued on headers_to_free.
 */
5060 set_exception_type_from_invalid_il (MonoCompile
*cfg
, MonoMethod
*method
, guchar
*ip
)
5063 char *method_fname
= mono_method_full_name (method
, TRUE
);
5065 MonoMethodHeader
*header
= mono_method_get_header_checked (method
, error
);
/* Header unavailable: explain why instead of disassembling. */
5068 method_code
= g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (error
));
5069 mono_error_cleanup (error
);
5070 } else if (header
->code_size
== 0)
5071 method_code
= g_strdup ("method body is empty.");
/* Normal case: disassemble the single offending instruction at ip. */
5073 method_code
= mono_disasm_code_one (NULL
, method
, ip
, NULL
);
5074 mono_cfg_set_exception_invalid_program (cfg
, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname
, method_code
));
5075 g_free (method_fname
);
5076 g_free (method_code
);
5077 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
/*
 * mono_type_to_stloc_coerce:
 *
 *   Return the narrowing-conversion opcode (OP_ICONV_TO_{I1,U1,I2,U2}) that a
 * stloc into a local of TYPE requires, or 0 when no coercion is needed (the
 * zero-return lines were dropped by the extraction — the visible reference
 * types and pointers all need none). Enums recurse on their base type;
 * unknown types abort via g_error.
 */
5081 mono_type_to_stloc_coerce (MonoType
*type
)
5086 type
= mini_get_underlying_type (type
);
5088 switch (type
->type
) {
/* Sub-int32 integer locals need an explicit truncation/extension. */
5090 return OP_ICONV_TO_I1
;
5092 return OP_ICONV_TO_U1
;
5094 return OP_ICONV_TO_I2
;
5096 return OP_ICONV_TO_U2
;
/* Pointer-sized and reference types: no coercion. */
5102 case MONO_TYPE_FNPTR
:
5103 case MONO_TYPE_CLASS
:
5104 case MONO_TYPE_STRING
:
5105 case MONO_TYPE_OBJECT
:
5106 case MONO_TYPE_SZARRAY
:
5107 case MONO_TYPE_ARRAY
:
5112 case MONO_TYPE_TYPEDBYREF
:
5113 case MONO_TYPE_GENERICINST
:
/* Enums coerce like their underlying integer type. */
5115 case MONO_TYPE_VALUETYPE
:
5116 if (m_class_is_enumtype (type
->data
.klass
)) {
5117 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
5122 case MONO_TYPE_MVAR
: //TODO I believe we don't need to handle gsharedvt as there won't be match and, for example, u1 is not covariant to u32
5125 g_error ("unknown type 0x%02x in mono_type_to_stloc_coerce", type
->type
);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the top-of-stack value *SP into local N. Inserts the
 * truncation opcode required by the local's type (mono_type_to_stloc_coerce),
 * skipping it when the previous instruction already performed the identical
 * coercion. Also folds reg-reg moves of fresh constants directly into the
 * local's dreg.
 */
5131 emit_stloc_ir (MonoCompile
*cfg
, MonoInst
**sp
, MonoMethodHeader
*header
, int n
)
5134 guint32 coerce_op
= mono_type_to_stloc_coerce (header
->locals
[n
]);
/* If the last emitted ins is already the needed coercion, don't repeat it. */
5137 if (cfg
->cbb
->last_ins
== sp
[0] && sp
[0]->opcode
== coerce_op
) {
5138 if (cfg
->verbose_level
> 2)
5139 printf ("Found existing coercing is enough for stloc\n");
/* Emit the coercion and let it replace the value being stored. */
5141 MONO_INST_NEW (cfg
, ins
, coerce_op
);
5142 ins
->dreg
= alloc_ireg (cfg
);
5143 ins
->sreg1
= sp
[0]->dreg
;
5144 ins
->type
= STACK_I4
;
5145 ins
->klass
= mono_class_from_mono_type_internal (header
->locals
[n
]);
5146 MONO_ADD_INS (cfg
->cbb
, ins
);
5147 *sp
= mono_decompose_opcode (cfg
, ins
);
5152 guint32 opcode
= mono_type_to_regmove (cfg
, header
->locals
[n
]);
/* Constant just emitted + plain move: retarget the constant's dreg instead. */
5153 if ((opcode
== OP_MOVE
) && cfg
->cbb
->last_ins
== sp
[0] &&
5154 ((sp
[0]->opcode
== OP_ICONST
) || (sp
[0]->opcode
== OP_I8CONST
))) {
5155 /* Optimize reg-reg moves away */
5157 * Can't optimize other opcodes, since sp[0] might point to
5158 * the last ins of a decomposed opcode.
5160 sp
[0]->dreg
= (cfg
)->locals
[n
]->dreg
;
/* General case: a real local store. */
5162 EMIT_NEW_LOCSTORE (cfg
, ins
, n
, *sp
);
/*
 * emit_starg_ir:
 *
 *   Emit the store of the top-of-stack value *SP into argument N. Same
 * coercion logic as emit_stloc_ir, but keyed off cfg->arg_types and ending in
 * an ARGSTORE instead of a LOCSTORE.
 */
5167 emit_starg_ir (MonoCompile
*cfg
, MonoInst
**sp
, int n
)
5170 guint32 coerce_op
= mono_type_to_stloc_coerce (cfg
->arg_types
[n
]);
/* Skip the truncation when the previous instruction already did it. */
5173 if (cfg
->cbb
->last_ins
== sp
[0] && sp
[0]->opcode
== coerce_op
) {
5174 if (cfg
->verbose_level
> 2)
5175 printf ("Found existing coercing is enough for starg\n");
5177 MONO_INST_NEW (cfg
, ins
, coerce_op
);
5178 ins
->dreg
= alloc_ireg (cfg
);
5179 ins
->sreg1
= sp
[0]->dreg
;
5180 ins
->type
= STACK_I4
;
5181 ins
->klass
= mono_class_from_mono_type_internal (cfg
->arg_types
[n
]);
5182 MONO_ADD_INS (cfg
->cbb
, ins
);
5183 *sp
= mono_decompose_opcode (cfg
, ins
);
/* Store the (possibly coerced) value into argument N. */
5187 EMIT_NEW_ARGSTORE (cfg
, ins
, n
, *sp
);
5191 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj <type>": instead of taking the local's
 * address, directly emit the local initialization (emit_init_local), which
 * keeps the local address-free and optimizable. Returns via the dropped tail
 * lines whether the pattern was consumed — TODO confirm against full source.
 */
5195 emit_optimized_ldloca_ir (MonoCompile
*cfg
, guchar
*ip
, guchar
*end
, int local
)
/* Match a following initobj in the same bblock. */
5203 if ((ip
= il_read_initobj (ip
, end
, &token
)) && ip_in_bb (cfg
, cfg
->cbb
, start
+ 1)) {
5204 /* From the INITOBJ case */
5205 klass
= mini_get_class (cfg
->current_method
, token
, cfg
->generic_context
);
5206 CHECK_TYPELOAD (klass
);
5207 type
= mini_get_underlying_type (m_class_get_byval_arg (klass
));
5208 emit_init_local (cfg
, local
, type
, TRUE
);
/*
 * handle_call_res_devirt:
 *
 *   For calls to corlib's EqualityComparer<T>.get_Default, refine the static
 * type of the call result: when T implements IEquatable<T> (and is not byte
 * or string, which use specialized comparers), wrap the result in an
 * OP_TYPED_OBJREF typed as GenericEqualityComparer<T>, enabling later
 * devirtualization of Equals () calls. Returns the (possibly retyped) result.
 */
5216 handle_call_res_devirt (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoInst
*call_res
)
5219 * Devirt EqualityComparer.Default.Equals () calls for some types.
5220 * The corefx code expects these calls to be devirtualized.
5221 * This depends on the implementation of EqualityComparer.Default, which is
5222 * in mcs/class/referencesource/mscorlib/system/collections/generic/equalitycomparer.cs
/* Only corlib's EqualityComparer`1.get_Default is handled. */
5224 if (m_class_get_image (cmethod
->klass
) == mono_defaults
.corlib
&&
5225 !strcmp (m_class_get_name (cmethod
->klass
), "EqualityComparer`1") &&
5226 !strcmp (cmethod
->name
, "get_Default")) {
/* T = the single generic argument of the comparer class. */
5227 MonoType
*param_type
= mono_class_get_generic_class (cmethod
->klass
)->context
.class_inst
->type_argv
[0];
5229 MonoGenericContext ctx
;
5230 MonoType
*args
[16];
5233 memset (&ctx
, 0, sizeof (ctx
));
/* Build IEquatable<T> to test whether T implements it. */
5235 args
[0] = param_type
;
5236 ctx
.class_inst
= mono_metadata_get_generic_inst (1, args
);
5238 inst
= mono_class_inflate_generic_class_checked (mono_class_get_iequatable_class (), &ctx
, error
);
5239 mono_error_assert_ok (error
);
5241 /* EqualityComparer<T>.Default returns specific types depending on T */
5243 /* 1. Implements IEquatable<T> */
5245 * Can't use this for string/byte as it might use a different comparer:
5247 * // Specialize type byte for performance reasons
5248 * if (t == typeof(byte)) {
5249 * return (EqualityComparer<T>)(object)(new ByteEqualityComparer());
5252 * // Breaks .net serialization compatibility
5253 * if (t == typeof (string))
5254 * return (EqualityComparer<T>)(object)new InternalStringComparer ();
5257 if (mono_class_is_assignable_from_internal (inst
, mono_class_from_mono_type_internal (param_type
)) && param_type
->type
!= MONO_TYPE_U1
&& param_type
->type
!= MONO_TYPE_STRING
) {
5258 MonoInst
*typed_objref
;
5259 MonoClass
*gcomparer_inst
;
/* Inflate GenericEqualityComparer<T> — the concrete type get_Default returns. */
5261 memset (&ctx
, 0, sizeof (ctx
));
5263 args
[0] = param_type
;
5264 ctx
.class_inst
= mono_metadata_get_generic_inst (1, args
);
5266 MonoClass
*gcomparer
= mono_class_get_geqcomparer_class ();
5267 g_assert (gcomparer
);
5268 gcomparer_inst
= mono_class_inflate_generic_class_checked (gcomparer
, &ctx
, error
);
5269 mono_error_assert_ok (error
);
/* Retype the call result: same vreg value, more precise klass. */
5271 MONO_INST_NEW (cfg
, typed_objref
, OP_TYPED_OBJREF
);
5272 typed_objref
->type
= STACK_OBJ
;
5273 typed_objref
->dreg
= alloc_ireg_ref (cfg
);
5274 typed_objref
->sreg1
= call_res
->dreg
;
5275 typed_objref
->klass
= gcomparer_inst
;
5276 MONO_ADD_INS (cfg
->cbb
, typed_objref
);
5278 call_res
= typed_objref
;
5280 /* Force decompose */
5281 cfg
->flags
|= MONO_CFG_NEEDS_DECOMPOSE
;
5282 cfg
->cbb
->needs_decompose
= TRUE
;
/*
 * is_exception_class:
 *
 *   Return whether KLASS derives from (or is) System.Exception. Uses the fast
 * supertypes-array check when it is initialized; otherwise walks the parent
 * chain (loop header dropped by the extraction).
 */
5290 is_exception_class (MonoClass
*klass
)
/* Fast path: supertypes array is populated, O(1) ancestor check. */
5292 if (G_LIKELY (m_class_get_supertypes (klass
)))
5293 return mono_class_has_parent_fast (klass
, mono_defaults
.exception_class
);
/* Slow path: walk up the inheritance chain. */
5295 if (klass
== mono_defaults
.exception_class
)
5297 klass
= m_class_get_parent (klass
);
5303 * is_jit_optimizer_disabled:
5305 * Determine whether M's assembly has a DebuggableAttribute with the
5306 * IsJITOptimizerDisabled flag set.
/*
 * The answer is computed once per assembly and cached in
 * ass->jit_optimizer_disabled, guarded by ass->jit_optimizer_disabled_inited
 * with memory barriers for lock-free publication.
 */
5309 is_jit_optimizer_disabled (MonoMethod
*m
)
5312 MonoAssembly
*ass
= m_class_get_image (m
->klass
)->assembly
;
5313 MonoCustomAttrInfo
* attrs
;
5316 gboolean val
= FALSE
;
/* Cached result available: return it. */
5319 if (ass
->jit_optimizer_disabled_inited
)
5320 return ass
->jit_optimizer_disabled
;
5322 klass
= mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class missing: record FALSE and publish. */
5326 ass
->jit_optimizer_disabled
= FALSE
;
/* Barrier: the value must be visible before the inited flag. */
5327 mono_memory_barrier ();
5328 ass
->jit_optimizer_disabled_inited
= TRUE
;
5332 attrs
= mono_custom_attrs_from_assembly_checked (ass
, FALSE
, error
);
5333 mono_error_cleanup (error
); /* FIXME don't swallow the error */
/* Scan assembly-level attributes for a DebuggableAttribute ctor. */
5335 for (i
= 0; i
< attrs
->num_attrs
; ++i
) {
5336 MonoCustomAttrEntry
*attr
= &attrs
->attrs
[i
];
5338 MonoMethodSignature
*sig
;
5340 if (!attr
->ctor
|| attr
->ctor
->klass
!= klass
)
5342 /* Decode the attribute. See reflection.c */
5343 p
= (const char*)attr
->data
;
/* Custom-attribute blobs start with the 0x0001 prolog. */
5344 g_assert (read16 (p
) == 0x0001);
5347 // FIXME: Support named parameters
5348 sig
= mono_method_signature_internal (attr
->ctor
);
/* Only the (bool, bool) ctor overload is decoded here. */
5349 if (sig
->param_count
!= 2 || sig
->params
[0]->type
!= MONO_TYPE_BOOLEAN
|| sig
->params
[1]->type
!= MONO_TYPE_BOOLEAN
)
5351 /* Two boolean arguments */
5355 mono_custom_attrs_free (attrs
);
/* Publish the computed value (value first, then the inited flag). */
5358 ass
->jit_optimizer_disabled
= val
;
5359 mono_memory_barrier ();
5360 ass
->jit_optimizer_disabled_inited
= TRUE
;
/*
 * mono_is_supported_tailcall_helper:
 *
 *   Trace helper: log the stringified condition SVALUE via
 * mono_tailcall_print and pass VALUE through (return dropped by extraction).
 */
5366 mono_is_supported_tailcall_helper (gboolean value
, const char *svalue
)
5369 mono_tailcall_print ("%s %s\n", __func__
, svalue
);
/*
 * mono_is_not_supported_tailcall_helper:
 *
 *   Trace helper behind IS_NOT_SUPPORTED_TAILCALL: returns VALUE unchanged,
 * and when it is TRUE (i.e. the condition inhibits a tailcall) and tailcall
 * tracing is enabled, prints caller/callee names and the stringified
 * condition, parenthesized only if it contains a space.
 */
5374 mono_is_not_supported_tailcall_helper (gboolean value
, const char *svalue
, MonoMethod
*method
, MonoMethod
*cmethod
)
5376 // Return value, printing if it inhibits tailcall.
5378 if (value
&& mono_tailcall_print_enabled ()) {
/* Wrap multi-token expressions in parens for readable trace output. */
5379 const char *lparen
= strchr (svalue
, ' ') ? "(" : "";
5380 const char *rparen
= *lparen
? ")" : "";
5381 mono_tailcall_print ("%s %s -> %s %s%s%s:%d\n", __func__
, method
->name
, cmethod
->name
, lparen
, svalue
, rparen
, value
);
5386 #define IS_NOT_SUPPORTED_TAILCALL(x) (mono_is_not_supported_tailcall_helper((x), #x, method, cmethod))
5389 is_supported_tailcall (MonoCompile
*cfg
, const guint8
*ip
, MonoMethod
*method
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
,
5390 gboolean virtual_
, gboolean extra_arg
, gboolean
*ptailcall_calli
)
5392 // Some checks apply to "regular", some to "calli", some to both.
5393 // To ease burden on caller, always compute regular and calli.
5395 gboolean tailcall
= TRUE
;
5396 gboolean tailcall_calli
= TRUE
;
5398 if (IS_NOT_SUPPORTED_TAILCALL (virtual_
&& !cfg
->backend
->have_op_tailcall_membase
))
5401 if (IS_NOT_SUPPORTED_TAILCALL (!cfg
->backend
->have_op_tailcall_reg
))
5402 tailcall_calli
= FALSE
;
5404 if (!tailcall
&& !tailcall_calli
)
5407 // FIXME in calli, there is no type for for the this parameter,
5408 // so we assume it might be valuetype; in future we should issue a range
5409 // check, so rule out pointing to frame (for other reference parameters also)
5411 if ( IS_NOT_SUPPORTED_TAILCALL (cmethod
&& fsig
->hasthis
&& m_class_is_valuetype (cmethod
->klass
)) // This might point to the current method's stack. Emit range check?
5412 || IS_NOT_SUPPORTED_TAILCALL (cmethod
&& (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
))
5413 || IS_NOT_SUPPORTED_TAILCALL (fsig
->pinvoke
) // i.e. if !cmethod (calli)
5414 || IS_NOT_SUPPORTED_TAILCALL (cfg
->method
->save_lmf
)
5415 || IS_NOT_SUPPORTED_TAILCALL (!cmethod
&& fsig
->hasthis
) // FIXME could be valuetype to current frame; range check
5416 || IS_NOT_SUPPORTED_TAILCALL (cmethod
&& cmethod
->wrapper_type
&& cmethod
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
)
5418 // http://www.mono-project.com/docs/advanced/runtime/docs/generic-sharing/
5420 // 1. Non-generic non-static methods of reference types have access to the
5421 // RGCTX via the “this” argument (this->vtable->rgctx).
5422 // 2. a Non-generic static methods of reference types and b. non-generic methods
5423 // of value types need to be passed a pointer to the caller’s class’s VTable in the MONO_ARCH_RGCTX_REG register.
5424 // 3. Generic methods need to be passed a pointer to the MRGCTX in the MONO_ARCH_RGCTX_REG register
5426 // That is what vtable_arg is here (always?).
5428 // Passing vtable_arg uses (requires?) a volatile non-parameter register,
5429 // such as AMD64 rax, r10, r11, or the return register on many architectures.
5430 // ARM32 does not always clearly have such a register. ARM32's return register
5431 // is a parameter register.
5432 // iPhone could use r9 except on old systems. iPhone/ARM32 is not particularly
5433 // important. Linux/arm32 is less clear.
5434 // ARM32's scratch r12 might work but only with much collateral change.
5436 // Imagine F1 calls F2, and F2 tailcalls F3.
5437 // F2 and F3 are managed. F1 is native.
5438 // Without a tailcall, F2 can save and restore everything needed for F1.
5439 // However if the extra parameter were in a non-volatile, such as ARM32 V5/R8,
5440 // F3 cannot easily restore it for F1, in the current scheme. The current
5441 // scheme where the extra parameter is not merely an extra parameter, but
5442 // passed "outside of the ABI".
5444 // If all native to managed transitions are intercepted and wrapped (w/o tailcall),
5445 // then they can preserve this register and the rest of the managed callgraph
5446 // treat it as volatile.
5448 // Interface method dispatch has the same problem (imt_arg).
5450 || IS_NOT_SUPPORTED_TAILCALL (extra_arg
&& !cfg
->backend
->have_volatile_non_param_register
)
5451 || IS_NOT_SUPPORTED_TAILCALL (cfg
->gsharedvt
)
5453 tailcall_calli
= FALSE
;
5458 for (int i
= 0; i
< fsig
->param_count
; ++i
) {
5459 if (IS_NOT_SUPPORTED_TAILCALL (fsig
->params
[i
]->byref
|| fsig
->params
[i
]->type
== MONO_TYPE_PTR
|| fsig
->params
[i
]->type
== MONO_TYPE_FNPTR
)) {
5460 tailcall_calli
= FALSE
;
5461 tailcall
= FALSE
; // These can point to the current method's stack. Emit range check?
5466 MonoMethodSignature
*caller_signature
;
5467 MonoMethodSignature
*callee_signature
;
5468 caller_signature
= mono_method_signature_internal (method
);
5469 callee_signature
= cmethod
? mono_method_signature_internal (cmethod
) : fsig
;
5471 g_assert (caller_signature
);
5472 g_assert (callee_signature
);
5474 // Require an exact match on return type due to various conversions in emit_move_return_value that would be skipped.
5475 // The main troublesome conversions are double <=> float.
5476 // CoreCLR allows some conversions here, such as integer truncation.
5477 // As well I <=> I[48] and U <=> U[48] would be ok, for matching size.
5478 if (IS_NOT_SUPPORTED_TAILCALL (mini_get_underlying_type (caller_signature
->ret
)->type
!= mini_get_underlying_type (callee_signature
->ret
)->type
)
5479 || IS_NOT_SUPPORTED_TAILCALL (!mono_arch_tailcall_supported (cfg
, caller_signature
, callee_signature
, virtual_
))) {
5480 tailcall_calli
= FALSE
;
5485 /* Debugging support */
5487 if (!mono_debug_count ()) {
5488 tailcall_calli
= FALSE
;
5493 // See check_sp in mini_emit_calli_full.
5494 if (tailcall_calli
&& IS_NOT_SUPPORTED_TAILCALL (mini_should_check_stack_pointer (cfg
)))
5495 tailcall_calli
= FALSE
;
5497 mono_tailcall_print ("tail.%s %s -> %s tailcall:%d tailcall_calli:%d gshared:%d extra_arg:%d virtual_:%d\n",
5498 mono_opcode_name (*ip
), method
->name
, cmethod
? cmethod
->name
: "calli", tailcall
, tailcall_calli
,
5499 cfg
->gshared
, extra_arg
, virtual_
);
5501 *ptailcall_calli
= tailcall_calli
;
5506 * is_addressable_valuetype_load
5508 * Returns true if a previous load can be done without doing an extra copy, given the new instruction ip and the type of the object being loaded ldtype
5511 is_addressable_valuetype_load (MonoCompile
* cfg
, guint8
* ip
, MonoType
* ldtype
)
5513 /* Avoid loading a struct just to load one of its fields */
5514 gboolean is_load_instruction
= (*ip
== CEE_LDFLD
);
5515 gboolean is_in_previous_bb
= ip_in_bb(cfg
, cfg
->cbb
, ip
);
5516 gboolean is_struct
= MONO_TYPE_ISSTRUCT(ldtype
);
5517 return is_load_instruction
&& is_in_previous_bb
&& is_struct
;
5523 * Handle calls made to ctors from NEWOBJ opcodes.
5526 handle_ctor_call (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, int context_used
,
5527 MonoInst
**sp
, guint8
*ip
, int *inline_costs
)
5529 MonoInst
*vtable_arg
= NULL
, *callvirt_this_arg
= NULL
, *ins
;
5531 if (m_class_is_valuetype (cmethod
->klass
) && mono_class_generic_sharing_enabled (cmethod
->klass
) &&
5532 mono_method_is_generic_sharable (cmethod
, TRUE
)) {
5533 if (cmethod
->is_inflated
&& mono_method_get_context (cmethod
)->method_inst
) {
5534 mono_class_vtable_checked (cfg
->domain
, cmethod
->klass
, cfg
->error
);
5536 CHECK_TYPELOAD (cmethod
->klass
);
5538 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
,
5539 cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
5542 vtable_arg
= mini_emit_get_rgctx_klass (cfg
, context_used
,
5543 cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
5545 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, cmethod
->klass
, cfg
->error
);
5547 CHECK_TYPELOAD (cmethod
->klass
);
5548 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
5553 /* Avoid virtual calls to ctors if possible */
5554 if (mono_class_is_marshalbyref (cmethod
->klass
))
5555 callvirt_this_arg
= sp
[0];
5557 if (cmethod
&& (ins
= mini_emit_inst_for_ctor (cfg
, cmethod
, fsig
, sp
))) {
5558 g_assert (MONO_TYPE_IS_VOID (fsig
->ret
));
5559 CHECK_CFG_EXCEPTION
;
5560 } else if ((cfg
->opt
& MONO_OPT_INLINE
) && cmethod
&& !context_used
&& !vtable_arg
&&
5561 mono_method_check_inlining (cfg
, cmethod
) &&
5562 !mono_class_is_subclass_of_internal (cmethod
->klass
, mono_defaults
.exception_class
, FALSE
)) {
5565 if ((costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, FALSE
))) {
5566 cfg
->real_offset
+= 5;
5568 *inline_costs
+= costs
- 5;
5570 INLINE_FAILURE ("inline failure");
5571 // FIXME-VT: Clean this up
5572 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
))
5573 GSHAREDVT_FAILURE(*ip
);
5574 mini_emit_method_call_full (cfg
, cmethod
, fsig
, FALSE
, sp
, callvirt_this_arg
, NULL
, NULL
);
5576 } else if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
)) {
5579 addr
= emit_get_rgctx_gsharedvt_call (cfg
, context_used
, fsig
, cmethod
, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE
);
5581 if (cfg
->llvm_only
) {
5582 // FIXME: Avoid initializing vtable_arg
5583 mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
5585 mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, vtable_arg
);
5587 } else if (context_used
&&
5588 ((!mono_method_is_generic_sharable_full (cmethod
, TRUE
, FALSE
, FALSE
) ||
5589 !mono_class_generic_sharing_enabled (cmethod
->klass
)) || cfg
->gsharedvt
)) {
5590 MonoInst
*cmethod_addr
;
5592 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
5594 if (cfg
->llvm_only
) {
5595 MonoInst
*addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
,
5596 MONO_RGCTX_INFO_METHOD_FTNDESC
);
5597 mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
5599 cmethod_addr
= emit_get_rgctx_method (cfg
, context_used
,
5600 cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
5602 mini_emit_calli (cfg
, fsig
, sp
, cmethod_addr
, NULL
, vtable_arg
);
5605 INLINE_FAILURE ("ctor call");
5606 ins
= mini_emit_method_call_full (cfg
, cmethod
, fsig
, FALSE
, sp
,
5607 callvirt_this_arg
, NULL
, vtable_arg
);
5616 gboolean inst_tailcall
;
5620 * handle_constrained_call:
5622 * Handle constrained calls. Return a MonoInst* representing the call or NULL.
5623 * May overwrite sp [0] and modify the ref_... parameters.
5626 handle_constrained_call (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoClass
*constrained_class
, MonoInst
**sp
,
5627 HandleCallData
*cdata
, MonoMethod
**ref_cmethod
, gboolean
*ref_virtual
, gboolean
*ref_emit_widen
)
5629 MonoInst
*ins
, *addr
;
5630 MonoMethod
*method
= cdata
->method
;
5631 gboolean constrained_partial_call
= FALSE
;
5632 gboolean constrained_is_generic_param
=
5633 m_class_get_byval_arg (constrained_class
)->type
== MONO_TYPE_VAR
||
5634 m_class_get_byval_arg (constrained_class
)->type
== MONO_TYPE_MVAR
;
5636 if (constrained_is_generic_param
&& cfg
->gshared
) {
5637 if (!mini_is_gsharedvt_klass (constrained_class
)) {
5638 g_assert (!m_class_is_valuetype (cmethod
->klass
));
5639 if (!mini_type_is_reference (m_class_get_byval_arg (constrained_class
)))
5640 constrained_partial_call
= TRUE
;
5644 if (mini_is_gsharedvt_klass (constrained_class
)) {
5645 if ((cmethod
->klass
!= mono_defaults
.object_class
) && m_class_is_valuetype (constrained_class
) && m_class_is_valuetype (cmethod
->klass
)) {
5646 /* The 'Own method' case below */
5647 } else if (m_class_get_image (cmethod
->klass
) != mono_defaults
.corlib
&& !mono_class_is_interface (cmethod
->klass
) && !m_class_is_valuetype (cmethod
->klass
)) {
5648 /* 'The type parameter is instantiated as a reference type' case below. */
5650 ins
= handle_constrained_gsharedvt_call (cfg
, cmethod
, fsig
, sp
, constrained_class
, ref_emit_widen
);
5651 CHECK_CFG_EXCEPTION
;
5653 if (cdata
->inst_tailcall
) // FIXME
5654 mono_tailcall_print ("missed tailcall constrained_class %s -> %s\n", method
->name
, cmethod
->name
);
5659 if (constrained_partial_call
) {
5660 gboolean need_box
= TRUE
;
5663 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
5664 * called method is not known at compile time either. The called method could end up being
5665 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
5666 * to box the receiver.
5667 * A simple solution would be to box always and make a normal virtual call, but that would
5668 * be bad performance wise.
5670 if (mono_class_is_interface (cmethod
->klass
) && mono_class_is_ginst (cmethod
->klass
) &&
5671 (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
)) {
5673 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing neccessary.
5675 /* If the method is not abstract, it's a default interface method, and we need to box */
5679 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) && (cmethod
->klass
== mono_defaults
.object_class
|| cmethod
->klass
== m_class_get_parent (mono_defaults
.enum_class
) || cmethod
->klass
== mono_defaults
.enum_class
)) {
5680 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
5681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (constrained_class
), sp
[0]->dreg
, 0);
5682 ins
->klass
= constrained_class
;
5683 sp
[0] = mini_emit_box (cfg
, ins
, constrained_class
, mono_class_check_context_used (constrained_class
));
5684 CHECK_CFG_EXCEPTION
;
5685 } else if (need_box
) {
5687 MonoBasicBlock
*is_ref_bb
, *end_bb
;
5688 MonoInst
*nonbox_call
, *addr
;
5691 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
5693 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
5694 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
5696 addr
= emit_get_rgctx_virt_method (cfg
, mono_class_check_context_used (constrained_class
), constrained_class
, cmethod
, MONO_RGCTX_INFO_VIRT_METHOD_CODE
);
5698 NEW_BBLOCK (cfg
, is_ref_bb
);
5699 NEW_BBLOCK (cfg
, end_bb
);
5701 box_type
= emit_get_rgctx_virt_method (cfg
, mono_class_check_context_used (constrained_class
), constrained_class
, cmethod
, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE
);
5702 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, box_type
->dreg
, MONO_GSHAREDVT_BOX_TYPE_REF
);
5703 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBEQ
, is_ref_bb
);
5707 /* addr is an ftndesc in this case */
5708 nonbox_call
= mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
5710 nonbox_call
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, NULL
);
5712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
5715 MONO_START_BB (cfg
, is_ref_bb
);
5716 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (constrained_class
), sp
[0]->dreg
, 0);
5717 ins
->klass
= constrained_class
;
5718 sp
[0] = mini_emit_box (cfg
, ins
, constrained_class
, mono_class_check_context_used (constrained_class
));
5719 CHECK_CFG_EXCEPTION
;
5721 ins
= mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
5723 ins
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, NULL
);
5725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
5727 MONO_START_BB (cfg
, end_bb
);
5730 nonbox_call
->dreg
= ins
->dreg
;
5731 if (cdata
->inst_tailcall
) // FIXME
5732 mono_tailcall_print ("missed tailcall constrained_partial_need_box %s -> %s\n", method
->name
, cmethod
->name
);
5735 g_assert (mono_class_is_interface (cmethod
->klass
));
5736 addr
= emit_get_rgctx_virt_method (cfg
, mono_class_check_context_used (constrained_class
), constrained_class
, cmethod
, MONO_RGCTX_INFO_VIRT_METHOD_CODE
);
5738 ins
= mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
5740 ins
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, NULL
);
5741 if (cdata
->inst_tailcall
) // FIXME
5742 mono_tailcall_print ("missed tailcall constrained_partial %s -> %s\n", method
->name
, cmethod
->name
);
5745 } else if (!m_class_is_valuetype (constrained_class
)) {
5746 int dreg
= alloc_ireg_ref (cfg
);
5749 * The type parameter is instantiated as a reference
5750 * type. We have a managed pointer on the stack, so
5751 * we need to dereference it here.
5753 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, 0);
5754 ins
->type
= STACK_OBJ
;
5756 } else if (cmethod
->klass
== mono_defaults
.object_class
|| cmethod
->klass
== m_class_get_parent (mono_defaults
.enum_class
) || cmethod
->klass
== mono_defaults
.enum_class
) {
5758 * The type parameter is instantiated as a valuetype,
5759 * but that type doesn't override the method we're
5760 * calling, so we need to box `this'.
5762 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (constrained_class
), sp
[0]->dreg
, 0);
5763 ins
->klass
= constrained_class
;
5764 sp
[0] = mini_emit_box (cfg
, ins
, constrained_class
, mono_class_check_context_used (constrained_class
));
5765 CHECK_CFG_EXCEPTION
;
5767 if (cmethod
->klass
!= constrained_class
) {
5768 /* Enums/default interface methods */
5769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (constrained_class
), sp
[0]->dreg
, 0);
5770 ins
->klass
= constrained_class
;
5771 sp
[0] = mini_emit_box (cfg
, ins
, constrained_class
, mono_class_check_context_used (constrained_class
));
5772 CHECK_CFG_EXCEPTION
;
5774 *ref_virtual
= FALSE
;
5782 emit_setret (MonoCompile
*cfg
, MonoInst
*val
)
5784 MonoType
*ret_type
= mini_get_underlying_type (mono_method_signature_internal (cfg
->method
)->ret
);
5787 if (mini_type_to_stind (cfg
, ret_type
) == CEE_STOBJ
) {
5790 if (!cfg
->vret_addr
) {
5791 EMIT_NEW_VARSTORE (cfg
, ins
, cfg
->ret
, ret_type
, val
);
5793 EMIT_NEW_RETLOADA (cfg
, ret_addr
);
5795 MonoClass
*ret_class
= mono_class_from_mono_type_internal (ret_type
);
5796 if (MONO_CLASS_IS_SIMD (cfg
, ret_class
))
5797 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREX_MEMBASE
, ret_addr
->dreg
, 0, val
->dreg
);
5799 EMIT_NEW_STORE_MEMBASE (cfg
, ins
, OP_STOREV_MEMBASE
, ret_addr
->dreg
, 0, val
->dreg
);
5800 ins
->klass
= ret_class
;
5803 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5804 if (COMPILE_SOFT_FLOAT (cfg
) && !ret_type
->byref
&& ret_type
->type
== MONO_TYPE_R4
) {
5805 MonoInst
*iargs
[1];
5809 conv
= mono_emit_jit_icall (cfg
, mono_fload_r4_arg
, iargs
);
5810 mono_arch_emit_setret (cfg
, cfg
->method
, conv
);
5812 mono_arch_emit_setret (cfg
, cfg
->method
, val
);
5815 mono_arch_emit_setret (cfg
, cfg
->method
, val
);
5820 typedef union _MonoOpcodeParameter
{
5825 guchar
*branch_target
;
5826 } MonoOpcodeParameter
;
5828 typedef struct _MonoOpcodeInfo
{
5829 guint constant
: 4; // private
5830 gint pops
: 3; // public -1 means variable
5831 gint pushes
: 3; // public -1 means variable
/*
 * mono_opcode_decode:
 *
 *   Decode the inline operand of opcode IL_OP, whose encoding at IP occupies
 * OP_SIZE bytes in total, into PARAMETER, and return the static stack-effect
 * info for that opcode.
 *
 * NOTE(review): several physical lines of this function are missing from this
 * view (the Push*/Pop* helper #defines consumed by opcode.def, some case
 * labels, the `break` statements, the `delta` declaration, braces and the
 * final return). Code tokens below are left exactly as found; only comments
 * were added.
 */
static const MonoOpcodeInfo *
mono_opcode_decode (guchar *ip, guint op_size, MonoOpcodeEnum il_op, MonoOpcodeParameter *parameter)
#define VarPush (-1)
	/* {param_constant + 1, pops, pushes} for every CIL opcode, generated by
	 * expanding OPDEF over the opcode definition file. The +1 bias lets a
	 * stored 0 mean "no inline constant". */
	static const MonoOpcodeInfo mono_opcode_info [ ] = {
#define OPDEF(name, str, pops, pushes, param, param_constant, a, b, c, flow) {param_constant + 1, pops, pushes },
#include "mono/cil/opcode.def"
	guchar *next_ip = ip + op_size;
	const MonoOpcodeInfo *info = &mono_opcode_info [il_op];
	/* Dispatch on how the operand is encoded after the opcode byte(s). */
	switch (mono_opcodes [il_op].argument) {
	case MonoInlineNone:
		/* No operand bytes: recover the implicit constant (undo the +1 bias). */
		parameter->i32 = (int)info->constant - 1;
	case MonoInlineString:
	case MonoInlineType:
	case MonoInlineField:
	case MonoInlineMethod:
	case MonoShortInlineR:
		/* 32-bit operand (metadata token or r4 bits), little-endian. */
		parameter->i32 = read32 (next_ip - 4);
		// FIXME check token type?
	case MonoShortInlineI:
		/* Signed 8-bit immediate. */
		parameter->i32 = (signed char)next_ip [-1];
		/* 16-bit variable/argument index. */
		parameter->i32 = read16 (next_ip - 2);
	case MonoShortInlineVar:
		/* Unsigned 8-bit variable/argument index. */
		parameter->i32 = next_ip [-1];
		/* 64-bit operand (i8 or r8). */
		parameter->i64 = read64 (next_ip - 8);
	case MonoShortInlineBrTarget:
		/* Signed 8-bit branch displacement, relative to the next instruction. */
		delta = (signed char)next_ip [-1];
	case MonoInlineBrTarget:
		/* Signed 32-bit branch displacement, relative to the next instruction. */
		delta = (gint32)read32 (next_ip - 4);
		parameter->branch_target = delta + next_ip;
	case MonoInlineSwitch: // complicated
		/* Unknown operand kind: hard failure. */
		g_error ("%s %d %d\n", __func__, il_op, mono_opcodes [il_op].argument);
5927 * mono_method_to_ir:
5929 * Translate the .net IL into linear IR.
5931 * @start_bblock: if not NULL, the starting basic block, used during inlining.
5932 * @end_bblock: if not NULL, the ending basic block, used during inlining.
5933 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
5934 * @inline_args: if not NULL, contains the arguments to the inline call
5935 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
5936 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
5938 * This method is used to turn ECMA IL into Mono's internal Linear IR
5939 * representation. It is used both for entire methods, as well as
5940 * inlining existing methods. In the former case, the @start_bblock,
5941 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
5942 * inline_offset is set to zero.
5944 * Returns: the inline cost, or -1 if there was an error processing this method.
5947 mono_method_to_ir (MonoCompile
*cfg
, MonoMethod
*method
, MonoBasicBlock
*start_bblock
, MonoBasicBlock
*end_bblock
,
5948 MonoInst
*return_var
, MonoInst
**inline_args
,
5949 guint inline_offset
, gboolean is_virtual_call
)
5952 // Buffer to hold parameters to mono_new_array, instead of varargs.
5953 MonoInst
*array_new_localalloc_ins
= NULL
;
5954 MonoInst
*ins
, **sp
, **stack_start
;
5955 MonoBasicBlock
*tblock
= NULL
;
5956 MonoBasicBlock
*init_localsbb
= NULL
, *init_localsbb2
= NULL
;
5957 MonoSimpleBasicBlock
*bb
= NULL
, *original_bb
= NULL
;
5958 MonoMethod
*method_definition
;
5959 MonoInst
**arg_array
;
5960 MonoMethodHeader
*header
;
5962 guint32 token
, ins_flag
;
5964 MonoClass
*constrained_class
= NULL
;
5965 gboolean save_last_error
= FALSE
;
5966 guchar
*ip
, *end
, *target
, *err_pos
;
5967 MonoMethodSignature
*sig
;
5968 MonoGenericContext
*generic_context
= NULL
;
5969 MonoGenericContainer
*generic_container
= NULL
;
5970 MonoType
**param_types
;
5971 int i
, n
, start_new_bblock
, dreg
;
5972 int num_calls
= 0, inline_costs
= 0;
5973 int breakpoint_id
= 0;
5975 GSList
*class_inits
= NULL
;
5976 gboolean dont_verify
, dont_verify_stloc
, readonly
= FALSE
;
5978 gboolean init_locals
, seq_points
, skip_dead_blocks
;
5979 gboolean sym_seq_points
= FALSE
;
5980 MonoDebugMethodInfo
*minfo
;
5981 MonoBitSet
*seq_point_locs
= NULL
;
5982 MonoBitSet
*seq_point_set_locs
= NULL
;
5983 gboolean emitted_funccall_seq_point
= FALSE
;
5985 cfg
->disable_inline
= is_jit_optimizer_disabled (method
);
5987 image
= m_class_get_image (method
->klass
);
5989 /* serialization and xdomain stuff may need access to private fields and methods */
5990 dont_verify
= image
->assembly
->corlib_internal
? TRUE
: FALSE
;
5991 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
;
5992 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_DISPATCH
;
5993 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
; /* bug #77896 */
5994 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP
;
5995 dont_verify
|= method
->wrapper_type
== MONO_WRAPPER_COMINTEROP_INVOKE
;
5997 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5998 dont_verify_stloc
= method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
;
5999 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_OTHER
;
6000 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
;
6001 dont_verify_stloc
|= method
->wrapper_type
== MONO_WRAPPER_STELEMREF
;
6003 header
= mono_method_get_header_checked (method
, cfg
->error
);
6005 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
6006 goto exception_exit
;
6008 cfg
->headers_to_free
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->headers_to_free
, header
);
6011 generic_container
= mono_method_get_generic_container (method
);
6012 sig
= mono_method_signature_internal (method
);
6013 num_args
= sig
->hasthis
+ sig
->param_count
;
6014 ip
= (guchar
*)header
->code
;
6015 cfg
->cil_start
= ip
;
6016 end
= ip
+ header
->code_size
;
6017 cfg
->stat_cil_code_size
+= header
->code_size
;
6019 seq_points
= cfg
->gen_seq_points
&& cfg
->method
== method
;
6021 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
6022 /* We could hit a seq point before attaching to the JIT (#8338) */
6026 if (cfg
->prof_coverage
) {
6027 if (cfg
->compile_aot
)
6028 g_error ("Coverage profiling is not supported with AOT.");
6030 cfg
->coverage_info
= mono_profiler_coverage_alloc (cfg
->method
, header
->code_size
);
6033 if ((cfg
->gen_sdb_seq_points
&& cfg
->method
== method
) || cfg
->prof_coverage
) {
6034 minfo
= mono_debug_lookup_method (method
);
6036 MonoSymSeqPoint
*sps
;
6037 int i
, n_il_offsets
;
6039 mono_debug_get_seq_points (minfo
, NULL
, NULL
, NULL
, &sps
, &n_il_offsets
);
6040 seq_point_locs
= mono_bitset_mem_new (mono_mempool_alloc0 (cfg
->mempool
, mono_bitset_alloc_size (header
->code_size
, 0)), header
->code_size
, 0);
6041 seq_point_set_locs
= mono_bitset_mem_new (mono_mempool_alloc0 (cfg
->mempool
, mono_bitset_alloc_size (header
->code_size
, 0)), header
->code_size
, 0);
6042 sym_seq_points
= TRUE
;
6043 for (i
= 0; i
< n_il_offsets
; ++i
) {
6044 if (sps
[i
].il_offset
< header
->code_size
)
6045 mono_bitset_set_fast (seq_point_locs
, sps
[i
].il_offset
);
6049 MonoDebugMethodAsyncInfo
* asyncMethod
= mono_debug_lookup_method_async_debug_info (method
);
6051 for (i
= 0; asyncMethod
!= NULL
&& i
< asyncMethod
->num_awaits
; i
++)
6053 mono_bitset_set_fast (seq_point_locs
, asyncMethod
->resume_offsets
[i
]);
6054 mono_bitset_set_fast (seq_point_locs
, asyncMethod
->yield_offsets
[i
]);
6056 mono_debug_free_method_async_debug_info (asyncMethod
);
6058 } else if (!method
->wrapper_type
&& !method
->dynamic
&& mono_debug_image_has_debug_info (m_class_get_image (method
->klass
))) {
6059 /* Methods without line number info like auto-generated property accessors */
6060 seq_point_locs
= mono_bitset_mem_new (mono_mempool_alloc0 (cfg
->mempool
, mono_bitset_alloc_size (header
->code_size
, 0)), header
->code_size
, 0);
6061 seq_point_set_locs
= mono_bitset_mem_new (mono_mempool_alloc0 (cfg
->mempool
, mono_bitset_alloc_size (header
->code_size
, 0)), header
->code_size
, 0);
6062 sym_seq_points
= TRUE
;
6067 * Methods without init_locals set could cause asserts in various passes
6068 * (#497220). To work around this, we emit dummy initialization opcodes
6069 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6070 * on some platforms.
6072 if (cfg
->opt
& MONO_OPT_UNSAFE
)
6073 init_locals
= header
->init_locals
;
6077 method_definition
= method
;
6078 while (method_definition
->is_inflated
) {
6079 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
6080 method_definition
= imethod
->declaring
;
6083 /* SkipVerification is not allowed if core-clr is enabled */
6084 if (!dont_verify
&& mini_assembly_can_skip_verification (cfg
->domain
, method
)) {
6086 dont_verify_stloc
= TRUE
;
6089 if (sig
->is_inflated
)
6090 generic_context
= mono_method_get_context (method
);
6091 else if (generic_container
)
6092 generic_context
= &generic_container
->context
;
6093 cfg
->generic_context
= generic_context
;
6096 g_assert (!sig
->has_type_parameters
);
6098 if (sig
->generic_param_count
&& method
->wrapper_type
== MONO_WRAPPER_NONE
) {
6099 g_assert (method
->is_inflated
);
6100 g_assert (mono_method_get_context (method
)->method_inst
);
6102 if (method
->is_inflated
&& mono_method_get_context (method
)->method_inst
)
6103 g_assert (sig
->generic_param_count
);
6105 if (cfg
->method
== method
) {
6106 cfg
->real_offset
= 0;
6108 cfg
->real_offset
= inline_offset
;
6111 cfg
->cil_offset_to_bb
= (MonoBasicBlock
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoBasicBlock
*) * header
->code_size
);
6112 cfg
->cil_offset_to_bb_len
= header
->code_size
;
6114 cfg
->current_method
= method
;
6116 if (cfg
->verbose_level
> 2)
6117 printf ("method to IR %s\n", mono_method_full_name (method
, TRUE
));
6119 param_types
= (MonoType
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoType
*) * num_args
);
6121 param_types
[0] = m_class_is_valuetype (method
->klass
) ? m_class_get_this_arg (method
->klass
) : m_class_get_byval_arg (method
->klass
);
6122 for (n
= 0; n
< sig
->param_count
; ++n
)
6123 param_types
[n
+ sig
->hasthis
] = sig
->params
[n
];
6124 cfg
->arg_types
= param_types
;
6126 cfg
->dont_inline
= g_list_prepend (cfg
->dont_inline
, method
);
6127 if (cfg
->method
== method
) {
6129 NEW_BBLOCK (cfg
, start_bblock
);
6130 cfg
->bb_entry
= start_bblock
;
6131 start_bblock
->cil_code
= NULL
;
6132 start_bblock
->cil_length
= 0;
6135 NEW_BBLOCK (cfg
, end_bblock
);
6136 cfg
->bb_exit
= end_bblock
;
6137 end_bblock
->cil_code
= NULL
;
6138 end_bblock
->cil_length
= 0;
6139 end_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
6140 g_assert (cfg
->num_bblocks
== 2);
6142 arg_array
= cfg
->args
;
6144 if (header
->num_clauses
) {
6145 cfg
->spvars
= g_hash_table_new (NULL
, NULL
);
6146 cfg
->exvars
= g_hash_table_new (NULL
, NULL
);
6148 /* handle exception clauses */
6149 for (i
= 0; i
< header
->num_clauses
; ++i
) {
6150 MonoBasicBlock
*try_bb
;
6151 MonoExceptionClause
*clause
= &header
->clauses
[i
];
6152 GET_BBLOCK (cfg
, try_bb
, ip
+ clause
->try_offset
);
6154 try_bb
->real_offset
= clause
->try_offset
;
6155 try_bb
->try_start
= TRUE
;
6156 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->handler_offset
);
6157 tblock
->real_offset
= clause
->handler_offset
;
6158 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
6160 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
6161 mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
6163 * Linking the try block with the EH block hinders inlining as we won't be able to
6164 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6166 if (COMPILE_LLVM (cfg
))
6167 link_bblock (cfg
, try_bb
, tblock
);
6169 if (*(ip
+ clause
->handler_offset
) == CEE_POP
)
6170 tblock
->flags
|= BB_EXCEPTION_DEAD_OBJ
;
6172 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
||
6173 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
||
6174 clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
) {
6175 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
6176 MONO_ADD_INS (tblock
, ins
);
6178 if (seq_points
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
&& clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
) {
6179 /* finally clauses already have a seq point */
6180 /* seq points for filter clauses are emitted below */
6181 NEW_SEQ_POINT (cfg
, ins
, clause
->handler_offset
, TRUE
);
6182 MONO_ADD_INS (tblock
, ins
);
6185 /* todo: is a fault block unsafe to optimize? */
6186 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
6187 tblock
->flags
|= BB_EXCEPTION_UNSAFE
;
6190 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6192 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6194 /* catch and filter blocks get the exception object on the stack */
6195 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
||
6196 clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
6198 /* mostly like handle_stack_args (), but just sets the input args */
6199 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6200 tblock
->in_scount
= 1;
6201 tblock
->in_stack
= (MonoInst
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
6202 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
6206 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
6207 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
6208 if (!cfg
->compile_llvm
) {
6209 MONO_INST_NEW (cfg
, ins
, OP_GET_EX_OBJ
);
6210 ins
->dreg
= tblock
->in_stack
[0]->dreg
;
6211 MONO_ADD_INS (tblock
, ins
);
6214 MonoInst
*dummy_use
;
6217 * Add a dummy use for the exvar so its liveness info will be
6220 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, tblock
->in_stack
[0]);
6223 if (seq_points
&& clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
6224 NEW_SEQ_POINT (cfg
, ins
, clause
->handler_offset
, TRUE
);
6225 MONO_ADD_INS (tblock
, ins
);
6228 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
6229 GET_BBLOCK (cfg
, tblock
, ip
+ clause
->data
.filter_offset
);
6230 tblock
->flags
|= BB_EXCEPTION_HANDLER
;
6231 tblock
->real_offset
= clause
->data
.filter_offset
;
6232 tblock
->in_scount
= 1;
6233 tblock
->in_stack
= (MonoInst
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*));
6234 /* The filter block shares the exvar with the handler block */
6235 tblock
->in_stack
[0] = mono_create_exvar_for_offset (cfg
, clause
->handler_offset
);
6236 MONO_INST_NEW (cfg
, ins
, OP_START_HANDLER
);
6237 MONO_ADD_INS (tblock
, ins
);
6241 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FILTER
&&
6242 clause
->data
.catch_class
&&
6244 mono_class_check_context_used (clause
->data
.catch_class
)) {
6246 * In shared generic code with catch
6247 * clauses containing type variables
6248 * the exception handling code has to
6249 * be able to get to the rgctx.
6250 * Therefore we have to make sure that
6251 * the vtable/mrgctx argument (for
6252 * static or generic methods) or the
6253 * "this" argument (for non-static
6254 * methods) are live.
6256 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
6257 mini_method_get_context (method
)->method_inst
||
6258 m_class_is_valuetype (method
->klass
)) {
6259 mono_get_vtable_var (cfg
);
6261 MonoInst
*dummy_use
;
6263 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, arg_array
[0]);
6268 arg_array
= g_newa (MonoInst
*, num_args
);
6269 cfg
->cbb
= start_bblock
;
6270 cfg
->args
= arg_array
;
6271 mono_save_args (cfg
, sig
, inline_args
);
6274 /* FIRST CODE BLOCK */
6275 NEW_BBLOCK (cfg
, tblock
);
6276 tblock
->cil_code
= ip
;
6280 ADD_BBLOCK (cfg
, tblock
);
6282 if (cfg
->method
== method
) {
6283 breakpoint_id
= mono_debugger_method_has_breakpoint (method
);
6284 if (breakpoint_id
) {
6285 MONO_INST_NEW (cfg
, ins
, OP_BREAK
);
6286 MONO_ADD_INS (cfg
->cbb
, ins
);
6290 /* we use a separate basic block for the initialization code */
6291 NEW_BBLOCK (cfg
, init_localsbb
);
6292 if (cfg
->method
== method
)
6293 cfg
->bb_init
= init_localsbb
;
6294 init_localsbb
->real_offset
= cfg
->real_offset
;
6295 start_bblock
->next_bb
= init_localsbb
;
6296 init_localsbb
->next_bb
= cfg
->cbb
;
6297 link_bblock (cfg
, start_bblock
, init_localsbb
);
6298 link_bblock (cfg
, init_localsbb
, cfg
->cbb
);
6299 init_localsbb2
= init_localsbb
;
6300 cfg
->cbb
= init_localsbb
;
6302 if (cfg
->gsharedvt
&& cfg
->method
== method
) {
6303 MonoGSharedVtMethodInfo
*info
;
6304 MonoInst
*var
, *locals_var
;
6307 info
= (MonoGSharedVtMethodInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoGSharedVtMethodInfo
));
6308 info
->method
= cfg
->method
;
6309 info
->count_entries
= 16;
6310 info
->entries
= (MonoRuntimeGenericContextInfoTemplate
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoRuntimeGenericContextInfoTemplate
) * info
->count_entries
);
6311 cfg
->gsharedvt_info
= info
;
6313 var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
6314 /* prevent it from being register allocated */
6315 //var->flags |= MONO_INST_VOLATILE;
6316 cfg
->gsharedvt_info_var
= var
;
6318 ins
= emit_get_rgctx_gsharedvt_method (cfg
, mini_method_check_context_used (cfg
, method
), method
, info
);
6319 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, var
->dreg
, ins
->dreg
);
6321 /* Allocate locals */
6322 locals_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
6323 /* prevent it from being register allocated */
6324 //locals_var->flags |= MONO_INST_VOLATILE;
6325 cfg
->gsharedvt_locals_var
= locals_var
;
6327 dreg
= alloc_ireg (cfg
);
6328 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADI4_MEMBASE
, dreg
, var
->dreg
, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo
, locals_size
));
6330 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
6331 ins
->dreg
= locals_var
->dreg
;
6333 MONO_ADD_INS (cfg
->cbb
, ins
);
6334 cfg
->gsharedvt_locals_var_ins
= ins
;
6336 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
6339 ins->flags |= MONO_INST_INIT;
6343 if (mono_security_core_clr_enabled ()) {
6344 /* check if this is native code, e.g. an icall or a p/invoke */
6345 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
6346 MonoMethod
*wrapped
= mono_marshal_method_from_wrapper (method
);
6348 gboolean pinvk
= (wrapped
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
);
6349 gboolean icall
= (wrapped
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
);
6351 /* if this ia a native call then it can only be JITted from platform code */
6352 if ((icall
|| pinvk
) && method
->klass
&& m_class_get_image (method
->klass
)) {
6353 if (!mono_security_core_clr_is_platform_image (m_class_get_image (method
->klass
))) {
6354 MonoException
*ex
= icall
? mono_get_exception_security () :
6355 mono_get_exception_method_access ();
6356 emit_throw_exception (cfg
, ex
);
6363 CHECK_CFG_EXCEPTION
;
6365 if (header
->code_size
== 0)
6368 if (get_basic_blocks (cfg
, header
, cfg
->real_offset
, ip
, end
, &err_pos
)) {
6373 if (cfg
->method
== method
)
6374 mono_debug_init_method (cfg
, cfg
->cbb
, breakpoint_id
);
6376 for (n
= 0; n
< header
->num_locals
; ++n
) {
6377 if (header
->locals
[n
]->type
== MONO_TYPE_VOID
&& !header
->locals
[n
]->byref
)
6382 /* We force the vtable variable here for all shared methods
6383 for the possibility that they might show up in a stack
6384 trace where their exact instantiation is needed. */
6385 if (cfg
->gshared
&& method
== cfg
->method
) {
6386 if ((method
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
6387 mini_method_get_context (method
)->method_inst
||
6388 m_class_is_valuetype (method
->klass
)) {
6389 mono_get_vtable_var (cfg
);
6391 /* FIXME: Is there a better way to do this?
6392 We need the variable live for the duration
6393 of the whole method. */
6394 cfg
->args
[0]->flags
|= MONO_INST_VOLATILE
;
6398 /* add a check for this != NULL to inlined methods */
6399 if (is_virtual_call
) {
6402 NEW_ARGLOAD (cfg
, arg_ins
, 0);
6403 MONO_ADD_INS (cfg
->cbb
, arg_ins
);
6404 MONO_EMIT_NEW_CHECK_THIS (cfg
, arg_ins
->dreg
);
6407 skip_dead_blocks
= !dont_verify
;
6408 if (skip_dead_blocks
) {
6409 original_bb
= bb
= mono_basic_block_split (method
, cfg
->error
, header
);
6414 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6415 stack_start
= sp
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * (header
->max_stack
+ 1));
6418 start_new_bblock
= 0;
6419 MonoOpcodeEnum il_op
; il_op
= MonoOpcodeEnum_Invalid
;
6421 for (guchar
*next_ip
= ip
; ip
< end
; ip
= next_ip
) {
6422 MonoOpcodeEnum previous_il_op
= il_op
;
6423 const guchar
*tmp_ip
= ip
;
6424 const int op_size
= mono_opcode_value_and_size (&tmp_ip
, end
, &il_op
);
6425 CHECK_OPSIZE (op_size
);
6428 if (cfg
->method
== method
)
6429 cfg
->real_offset
= ip
- header
->code
;
6431 cfg
->real_offset
= inline_offset
;
6436 if (start_new_bblock
) {
6437 cfg
->cbb
->cil_length
= ip
- cfg
->cbb
->cil_code
;
6438 if (start_new_bblock
== 2) {
6439 g_assert (ip
== tblock
->cil_code
);
6441 GET_BBLOCK (cfg
, tblock
, ip
);
6443 cfg
->cbb
->next_bb
= tblock
;
6445 start_new_bblock
= 0;
6446 for (i
= 0; i
< cfg
->cbb
->in_scount
; ++i
) {
6447 if (cfg
->verbose_level
> 3)
6448 printf ("loading %d from temp %d\n", i
, (int)cfg
->cbb
->in_stack
[i
]->inst_c0
);
6449 EMIT_NEW_TEMPLOAD (cfg
, ins
, cfg
->cbb
->in_stack
[i
]->inst_c0
);
6453 g_slist_free (class_inits
);
6456 if ((tblock
= cfg
->cil_offset_to_bb
[ip
- cfg
->cil_start
]) && (tblock
!= cfg
->cbb
)) {
6457 link_bblock (cfg
, cfg
->cbb
, tblock
);
6458 if (sp
!= stack_start
) {
6459 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
6461 CHECK_UNVERIFIABLE (cfg
);
6463 cfg
->cbb
->next_bb
= tblock
;
6465 for (i
= 0; i
< cfg
->cbb
->in_scount
; ++i
) {
6466 if (cfg
->verbose_level
> 3)
6467 printf ("loading %d from temp %d\n", i
, (int)cfg
->cbb
->in_stack
[i
]->inst_c0
);
6468 EMIT_NEW_TEMPLOAD (cfg
, ins
, cfg
->cbb
->in_stack
[i
]->inst_c0
);
6471 g_slist_free (class_inits
);
6476 if (skip_dead_blocks
) {
6477 int ip_offset
= ip
- header
->code
;
6479 if (ip_offset
== bb
->end
)
6483 g_assert (op_size
> 0); /*The BB formation pass must catch all bad ops*/
6485 if (cfg
->verbose_level
> 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset
);
6487 if (ip_offset
+ op_size
== bb
->end
) {
6488 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6489 MONO_ADD_INS (cfg
->cbb
, ins
);
6490 start_new_bblock
= 1;
6496 * Sequence points are points where the debugger can place a breakpoint.
6497 * Currently, we generate these automatically at points where the IL
6500 if (seq_points
&& ((!sym_seq_points
&& (sp
== stack_start
)) || (sym_seq_points
&& mono_bitset_test_fast (seq_point_locs
, ip
- header
->code
)))) {
6502 * Make methods interruptable at the beginning, and at the targets of
6503 * backward branches.
6504 * Also, do this at the start of every bblock in methods with clauses too,
6505 * to be able to handle instructions with inprecise control flow like
6507 * Backward branches are handled at the end of method-to-ir ().
6509 gboolean intr_loc
= ip
== header
->code
|| (!cfg
->cbb
->last_ins
&& cfg
->header
->num_clauses
);
6510 gboolean sym_seq_point
= sym_seq_points
&& mono_bitset_test_fast (seq_point_locs
, ip
- header
->code
);
6512 /* Avoid sequence points on empty IL like .volatile */
6513 // FIXME: Enable this
6514 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6515 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, intr_loc
);
6516 if ((sp
!= stack_start
) && !sym_seq_point
)
6517 ins
->flags
|= MONO_INST_NONEMPTY_STACK
;
6518 MONO_ADD_INS (cfg
->cbb
, ins
);
6521 mono_bitset_set_fast (seq_point_set_locs
, ip
- header
->code
);
6523 if (cfg
->prof_coverage
) {
6524 guint32 cil_offset
= ip
- header
->code
;
6525 gpointer counter
= &cfg
->coverage_info
->data
[cil_offset
].count
;
6526 cfg
->coverage_info
->data
[cil_offset
].cil_code
= ip
;
6528 if (mono_arch_opcode_supported (OP_ATOMIC_ADD_I4
)) {
6529 MonoInst
*one_ins
, *load_ins
;
6531 EMIT_NEW_PCONST (cfg
, load_ins
, counter
);
6532 EMIT_NEW_ICONST (cfg
, one_ins
, 1);
6533 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_ADD_I4
);
6534 ins
->dreg
= mono_alloc_ireg (cfg
);
6535 ins
->inst_basereg
= load_ins
->dreg
;
6536 ins
->inst_offset
= 0;
6537 ins
->sreg2
= one_ins
->dreg
;
6538 ins
->type
= STACK_I4
;
6539 MONO_ADD_INS (cfg
->cbb
, ins
);
6541 EMIT_NEW_PCONST (cfg
, ins
, counter
);
6542 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, ins
->dreg
, 0, 1);
6547 cfg
->cbb
->real_offset
= cfg
->real_offset
;
6549 if (cfg
->verbose_level
> 3)
6550 printf ("converting (in B%d: stack: %d) %s", cfg
->cbb
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
6552 // Variables shared by CEE_CALLI CEE_CALL CEE_CALLVIRT CEE_JMP.
6553 // Initialize to either what they all need or zero.
6554 gboolean emit_widen
= TRUE
;
6555 gboolean tailcall
= FALSE
;
6556 gboolean common_call
= FALSE
;
6557 MonoInst
*keep_this_alive
= NULL
;
6558 MonoMethod
*cmethod
= NULL
;
6559 MonoMethodSignature
*fsig
= NULL
;
6561 // These are used only in CALL/CALLVIRT but must be initialized also for CALLI,
6562 // since it jumps into CALL/CALLVIRT.
6563 gboolean need_seq_point
= FALSE
;
6564 gboolean push_res
= TRUE
;
6565 gboolean skip_ret
= FALSE
;
6566 gboolean tailcall_remove_ret
= FALSE
;
6568 // FIXME split 500 lines load/store field into separate file/function.
6570 MonoOpcodeParameter parameter
;
6571 const MonoOpcodeInfo
* info
= mono_opcode_decode (ip
, op_size
, il_op
, ¶meter
);
6574 token
= parameter
.i32
;
6575 target
= parameter
.branch_target
;
6577 // Check stack size for push/pop except variable cases -- -1 like call/ret/newobj.
6578 const int pushes
= info
->pushes
;
6579 const int pops
= info
->pops
;
6580 if (pushes
>= 0 && pops
>= 0) {
6581 g_assert (pushes
- pops
<= 1);
6582 if (pushes
- pops
== 1)
6590 if (seq_points
&& !sym_seq_points
&& sp
!= stack_start
) {
6592 * The C# compiler uses these nops to notify the JIT that it should
6593 * insert seq points.
6595 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, FALSE
);
6596 MONO_ADD_INS (cfg
->cbb
, ins
);
6598 if (cfg
->keep_cil_nops
)
6599 MONO_INST_NEW (cfg
, ins
, OP_HARD_NOP
);
6601 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6602 MONO_ADD_INS (cfg
->cbb
, ins
);
6603 emitted_funccall_seq_point
= FALSE
;
6605 case MONO_CEE_BREAK
:
6606 if (mini_should_insert_breakpoint (cfg
->method
)) {
6607 ins
= mono_emit_jit_icall (cfg
, mono_debugger_agent_user_break
, NULL
);
6609 MONO_INST_NEW (cfg
, ins
, OP_NOP
);
6610 MONO_ADD_INS (cfg
->cbb
, ins
);
6613 case MONO_CEE_LDARG_0
:
6614 case MONO_CEE_LDARG_1
:
6615 case MONO_CEE_LDARG_2
:
6616 case MONO_CEE_LDARG_3
:
6617 case MONO_CEE_LDARG_S
:
6618 case MONO_CEE_LDARG
:
6620 if (next_ip
< end
&& is_addressable_valuetype_load (cfg
, next_ip
, cfg
->arg_types
[n
])) {
6621 EMIT_NEW_ARGLOADA (cfg
, ins
, n
);
6623 EMIT_NEW_ARGLOAD (cfg
, ins
, n
);
6628 case MONO_CEE_LDLOC_0
:
6629 case MONO_CEE_LDLOC_1
:
6630 case MONO_CEE_LDLOC_2
:
6631 case MONO_CEE_LDLOC_3
:
6632 case MONO_CEE_LDLOC_S
:
6633 case MONO_CEE_LDLOC
:
6635 if (next_ip
< end
&& is_addressable_valuetype_load (cfg
, next_ip
, header
->locals
[n
])) {
6636 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
6638 EMIT_NEW_LOCLOAD (cfg
, ins
, n
);
6643 case MONO_CEE_STLOC_0
:
6644 case MONO_CEE_STLOC_1
:
6645 case MONO_CEE_STLOC_2
:
6646 case MONO_CEE_STLOC_3
:
6647 case MONO_CEE_STLOC_S
:
6648 case MONO_CEE_STLOC
:
6651 *sp
= convert_value (cfg
, header
->locals
[n
], *sp
);
6652 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, header
->locals
[n
], *sp
))
6654 emit_stloc_ir (cfg
, sp
, header
, n
);
6657 case MONO_CEE_LDARGA_S
:
6658 case MONO_CEE_LDARGA
:
6660 NEW_ARGLOADA (cfg
, ins
, n
);
6661 MONO_ADD_INS (cfg
->cbb
, ins
);
6664 case MONO_CEE_STARG_S
:
6665 case MONO_CEE_STARG
:
6668 *sp
= convert_value (cfg
, param_types
[n
], *sp
);
6669 if (!dont_verify_stloc
&& target_type_is_incompatible (cfg
, param_types
[n
], *sp
))
6671 emit_starg_ir (cfg
, sp
, n
);
6673 case MONO_CEE_LDLOCA
:
6674 case MONO_CEE_LDLOCA_S
: {
6678 if ((tmp_ip
= emit_optimized_ldloca_ir (cfg
, next_ip
, end
, n
))) {
6680 il_op
= MONO_CEE_INITOBJ
;
6685 EMIT_NEW_LOCLOADA (cfg
, ins
, n
);
6689 case MONO_CEE_LDNULL
:
6690 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
6691 ins
->type
= STACK_OBJ
;
6694 case MONO_CEE_LDC_I4_M1
:
6695 case MONO_CEE_LDC_I4_0
:
6696 case MONO_CEE_LDC_I4_1
:
6697 case MONO_CEE_LDC_I4_2
:
6698 case MONO_CEE_LDC_I4_3
:
6699 case MONO_CEE_LDC_I4_4
:
6700 case MONO_CEE_LDC_I4_5
:
6701 case MONO_CEE_LDC_I4_6
:
6702 case MONO_CEE_LDC_I4_7
:
6703 case MONO_CEE_LDC_I4_8
:
6704 case MONO_CEE_LDC_I4_S
:
6705 case MONO_CEE_LDC_I4
:
6706 EMIT_NEW_ICONST (cfg
, ins
, n
);
6709 case MONO_CEE_LDC_I8
:
6710 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
6711 ins
->type
= STACK_I8
;
6712 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
6713 ins
->inst_l
= parameter
.i64
;
6714 MONO_ADD_INS (cfg
->cbb
, ins
);
6717 case MONO_CEE_LDC_R4
: {
6719 gboolean use_aotconst
= FALSE
;
6721 #ifdef TARGET_POWERPC
6722 /* FIXME: Clean this up */
6723 if (cfg
->compile_aot
)
6724 use_aotconst
= TRUE
;
6726 /* FIXME: we should really allocate this only late in the compilation process */
6727 f
= (float *)mono_domain_alloc (cfg
->domain
, sizeof (float));
6733 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R4
, f
);
6735 dreg
= alloc_freg (cfg
);
6736 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR4_MEMBASE
, dreg
, cons
->dreg
, 0);
6737 ins
->type
= cfg
->r4_stack_type
;
6739 MONO_INST_NEW (cfg
, ins
, OP_R4CONST
);
6740 ins
->type
= cfg
->r4_stack_type
;
6741 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6743 MONO_ADD_INS (cfg
->cbb
, ins
);
6749 case MONO_CEE_LDC_R8
: {
6751 gboolean use_aotconst
= FALSE
;
6753 #ifdef TARGET_POWERPC
6754 /* FIXME: Clean this up */
6755 if (cfg
->compile_aot
)
6756 use_aotconst
= TRUE
;
6759 /* FIXME: we should really allocate this only late in the compilation process */
6760 d
= (double *)mono_domain_alloc (cfg
->domain
, sizeof (double));
6766 EMIT_NEW_AOTCONST (cfg
, cons
, MONO_PATCH_INFO_R8
, d
);
6768 dreg
= alloc_freg (cfg
);
6769 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOADR8_MEMBASE
, dreg
, cons
->dreg
, 0);
6770 ins
->type
= STACK_R8
;
6772 MONO_INST_NEW (cfg
, ins
, OP_R8CONST
);
6773 ins
->type
= STACK_R8
;
6774 ins
->dreg
= alloc_dreg (cfg
, STACK_R8
);
6776 MONO_ADD_INS (cfg
->cbb
, ins
);
6782 case MONO_CEE_DUP
: {
6783 MonoInst
*temp
, *store
;
6787 temp
= mono_compile_create_var (cfg
, type_from_stack_type (ins
), OP_LOCAL
);
6788 EMIT_NEW_TEMPSTORE (cfg
, store
, temp
->inst_c0
, ins
);
6790 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6793 EMIT_NEW_TEMPLOAD (cfg
, ins
, temp
->inst_c0
);
6803 if (sp
[0]->type
== STACK_R8
)
6804 /* we need to pop the value from the x86 FP stack */
6805 MONO_EMIT_NEW_UNALU (cfg
, OP_X86_FPOP
, -1, sp
[0]->dreg
);
6808 case MONO_CEE_JMP
: {
6812 INLINE_FAILURE ("jmp");
6813 GSHAREDVT_FAILURE (il_op
);
6815 if (stack_start
!= sp
)
6817 /* FIXME: check the signature matches */
6818 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
6821 if (cfg
->gshared
&& mono_method_check_context_used (cmethod
))
6822 GENERIC_SHARING_FAILURE (CEE_JMP
);
6824 mini_profiler_emit_tail_call (cfg
, cmethod
);
6826 fsig
= mono_method_signature_internal (cmethod
);
6827 n
= fsig
->param_count
+ fsig
->hasthis
;
6828 if (cfg
->llvm_only
) {
6831 args
= (MonoInst
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6832 for (i
= 0; i
< n
; ++i
)
6833 EMIT_NEW_ARGLOAD (cfg
, args
[i
], i
);
6834 ins
= mini_emit_method_call_full (cfg
, cmethod
, fsig
, TRUE
, args
, NULL
, NULL
, NULL
);
6836 * The code in mono-basic-block.c treats the rest of the code as dead, but we
6837 * have to emit a normal return since llvm expects it.
6840 emit_setret (cfg
, ins
);
6841 MONO_INST_NEW (cfg
, ins
, OP_BR
);
6842 ins
->inst_target_bb
= end_bblock
;
6843 MONO_ADD_INS (cfg
->cbb
, ins
);
6844 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
6847 /* Handle tailcalls similarly to calls */
6850 mini_emit_tailcall_parameters (cfg
, fsig
);
6851 MONO_INST_NEW_CALL (cfg
, call
, OP_TAILCALL
);
6852 call
->method
= cmethod
;
6853 // FIXME Other initialization of the tailcall field occurs after
6854 // it is used. So this is the only "real" use and needs more attention.
6855 call
->tailcall
= TRUE
;
6856 call
->signature
= fsig
;
6857 call
->args
= (MonoInst
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoInst
*) * n
);
6858 call
->inst
.inst_p0
= cmethod
;
6859 for (i
= 0; i
< n
; ++i
)
6860 EMIT_NEW_ARGLOAD (cfg
, call
->args
[i
], i
);
6862 if (mini_type_is_vtype (mini_get_underlying_type (call
->signature
->ret
)))
6863 call
->vret_var
= cfg
->vret_addr
;
6865 mono_arch_emit_call (cfg
, call
);
6866 cfg
->param_area
= MAX(cfg
->param_area
, call
->stack_usage
);
6867 MONO_ADD_INS (cfg
->cbb
, (MonoInst
*)call
);
6870 start_new_bblock
= 1;
6873 case MONO_CEE_CALLI
: {
6874 // FIXME tail.calli is problemetic because the this pointer's type
6875 // is not in the signature, and we cannot check for a byref valuetype.
6877 MonoInst
*callee
= NULL
;
6879 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
6880 common_call
= TRUE
; // i.e. skip_ret/push_res/seq_point logic
6883 gboolean
const inst_tailcall
= G_UNLIKELY (debug_tailcall_try_all
6884 ? (next_ip
< end
&& next_ip
[0] == CEE_RET
)
6885 : ((ins_flag
& MONO_INST_TAILCALL
) != 0));
6888 //GSHAREDVT_FAILURE (il_op);
6893 fsig
= mini_get_signature (method
, token
, generic_context
, cfg
->error
);
6896 if (method
->dynamic
&& fsig
->pinvoke
) {
6900 * This is a call through a function pointer using a pinvoke
6901 * signature. Have to create a wrapper and call that instead.
6902 * FIXME: This is very slow, need to create a wrapper at JIT time
6903 * instead based on the signature.
6905 EMIT_NEW_IMAGECONST (cfg
, args
[0], m_class_get_image (method
->klass
));
6906 EMIT_NEW_PCONST (cfg
, args
[1], fsig
);
6909 addr
= mono_emit_jit_icall (cfg
, mono_get_native_calli_wrapper
, args
);
6912 n
= fsig
->param_count
+ fsig
->hasthis
;
6916 //g_assert (!virtual_ || fsig->hasthis);
6920 if (!(cfg
->method
->wrapper_type
&& cfg
->method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
) && check_call_signature (cfg
, fsig
, sp
)) {
6921 if (break_on_unverified ())
6922 check_call_signature (cfg
, fsig
, sp
); // Again, step through it.
6926 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
6929 * Making generic calls out of gsharedvt methods.
6930 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
6931 * patching gshared method addresses into a gsharedvt method.
6933 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
)) {
6935 * We pass the address to the gsharedvt trampoline in the rgctx reg
6938 g_assert (addr
); // Doubles as boolean after tailcall check.
6941 inst_tailcall
&& is_supported_tailcall (cfg
, ip
, method
, NULL
, fsig
,
6942 FALSE
/*virtual irrelevant*/, addr
!= NULL
, &tailcall
);
6945 if (method
->wrapper_type
!= MONO_WRAPPER_DELEGATE_INVOKE
)
6947 GSHAREDVT_FAILURE (il_op
);
6951 GSHAREDVT_FAILURE (il_op
);
6953 addr
= emit_get_rgctx_sig (cfg
, context_used
, fsig
, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI
);
6954 ins
= (MonoInst
*)mini_emit_calli_full (cfg
, fsig
, sp
, addr
, NULL
, callee
, tailcall
);
6958 /* Prevent inlining of methods with indirect calls */
6959 INLINE_FAILURE ("indirect call");
6961 if (addr
->opcode
== OP_PCONST
|| addr
->opcode
== OP_AOTCONST
|| addr
->opcode
== OP_GOT_ENTRY
) {
6962 MonoJumpInfoType info_type
;
6966 * Instead of emitting an indirect call, emit a direct call
6967 * with the contents of the aotconst as the patch info.
6969 if (addr
->opcode
== OP_PCONST
|| addr
->opcode
== OP_AOTCONST
) {
6970 info_type
= (MonoJumpInfoType
)addr
->inst_c1
;
6971 info_data
= addr
->inst_p0
;
6973 info_type
= (MonoJumpInfoType
)addr
->inst_right
->inst_c1
;
6974 info_data
= addr
->inst_right
->inst_left
;
6977 if (info_type
== MONO_PATCH_INFO_ICALL_ADDR
) {
6978 // non-JIT icall, mostly builtin, but also user-extensible
6980 ins
= (MonoInst
*)mini_emit_abs_call (cfg
, MONO_PATCH_INFO_ICALL_ADDR_CALL
, info_data
, fsig
, sp
);
6983 } else if (info_type
== MONO_PATCH_INFO_JIT_ICALL_ADDR
6984 || info_type
== MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR
) {
6986 ins
= (MonoInst
*)mini_emit_abs_call (cfg
, info_type
, info_data
, fsig
, sp
);
6991 ins
= (MonoInst
*)mini_emit_calli_full (cfg
, fsig
, sp
, addr
, NULL
, NULL
, tailcall
);
6995 case MONO_CEE_CALLVIRT
: {
6996 MonoInst
*addr
; addr
= NULL
;
6997 int array_rank
; array_rank
= 0;
6998 gboolean virtual_
; virtual_
= il_op
== MONO_CEE_CALLVIRT
;
6999 gboolean pass_imt_from_rgctx
; pass_imt_from_rgctx
= FALSE
;
7000 MonoInst
*imt_arg
; imt_arg
= NULL
;
7001 gboolean pass_vtable
; pass_vtable
= FALSE
;
7002 gboolean pass_mrgctx
; pass_mrgctx
= FALSE
;
7003 MonoInst
*vtable_arg
; vtable_arg
= NULL
;
7004 gboolean check_this
; check_this
= FALSE
;
7005 gboolean delegate_invoke
; delegate_invoke
= FALSE
;
7006 gboolean direct_icall
; direct_icall
= FALSE
;
7007 gboolean tailcall_calli
; tailcall_calli
= FALSE
;
7008 gboolean noreturn
; noreturn
= FALSE
;
7010 // Variables shared by CEE_CALLI and CEE_CALL/CEE_CALLVIRT.
7011 common_call
= FALSE
;
7013 // variables to help in assertions
7014 gboolean called_is_supported_tailcall
; called_is_supported_tailcall
= FALSE
;
7015 MonoMethod
*tailcall_method
; tailcall_method
= NULL
;
7016 MonoMethod
*tailcall_cmethod
; tailcall_cmethod
= NULL
;
7017 MonoMethodSignature
*tailcall_fsig
; tailcall_fsig
= NULL
;
7018 gboolean tailcall_virtual
; tailcall_virtual
= FALSE
;
7019 gboolean tailcall_extra_arg
; tailcall_extra_arg
= FALSE
;
7021 gboolean inst_tailcall
; inst_tailcall
= G_UNLIKELY (debug_tailcall_try_all
7022 ? (next_ip
< end
&& next_ip
[0] == CEE_RET
)
7023 : ((ins_flag
& MONO_INST_TAILCALL
) != 0));
7026 /* Used to pass arguments to called functions */
7027 HandleCallData cdata
;
7028 memset (&cdata
, 0, sizeof (HandleCallData
));
7030 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
7033 if (cfg
->verbose_level
> 3)
7034 printf ("cmethod = %s\n", mono_method_get_full_name (cmethod
));
7036 MonoMethod
*cil_method
; cil_method
= cmethod
;
7038 if (constrained_class
) {
7039 gboolean constrained_is_generic_param
=
7040 m_class_get_byval_arg (constrained_class
)->type
== MONO_TYPE_VAR
||
7041 m_class_get_byval_arg (constrained_class
)->type
== MONO_TYPE_MVAR
;
7043 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
7044 if (cfg
->verbose_level
> 2)
7045 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class
));
7046 if (!(constrained_is_generic_param
&&
7048 cmethod
= mono_get_method_constrained_with_method (image
, cil_method
, constrained_class
, generic_context
, cfg
->error
);
7052 if (cfg
->verbose_level
> 2)
7053 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class
));
7055 if (constrained_is_generic_param
&& cfg
->gshared
) {
7057 * This is needed since get_method_constrained can't find
7058 * the method in klass representing a type var.
7059 * The type var is guaranteed to be a reference type in this
7062 if (!mini_is_gsharedvt_klass (constrained_class
))
7063 g_assert (!m_class_is_valuetype (cmethod
->klass
));
7065 cmethod
= mono_get_method_constrained_checked (image
, token
, constrained_class
, generic_context
, &cil_method
, cfg
->error
);
7070 if (m_class_is_enumtype (constrained_class
) && !strcmp (cmethod
->name
, "GetHashCode")) {
7071 /* Use the corresponding method from the base type to avoid boxing */
7072 MonoType
*base_type
= mono_class_enum_basetype_internal (constrained_class
);
7073 g_assert (base_type
);
7074 constrained_class
= mono_class_from_mono_type_internal (base_type
);
7075 cmethod
= get_method_nofail (constrained_class
, cmethod
->name
, 0, 0);
7080 if (!dont_verify
&& !cfg
->skip_visibility
) {
7081 MonoMethod
*target_method
= cil_method
;
7082 if (method
->is_inflated
) {
7083 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
), cfg
->error
);
7086 if (!mono_method_can_access_method (method_definition
, target_method
) &&
7087 !mono_method_can_access_method (method
, cil_method
))
7088 emit_method_access_failure (cfg
, method
, cil_method
);
7091 if (mono_security_core_clr_enabled ())
7092 ensure_method_is_allowed_to_call_method (cfg
, method
, cil_method
);
7094 if (!virtual_
&& (cmethod
->flags
& METHOD_ATTRIBUTE_ABSTRACT
)) {
7095 if (!mono_class_is_interface (method
->klass
))
7096 emit_bad_image_failure (cfg
, method
, cil_method
);
7103 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7104 * converts to a callvirt.
7106 * tests/bug-515884.il is an example of this behavior
7108 const int test_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
| METHOD_ATTRIBUTE_STATIC
;
7109 const int expected_flags
= METHOD_ATTRIBUTE_VIRTUAL
| METHOD_ATTRIBUTE_FINAL
;
7110 if (!virtual_
&& mono_class_is_marshalbyref (cmethod
->klass
) && (cmethod
->flags
& test_flags
) == expected_flags
&& cfg
->method
->wrapper_type
== MONO_WRAPPER_NONE
)
7114 if (!m_class_is_inited (cmethod
->klass
))
7115 if (!mono_class_init_internal (cmethod
->klass
))
7116 TYPE_LOAD_ERROR (cmethod
->klass
);
7118 fsig
= mono_method_signature_internal (cmethod
);
7121 if (cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
&&
7122 mini_class_is_system_array (cmethod
->klass
)) {
7123 array_rank
= m_class_get_rank (cmethod
->klass
);
7124 } else if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) && direct_icalls_enabled (cfg
, cmethod
)) {
7125 direct_icall
= TRUE
;
7126 } else if (fsig
->pinvoke
) {
7127 MonoMethod
*wrapper
= mono_marshal_get_native_wrapper (cmethod
, TRUE
, cfg
->compile_aot
);
7128 fsig
= mono_method_signature_internal (wrapper
);
7129 } else if (constrained_class
) {
7131 fsig
= mono_method_get_signature_checked (cmethod
, image
, token
, generic_context
, cfg
->error
);
7135 if (cfg
->llvm_only
&& !cfg
->method
->wrapper_type
&& (!cmethod
|| cmethod
->is_inflated
))
7136 cfg
->signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->signatures
, fsig
);
7138 /* See code below */
7139 if (cmethod
->klass
== mono_defaults
.monitor_class
&& !strcmp (cmethod
->name
, "Enter") && mono_method_signature_internal (cmethod
)->param_count
== 1) {
7140 MonoBasicBlock
*tbb
;
7142 GET_BBLOCK (cfg
, tbb
, next_ip
);
7143 if (tbb
->try_start
&& MONO_REGION_FLAGS(tbb
->region
) == MONO_EXCEPTION_CLAUSE_FINALLY
) {
7145 * We want to extend the try block to cover the call, but we can't do it if the
7146 * call is made directly since its followed by an exception check.
7148 direct_icall
= FALSE
;
7152 mono_save_token_info (cfg
, image
, token
, cil_method
);
7154 if (!(seq_point_locs
&& mono_bitset_test_fast (seq_point_locs
, next_ip
- header
->code
)))
7155 need_seq_point
= TRUE
;
7157 /* Don't support calls made using type arguments for now */
7159 if (cfg->gsharedvt) {
7160 if (mini_is_gsharedvt_signature (fsig))
7161 GSHAREDVT_FAILURE (il_op);
7165 if (cmethod
->string_ctor
&& method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
)
7166 g_assert_not_reached ();
7168 n
= fsig
->param_count
+ fsig
->hasthis
;
7170 if (!cfg
->gshared
&& mono_class_is_gtd (cmethod
->klass
))
7174 g_assert (!mono_method_check_context_used (cmethod
));
7178 //g_assert (!virtual_ || fsig->hasthis);
7182 if (virtual_
&& cmethod
&& sp
[0]->opcode
== OP_TYPED_OBJREF
) {
7185 MonoMethod
*new_cmethod
= mono_class_get_virtual_method (sp
[0]->klass
, cmethod
, FALSE
, error
);
7186 mono_error_assert_ok (error
);
7187 cmethod
= new_cmethod
;
7191 if (cmethod
&& method_does_not_return (cmethod
)) {
7192 cfg
->cbb
->out_of_line
= TRUE
;
7196 cdata
.method
= method
;
7197 cdata
.inst_tailcall
= inst_tailcall
;
7200 * We have the `constrained.' prefix opcode.
7202 if (constrained_class
) {
7203 ins
= handle_constrained_call (cfg
, cmethod
, fsig
, constrained_class
, sp
, &cdata
, &cmethod
, &virtual_
, &emit_widen
);
7204 CHECK_CFG_EXCEPTION
;
7205 constrained_class
= NULL
;
7210 for (int i
= 0; i
< fsig
->param_count
; ++i
)
7211 sp
[i
+ fsig
->hasthis
] = convert_value (cfg
, fsig
->params
[i
], sp
[i
+ fsig
->hasthis
]);
7213 if (check_call_signature (cfg
, fsig
, sp
)) {
7214 if (break_on_unverified ())
7215 check_call_signature (cfg
, fsig
, sp
); // Again, step through it.
7219 if ((m_class_get_parent (cmethod
->klass
) == mono_defaults
.multicastdelegate_class
) && !strcmp (cmethod
->name
, "Invoke"))
7220 delegate_invoke
= TRUE
;
7222 if ((cfg
->opt
& MONO_OPT_INTRINS
) && (ins
= mini_emit_inst_for_sharable_method (cfg
, cmethod
, fsig
, sp
))) {
7223 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7224 mini_type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
7228 if (inst_tailcall
) // FIXME
7229 mono_tailcall_print ("missed tailcall intrins_sharable %s -> %s\n", method
->name
, cmethod
->name
);
7234 * Implement a workaround for the inherent races involved in locking:
7240 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7241 * try block, the Exit () won't be executed, see:
7242 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7243 * To work around this, we extend such try blocks to include the last x bytes
7244 * of the Monitor.Enter () call.
7246 if (cmethod
->klass
== mono_defaults
.monitor_class
&& !strcmp (cmethod
->name
, "Enter") && mono_method_signature_internal (cmethod
)->param_count
== 1) {
7247 MonoBasicBlock
*tbb
;
7249 GET_BBLOCK (cfg
, tbb
, next_ip
);
7251 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7252 * from Monitor.Enter like ArgumentNullException.
7254 if (tbb
->try_start
&& MONO_REGION_FLAGS(tbb
->region
) == MONO_EXCEPTION_CLAUSE_FINALLY
) {
7255 /* Mark this bblock as needing to be extended */
7256 tbb
->extend_try_block
= TRUE
;
7260 /* Conversion to a JIT intrinsic */
7261 if ((ins
= mini_emit_inst_for_method (cfg
, cmethod
, fsig
, sp
))) {
7262 if (!MONO_TYPE_IS_VOID (fsig
->ret
)) {
7263 mini_type_to_eval_stack_type ((cfg
), fsig
->ret
, ins
);
7266 // FIXME This is only missed if in fact the intrinsic involves a call.
7267 if (inst_tailcall
) // FIXME
7268 mono_tailcall_print ("missed tailcall intrins %s -> %s\n", method
->name
, cmethod
->name
);
7274 * If the callee is a shared method, then its static cctor
7275 * might not get called after the call was patched.
7277 if (cfg
->gshared
&& cmethod
->klass
!= method
->klass
&& mono_class_is_ginst (cmethod
->klass
) && mono_method_is_generic_sharable (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
7278 emit_class_init (cfg
, cmethod
->klass
);
7279 CHECK_TYPELOAD (cmethod
->klass
);
7282 check_method_sharing (cfg
, cmethod
, &pass_vtable
, &pass_mrgctx
);
7285 MonoGenericContext
*cmethod_context
= mono_method_get_context (cmethod
);
7287 context_used
= mini_method_check_context_used (cfg
, cmethod
);
7289 if (context_used
&& mono_class_is_interface (cmethod
->klass
)) {
7290 /* Generic method interface
7291 calls are resolved via a
7292 helper function and don't
7294 if (!cmethod_context
|| !cmethod_context
->method_inst
)
7295 pass_imt_from_rgctx
= TRUE
;
7299 * If a shared method calls another
7300 * shared method then the caller must
7301 * have a generic sharing context
7302 * because the magic trampoline
7303 * requires it. FIXME: We shouldn't
7304 * have to force the vtable/mrgctx
7305 * variable here. Instead there
7306 * should be a flag in the cfg to
7307 * request a generic sharing context.
7310 ((cfg
->method
->flags
& METHOD_ATTRIBUTE_STATIC
) || m_class_is_valuetype (cfg
->method
->klass
)))
7311 mono_get_vtable_var (cfg
);
7316 vtable_arg
= mini_emit_get_rgctx_klass (cfg
, context_used
, cmethod
->klass
, MONO_RGCTX_INFO_VTABLE
);
7318 MonoVTable
*vtable
= mono_class_vtable_checked (cfg
->domain
, cmethod
->klass
, cfg
->error
);
7321 CHECK_TYPELOAD (cmethod
->klass
);
7322 EMIT_NEW_VTABLECONST (cfg
, vtable_arg
, vtable
);
7327 g_assert (!vtable_arg
);
7329 if (!cfg
->compile_aot
) {
7331 * emit_get_rgctx_method () calls mono_class_vtable () so check
7332 * for type load errors before.
7334 mono_class_setup_vtable (cmethod
->klass
);
7335 CHECK_TYPELOAD (cmethod
->klass
);
7338 vtable_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_RGCTX
);
7340 /* !marshalbyref is needed to properly handle generic methods + remoting */
7341 if ((!(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) ||
7342 MONO_METHOD_IS_FINAL (cmethod
)) &&
7343 !mono_class_is_marshalbyref (cmethod
->klass
)) {
7350 if (pass_imt_from_rgctx
) {
7351 g_assert (!pass_vtable
);
7353 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
7354 cmethod
, MONO_RGCTX_INFO_METHOD
);
7359 MONO_EMIT_NEW_CHECK_THIS (cfg
, sp
[0]->dreg
);
7361 /* Calling virtual generic methods */
7363 // These temporaries help detangle "pure" computation of
7364 // inputs to is_supported_tailcall from side effects, so that
7365 // is_supported_tailcall can be computed just once.
7366 gboolean virtual_generic
; virtual_generic
= FALSE
;
7367 gboolean virtual_generic_imt
; virtual_generic_imt
= FALSE
;
7369 if (virtual_
&& (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) &&
7370 !(MONO_METHOD_IS_FINAL (cmethod
) &&
7371 cmethod
->wrapper_type
!= MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
) &&
7372 fsig
->generic_param_count
&&
7373 !(cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
)) &&
7376 g_assert (fsig
->is_inflated
);
7378 virtual_generic
= TRUE
;
7380 /* Prevent inlining of methods that contain indirect calls */
7381 INLINE_FAILURE ("virtual generic call");
7383 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
))
7384 GSHAREDVT_FAILURE (il_op
);
7386 if (cfg
->backend
->have_generalized_imt_trampoline
&& cfg
->backend
->gshared_supported
&& cmethod
->wrapper_type
== MONO_WRAPPER_NONE
) {
7387 virtual_generic_imt
= TRUE
;
7388 g_assert (!imt_arg
);
7390 g_assert (cmethod
->is_inflated
);
7392 imt_arg
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
7400 // Capture some intent before computing tailcall.
7402 gboolean make_generic_call_out_of_gsharedvt_method
;
7403 gboolean will_have_imt_arg
;
7405 make_generic_call_out_of_gsharedvt_method
= FALSE
;
7406 will_have_imt_arg
= FALSE
;
7409 * Making generic calls out of gsharedvt methods.
7410 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7411 * patching gshared method addresses into a gsharedvt method.
7413 if (cfg
->gsharedvt
&& (mini_is_gsharedvt_signature (fsig
) || cmethod
->is_inflated
|| mono_class_is_ginst (cmethod
->klass
)) &&
7414 !(m_class_get_rank (cmethod
->klass
) && m_class_get_byval_arg (cmethod
->klass
)->type
!= MONO_TYPE_SZARRAY
) &&
7415 (!(cfg
->llvm_only
&& virtual_
&& (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
)))) {
7417 make_generic_call_out_of_gsharedvt_method
= TRUE
;
7420 if (fsig
->generic_param_count
) {
7421 will_have_imt_arg
= TRUE
;
7422 } else if (mono_class_is_interface (cmethod
->klass
) && !imt_arg
) {
7423 will_have_imt_arg
= TRUE
;
7428 #ifdef ENABLE_NETCORE
7429 if (save_last_error
) {
7430 mono_emit_jit_icall (cfg
, mono_marshal_clear_last_error
, NULL
);
7434 /* Tail prefix / tailcall optimization */
7436 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests.
7437 Inlining and stack traces are not guaranteed however. */
7438 /* FIXME: runtime generic context pointer for jumps? */
7439 /* FIXME: handle this for generic sharing eventually */
7441 // tailcall means "the backend can and will handle it".
7442 // inst_tailcall means the tail. prefix is present.
7443 tailcall_extra_arg
= vtable_arg
|| imt_arg
|| will_have_imt_arg
|| mono_class_is_interface (cmethod
->klass
);
7444 tailcall
= inst_tailcall
&& is_supported_tailcall (cfg
, ip
, method
, cmethod
, fsig
,
7445 virtual_
, tailcall_extra_arg
, &tailcall_calli
);
7446 // Writes to imt_arg, vtable_arg, virtual_, cmethod, must not occur from here (inputs to is_supported_tailcall).
7447 // Capture values to later assert they don't change.
7448 called_is_supported_tailcall
= TRUE
;
7449 tailcall_method
= method
;
7450 tailcall_cmethod
= cmethod
;
7451 tailcall_fsig
= fsig
;
7452 tailcall_virtual
= virtual_
;
7454 if (virtual_generic
) {
7455 if (virtual_generic_imt
) {
7457 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7458 INLINE_FAILURE ("tailcall");
7464 MonoInst
*this_temp
, *this_arg_temp
, *store
;
7465 MonoInst
*iargs
[4];
7467 this_temp
= mono_compile_create_var (cfg
, type_from_stack_type (sp
[0]), OP_LOCAL
);
7468 NEW_TEMPSTORE (cfg
, store
, this_temp
->inst_c0
, sp
[0]);
7469 MONO_ADD_INS (cfg
->cbb
, store
);
7471 /* FIXME: This should be a managed pointer */
7472 this_arg_temp
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
7474 EMIT_NEW_TEMPLOAD (cfg
, iargs
[0], this_temp
->inst_c0
);
7475 iargs
[1] = emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
7477 EMIT_NEW_TEMPLOADA (cfg
, iargs
[2], this_arg_temp
->inst_c0
);
7478 addr
= mono_emit_jit_icall (cfg
, mono_helper_compile_generic_method
, iargs
);
7480 EMIT_NEW_TEMPLOAD (cfg
, sp
[0], this_arg_temp
->inst_c0
);
7482 ins
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, NULL
);
7484 if (inst_tailcall
) // FIXME
7485 mono_tailcall_print ("missed tailcall virtual generic %s -> %s\n", method
->name
, cmethod
->name
);
7491 if ((cfg
->opt
& MONO_OPT_INLINE
) &&
7492 (!virtual_
|| !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) || MONO_METHOD_IS_FINAL (cmethod
)) &&
7493 mono_method_check_inlining (cfg
, cmethod
)) {
7495 gboolean always
= FALSE
;
7497 if ((cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL
) ||
7498 (cmethod
->flags
& METHOD_ATTRIBUTE_PINVOKE_IMPL
)) {
7499 /* Prevent inlining of methods that call wrappers */
7500 INLINE_FAILURE ("wrapper call");
7501 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7502 // Neither pinvoke or icall are likely to be tailcalled.
7503 cmethod
= mono_marshal_get_native_wrapper (cmethod
, TRUE
, FALSE
);
7507 costs
= inline_method (cfg
, cmethod
, fsig
, sp
, ip
, cfg
->real_offset
, always
);
7509 cfg
->real_offset
+= 5;
7511 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7512 /* *sp is already set by inline_method */
7515 inline_costs
+= costs
;
7516 // FIXME This is missed if the inlinee contains tail calls that
7517 // would work, but not once inlined into caller.
7518 // This matchingness could be a factor in inlining.
7519 // i.e. Do not inline if it hurts tailcall, do inline
7520 // if it helps and/or or is neutral, and helps performance
7521 // using usual heuristics.
7522 // Note that inlining will expose multiple tailcall opportunities
7523 // so the tradeoff is not obvious. If we can tailcall anything
7524 // like desktop, then this factor mostly falls away, except
7525 // that inlining can affect tailcall performance due to
7526 // signature match/mismatch.
7527 if (inst_tailcall
) // FIXME
7528 mono_tailcall_print ("missed tailcall inline %s -> %s\n", method
->name
, cmethod
->name
);
7533 /* Tail recursion elimination */
7534 if (((cfg
->opt
& MONO_OPT_TAILCALL
) || inst_tailcall
) && il_op
== MONO_CEE_CALL
&& cmethod
== method
&& next_ip
< end
&& next_ip
[0] == CEE_RET
&& !vtable_arg
) {
7535 gboolean has_vtargs
= FALSE
;
7538 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7539 INLINE_FAILURE ("tailcall");
7541 /* keep it simple */
7542 for (i
= fsig
->param_count
- 1; !has_vtargs
&& i
>= 0; i
--)
7543 has_vtargs
= MONO_TYPE_ISSTRUCT (mono_method_signature_internal (cmethod
)->params
[i
]);
7546 if (need_seq_point
) {
7547 emit_seq_point (cfg
, method
, ip
, FALSE
, TRUE
);
7548 need_seq_point
= FALSE
;
7550 for (i
= 0; i
< n
; ++i
)
7551 EMIT_NEW_ARGSTORE (cfg
, ins
, i
, sp
[i
]);
7553 mini_profiler_emit_tail_call (cfg
, cmethod
);
7555 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7556 MONO_ADD_INS (cfg
->cbb
, ins
);
7557 tblock
= start_bblock
->out_bb
[0];
7558 link_bblock (cfg
, cfg
->cbb
, tblock
);
7559 ins
->inst_target_bb
= tblock
;
7560 start_new_bblock
= 1;
7562 /* skip the CEE_RET, too */
7563 if (ip_in_bb (cfg
, cfg
->cbb
, next_ip
))
7566 need_seq_point
= FALSE
;
7571 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
7574 * Synchronized wrappers.
7575 * Its hard to determine where to replace a method with its synchronized
7576 * wrapper without causing an infinite recursion. The current solution is
7577 * to add the synchronized wrapper in the trampolines, and to
7578 * change the called method to a dummy wrapper, and resolve that wrapper
7579 * to the real method in mono_jit_compile_method ().
7581 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
7582 MonoMethod
*orig
= mono_marshal_method_from_wrapper (cfg
->method
);
7583 if (cmethod
== orig
|| (cmethod
->is_inflated
&& mono_method_get_declaring_generic_method (cmethod
) == orig
)) {
7584 // FIXME? Does this write to cmethod impact tailcall_supported? Probably not.
7585 cmethod
= mono_marshal_get_synchronized_inner_wrapper (cmethod
);
7590 * Making generic calls out of gsharedvt methods.
7591 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
7592 * patching gshared method addresses into a gsharedvt method.
7594 if (make_generic_call_out_of_gsharedvt_method
) {
7596 //if (mono_class_is_interface (cmethod->klass))
7597 //GSHAREDVT_FAILURE (il_op);
7598 // disable for possible remoting calls
7599 if (fsig
->hasthis
&& (mono_class_is_marshalbyref (method
->klass
) || method
->klass
== mono_defaults
.object_class
))
7600 GSHAREDVT_FAILURE (il_op
);
7601 if (fsig
->generic_param_count
) {
7602 /* virtual generic call */
7603 g_assert (!imt_arg
);
7604 g_assert (will_have_imt_arg
);
7605 /* Same as the virtual generic case above */
7606 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
7607 cmethod
, MONO_RGCTX_INFO_METHOD
);
7609 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
7611 } else if (mono_class_is_interface (cmethod
->klass
) && !imt_arg
) {
7612 /* This can happen when we call a fully instantiated iface method */
7613 g_assert (will_have_imt_arg
);
7614 imt_arg
= emit_get_rgctx_method (cfg
, context_used
,
7615 cmethod
, MONO_RGCTX_INFO_METHOD
);
7621 if ((m_class_get_parent (cmethod
->klass
) == mono_defaults
.multicastdelegate_class
) && (!strcmp (cmethod
->name
, "Invoke")))
7622 keep_this_alive
= sp
[0];
7624 MonoRgctxInfoType info_type
;
7626 if (virtual_
&& (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))
7627 info_type
= MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT
;
7629 info_type
= MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE
;
7630 addr
= emit_get_rgctx_gsharedvt_call (cfg
, context_used
, fsig
, cmethod
, info_type
);
7632 if (cfg
->llvm_only
) {
7633 // FIXME: Avoid initializing vtable_arg
7634 ins
= mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
7635 if (inst_tailcall
) // FIXME
7636 mono_tailcall_print ("missed tailcall llvmonly gsharedvt %s -> %s\n", method
->name
, cmethod
->name
);
7638 tailcall
= tailcall_calli
;
7639 ins
= (MonoInst
*)mini_emit_calli_full (cfg
, fsig
, sp
, addr
, imt_arg
, vtable_arg
, tailcall
);
7640 tailcall_remove_ret
|= tailcall
;
7645 /* Generic sharing */
7648 * Use this if the callee is gsharedvt sharable too, since
7649 * at runtime we might find an instantiation so the call cannot
7650 * be patched (the 'no_patch' code path in mini-trampolines.c).
7652 if (context_used
&& !imt_arg
&& !array_rank
&& !delegate_invoke
&&
7653 (!mono_method_is_generic_sharable_full (cmethod
, TRUE
, FALSE
, FALSE
) ||
7654 !mono_class_generic_sharing_enabled (cmethod
->klass
)) &&
7655 (!virtual_
|| MONO_METHOD_IS_FINAL (cmethod
) ||
7656 !(cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
))) {
7657 INLINE_FAILURE ("gshared");
7659 g_assert (cfg
->gshared
&& cmethod
);
7663 * We are compiling a call to a
7664 * generic method from shared code,
7665 * which means that we have to look up
7666 * the method in the rgctx and do an
7670 MONO_EMIT_NEW_CHECK_THIS (cfg
, sp
[0]->dreg
);
7672 if (cfg
->llvm_only
) {
7673 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_signature (fsig
))
7674 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER
);
7676 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD_FTNDESC
);
7677 // FIXME: Avoid initializing imt_arg/vtable_arg
7678 ins
= mini_emit_llvmonly_calli (cfg
, fsig
, sp
, addr
);
7679 if (inst_tailcall
) // FIXME
7680 mono_tailcall_print ("missed tailcall context_used_llvmonly %s -> %s\n", method
->name
, cmethod
->name
);
7682 addr
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_GENERIC_METHOD_CODE
);
7684 mono_tailcall_print ("%s tailcall_calli#2 %s -> %s\n", tailcall_calli
? "making" : "missed", method
->name
, cmethod
->name
);
7685 tailcall
= tailcall_calli
;
7686 ins
= (MonoInst
*)mini_emit_calli_full (cfg
, fsig
, sp
, addr
, imt_arg
, vtable_arg
, tailcall
);
7687 tailcall_remove_ret
|= tailcall
;
7692 /* Direct calls to icalls */
7694 MonoMethod
*wrapper
;
7697 /* Inline the wrapper */
7698 wrapper
= mono_marshal_get_native_wrapper (cmethod
, TRUE
, cfg
->compile_aot
);
7700 costs
= inline_method (cfg
, wrapper
, fsig
, sp
, ip
, cfg
->real_offset
, TRUE
);
7701 g_assert (costs
> 0);
7702 cfg
->real_offset
+= 5;
7704 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
7705 /* *sp is already set by inline_method */
7708 inline_costs
+= costs
;
7710 if (inst_tailcall
) // FIXME
7711 mono_tailcall_print ("missed tailcall direct_icall %s -> %s\n", method
->name
, cmethod
->name
);
7719 if (strcmp (cmethod
->name
, "Set") == 0) { /* array Set */
7720 MonoInst
*val
= sp
[fsig
->param_count
];
7722 if (val
->type
== STACK_OBJ
) {
7723 MonoInst
*iargs
[2];
7728 mono_emit_jit_icall (cfg
, mono_helper_stelem_ref_check
, iargs
);
7731 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, TRUE
);
7732 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, ins
, fsig
->params
[fsig
->param_count
- 1], addr
->dreg
, 0, val
->dreg
);
7733 if (cfg
->gen_write_barriers
&& val
->type
== STACK_OBJ
&& !MONO_INS_IS_PCONST_NULL (val
))
7734 mini_emit_write_barrier (cfg
, addr
, val
);
7735 if (cfg
->gen_write_barriers
&& mini_is_gsharedvt_klass (cmethod
->klass
))
7736 GSHAREDVT_FAILURE (il_op
);
7737 } else if (strcmp (cmethod
->name
, "Get") == 0) { /* array Get */
7738 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7740 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, fsig
->ret
, addr
->dreg
, 0);
7741 } else if (strcmp (cmethod
->name
, "Address") == 0) { /* array Address */
7742 if (!m_class_is_valuetype (m_class_get_element_class (cmethod
->klass
)) && !readonly
)
7743 mini_emit_check_array_type (cfg
, sp
[0], cmethod
->klass
);
7744 CHECK_TYPELOAD (cmethod
->klass
);
7747 addr
= mini_emit_ldelema_ins (cfg
, cmethod
, sp
, ip
, FALSE
);
7750 g_assert_not_reached ();
7754 if (inst_tailcall
) // FIXME
7755 mono_tailcall_print ("missed tailcall array_rank %s -> %s\n", method
->name
, cmethod
->name
);
7759 ins
= mini_redirect_call (cfg
, cmethod
, fsig
, sp
, virtual_
? sp
[0] : NULL
);
7761 if (inst_tailcall
) // FIXME
7762 mono_tailcall_print ("missed tailcall redirect %s -> %s\n", method
->name
, cmethod
->name
);
7766 /* Tail prefix / tailcall optimization */
7769 /* Prevent inlining of methods with tailcalls (the call stack would be altered) */
7770 INLINE_FAILURE ("tailcall");
7774 * Virtual calls in llvm-only mode.
7776 if (cfg
->llvm_only
&& virtual_
&& cmethod
&& (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
)) {
7777 ins
= mini_emit_llvmonly_virtual_call (cfg
, cmethod
, fsig
, context_used
, sp
);
7782 if (!(method
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
) && !(cmethod
->iflags
& METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING
))
7783 INLINE_FAILURE ("call");
7787 // Check that the decision to tailcall would not have changed.
7788 g_assert (!called_is_supported_tailcall
|| tailcall_method
== method
);
7789 // FIXME? cmethod does change, weaken the assert if we weren't tailcalling anyway.
7790 // If this still fails, restructure the code, or call tailcall_supported again and assert no change.
7791 g_assert (!called_is_supported_tailcall
|| !tailcall
|| tailcall_cmethod
== cmethod
);
7792 g_assert (!called_is_supported_tailcall
|| tailcall_fsig
== fsig
);
7793 g_assert (!called_is_supported_tailcall
|| tailcall_virtual
== virtual_
);
7794 g_assert (!called_is_supported_tailcall
|| tailcall_extra_arg
== (vtable_arg
|| imt_arg
|| will_have_imt_arg
|| mono_class_is_interface (cmethod
->klass
)));
7796 if (common_call
) // FIXME goto call_end && !common_call often skips tailcall processing.
7797 ins
= mini_emit_method_call_full (cfg
, cmethod
, fsig
, tailcall
, sp
, virtual_
? sp
[0] : NULL
,
7798 imt_arg
, vtable_arg
);
7801 * Handle devirt of some A.B.C calls by replacing the result of A.B with a OP_TYPED_OBJREF instruction, so the .C
7802 * call can be devirtualized above.
7805 ins
= handle_call_res_devirt (cfg
, cmethod
, ins
);
7808 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
7809 MONO_ADD_INS (cfg
->cbb
, ins
);
7812 if ((tailcall_remove_ret
|| (common_call
&& tailcall
)) && !cfg
->llvm_only
) {
7813 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
7814 start_new_bblock
= 1;
7816 // FIXME: Eliminate unreachable epilogs
7819 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7820 * only reachable from this call.
7822 GET_BBLOCK (cfg
, tblock
, next_ip
);
7823 if (tblock
== cfg
->cbb
|| tblock
->in_count
== 0)
7826 need_seq_point
= FALSE
;
7829 if (ins_flag
& MONO_INST_TAILCALL
)
7830 mini_test_tailcall (cfg
, tailcall
);
7832 /* End of call, INS should contain the result of the call, if any */
7834 if (push_res
&& !MONO_TYPE_IS_VOID (fsig
->ret
)) {
7837 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
7842 if (save_last_error
) {
7843 save_last_error
= FALSE
;
7845 // Making icalls etc could clobber the value so emit inline code
7846 // to read last error on Windows.
7847 MONO_INST_NEW (cfg
, ins
, OP_GET_LAST_ERROR
);
7848 ins
->dreg
= alloc_dreg (cfg
, STACK_I4
);
7849 ins
->type
= STACK_I4
;
7850 MONO_ADD_INS (cfg
->cbb
, ins
);
7851 mono_emit_jit_icall (cfg
, mono_marshal_set_last_error_windows
, &ins
);
7853 mono_emit_jit_icall (cfg
, mono_marshal_set_last_error
, NULL
);
7857 if (keep_this_alive
) {
7858 MonoInst
*dummy_use
;
7860 /* See mini_emit_method_call_full () */
7861 EMIT_NEW_DUMMY_USE (cfg
, dummy_use
, keep_this_alive
);
7864 if (cfg
->llvm_only
&& cmethod
&& method_needs_stack_walk (cfg
, cmethod
)) {
7866 * Clang can convert these calls to tailcalls which screw up the stack
7867 * walk. This happens even when the -fno-optimize-sibling-calls
7868 * option is passed to clang.
7869 * Work around this by emitting a dummy call.
7871 mono_emit_jit_icall (cfg
, mono_dummy_jit_icall
, NULL
);
7874 CHECK_CFG_EXCEPTION
;
7877 // FIXME When not followed by CEE_RET, correct behavior is to raise an exception.
7878 g_assert (next_ip
[0] == CEE_RET
);
7880 il_op
= MonoOpcodeEnum_Invalid
; // Call or ret? Unclear.
7883 constrained_class
= NULL
;
7885 if (need_seq_point
) {
7886 //check is is a nested call and remove the non_empty_stack of the last call, only for non native methods
7887 if (!(method
->flags
& METHOD_IMPL_ATTRIBUTE_NATIVE
)) {
7888 if (emitted_funccall_seq_point
) {
7889 if (cfg
->last_seq_point
)
7890 cfg
->last_seq_point
->flags
|= MONO_INST_NESTED_CALL
;
7893 emitted_funccall_seq_point
= TRUE
;
7895 emit_seq_point (cfg
, method
, next_ip
, FALSE
, TRUE
);
7900 mini_profiler_emit_leave (cfg
, sig
->ret
->type
!= MONO_TYPE_VOID
? sp
[-1] : NULL
);
7902 g_assert (!method_does_not_return (method
));
7904 if (cfg
->method
!= method
) {
7905 /* return from inlined method */
7907 * If in_count == 0, that means the ret is unreachable due to
7908 * being preceeded by a throw. In that case, inline_method () will
7909 * handle setting the return value
7910 * (test case: test_0_inline_throw ()).
7912 if (return_var
&& cfg
->cbb
->in_count
) {
7913 MonoType
*ret_type
= mono_method_signature_internal (method
)->ret
;
7918 *sp
= convert_value (cfg
, ret_type
, *sp
);
7920 if ((method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
|| method
->wrapper_type
== MONO_WRAPPER_NONE
) && target_type_is_incompatible (cfg
, ret_type
, *sp
))
7923 //g_assert (returnvar != -1);
7924 EMIT_NEW_TEMPSTORE (cfg
, store
, return_var
->inst_c0
, *sp
);
7925 cfg
->ret_var_set
= TRUE
;
7928 if (cfg
->lmf_var
&& cfg
->cbb
->in_count
&& !cfg
->llvm_only
)
7932 MonoType
*ret_type
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
7934 if (seq_points
&& !sym_seq_points
) {
7936 * Place a seq point here too even through the IL stack is not
7937 * empty, so a step over on
7940 * will work correctly.
7942 NEW_SEQ_POINT (cfg
, ins
, ip
- header
->code
, TRUE
);
7943 MONO_ADD_INS (cfg
->cbb
, ins
);
7946 g_assert (!return_var
);
7949 *sp
= convert_value (cfg
, ret_type
, *sp
);
7951 if ((method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
|| method
->wrapper_type
== MONO_WRAPPER_NONE
) && target_type_is_incompatible (cfg
, ret_type
, *sp
))
7954 emit_setret (cfg
, *sp
);
7957 if (sp
!= stack_start
)
7959 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7960 ins
->inst_target_bb
= end_bblock
;
7961 MONO_ADD_INS (cfg
->cbb
, ins
);
7962 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
7963 start_new_bblock
= 1;
7966 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7967 GET_BBLOCK (cfg
, tblock
, target
);
7968 link_bblock (cfg
, cfg
->cbb
, tblock
);
7969 ins
->inst_target_bb
= tblock
;
7970 if (sp
!= stack_start
) {
7971 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
7973 CHECK_UNVERIFIABLE (cfg
);
7975 MONO_ADD_INS (cfg
->cbb
, ins
);
7976 start_new_bblock
= 1;
7977 inline_costs
+= BRANCH_COST
;
7979 case MONO_CEE_BEQ_S
:
7980 case MONO_CEE_BGE_S
:
7981 case MONO_CEE_BGT_S
:
7982 case MONO_CEE_BLE_S
:
7983 case MONO_CEE_BLT_S
:
7984 case MONO_CEE_BNE_UN_S
:
7985 case MONO_CEE_BGE_UN_S
:
7986 case MONO_CEE_BGT_UN_S
:
7987 case MONO_CEE_BLE_UN_S
:
7988 case MONO_CEE_BLT_UN_S
:
7989 MONO_INST_NEW (cfg
, ins
, il_op
+ BIG_BRANCH_OFFSET
);
7994 inline_costs
+= BRANCH_COST
;
7997 MONO_INST_NEW (cfg
, ins
, OP_BR
);
7999 GET_BBLOCK (cfg
, tblock
, target
);
8000 link_bblock (cfg
, cfg
->cbb
, tblock
);
8001 ins
->inst_target_bb
= tblock
;
8002 if (sp
!= stack_start
) {
8003 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8005 CHECK_UNVERIFIABLE (cfg
);
8008 MONO_ADD_INS (cfg
->cbb
, ins
);
8010 start_new_bblock
= 1;
8011 inline_costs
+= BRANCH_COST
;
8013 case MONO_CEE_BRFALSE_S
:
8014 case MONO_CEE_BRTRUE_S
:
8015 case MONO_CEE_BRFALSE
:
8016 case MONO_CEE_BRTRUE
: {
8018 gboolean is_true
= il_op
== MONO_CEE_BRTRUE_S
|| il_op
== MONO_CEE_BRTRUE
;
8020 if (sp
[-1]->type
== STACK_VTYPE
|| sp
[-1]->type
== STACK_R8
)
8025 GET_BBLOCK (cfg
, tblock
, target
);
8026 link_bblock (cfg
, cfg
->cbb
, tblock
);
8027 GET_BBLOCK (cfg
, tblock
, next_ip
);
8028 link_bblock (cfg
, cfg
->cbb
, tblock
);
8030 if (sp
!= stack_start
) {
8031 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8032 CHECK_UNVERIFIABLE (cfg
);
8035 MONO_INST_NEW(cfg
, cmp
, OP_ICOMPARE_IMM
);
8036 cmp
->sreg1
= sp
[0]->dreg
;
8037 type_from_op (cfg
, cmp
, sp
[0], NULL
);
8040 #if SIZEOF_REGISTER == 4
8041 if (cmp
->opcode
== OP_LCOMPARE_IMM
) {
8042 /* Convert it to OP_LCOMPARE */
8043 MONO_INST_NEW (cfg
, ins
, OP_I8CONST
);
8044 ins
->type
= STACK_I8
;
8045 ins
->dreg
= alloc_dreg (cfg
, STACK_I8
);
8047 MONO_ADD_INS (cfg
->cbb
, ins
);
8048 cmp
->opcode
= OP_LCOMPARE
;
8049 cmp
->sreg2
= ins
->dreg
;
8052 MONO_ADD_INS (cfg
->cbb
, cmp
);
8054 MONO_INST_NEW (cfg
, ins
, is_true
? CEE_BNE_UN
: CEE_BEQ
);
8055 type_from_op (cfg
, ins
, sp
[0], NULL
);
8056 MONO_ADD_INS (cfg
->cbb
, ins
);
8057 ins
->inst_many_bb
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (gpointer
) * 2);
8058 GET_BBLOCK (cfg
, tblock
, target
);
8059 ins
->inst_true_bb
= tblock
;
8060 GET_BBLOCK (cfg
, tblock
, next_ip
);
8061 ins
->inst_false_bb
= tblock
;
8062 start_new_bblock
= 2;
8065 inline_costs
+= BRANCH_COST
;
8073 case MONO_CEE_BNE_UN
:
8074 case MONO_CEE_BGE_UN
:
8075 case MONO_CEE_BGT_UN
:
8076 case MONO_CEE_BLE_UN
:
8077 case MONO_CEE_BLT_UN
:
8078 MONO_INST_NEW (cfg
, ins
, il_op
);
8083 inline_costs
+= BRANCH_COST
;
8085 case MONO_CEE_SWITCH
: {
8087 MonoBasicBlock
**targets
;
8088 MonoBasicBlock
*default_bblock
;
8089 MonoJumpInfoBBTable
*table
;
8090 int offset_reg
= alloc_preg (cfg
);
8091 int target_reg
= alloc_preg (cfg
);
8092 int table_reg
= alloc_preg (cfg
);
8093 int sum_reg
= alloc_preg (cfg
);
8094 gboolean use_op_switch
;
8096 n
= read32 (ip
+ 1);
8099 if ((src1
->type
!= STACK_I4
) && (src1
->type
!= STACK_PTR
))
8104 GET_BBLOCK (cfg
, default_bblock
, next_ip
);
8105 default_bblock
->flags
|= BB_INDIRECT_JUMP_TARGET
;
8107 targets
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * n
);
8108 for (i
= 0; i
< n
; ++i
) {
8109 GET_BBLOCK (cfg
, tblock
, next_ip
+ (gint32
)read32 (ip
));
8110 targets
[i
] = tblock
;
8111 targets
[i
]->flags
|= BB_INDIRECT_JUMP_TARGET
;
8115 if (sp
!= stack_start
) {
8117 * Link the current bb with the targets as well, so handle_stack_args
8118 * will set their in_stack correctly.
8120 link_bblock (cfg
, cfg
->cbb
, default_bblock
);
8121 for (i
= 0; i
< n
; ++i
)
8122 link_bblock (cfg
, cfg
->cbb
, targets
[i
]);
8124 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8126 CHECK_UNVERIFIABLE (cfg
);
8128 /* Undo the links */
8129 mono_unlink_bblock (cfg
, cfg
->cbb
, default_bblock
);
8130 for (i
= 0; i
< n
; ++i
)
8131 mono_unlink_bblock (cfg
, cfg
->cbb
, targets
[i
]);
8134 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ICOMPARE_IMM
, -1, src1
->dreg
, n
);
8135 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_IBGE_UN
, default_bblock
);
8137 for (i
= 0; i
< n
; ++i
)
8138 link_bblock (cfg
, cfg
->cbb
, targets
[i
]);
8140 table
= (MonoJumpInfoBBTable
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
8141 table
->table
= targets
;
8142 table
->table_size
= n
;
8144 use_op_switch
= FALSE
;
8146 /* ARM implements SWITCH statements differently */
8147 /* FIXME: Make it use the generic implementation */
8148 if (!cfg
->compile_aot
)
8149 use_op_switch
= TRUE
;
8152 if (COMPILE_LLVM (cfg
))
8153 use_op_switch
= TRUE
;
8155 cfg
->cbb
->has_jump_table
= 1;
8157 if (use_op_switch
) {
8158 MONO_INST_NEW (cfg
, ins
, OP_SWITCH
);
8159 ins
->sreg1
= src1
->dreg
;
8160 ins
->inst_p0
= table
;
8161 ins
->inst_many_bb
= targets
;
8162 ins
->klass
= (MonoClass
*)GUINT_TO_POINTER (n
);
8163 MONO_ADD_INS (cfg
->cbb
, ins
);
8165 if (TARGET_SIZEOF_VOID_P
== 8)
8166 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 3);
8168 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, offset_reg
, src1
->dreg
, 2);
8170 #if SIZEOF_REGISTER == 8
8171 /* The upper word might not be zero, and we add it to a 64 bit address later */
8172 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, offset_reg
, offset_reg
);
8175 if (cfg
->compile_aot
) {
8176 MONO_EMIT_NEW_AOTCONST (cfg
, table_reg
, table
, MONO_PATCH_INFO_SWITCH
);
8178 MONO_INST_NEW (cfg
, ins
, OP_JUMP_TABLE
);
8179 ins
->inst_c1
= MONO_PATCH_INFO_SWITCH
;
8180 ins
->inst_p0
= table
;
8181 ins
->dreg
= table_reg
;
8182 MONO_ADD_INS (cfg
->cbb
, ins
);
8185 /* FIXME: Use load_memindex */
8186 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, sum_reg
, table_reg
, offset_reg
);
8187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, target_reg
, sum_reg
, 0);
8188 MONO_EMIT_NEW_UNALU (cfg
, OP_BR_REG
, -1, target_reg
);
8190 start_new_bblock
= 1;
8191 inline_costs
+= BRANCH_COST
* 2;
8194 case MONO_CEE_LDIND_I1
:
8195 case MONO_CEE_LDIND_U1
:
8196 case MONO_CEE_LDIND_I2
:
8197 case MONO_CEE_LDIND_U2
:
8198 case MONO_CEE_LDIND_I4
:
8199 case MONO_CEE_LDIND_U4
:
8200 case MONO_CEE_LDIND_I8
:
8201 case MONO_CEE_LDIND_I
:
8202 case MONO_CEE_LDIND_R4
:
8203 case MONO_CEE_LDIND_R8
:
8204 case MONO_CEE_LDIND_REF
:
8207 ins
= mini_emit_memory_load (cfg
, m_class_get_byval_arg (ldind_to_type (il_op
)), sp
[0], 0, ins_flag
);
8211 case MONO_CEE_STIND_REF
:
8212 case MONO_CEE_STIND_I1
:
8213 case MONO_CEE_STIND_I2
:
8214 case MONO_CEE_STIND_I4
:
8215 case MONO_CEE_STIND_I8
:
8216 case MONO_CEE_STIND_R4
:
8217 case MONO_CEE_STIND_R8
:
8218 case MONO_CEE_STIND_I
: {
8221 if (ins_flag
& MONO_INST_VOLATILE
) {
8222 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8223 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_REL
);
8226 if (il_op
== MONO_CEE_STIND_R4
&& sp
[1]->type
== STACK_R8
)
8227 sp
[1] = convert_value (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), sp
[1]);
8228 NEW_STORE_MEMBASE (cfg
, ins
, stind_to_store_membase (il_op
), sp
[0]->dreg
, 0, sp
[1]->dreg
);
8229 ins
->flags
|= ins_flag
;
8232 MONO_ADD_INS (cfg
->cbb
, ins
);
8234 if (il_op
== MONO_CEE_STIND_REF
) {
8235 /* stind.ref must only be used with object references. */
8236 if (sp
[1]->type
!= STACK_OBJ
)
8238 if (cfg
->gen_write_barriers
&& method
->wrapper_type
!= MONO_WRAPPER_WRITE_BARRIER
&& !MONO_INS_IS_PCONST_NULL (sp
[1]))
8239 mini_emit_write_barrier (cfg
, sp
[0], sp
[1]);
8246 MONO_INST_NEW (cfg
, ins
, il_op
);
8248 ins
->sreg1
= sp
[0]->dreg
;
8249 ins
->sreg2
= sp
[1]->dreg
;
8250 type_from_op (cfg
, ins
, sp
[0], sp
[1]);
8252 ins
->dreg
= alloc_dreg ((cfg
), (MonoStackType
)(ins
)->type
);
8254 /* Use the immediate opcodes if possible */
8255 int imm_opcode
; imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
8257 if ((sp
[1]->opcode
== OP_ICONST
) && mono_arch_is_inst_imm (ins
->opcode
, imm_opcode
, sp
[1]->inst_c0
)) {
8258 if (imm_opcode
!= -1) {
8259 ins
->opcode
= imm_opcode
;
8260 ins
->inst_p1
= (gpointer
)(gssize
)(sp
[1]->inst_c0
);
8263 NULLIFY_INS (sp
[1]);
8267 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
8269 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8274 case MONO_CEE_DIV_UN
:
8276 case MONO_CEE_REM_UN
:
8282 case MONO_CEE_SHR_UN
: {
8283 MONO_INST_NEW (cfg
, ins
, il_op
);
8285 ins
->sreg1
= sp
[0]->dreg
;
8286 ins
->sreg2
= sp
[1]->dreg
;
8287 type_from_op (cfg
, ins
, sp
[0], sp
[1]);
8289 add_widen_op (cfg
, ins
, &sp
[0], &sp
[1]);
8290 ins
->dreg
= alloc_dreg ((cfg
), (MonoStackType
)(ins
)->type
);
8292 /* Use the immediate opcodes if possible */
8293 int imm_opcode
; imm_opcode
= mono_op_to_op_imm_noemul (ins
->opcode
);
8295 if (((sp
[1]->opcode
== OP_ICONST
) || (sp
[1]->opcode
== OP_I8CONST
)) &&
8296 mono_arch_is_inst_imm (ins
->opcode
, imm_opcode
, sp
[1]->opcode
== OP_ICONST
? sp
[1]->inst_c0
: sp
[1]->inst_l
)) {
8297 if (imm_opcode
!= -1) {
8298 ins
->opcode
= imm_opcode
;
8299 if (sp
[1]->opcode
== OP_I8CONST
) {
8300 #if SIZEOF_REGISTER == 8
8301 ins
->inst_imm
= sp
[1]->inst_l
;
8303 ins
->inst_l
= sp
[1]->inst_l
;
8306 ins
->inst_imm
= (gssize
)(sp
[1]->inst_c0
);
8310 /* Might be followed by an instruction added by add_widen_op */
8311 if (sp
[1]->next
== NULL
)
8312 NULLIFY_INS (sp
[1]);
8315 MONO_ADD_INS ((cfg
)->cbb
, (ins
));
8317 *sp
++ = mono_decompose_opcode (cfg
, ins
);
8322 case MONO_CEE_CONV_I1
:
8323 case MONO_CEE_CONV_I2
:
8324 case MONO_CEE_CONV_I4
:
8325 case MONO_CEE_CONV_R4
:
8326 case MONO_CEE_CONV_R8
:
8327 case MONO_CEE_CONV_U4
:
8328 case MONO_CEE_CONV_I8
:
8329 case MONO_CEE_CONV_U8
:
8330 case MONO_CEE_CONV_OVF_I8
:
8331 case MONO_CEE_CONV_OVF_U8
:
8332 case MONO_CEE_CONV_R_UN
:
8333 /* Special case this earlier so we have long constants in the IR */
8334 if ((il_op
== MONO_CEE_CONV_I8
|| il_op
== MONO_CEE_CONV_U8
) && (sp
[-1]->opcode
== OP_ICONST
)) {
8335 int data
= sp
[-1]->inst_c0
;
8336 sp
[-1]->opcode
= OP_I8CONST
;
8337 sp
[-1]->type
= STACK_I8
;
8338 #if SIZEOF_REGISTER == 8
8339 if (il_op
== MONO_CEE_CONV_U8
)
8340 sp
[-1]->inst_c0
= (guint32
)data
;
8342 sp
[-1]->inst_c0
= data
;
8344 if (il_op
== MONO_CEE_CONV_U8
)
8345 sp
[-1]->inst_l
= (guint32
)data
;
8347 sp
[-1]->inst_l
= data
;
8349 sp
[-1]->dreg
= alloc_dreg (cfg
, STACK_I8
);
8355 case MONO_CEE_CONV_OVF_I4
:
8356 case MONO_CEE_CONV_OVF_I1
:
8357 case MONO_CEE_CONV_OVF_I2
:
8358 case MONO_CEE_CONV_OVF_I
:
8359 case MONO_CEE_CONV_OVF_U
:
8360 if (sp
[-1]->type
== STACK_R8
|| sp
[-1]->type
== STACK_R4
) {
8361 ADD_UNOP (CEE_CONV_OVF_I8
);
8367 case MONO_CEE_CONV_OVF_U1
:
8368 case MONO_CEE_CONV_OVF_U2
:
8369 case MONO_CEE_CONV_OVF_U4
:
8370 if (sp
[-1]->type
== STACK_R8
|| sp
[-1]->type
== STACK_R4
) {
8371 ADD_UNOP (CEE_CONV_OVF_U8
);
8377 case MONO_CEE_CONV_OVF_I1_UN
:
8378 case MONO_CEE_CONV_OVF_I2_UN
:
8379 case MONO_CEE_CONV_OVF_I4_UN
:
8380 case MONO_CEE_CONV_OVF_I8_UN
:
8381 case MONO_CEE_CONV_OVF_U1_UN
:
8382 case MONO_CEE_CONV_OVF_U2_UN
:
8383 case MONO_CEE_CONV_OVF_U4_UN
:
8384 case MONO_CEE_CONV_OVF_U8_UN
:
8385 case MONO_CEE_CONV_OVF_I_UN
:
8386 case MONO_CEE_CONV_OVF_U_UN
:
8387 case MONO_CEE_CONV_U2
:
8388 case MONO_CEE_CONV_U1
:
8389 case MONO_CEE_CONV_I
:
8390 case MONO_CEE_CONV_U
:
8392 CHECK_CFG_EXCEPTION
;
8394 case MONO_CEE_ADD_OVF
:
8395 case MONO_CEE_ADD_OVF_UN
:
8396 case MONO_CEE_MUL_OVF
:
8397 case MONO_CEE_MUL_OVF_UN
:
8398 case MONO_CEE_SUB_OVF
:
8399 case MONO_CEE_SUB_OVF_UN
:
8402 case MONO_CEE_CPOBJ
:
8403 GSHAREDVT_FAILURE (il_op
);
8404 GSHAREDVT_FAILURE (*ip
);
8405 klass
= mini_get_class (method
, token
, generic_context
);
8406 CHECK_TYPELOAD (klass
);
8408 mini_emit_memory_copy (cfg
, sp
[0], sp
[1], klass
, FALSE
, ins_flag
);
8411 case MONO_CEE_LDOBJ
: {
8416 klass
= mini_get_class (method
, token
, generic_context
);
8417 CHECK_TYPELOAD (klass
);
8419 /* Optimize the common ldobj+stloc combination */
8420 if (next_ip
< end
) {
8421 switch (next_ip
[0]) {
8422 case MONO_CEE_STLOC_S
:
8424 loc_index
= next_ip
[1];
8427 case MONO_CEE_STLOC_0
:
8428 case MONO_CEE_STLOC_1
:
8429 case MONO_CEE_STLOC_2
:
8430 case MONO_CEE_STLOC_3
:
8431 loc_index
= next_ip
[0] - CEE_STLOC_0
;
8439 if ((loc_index
!= -1) && ip_in_bb (cfg
, cfg
->cbb
, next_ip
)) {
8440 CHECK_LOCAL (loc_index
);
8442 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), sp
[0]->dreg
, 0);
8443 ins
->dreg
= cfg
->locals
[loc_index
]->dreg
;
8444 ins
->flags
|= ins_flag
;
8445 il_op
= (MonoOpcodeEnum
)next_ip
[0];
8446 next_ip
+= stloc_len
;
8447 if (ins_flag
& MONO_INST_VOLATILE
) {
8448 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8449 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_ACQ
);
8455 /* Optimize the ldobj+stobj combination */
8456 if (next_ip
+ 4 < end
&& next_ip
[0] == CEE_STOBJ
&& ip_in_bb (cfg
, cfg
->cbb
, next_ip
) && read32 (next_ip
+ 1) == token
) {
8461 mini_emit_memory_copy (cfg
, sp
[0], sp
[1], klass
, FALSE
, ins_flag
);
8463 il_op
= (MonoOpcodeEnum
)next_ip
[0];
8469 ins
= mini_emit_memory_load (cfg
, m_class_get_byval_arg (klass
), sp
[0], 0, ins_flag
);
8476 case MONO_CEE_LDSTR
:
8477 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
) {
8478 EMIT_NEW_PCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, n
));
8479 ins
->type
= STACK_OBJ
;
8482 else if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
8483 MonoInst
*iargs
[1];
8484 char *str
= (char *)mono_method_get_wrapper_data (method
, n
);
8486 if (cfg
->compile_aot
)
8487 EMIT_NEW_LDSTRLITCONST (cfg
, iargs
[0], str
);
8489 EMIT_NEW_PCONST (cfg
, iargs
[0], str
);
8490 *sp
= mono_emit_jit_icall (cfg
, mono_string_new_wrapper_internal
, iargs
);
8492 if (cfg
->opt
& MONO_OPT_SHARED
) {
8493 MonoInst
*iargs
[3];
8495 if (cfg
->compile_aot
) {
8496 cfg
->ldstr_list
= g_list_prepend (cfg
->ldstr_list
, GINT_TO_POINTER (n
));
8498 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
8499 EMIT_NEW_IMAGECONST (cfg
, iargs
[1], image
);
8500 EMIT_NEW_ICONST (cfg
, iargs
[2], mono_metadata_token_index (n
));
8501 *sp
= mono_emit_jit_icall (cfg
, ves_icall_mono_ldstr
, iargs
);
8502 mono_ldstr_checked (cfg
->domain
, image
, mono_metadata_token_index (n
), cfg
->error
);
8505 if (cfg
->cbb
->out_of_line
) {
8506 MonoInst
*iargs
[2];
8508 if (image
== mono_defaults
.corlib
) {
8510 * Avoid relocations in AOT and save some space by using a
8511 * version of helper_ldstr specialized to mscorlib.
8513 EMIT_NEW_ICONST (cfg
, iargs
[0], mono_metadata_token_index (n
));
8514 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr_mscorlib
, iargs
);
8516 /* Avoid creating the string object */
8517 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
8518 EMIT_NEW_ICONST (cfg
, iargs
[1], mono_metadata_token_index (n
));
8519 *sp
= mono_emit_jit_icall (cfg
, mono_helper_ldstr
, iargs
);
8523 if (cfg
->compile_aot
) {
8524 NEW_LDSTRCONST (cfg
, ins
, image
, n
);
8526 MONO_ADD_INS (cfg
->cbb
, ins
);
8529 NEW_PCONST (cfg
, ins
, NULL
);
8530 ins
->type
= STACK_OBJ
;
8531 ins
->inst_p0
= mono_ldstr_checked (cfg
->domain
, image
, mono_metadata_token_index (n
), cfg
->error
);
8535 OUT_OF_MEMORY_FAILURE
;
8538 MONO_ADD_INS (cfg
->cbb
, ins
);
8545 case MONO_CEE_NEWOBJ
: {
8546 MonoInst
*iargs
[2];
8547 MonoMethodSignature
*fsig
;
8550 MonoInst
*vtable_arg
= NULL
;
8552 cmethod
= mini_get_method (cfg
, method
, token
, NULL
, generic_context
);
8555 fsig
= mono_method_get_signature_checked (cmethod
, image
, token
, generic_context
, cfg
->error
);
8558 mono_save_token_info (cfg
, image
, token
, cmethod
);
8560 if (!mono_class_init_internal (cmethod
->klass
))
8561 TYPE_LOAD_ERROR (cmethod
->klass
);
8563 context_used
= mini_method_check_context_used (cfg
, cmethod
);
8565 if (!dont_verify
&& !cfg
->skip_visibility
) {
8566 MonoMethod
*cil_method
= cmethod
;
8567 MonoMethod
*target_method
= cil_method
;
8569 if (method
->is_inflated
) {
8570 target_method
= mini_get_method_allow_open (method
, token
, NULL
, &(mono_method_get_generic_container (method_definition
)->context
), cfg
->error
);
8574 if (!mono_method_can_access_method (method_definition
, target_method
) &&
8575 !mono_method_can_access_method (method
, cil_method
))
8576 emit_method_access_failure (cfg
, method
, cil_method
);
8579 if (mono_security_core_clr_enabled ())
8580 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
);
8582 if (cfg
->gshared
&& cmethod
&& cmethod
->klass
!= method
->klass
&& mono_class_is_ginst (cmethod
->klass
) && mono_method_is_generic_sharable (cmethod
, TRUE
) && mono_class_needs_cctor_run (cmethod
->klass
, method
)) {
8583 emit_class_init (cfg
, cmethod
->klass
);
8584 CHECK_TYPELOAD (cmethod
->klass
);
8588 if (cfg->gsharedvt) {
8589 if (mini_is_gsharedvt_variable_signature (sig))
8590 GSHAREDVT_FAILURE (il_op);
8594 n
= fsig
->param_count
;
8598 * Generate smaller code for the common newobj <exception> instruction in
8599 * argument checking code.
8601 if (cfg
->cbb
->out_of_line
&& m_class_get_image (cmethod
->klass
) == mono_defaults
.corlib
&&
8602 is_exception_class (cmethod
->klass
) && n
<= 2 &&
8603 ((n
< 1) || (!fsig
->params
[0]->byref
&& fsig
->params
[0]->type
== MONO_TYPE_STRING
)) &&
8604 ((n
< 2) || (!fsig
->params
[1]->byref
&& fsig
->params
[1]->type
== MONO_TYPE_STRING
))) {
8605 MonoInst
*iargs
[3];
8609 EMIT_NEW_ICONST (cfg
, iargs
[0], m_class_get_type_token (cmethod
->klass
));
8612 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_0
, iargs
);
8616 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_1
, iargs
);
8621 *sp
++ = mono_emit_jit_icall (cfg
, mono_create_corlib_exception_2
, iargs
);
8624 g_assert_not_reached ();
8631 /* move the args to allow room for 'this' in the first position */
8637 for (int i
= 0; i
< fsig
->param_count
; ++i
)
8638 sp
[i
+ fsig
->hasthis
] = convert_value (cfg
, fsig
->params
[i
], sp
[i
+ fsig
->hasthis
]);
8640 /* check_call_signature () requires sp[0] to be set */
8641 this_ins
.type
= STACK_OBJ
;
8643 if (check_call_signature (cfg
, fsig
, sp
))
8648 if (mini_class_is_system_array (cmethod
->klass
)) {
8649 *sp
= emit_get_rgctx_method (cfg
, context_used
,
8650 cmethod
, MONO_RGCTX_INFO_METHOD
);
8651 /* Optimize the common cases */
8652 MonoJitICallId function
= MONO_JIT_ICALL_ZeroIsReserved
;;
8653 int n
= fsig
->param_count
;
8655 case 1: function
= MONO_JIT_ICALL_mono_array_new_1
;
8657 case 2: function
= MONO_JIT_ICALL_mono_array_new_2
;
8659 case 3: function
= MONO_JIT_ICALL_mono_array_new_3
;
8661 case 4: function
= MONO_JIT_ICALL_mono_array_new_4
;
8664 // FIXME Maximum value of param_count? Realistically 64. Fits in imm?
8665 if (!array_new_localalloc_ins
) {
8666 MONO_INST_NEW (cfg
, array_new_localalloc_ins
, OP_LOCALLOC_IMM
);
8667 array_new_localalloc_ins
->dreg
= alloc_preg (cfg
);
8668 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
8669 MONO_ADD_INS (init_localsbb
, array_new_localalloc_ins
);
8671 array_new_localalloc_ins
->inst_imm
= MAX (array_new_localalloc_ins
->inst_imm
, n
* sizeof (target_mgreg_t
));
8672 int dreg
= array_new_localalloc_ins
->dreg
;
8673 for (int i
= 0; i
< n
; ++i
) {
8674 NEW_STORE_MEMBASE (cfg
, ins
, OP_STORE_MEMBASE_REG
, dreg
, i
* sizeof (target_mgreg_t
), sp
[i
+ 1]->dreg
);
8675 MONO_ADD_INS (cfg
->cbb
, ins
);
8677 EMIT_NEW_ICONST (cfg
, ins
, n
);
8679 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, alloc_preg (cfg
), dreg
);
8680 ins
->type
= STACK_PTR
;
8682 // FIXME Adjust sp by n - 3? Attempts failed.
8683 function
= MONO_JIT_ICALL_mono_array_new_n_icall
;
8686 alloc
= mono_emit_jit_icall_id (cfg
, function
, sp
);
8687 } else if (cmethod
->string_ctor
) {
8688 g_assert (!context_used
);
8689 g_assert (!vtable_arg
);
8690 /* we simply pass a null pointer */
8691 EMIT_NEW_PCONST (cfg
, *sp
, NULL
);
8692 /* now call the string ctor */
8693 alloc
= mini_emit_method_call_full (cfg
, cmethod
, fsig
, FALSE
, sp
, NULL
, NULL
, NULL
);
8695 if (m_class_is_valuetype (cmethod
->klass
)) {
8696 iargs
[0] = mono_compile_create_var (cfg
, m_class_get_byval_arg (cmethod
->klass
), OP_LOCAL
);
8697 emit_init_rvar (cfg
, iargs
[0]->dreg
, m_class_get_byval_arg (cmethod
->klass
));
8698 EMIT_NEW_TEMPLOADA (cfg
, *sp
, iargs
[0]->inst_c0
);
8703 * The code generated by mini_emit_virtual_call () expects
8704 * iargs [0] to be a boxed instance, but luckily the vcall
8705 * will be transformed into a normal call there.
8707 } else if (context_used
) {
8708 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, context_used
);
8711 MonoVTable
*vtable
= NULL
;
8713 if (!cfg
->compile_aot
)
8714 vtable
= mono_class_vtable_checked (cfg
->domain
, cmethod
->klass
, cfg
->error
);
8716 CHECK_TYPELOAD (cmethod
->klass
);
8719 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8720 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8721 * As a workaround, we call class cctors before allocating objects.
8723 if (mini_field_access_needs_cctor_run (cfg
, method
, cmethod
->klass
, vtable
) && !(g_slist_find (class_inits
, cmethod
->klass
))) {
8724 emit_class_init (cfg
, cmethod
->klass
);
8725 if (cfg
->verbose_level
> 2)
8726 printf ("class %s.%s needs init call for ctor\n", m_class_get_name_space (cmethod
->klass
), m_class_get_name (cmethod
->klass
));
8727 class_inits
= g_slist_prepend (class_inits
, cmethod
->klass
);
8730 alloc
= handle_alloc (cfg
, cmethod
->klass
, FALSE
, 0);
8733 CHECK_CFG_EXCEPTION
; /*for handle_alloc*/
8736 MONO_EMIT_NEW_UNALU (cfg
, OP_NOT_NULL
, -1, alloc
->dreg
);
8738 /* Now call the actual ctor */
8739 handle_ctor_call (cfg
, cmethod
, fsig
, context_used
, sp
, ip
, &inline_costs
);
8740 CHECK_CFG_EXCEPTION
;
8743 if (alloc
== NULL
) {
8745 EMIT_NEW_TEMPLOAD (cfg
, ins
, iargs
[0]->inst_c0
);
8746 mini_type_to_eval_stack_type (cfg
, m_class_get_byval_arg (ins
->klass
), ins
);
8753 if (!(seq_point_locs
&& mono_bitset_test_fast (seq_point_locs
, next_ip
- header
->code
)))
8754 emit_seq_point (cfg
, method
, next_ip
, FALSE
, TRUE
);
8757 case MONO_CEE_CASTCLASS
:
8758 case MONO_CEE_ISINST
: {
8760 klass
= mini_get_class (method
, token
, generic_context
);
8761 CHECK_TYPELOAD (klass
);
8762 if (sp
[0]->type
!= STACK_OBJ
)
8765 MONO_INST_NEW (cfg
, ins
, (il_op
== MONO_CEE_ISINST
) ? OP_ISINST
: OP_CASTCLASS
);
8766 ins
->dreg
= alloc_preg (cfg
);
8767 ins
->sreg1
= (*sp
)->dreg
;
8769 ins
->type
= STACK_OBJ
;
8770 MONO_ADD_INS (cfg
->cbb
, ins
);
8772 CHECK_CFG_EXCEPTION
;
8775 cfg
->flags
|= MONO_CFG_HAS_TYPE_CHECK
;
8778 case MONO_CEE_UNBOX_ANY
: {
8779 MonoInst
*res
, *addr
;
8782 klass
= mini_get_class (method
, token
, generic_context
);
8783 CHECK_TYPELOAD (klass
);
8785 mono_save_token_info (cfg
, image
, token
, klass
);
8787 context_used
= mini_class_check_context_used (cfg
, klass
);
8789 if (mini_is_gsharedvt_klass (klass
)) {
8790 res
= handle_unbox_gsharedvt (cfg
, klass
, *sp
);
8792 } else if (mini_class_is_reference (klass
)) {
8793 if (MONO_INS_IS_PCONST_NULL (*sp
)) {
8794 EMIT_NEW_PCONST (cfg
, res
, NULL
);
8795 res
->type
= STACK_OBJ
;
8797 MONO_INST_NEW (cfg
, res
, OP_CASTCLASS
);
8798 res
->dreg
= alloc_preg (cfg
);
8799 res
->sreg1
= (*sp
)->dreg
;
8801 res
->type
= STACK_OBJ
;
8802 MONO_ADD_INS (cfg
->cbb
, res
);
8803 cfg
->flags
|= MONO_CFG_HAS_TYPE_CHECK
;
8805 } else if (mono_class_is_nullable (klass
)) {
8806 res
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
8808 addr
= handle_unbox (cfg
, klass
, sp
, context_used
);
8810 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr
->dreg
, 0);
8818 case MONO_CEE_BOX
: {
8820 MonoClass
*enum_class
;
8821 MonoMethod
*has_flag
;
8825 klass
= mini_get_class (method
, token
, generic_context
);
8826 CHECK_TYPELOAD (klass
);
8828 mono_save_token_info (cfg
, image
, token
, klass
);
8830 context_used
= mini_class_check_context_used (cfg
, klass
);
8832 if (mini_class_is_reference (klass
)) {
8837 val
= convert_value (cfg
, m_class_get_byval_arg (klass
), val
);
8839 if (klass
== mono_defaults
.void_class
)
8841 if (target_type_is_incompatible (cfg
, m_class_get_byval_arg (klass
), val
))
8843 /* frequent check in generic code: box (struct), brtrue */
8848 * <push int/long ptr>
8851 * constrained. MyFlags
8852 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
8854 * If we find this sequence and the operand types on box and constrained
8855 * are equal, we can emit a specialized instruction sequence instead of
8856 * the very slow HasFlag () call.
8857 * This code sequence is generated by older mcs/csc, the newer one is handled in
8858 * emit_inst_for_method ().
8860 guint32 constrained_token
;
8861 guint32 callvirt_token
;
8863 if ((cfg
->opt
& MONO_OPT_INTRINS
) &&
8864 // FIXME ip_in_bb as we go?
8865 next_ip
< end
&& ip_in_bb (cfg
, cfg
->cbb
, next_ip
) &&
8866 (ip
= il_read_constrained (next_ip
, end
, &constrained_token
)) &&
8867 ip_in_bb (cfg
, cfg
->cbb
, ip
) &&
8868 (ip
= il_read_callvirt (ip
, end
, &callvirt_token
)) &&
8869 ip_in_bb (cfg
, cfg
->cbb
, ip
) &&
8870 m_class_is_enumtype (klass
) &&
8871 (enum_class
= mini_get_class (method
, constrained_token
, generic_context
)) &&
8872 (has_flag
= mini_get_method (cfg
, method
, callvirt_token
, NULL
, generic_context
)) &&
8873 has_flag
->klass
== mono_defaults
.enum_class
&&
8874 !strcmp (has_flag
->name
, "HasFlag") &&
8875 has_flag
->signature
->hasthis
&&
8876 has_flag
->signature
->param_count
== 1) {
8877 CHECK_TYPELOAD (enum_class
);
8879 if (enum_class
== klass
) {
8880 MonoInst
*enum_this
, *enum_flag
;
8883 il_op
= MONO_CEE_CALLVIRT
;
8889 *sp
++ = mini_handle_enum_has_flag (cfg
, klass
, enum_this
, -1, enum_flag
);
8894 guint32 unbox_any_token
;
8897 * Common in generic code:
8898 * box T1, unbox.any T2.
8900 if ((cfg
->opt
& MONO_OPT_INTRINS
) &&
8901 next_ip
< end
&& ip_in_bb (cfg
, cfg
->cbb
, next_ip
) &&
8902 (ip
= il_read_unbox_any (next_ip
, end
, &unbox_any_token
))) {
8903 MonoClass
*unbox_klass
= mini_get_class (method
, unbox_any_token
, generic_context
);
8904 CHECK_TYPELOAD (unbox_klass
);
8906 if (klass
== unbox_klass
) {
8915 // FIXME: LLVM can't handle the inconsistent bb linking
8916 if (!mono_class_is_nullable (klass
) &&
8917 !mini_is_gsharedvt_klass (klass
) &&
8918 next_ip
< end
&& ip_in_bb (cfg
, cfg
->cbb
, next_ip
) &&
8919 ( (is_true
= !!(ip
= il_read_brtrue (next_ip
, end
, &target
))) ||
8920 (is_true
= !!(ip
= il_read_brtrue_s (next_ip
, end
, &target
))) ||
8921 (ip
= il_read_brfalse (next_ip
, end
, &target
)) ||
8922 (ip
= il_read_brfalse_s (next_ip
, end
, &target
)))) {
8925 MonoBasicBlock
*true_bb
, *false_bb
;
8927 il_op
= (MonoOpcodeEnum
)next_ip
[0];
8930 if (cfg
->verbose_level
> 3) {
8931 printf ("converting (in B%d: stack: %d) %s", cfg
->cbb
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
, NULL
));
8932 printf ("<box+brtrue opt>\n");
8936 * We need to link both bblocks, since it is needed for handling stack
8937 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8938 * Branching to only one of them would lead to inconsistencies, so
8939 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8941 GET_BBLOCK (cfg
, true_bb
, target
);
8942 GET_BBLOCK (cfg
, false_bb
, next_ip
);
8944 mono_link_bblock (cfg
, cfg
->cbb
, true_bb
);
8945 mono_link_bblock (cfg
, cfg
->cbb
, false_bb
);
8947 if (sp
!= stack_start
) {
8948 handle_stack_args (cfg
, stack_start
, sp
- stack_start
);
8950 CHECK_UNVERIFIABLE (cfg
);
8953 if (COMPILE_LLVM (cfg
)) {
8954 dreg
= alloc_ireg (cfg
);
8955 MONO_EMIT_NEW_ICONST (cfg
, dreg
, 0);
8956 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, dreg
, is_true
? 0 : 1);
8958 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg
, OP_IBEQ
, true_bb
, false_bb
);
8960 /* The JIT can't eliminate the iconst+compare */
8961 MONO_INST_NEW (cfg
, ins
, OP_BR
);
8962 ins
->inst_target_bb
= is_true
? true_bb
: false_bb
;
8963 MONO_ADD_INS (cfg
->cbb
, ins
);
8966 start_new_bblock
= 1;
8970 if (m_class_is_enumtype (klass
) && !mini_is_gsharedvt_klass (klass
) && !(val
->type
== STACK_I8
&& TARGET_SIZEOF_VOID_P
== 4)) {
8971 /* Can't do this with 64 bit enums on 32 bit since the vtype decomp pass is ran after the long decomp pass */
8972 if (val
->opcode
== OP_ICONST
) {
8973 MONO_INST_NEW (cfg
, ins
, OP_BOX_ICONST
);
8974 ins
->type
= STACK_OBJ
;
8976 ins
->inst_c0
= val
->inst_c0
;
8977 ins
->dreg
= alloc_dreg (cfg
, (MonoStackType
)val
->type
);
8979 MONO_INST_NEW (cfg
, ins
, OP_BOX
);
8980 ins
->type
= STACK_OBJ
;
8982 ins
->sreg1
= val
->dreg
;
8983 ins
->dreg
= alloc_dreg (cfg
, (MonoStackType
)val
->type
);
8985 MONO_ADD_INS (cfg
->cbb
, ins
);
8987 /* Create domainvar early so it gets initialized earlier than this code */
8988 if (cfg
->opt
& MONO_OPT_SHARED
)
8989 mono_get_domainvar (cfg
);
8991 *sp
++ = mini_emit_box (cfg
, val
, klass
, context_used
);
8993 CHECK_CFG_EXCEPTION
;
8997 case MONO_CEE_UNBOX
: {
8999 klass
= mini_get_class (method
, token
, generic_context
);
9000 CHECK_TYPELOAD (klass
);
9002 mono_save_token_info (cfg
, image
, token
, klass
);
9004 context_used
= mini_class_check_context_used (cfg
, klass
);
9006 if (mono_class_is_nullable (klass
)) {
9009 val
= handle_unbox_nullable (cfg
, *sp
, klass
, context_used
);
9010 EMIT_NEW_VARLOADA (cfg
, ins
, get_vreg_to_inst (cfg
, val
->dreg
), m_class_get_byval_arg (val
->klass
));
9014 ins
= handle_unbox (cfg
, klass
, sp
, context_used
);
9020 case MONO_CEE_LDFLD
:
9021 case MONO_CEE_LDFLDA
:
9022 case MONO_CEE_STFLD
:
9023 case MONO_CEE_LDSFLD
:
9024 case MONO_CEE_LDSFLDA
:
9025 case MONO_CEE_STSFLD
: {
9026 MonoClassField
*field
;
9027 #ifndef DISABLE_REMOTING
9031 gboolean is_instance
;
9032 gpointer addr
= NULL
;
9033 gboolean is_special_static
;
9035 MonoInst
*store_val
= NULL
;
9036 MonoInst
*thread_ins
;
9038 is_instance
= (il_op
== MONO_CEE_LDFLD
|| il_op
== MONO_CEE_LDFLDA
|| il_op
== MONO_CEE_STFLD
);
9040 if (il_op
== MONO_CEE_STFLD
) {
9046 if (sp
[0]->type
== STACK_I4
|| sp
[0]->type
== STACK_I8
|| sp
[0]->type
== STACK_R8
)
9048 if (il_op
!= MONO_CEE_LDFLD
&& sp
[0]->type
== STACK_VTYPE
)
9051 if (il_op
== MONO_CEE_STSFLD
) {
9057 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
9058 field
= (MonoClassField
*)mono_method_get_wrapper_data (method
, token
);
9059 klass
= field
->parent
;
9062 field
= mono_field_from_token_checked (image
, token
, &klass
, generic_context
, cfg
->error
);
9065 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_field (method
, field
))
9066 FIELD_ACCESS_FAILURE (method
, field
);
9067 mono_class_init_internal (klass
);
9069 /* if the class is Critical then transparent code cannot access it's fields */
9070 if (!is_instance
&& mono_security_core_clr_enabled ())
9071 ensure_method_is_allowed_to_access_field (cfg
, method
, field
);
9073 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9074 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9075 if (mono_security_core_clr_enabled ())
9076 ensure_method_is_allowed_to_access_field (cfg, method, field);
9079 ftype
= mono_field_get_type_internal (field
);
9082 * LDFLD etc. is usable on static fields as well, so convert those cases to
9085 if (is_instance
&& ftype
->attrs
& FIELD_ATTRIBUTE_STATIC
) {
9087 case MONO_CEE_LDFLD
:
9088 il_op
= MONO_CEE_LDSFLD
;
9090 case MONO_CEE_STFLD
:
9091 il_op
= MONO_CEE_STSFLD
;
9093 case MONO_CEE_LDFLDA
:
9094 il_op
= MONO_CEE_LDSFLDA
;
9097 g_assert_not_reached ();
9099 is_instance
= FALSE
;
9102 context_used
= mini_class_check_context_used (cfg
, klass
);
9104 if (il_op
== MONO_CEE_LDSFLD
) {
9105 ins
= mini_emit_inst_for_field_load (cfg
, field
);
9108 goto field_access_end
;
9114 foffset
= m_class_is_valuetype (klass
) ? field
->offset
- MONO_ABI_SIZEOF (MonoObject
): field
->offset
;
9115 if (il_op
== MONO_CEE_STFLD
) {
9116 sp
[1] = convert_value (cfg
, field
->type
, sp
[1]);
9117 if (target_type_is_incompatible (cfg
, field
->type
, sp
[1]))
9119 #ifndef DISABLE_REMOTING
9120 if ((mono_class_is_marshalbyref (klass
) && !MONO_CHECK_THIS (sp
[0])) || mono_class_is_contextbound (klass
) || klass
== mono_defaults
.marshalbyrefobject_class
) {
9121 MonoMethod
*stfld_wrapper
= mono_marshal_get_stfld_wrapper (field
->type
);
9122 MonoInst
*iargs
[5];
9124 GSHAREDVT_FAILURE (il_op
);
9127 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9128 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
9129 EMIT_NEW_ICONST (cfg
, iargs
[3], m_class_is_valuetype (klass
) ? field
->offset
- MONO_ABI_SIZEOF (MonoObject
) :
9133 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
9134 costs
= inline_method (cfg
, stfld_wrapper
, mono_method_signature_internal (stfld_wrapper
),
9135 iargs
, ip
, cfg
->real_offset
, TRUE
);
9136 CHECK_CFG_EXCEPTION
;
9137 g_assert (costs
> 0);
9139 cfg
->real_offset
+= 5;
9141 inline_costs
+= costs
;
9143 mono_emit_method_call (cfg
, stfld_wrapper
, iargs
, NULL
);
9150 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
, foffset
> mono_target_pagesize ());
9152 if (ins_flag
& MONO_INST_VOLATILE
) {
9153 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9154 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_REL
);
9157 if (mini_is_gsharedvt_klass (klass
)) {
9158 MonoInst
*offset_ins
;
9160 context_used
= mini_class_check_context_used (cfg
, klass
);
9162 offset_ins
= emit_get_gsharedvt_info (cfg
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9163 /* The value is offset by 1 */
9164 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PSUB_IMM
, offset_ins
->dreg
, offset_ins
->dreg
, 1);
9165 dreg
= alloc_ireg_mp (cfg
);
9166 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, sp
[0]->dreg
, offset_ins
->dreg
);
9167 if (cfg
->gen_write_barriers
&& mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !MONO_INS_IS_PCONST_NULL (sp
[1])) {
9168 store
= mini_emit_storing_write_barrier (cfg
, ins
, sp
[1]);
9170 /* The decomposition will call mini_emit_memory_copy () which will emit a wbarrier if needed */
9171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, dreg
, 0, sp
[1]->dreg
);
9174 if (cfg
->gen_write_barriers
&& mini_type_to_stind (cfg
, field
->type
) == CEE_STIND_REF
&& !MONO_INS_IS_PCONST_NULL (sp
[1])) {
9175 /* insert call to write barrier */
9179 dreg
= alloc_ireg_mp (cfg
);
9180 EMIT_NEW_BIALU_IMM (cfg
, ptr
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
9181 store
= mini_emit_storing_write_barrier (cfg
, ptr
, sp
[1]);
9183 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, field
->type
, sp
[0]->dreg
, foffset
, sp
[1]->dreg
);
9187 if (sp
[0]->opcode
!= OP_LDADDR
)
9188 store
->flags
|= MONO_INST_FAULT
;
9190 store
->flags
|= ins_flag
;
9192 goto field_access_end
;
9195 #ifndef DISABLE_REMOTING
9196 if (is_instance
&& ((mono_class_is_marshalbyref (klass
) && !MONO_CHECK_THIS (sp
[0])) || mono_class_is_contextbound (klass
) || klass
== mono_defaults
.marshalbyrefobject_class
)) {
9197 MonoMethod
*wrapper
= (il_op
== MONO_CEE_LDFLDA
) ? mono_marshal_get_ldflda_wrapper (field
->type
) : mono_marshal_get_ldfld_wrapper (field
->type
);
9198 MonoInst
*iargs
[4];
9200 GSHAREDVT_FAILURE (il_op
);
9203 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9204 EMIT_NEW_FIELDCONST (cfg
, iargs
[2], field
);
9205 EMIT_NEW_ICONST (cfg
, iargs
[3], m_class_is_valuetype (klass
) ? field
->offset
- MONO_ABI_SIZEOF (MonoObject
) : field
->offset
);
9206 if (cfg
->opt
& MONO_OPT_INLINE
|| cfg
->compile_aot
) {
9207 costs
= inline_method (cfg
, wrapper
, mono_method_signature_internal (wrapper
),
9208 iargs
, ip
, cfg
->real_offset
, TRUE
);
9209 CHECK_CFG_EXCEPTION
;
9210 g_assert (costs
> 0);
9212 cfg
->real_offset
+= 5;
9216 inline_costs
+= costs
;
9218 ins
= mono_emit_method_call (cfg
, wrapper
, iargs
, NULL
);
9224 if (sp
[0]->type
== STACK_VTYPE
) {
9227 /* Have to compute the address of the variable */
9229 var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9231 var
= mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (klass
), OP_LOCAL
, sp
[0]->dreg
);
9233 g_assert (var
->klass
== klass
);
9235 EMIT_NEW_VARLOADA (cfg
, ins
, var
, m_class_get_byval_arg (var
->klass
));
9239 if (il_op
== MONO_CEE_LDFLDA
) {
9240 if (sp
[0]->type
== STACK_OBJ
) {
9241 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, sp
[0]->dreg
, 0);
9242 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "NullReferenceException");
9245 dreg
= alloc_ireg_mp (cfg
);
9247 if (mini_is_gsharedvt_klass (klass
)) {
9248 MonoInst
*offset_ins
;
9250 offset_ins
= emit_get_gsharedvt_info (cfg
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9251 /* The value is offset by 1 */
9252 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PSUB_IMM
, offset_ins
->dreg
, offset_ins
->dreg
, 1);
9253 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, sp
[0]->dreg
, offset_ins
->dreg
);
9255 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, dreg
, sp
[0]->dreg
, foffset
);
9257 ins
->klass
= mono_class_from_mono_type_internal (field
->type
);
9258 ins
->type
= STACK_MP
;
9263 MONO_EMIT_NULL_CHECK (cfg
, sp
[0]->dreg
, foffset
> mono_target_pagesize ());
9265 #ifdef MONO_ARCH_SIMD_INTRINSICS
9266 if (sp
[0]->opcode
== OP_LDADDR
&& m_class_is_simd_type (klass
) && cfg
->opt
& MONO_OPT_SIMD
) {
9267 ins
= mono_emit_simd_field_load (cfg
, field
, sp
[0]);
9270 goto field_access_end
;
9275 MonoInst
*field_add_inst
= sp
[0];
9276 if (mini_is_gsharedvt_klass (klass
)) {
9277 MonoInst
*offset_ins
;
9279 offset_ins
= emit_get_gsharedvt_info (cfg
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9280 /* The value is offset by 1 */
9281 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PSUB_IMM
, offset_ins
->dreg
, offset_ins
->dreg
, 1);
9282 EMIT_NEW_BIALU (cfg
, field_add_inst
, OP_PADD
, alloc_ireg_mp (cfg
), sp
[0]->dreg
, offset_ins
->dreg
);
9286 load
= mini_emit_memory_load (cfg
, field
->type
, field_add_inst
, foffset
, ins_flag
);
9288 if (sp
[0]->opcode
!= OP_LDADDR
)
9289 load
->flags
|= MONO_INST_FAULT
;
9295 goto field_access_end
;
9298 context_used
= mini_class_check_context_used (cfg
, klass
);
9300 if (ftype
->attrs
& FIELD_ATTRIBUTE_LITERAL
) {
9301 mono_error_set_field_missing (cfg
->error
, field
->parent
, field
->name
, NULL
, "Using static instructions with literal field");
9305 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9306 * to be called here.
9308 if (!context_used
&& !(cfg
->opt
& MONO_OPT_SHARED
)) {
9309 mono_class_vtable_checked (cfg
->domain
, klass
, cfg
->error
);
9311 CHECK_TYPELOAD (klass
);
9313 mono_domain_lock (cfg
->domain
);
9314 if (cfg
->domain
->special_static_fields
)
9315 addr
= g_hash_table_lookup (cfg
->domain
->special_static_fields
, field
);
9316 mono_domain_unlock (cfg
->domain
);
9318 is_special_static
= mono_class_field_is_special_static (field
);
9320 if (is_special_static
&& ((gsize
)addr
& 0x80000000) == 0)
9321 thread_ins
= mono_create_tls_get (cfg
, TLS_KEY_THREAD
);
9325 /* Generate IR to compute the field address */
9326 if (is_special_static
&& ((gsize
)addr
& 0x80000000) == 0 && thread_ins
&& !(cfg
->opt
& MONO_OPT_SHARED
) && !context_used
) {
9328 * Fast access to TLS data
9329 * Inline version of get_thread_static_data () in
9333 int idx
, static_data_reg
, array_reg
, dreg
;
9335 if (context_used
&& cfg
->gsharedvt
&& mini_is_gsharedvt_klass (klass
))
9336 GSHAREDVT_FAILURE (il_op
);
9338 static_data_reg
= alloc_ireg (cfg
);
9339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, static_data_reg
, thread_ins
->dreg
, MONO_STRUCT_OFFSET (MonoInternalThread
, static_data
));
9341 if (cfg
->compile_aot
) {
9342 int offset_reg
, offset2_reg
, idx_reg
;
9344 /* For TLS variables, this will return the TLS offset */
9345 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
9346 offset_reg
= ins
->dreg
;
9347 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset_reg
, offset_reg
, 0x7fffffff);
9348 idx_reg
= alloc_ireg (cfg
);
9349 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, idx_reg
, offset_reg
, 0x3f);
9350 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHL_IMM
, idx_reg
, idx_reg
, TARGET_SIZEOF_VOID_P
== 8 ? 3 : 2);
9351 MONO_EMIT_NEW_BIALU (cfg
, OP_PADD
, static_data_reg
, static_data_reg
, idx_reg
);
9352 array_reg
= alloc_ireg (cfg
);
9353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, 0);
9354 offset2_reg
= alloc_ireg (cfg
);
9355 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ISHR_UN_IMM
, offset2_reg
, offset_reg
, 6);
9356 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_IAND_IMM
, offset2_reg
, offset2_reg
, 0x1ffffff);
9357 dreg
= alloc_ireg (cfg
);
9358 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, array_reg
, offset2_reg
);
9360 offset
= (gsize
)addr
& 0x7fffffff;
9361 idx
= offset
& 0x3f;
9363 array_reg
= alloc_ireg (cfg
);
9364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, array_reg
, static_data_reg
, idx
* TARGET_SIZEOF_VOID_P
);
9365 dreg
= alloc_ireg (cfg
);
9366 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_ADD_IMM
, dreg
, array_reg
, ((offset
>> 6) & 0x1ffffff));
9368 } else if ((cfg
->opt
& MONO_OPT_SHARED
) ||
9369 (cfg
->compile_aot
&& is_special_static
) ||
9370 (context_used
&& is_special_static
)) {
9371 MonoInst
*iargs
[2];
9373 g_assert (field
->parent
);
9374 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9376 iargs
[1] = emit_get_rgctx_field (cfg
, context_used
,
9377 field
, MONO_RGCTX_INFO_CLASS_FIELD
);
9379 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
9381 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
9382 } else if (context_used
) {
9383 MonoInst
*static_data
;
9386 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9387 method->klass->name_space, method->klass->name, method->name,
9388 depth, field->offset);
9391 if (mono_class_needs_cctor_run (klass
, method
))
9392 emit_class_init (cfg
, klass
);
9395 * The pointer we're computing here is
9397 * super_info.static_data + field->offset
9399 static_data
= mini_emit_get_rgctx_klass (cfg
, context_used
,
9400 klass
, MONO_RGCTX_INFO_STATIC_DATA
);
9402 if (mini_is_gsharedvt_klass (klass
)) {
9403 MonoInst
*offset_ins
;
9405 offset_ins
= emit_get_rgctx_field (cfg
, context_used
, field
, MONO_RGCTX_INFO_FIELD_OFFSET
);
9406 /* The value is offset by 1 */
9407 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PSUB_IMM
, offset_ins
->dreg
, offset_ins
->dreg
, 1);
9408 dreg
= alloc_ireg_mp (cfg
);
9409 EMIT_NEW_BIALU (cfg
, ins
, OP_PADD
, dreg
, static_data
->dreg
, offset_ins
->dreg
);
9410 } else if (field
->offset
== 0) {
9413 int addr_reg
= mono_alloc_preg (cfg
);
9414 EMIT_NEW_BIALU_IMM (cfg
, ins
, OP_PADD_IMM
, addr_reg
, static_data
->dreg
, field
->offset
);
9416 } else if ((cfg
->opt
& MONO_OPT_SHARED
) || (cfg
->compile_aot
&& addr
)) {
9417 MonoInst
*iargs
[2];
9419 g_assert (field
->parent
);
9420 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9421 EMIT_NEW_FIELDCONST (cfg
, iargs
[1], field
);
9422 ins
= mono_emit_jit_icall (cfg
, mono_class_static_field_address
, iargs
);
9424 MonoVTable
*vtable
= NULL
;
9426 if (!cfg
->compile_aot
)
9427 vtable
= mono_class_vtable_checked (cfg
->domain
, klass
, cfg
->error
);
9429 CHECK_TYPELOAD (klass
);
9432 if (mini_field_access_needs_cctor_run (cfg
, method
, klass
, vtable
)) {
9433 if (!(g_slist_find (class_inits
, klass
))) {
9434 emit_class_init (cfg
, klass
);
9435 if (cfg
->verbose_level
> 2)
9436 printf ("class %s.%s needs init call for %s\n", m_class_get_name_space (klass
), m_class_get_name (klass
), mono_field_get_name (field
));
9437 class_inits
= g_slist_prepend (class_inits
, klass
);
9440 if (cfg
->run_cctors
) {
9441 /* This makes so that inline cannot trigger */
9442 /* .cctors: too many apps depend on them */
9443 /* running with a specific order... */
9445 if (!vtable
->initialized
&& m_class_has_cctor (vtable
->klass
))
9446 INLINE_FAILURE ("class init");
9447 if (!mono_runtime_class_init_full (vtable
, cfg
->error
)) {
9448 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
9449 goto exception_exit
;
9453 if (cfg
->compile_aot
)
9454 EMIT_NEW_SFLDACONST (cfg
, ins
, field
);
9457 addr
= (char*)mono_vtable_get_static_field_data (vtable
) + field
->offset
;
9459 EMIT_NEW_PCONST (cfg
, ins
, addr
);
9462 MonoInst
*iargs
[1];
9463 EMIT_NEW_ICONST (cfg
, iargs
[0], GPOINTER_TO_UINT (addr
));
9464 ins
= mono_emit_jit_icall (cfg
, mono_get_special_static_data
, iargs
);
9468 /* Generate IR to do the actual load/store operation */
9470 if ((il_op
== MONO_CEE_STFLD
|| il_op
== MONO_CEE_STSFLD
) && (ins_flag
& MONO_INST_VOLATILE
)) {
9471 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9472 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_REL
);
9475 if (il_op
== MONO_CEE_LDSFLDA
) {
9476 ins
->klass
= mono_class_from_mono_type_internal (ftype
);
9477 ins
->type
= STACK_PTR
;
9479 } else if (il_op
== MONO_CEE_STSFLD
) {
9482 EMIT_NEW_STORE_MEMBASE_TYPE (cfg
, store
, ftype
, ins
->dreg
, 0, store_val
->dreg
);
9483 store
->flags
|= ins_flag
;
9485 gboolean is_const
= FALSE
;
9486 MonoVTable
*vtable
= NULL
;
9487 gpointer addr
= NULL
;
9489 if (!context_used
) {
9490 vtable
= mono_class_vtable_checked (cfg
->domain
, klass
, cfg
->error
);
9492 CHECK_TYPELOAD (klass
);
9494 if ((ftype
->attrs
& FIELD_ATTRIBUTE_INIT_ONLY
) && (((addr
= mono_aot_readonly_field_override (field
)) != NULL
) ||
9495 (!context_used
&& !((cfg
->opt
& MONO_OPT_SHARED
) || cfg
->compile_aot
) && vtable
->initialized
))) {
9496 int ro_type
= ftype
->type
;
9498 addr
= (char*)mono_vtable_get_static_field_data (vtable
) + field
->offset
;
9499 if (ro_type
== MONO_TYPE_VALUETYPE
&& m_class_is_enumtype (ftype
->data
.klass
)) {
9500 ro_type
= mono_class_enum_basetype_internal (ftype
->data
.klass
)->type
;
9503 GSHAREDVT_FAILURE (il_op
);
9505 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9508 case MONO_TYPE_BOOLEAN
:
9510 EMIT_NEW_ICONST (cfg
, *sp
, *((guint8
*)addr
));
9514 EMIT_NEW_ICONST (cfg
, *sp
, *((gint8
*)addr
));
9517 case MONO_TYPE_CHAR
:
9519 EMIT_NEW_ICONST (cfg
, *sp
, *((guint16
*)addr
));
9523 EMIT_NEW_ICONST (cfg
, *sp
, *((gint16
*)addr
));
9528 EMIT_NEW_ICONST (cfg
, *sp
, *((gint32
*)addr
));
9532 EMIT_NEW_ICONST (cfg
, *sp
, *((guint32
*)addr
));
9538 case MONO_TYPE_FNPTR
:
9539 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
9540 mini_type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
9543 case MONO_TYPE_STRING
:
9544 case MONO_TYPE_OBJECT
:
9545 case MONO_TYPE_CLASS
:
9546 case MONO_TYPE_SZARRAY
:
9547 case MONO_TYPE_ARRAY
:
9548 if (!mono_gc_is_moving ()) {
9549 EMIT_NEW_PCONST (cfg
, *sp
, *((gpointer
*)addr
));
9550 mini_type_to_eval_stack_type ((cfg
), field
->type
, *sp
);
9558 EMIT_NEW_I8CONST (cfg
, *sp
, *((gint64
*)addr
));
9563 case MONO_TYPE_VALUETYPE
:
9573 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, load
, field
->type
, ins
->dreg
, 0);
9574 load
->flags
|= ins_flag
;
9580 if ((il_op
== MONO_CEE_LDFLD
|| il_op
== MONO_CEE_LDSFLD
) && (ins_flag
& MONO_INST_VOLATILE
)) {
9581 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9582 mini_emit_memory_barrier (cfg
, MONO_MEMORY_BARRIER_ACQ
);
9588 case MONO_CEE_STOBJ
:
9590 klass
= mini_get_class (method
, token
, generic_context
);
9591 CHECK_TYPELOAD (klass
);
9593 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9594 mini_emit_memory_store (cfg
, m_class_get_byval_arg (klass
), sp
[0], sp
[1], ins_flag
);
9602 case MONO_CEE_NEWARR
: {
9604 const char *data_ptr
;
9606 guint32 field_token
;
9610 klass
= mini_get_class (method
, token
, generic_context
);
9611 CHECK_TYPELOAD (klass
);
9612 if (m_class_get_byval_arg (klass
)->type
== MONO_TYPE_VOID
)
9615 context_used
= mini_class_check_context_used (cfg
, klass
);
9617 if (sp
[0]->type
== STACK_I8
|| (TARGET_SIZEOF_VOID_P
== 8 && sp
[0]->type
== STACK_PTR
)) {
9618 MONO_INST_NEW (cfg
, ins
, OP_LCONV_TO_OVF_U4
);
9619 ins
->sreg1
= sp
[0]->dreg
;
9620 ins
->type
= STACK_I4
;
9621 ins
->dreg
= alloc_ireg (cfg
);
9622 MONO_ADD_INS (cfg
->cbb
, ins
);
9623 *sp
= mono_decompose_opcode (cfg
, ins
);
9628 MonoClass
*array_class
= mono_class_create_array (klass
, 1);
9629 MonoMethod
*managed_alloc
= mono_gc_get_managed_array_allocator (array_class
);
9631 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
9634 args
[0] = mini_emit_get_rgctx_klass (cfg
, context_used
,
9635 array_class
, MONO_RGCTX_INFO_VTABLE
);
9640 ins
= mono_emit_method_call (cfg
, managed_alloc
, args
, NULL
);
9642 ins
= mono_emit_jit_icall (cfg
, ves_icall_array_new_specific
, args
);
9644 if (cfg
->opt
& MONO_OPT_SHARED
) {
9645 /* Decompose now to avoid problems with references to the domainvar */
9646 MonoInst
*iargs
[3];
9648 EMIT_NEW_DOMAINCONST (cfg
, iargs
[0]);
9649 EMIT_NEW_CLASSCONST (cfg
, iargs
[1], klass
);
9652 ins
= mono_emit_jit_icall (cfg
, ves_icall_array_new
, iargs
);
9654 /* Decompose later since it is needed by abcrem */
9655 MonoClass
*array_type
= mono_class_create_array (klass
, 1);
9656 mono_class_vtable_checked (cfg
->domain
, array_type
, cfg
->error
);
9658 CHECK_TYPELOAD (array_type
);
9660 MONO_INST_NEW (cfg
, ins
, OP_NEWARR
);
9661 ins
->dreg
= alloc_ireg_ref (cfg
);
9662 ins
->sreg1
= sp
[0]->dreg
;
9663 ins
->inst_newa_class
= klass
;
9664 ins
->type
= STACK_OBJ
;
9665 ins
->klass
= array_type
;
9666 MONO_ADD_INS (cfg
->cbb
, ins
);
9667 cfg
->flags
|= MONO_CFG_NEEDS_DECOMPOSE
;
9668 cfg
->cbb
->needs_decompose
= TRUE
;
9670 /* Needed so mono_emit_load_get_addr () gets called */
9671 mono_get_got_var (cfg
);
9681 * we inline/optimize the initialization sequence if possible.
9682 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9683 * for small sizes open code the memcpy
9684 * ensure the rva field is big enough
9686 if ((cfg
->opt
& MONO_OPT_INTRINS
) && next_ip
< end
9687 && ip_in_bb (cfg
, cfg
->cbb
, next_ip
)
9688 && (len_ins
->opcode
== OP_ICONST
)
9689 && (data_ptr
= initialize_array_data (cfg
, method
,
9690 cfg
->compile_aot
, next_ip
, end
, klass
,
9691 len_ins
->inst_c0
, &data_size
, &field_token
,
9692 &il_op
, &next_ip
))) {
9693 MonoMethod
*memcpy_method
= mini_get_memcpy_method ();
9694 MonoInst
*iargs
[3];
9695 int add_reg
= alloc_ireg_mp (cfg
);
9697 EMIT_NEW_BIALU_IMM (cfg
, iargs
[0], OP_PADD_IMM
, add_reg
, ins
->dreg
, MONO_STRUCT_OFFSET (MonoArray
, vector
));
9698 if (cfg
->compile_aot
) {
9699 EMIT_NEW_AOTCONST_TOKEN (cfg
, iargs
[1], MONO_PATCH_INFO_RVA
, m_class_get_image (method
->klass
), GPOINTER_TO_UINT(field_token
), STACK_PTR
, NULL
);
9701 EMIT_NEW_PCONST (cfg
, iargs
[1], (char*)data_ptr
);
9703 EMIT_NEW_ICONST (cfg
, iargs
[2], data_size
);
9704 mono_emit_method_call (cfg
, memcpy_method
, iargs
, NULL
);
9709 case MONO_CEE_LDLEN
:
9711 if (sp
[0]->type
!= STACK_OBJ
)
9714 MONO_INST_NEW (cfg
, ins
, OP_LDLEN
);
9715 ins
->dreg
= alloc_preg (cfg
);
9716 ins
->sreg1
= sp
[0]->dreg
;
9717 ins
->inst_imm
= MONO_STRUCT_OFFSET (MonoArray
, max_length
);
9718 ins
->type
= STACK_I4
;
9719 /* This flag will be inherited by the decomposition */
9720 ins
->flags
|= MONO_INST_FAULT
| MONO_INST_INVARIANT_LOAD
;
9721 MONO_ADD_INS (cfg
->cbb
, ins
);
9722 cfg
->flags
|= MONO_CFG_NEEDS_DECOMPOSE
;
9723 cfg
->cbb
->needs_decompose
= TRUE
;
9726 case MONO_CEE_LDELEMA
:
9728 if (sp
[0]->type
!= STACK_OBJ
)
9731 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9733 klass
= mini_get_class (method
, token
, generic_context
);
9734 CHECK_TYPELOAD (klass
);
9735 /* we need to make sure that this array is exactly the type it needs
9736 * to be for correctness. the wrappers are lax with their usage
9737 * so we need to ignore them here
9739 if (!m_class_is_valuetype (klass
) && method
->wrapper_type
== MONO_WRAPPER_NONE
&& !readonly
) {
9740 MonoClass
*array_class
= mono_class_create_array (klass
, 1);
9741 mini_emit_check_array_type (cfg
, sp
[0], array_class
);
9742 CHECK_TYPELOAD (array_class
);
9746 ins
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9749 case MONO_CEE_LDELEM
:
9750 case MONO_CEE_LDELEM_I1
:
9751 case MONO_CEE_LDELEM_U1
:
9752 case MONO_CEE_LDELEM_I2
:
9753 case MONO_CEE_LDELEM_U2
:
9754 case MONO_CEE_LDELEM_I4
:
9755 case MONO_CEE_LDELEM_U4
:
9756 case MONO_CEE_LDELEM_I8
:
9757 case MONO_CEE_LDELEM_I
:
9758 case MONO_CEE_LDELEM_R4
:
9759 case MONO_CEE_LDELEM_R8
:
9760 case MONO_CEE_LDELEM_REF
: {
9765 if (il_op
== MONO_CEE_LDELEM
) {
9766 klass
= mini_get_class (method
, token
, generic_context
);
9767 CHECK_TYPELOAD (klass
);
9768 mono_class_init_internal (klass
);
9771 klass
= array_access_to_klass (il_op
);
9773 if (sp
[0]->type
!= STACK_OBJ
)
9776 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9778 if (mini_is_gsharedvt_variable_klass (klass
)) {
9779 // FIXME-VT: OP_ICONST optimization
9780 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9781 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr
->dreg
, 0);
9782 ins
->opcode
= OP_LOADV_MEMBASE
;
9783 } else if (sp
[1]->opcode
== OP_ICONST
) {
9784 int array_reg
= sp
[0]->dreg
;
9785 int index_reg
= sp
[1]->dreg
;
9786 int offset
= (mono_class_array_element_size (klass
) * sp
[1]->inst_c0
) + MONO_STRUCT_OFFSET (MonoArray
, vector
);
9788 if (SIZEOF_REGISTER
== 8 && COMPILE_LLVM (cfg
))
9789 MONO_EMIT_NEW_UNALU (cfg
, OP_ZEXT_I4
, index_reg
, index_reg
);
9791 MONO_EMIT_BOUNDS_CHECK (cfg
, array_reg
, MonoArray
, max_length
, index_reg
);
9792 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), array_reg
, offset
);
9794 addr
= mini_emit_ldelema_1_ins (cfg
, klass
, sp
[0], sp
[1], TRUE
);
9795 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (klass
), addr
->dreg
, 0);
9800 case MONO_CEE_STELEM_I
:
9801 case MONO_CEE_STELEM_I1
:
9802 case MONO_CEE_STELEM_I2
:
9803 case MONO_CEE_STELEM_I4
:
9804 case MONO_CEE_STELEM_I8
:
9805 case MONO_CEE_STELEM_R4
:
9806 case MONO_CEE_STELEM_R8
:
9807 case MONO_CEE_STELEM_REF
:
9808 case MONO_CEE_STELEM
: {
9811 cfg
->flags
|= MONO_CFG_HAS_LDELEMA
;
9813 if (il_op
== MONO_CEE_STELEM
) {
9814 klass
= mini_get_class (method
, token
, generic_context
);
9815 CHECK_TYPELOAD (klass
);
9816 mono_class_init_internal (klass
);
9819 klass
= array_access_to_klass (il_op
);
9821 if (sp
[0]->type
!= STACK_OBJ
)
9824 sp
[2] = convert_value (cfg
, m_class_get_byval_arg (klass
), sp
[2]);
9825 mini_emit_array_store (cfg
, klass
, sp
, TRUE
);
9830 case MONO_CEE_CKFINITE
: {
9833 if (cfg
->llvm_only
) {
9834 MonoInst
*iargs
[1];
9837 *sp
++ = mono_emit_jit_icall (cfg
, mono_ckfinite
, iargs
);
9839 sp
[0] = convert_value (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), sp
[0]);
9840 MONO_INST_NEW (cfg
, ins
, OP_CKFINITE
);
9841 ins
->sreg1
= sp
[0]->dreg
;
9842 ins
->dreg
= alloc_freg (cfg
);
9843 ins
->type
= STACK_R8
;
9844 MONO_ADD_INS (cfg
->cbb
, ins
);
9846 *sp
++ = mono_decompose_opcode (cfg
, ins
);
9851 case MONO_CEE_REFANYVAL
: {
9852 MonoInst
*src_var
, *src
;
9854 int klass_reg
= alloc_preg (cfg
);
9855 int dreg
= alloc_preg (cfg
);
9857 GSHAREDVT_FAILURE (il_op
);
9859 MONO_INST_NEW (cfg
, ins
, il_op
);
9861 klass
= mini_get_class (method
, token
, generic_context
);
9862 CHECK_TYPELOAD (klass
);
9864 context_used
= mini_class_check_context_used (cfg
, klass
);
9867 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
9869 src_var
= mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (mono_defaults
.typed_reference_class
), OP_LOCAL
, sp
[0]->dreg
);
9870 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
9871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, klass_reg
, src
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, klass
));
9874 MonoInst
*klass_ins
;
9876 klass_ins
= mini_emit_get_rgctx_klass (cfg
, context_used
,
9877 klass
, MONO_RGCTX_INFO_KLASS
);
9880 MONO_EMIT_NEW_BIALU (cfg
, OP_COMPARE
, -1, klass_reg
, klass_ins
->dreg
);
9881 MONO_EMIT_NEW_COND_EXC (cfg
, NE_UN
, "InvalidCastException");
9883 mini_emit_class_check (cfg
, klass_reg
, klass
);
9885 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, src
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, value
));
9886 ins
->type
= STACK_MP
;
9891 case MONO_CEE_MKREFANY
: {
9892 MonoInst
*loc
, *addr
;
9894 GSHAREDVT_FAILURE (il_op
);
9896 MONO_INST_NEW (cfg
, ins
, il_op
);
9898 klass
= mini_get_class (method
, token
, generic_context
);
9899 CHECK_TYPELOAD (klass
);
9901 context_used
= mini_class_check_context_used (cfg
, klass
);
9903 loc
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.typed_reference_class
), OP_LOCAL
);
9904 EMIT_NEW_TEMPLOADA (cfg
, addr
, loc
->inst_c0
);
9906 MonoInst
*const_ins
= mini_emit_get_rgctx_klass (cfg
, context_used
, klass
, MONO_RGCTX_INFO_KLASS
);
9907 int type_reg
= alloc_preg (cfg
);
9909 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, klass
), const_ins
->dreg
);
9910 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ADD_IMM
, type_reg
, const_ins
->dreg
, m_class_offsetof_byval_arg ());
9911 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, type
), type_reg
);
9913 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREP_MEMBASE_REG
, addr
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, value
), sp
[0]->dreg
);
9915 EMIT_NEW_TEMPLOAD (cfg
, ins
, loc
->inst_c0
);
9916 ins
->type
= STACK_VTYPE
;
9917 ins
->klass
= mono_defaults
.typed_reference_class
;
9921 case MONO_CEE_LDTOKEN
: {
9923 MonoClass
*handle_class
;
9925 if (method
->wrapper_type
== MONO_WRAPPER_DYNAMIC_METHOD
||
9926 method
->wrapper_type
== MONO_WRAPPER_SYNCHRONIZED
) {
9927 handle
= mono_method_get_wrapper_data (method
, n
);
9928 handle_class
= (MonoClass
*)mono_method_get_wrapper_data (method
, n
+ 1);
9929 if (handle_class
== mono_defaults
.typehandle_class
)
9930 handle
= m_class_get_byval_arg ((MonoClass
*)handle
);
9933 handle
= mono_ldtoken_checked (image
, n
, &handle_class
, generic_context
, cfg
->error
);
9938 mono_class_init_internal (handle_class
);
9940 if (mono_metadata_token_table (n
) == MONO_TABLE_TYPEDEF
||
9941 mono_metadata_token_table (n
) == MONO_TABLE_TYPEREF
) {
9942 /* This case handles ldtoken
9943 of an open type, like for
9946 } else if (handle_class
== mono_defaults
.typehandle_class
) {
9947 context_used
= mini_class_check_context_used (cfg
, mono_class_from_mono_type_internal ((MonoType
*)handle
));
9948 } else if (handle_class
== mono_defaults
.fieldhandle_class
)
9949 context_used
= mini_class_check_context_used (cfg
, ((MonoClassField
*)handle
)->parent
);
9950 else if (handle_class
== mono_defaults
.methodhandle_class
)
9951 context_used
= mini_method_check_context_used (cfg
, (MonoMethod
*)handle
);
9953 g_assert_not_reached ();
9956 if ((cfg
->opt
& MONO_OPT_SHARED
) &&
9957 method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
&&
9958 method
->wrapper_type
!= MONO_WRAPPER_SYNCHRONIZED
) {
9959 MonoInst
*addr
, *vtvar
, *iargs
[3];
9960 int method_context_used
;
9962 method_context_used
= mini_method_check_context_used (cfg
, method
);
9964 vtvar
= mono_compile_create_var (cfg
, m_class_get_byval_arg (handle_class
), OP_LOCAL
);
9966 EMIT_NEW_IMAGECONST (cfg
, iargs
[0], image
);
9967 EMIT_NEW_ICONST (cfg
, iargs
[1], n
);
9968 if (method_context_used
) {
9969 iargs
[2] = emit_get_rgctx_method (cfg
, method_context_used
,
9970 method
, MONO_RGCTX_INFO_METHOD
);
9971 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper_generic_shared
, iargs
);
9973 EMIT_NEW_PCONST (cfg
, iargs
[2], generic_context
);
9974 ins
= mono_emit_jit_icall (cfg
, mono_ldtoken_wrapper
, iargs
);
9976 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
9978 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
9980 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
9982 if ((next_ip
+ 4 < end
) && ip_in_bb (cfg
, cfg
->cbb
, next_ip
) &&
9983 ((next_ip
[0] == CEE_CALL
) || (next_ip
[0] == CEE_CALLVIRT
)) &&
9984 (cmethod
= mini_get_method (cfg
, method
, read32 (next_ip
+ 1), NULL
, generic_context
)) &&
9985 (cmethod
->klass
== mono_defaults
.systemtype_class
) &&
9986 (strcmp (cmethod
->name
, "GetTypeFromHandle") == 0)) {
9987 MonoClass
*tclass
= mono_class_from_mono_type_internal ((MonoType
*)handle
);
9989 mono_class_init_internal (tclass
);
9991 ins
= mini_emit_get_rgctx_klass (cfg
, context_used
,
9992 tclass
, MONO_RGCTX_INFO_REFLECTION_TYPE
);
9993 } else if (cfg
->compile_aot
) {
9994 if (method
->wrapper_type
) {
9995 error_init (error
); //got to do it since there are multiple conditionals below
9996 if (mono_class_get_checked (m_class_get_image (tclass
), m_class_get_type_token (tclass
), error
) == tclass
&& !generic_context
) {
9997 /* Special case for static synchronized wrappers */
9998 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, m_class_get_image (tclass
), m_class_get_type_token (tclass
), generic_context
);
10000 mono_error_cleanup (error
); /* FIXME don't swallow the error */
10001 /* FIXME: n is not a normal token */
10003 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
10006 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg
, ins
, image
, n
, generic_context
);
10009 MonoReflectionType
*rt
= mono_type_get_object_checked (cfg
->domain
, (MonoType
*)handle
, cfg
->error
);
10011 EMIT_NEW_PCONST (cfg
, ins
, rt
);
10013 ins
->type
= STACK_OBJ
;
10014 ins
->klass
= cmethod
->klass
;
10015 il_op
= (MonoOpcodeEnum
)next_ip
[0];
10018 MonoInst
*addr
, *vtvar
;
10020 vtvar
= mono_compile_create_var (cfg
, m_class_get_byval_arg (handle_class
), OP_LOCAL
);
10022 if (context_used
) {
10023 if (handle_class
== mono_defaults
.typehandle_class
) {
10024 ins
= mini_emit_get_rgctx_klass (cfg
, context_used
,
10025 mono_class_from_mono_type_internal ((MonoType
*)handle
),
10026 MONO_RGCTX_INFO_TYPE
);
10027 } else if (handle_class
== mono_defaults
.methodhandle_class
) {
10028 ins
= emit_get_rgctx_method (cfg
, context_used
,
10029 (MonoMethod
*)handle
, MONO_RGCTX_INFO_METHOD
);
10030 } else if (handle_class
== mono_defaults
.fieldhandle_class
) {
10031 ins
= emit_get_rgctx_field (cfg
, context_used
,
10032 (MonoClassField
*)handle
, MONO_RGCTX_INFO_CLASS_FIELD
);
10034 g_assert_not_reached ();
10036 } else if (cfg
->compile_aot
) {
10037 EMIT_NEW_LDTOKENCONST (cfg
, ins
, image
, n
, generic_context
);
10039 EMIT_NEW_PCONST (cfg
, ins
, handle
);
10041 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
10042 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, addr
->dreg
, 0, ins
->dreg
);
10043 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
10050 case MONO_CEE_THROW
:
10051 if (sp
[-1]->type
!= STACK_OBJ
)
10054 MONO_INST_NEW (cfg
, ins
, OP_THROW
);
10056 ins
->sreg1
= sp
[0]->dreg
;
10057 cfg
->cbb
->out_of_line
= TRUE
;
10058 MONO_ADD_INS (cfg
->cbb
, ins
);
10059 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
10060 MONO_ADD_INS (cfg
->cbb
, ins
);
10063 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
10064 start_new_bblock
= 1;
10065 /* This can complicate code generation for llvm since the return value might not be defined */
10066 if (COMPILE_LLVM (cfg
))
10067 INLINE_FAILURE ("throw");
10069 case MONO_CEE_ENDFINALLY
:
10070 if (!ip_in_finally_clause (cfg
, ip
- header
->code
))
10072 /* mono_save_seq_point_info () depends on this */
10073 if (sp
!= stack_start
)
10074 emit_seq_point (cfg
, method
, ip
, FALSE
, FALSE
);
10075 MONO_INST_NEW (cfg
, ins
, OP_ENDFINALLY
);
10076 MONO_ADD_INS (cfg
->cbb
, ins
);
10077 start_new_bblock
= 1;
10080 * Control will leave the method so empty the stack, otherwise
10081 * the next basic block will start with a nonempty stack.
10083 while (sp
!= stack_start
) {
10087 case MONO_CEE_LEAVE
:
10088 case MONO_CEE_LEAVE_S
: {
10091 /* empty the stack */
10092 g_assert (sp
>= stack_start
);
10096 * If this leave statement is in a catch block, check for a
10097 * pending exception, and rethrow it if necessary.
10098 * We avoid doing this in runtime invoke wrappers, since those are called
10099 * by native code which excepts the wrapper to catch all exceptions.
10101 for (i
= 0; i
< header
->num_clauses
; ++i
) {
10102 MonoExceptionClause
*clause
= &header
->clauses
[i
];
10105 * Use <= in the final comparison to handle clauses with multiple
10106 * leave statements, like in bug #78024.
10107 * The ordering of the exception clauses guarantees that we find the
10108 * innermost clause.
10110 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && (clause
->flags
== MONO_EXCEPTION_CLAUSE_NONE
) && (ip
- header
->code
+ ((il_op
== MONO_CEE_LEAVE
) ? 5 : 2)) <= (clause
->handler_offset
+ clause
->handler_len
) && method
->wrapper_type
!= MONO_WRAPPER_RUNTIME_INVOKE
) {
10112 MonoBasicBlock
*dont_throw
;
10117 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10120 exc_ins
= mono_emit_jit_icall (cfg
, mono_thread_get_undeniable_exception
, NULL
);
10122 NEW_BBLOCK (cfg
, dont_throw
);
10125 * Currently, we always rethrow the abort exception, despite the
10126 * fact that this is not correct. See thread6.cs for an example.
10127 * But propagating the abort exception is more important than
10128 * getting the semantics right.
10130 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, exc_ins
->dreg
, 0);
10131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
10132 MONO_EMIT_NEW_UNALU (cfg
, OP_THROW
, -1, exc_ins
->dreg
);
10134 MONO_START_BB (cfg
, dont_throw
);
10139 cfg
->cbb
->try_end
= (intptr_t)(ip
- header
->code
);
10142 if ((handlers
= mono_find_leave_clauses (cfg
, ip
, target
))) {
10145 * For each finally clause that we exit we need to invoke the finally block.
10146 * After each invocation we need to add try holes for all the clauses that
10147 * we already exited.
10149 for (tmp
= handlers
; tmp
; tmp
= tmp
->next
) {
10150 MonoLeaveClause
*leave
= (MonoLeaveClause
*) tmp
->data
;
10151 MonoExceptionClause
*clause
= leave
->clause
;
10153 if (clause
->flags
!= MONO_EXCEPTION_CLAUSE_FINALLY
)
10156 MonoInst
*abort_exc
= (MonoInst
*)mono_find_exvar_for_offset (cfg
, clause
->handler_offset
);
10157 MonoBasicBlock
*dont_throw
;
10160 * Emit instrumentation code before linking the basic blocks below as this
10161 * will alter cfg->cbb.
10163 mini_profiler_emit_call_finally (cfg
, header
, ip
, leave
->index
, clause
);
10165 tblock
= cfg
->cil_offset_to_bb
[clause
->handler_offset
];
10167 link_bblock (cfg
, cfg
->cbb
, tblock
);
10169 MONO_EMIT_NEW_PCONST (cfg
, abort_exc
->dreg
, 0);
10171 MONO_INST_NEW (cfg
, ins
, OP_CALL_HANDLER
);
10172 ins
->inst_target_bb
= tblock
;
10173 ins
->inst_eh_blocks
= tmp
;
10174 MONO_ADD_INS (cfg
->cbb
, ins
);
10175 cfg
->cbb
->has_call_handler
= 1;
10177 /* Throw exception if exvar is set */
10178 /* FIXME Do we need this for calls from catch/filter ? */
10179 NEW_BBLOCK (cfg
, dont_throw
);
10180 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, abort_exc
->dreg
, 0);
10181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBEQ
, dont_throw
);
10182 mono_emit_jit_icall (cfg
, ves_icall_thread_finish_async_abort
, NULL
);
10183 cfg
->cbb
->clause_holes
= tmp
;
10185 MONO_START_BB (cfg
, dont_throw
);
10186 cfg
->cbb
->clause_holes
= tmp
;
10188 if (COMPILE_LLVM (cfg
)) {
10189 MonoBasicBlock
*target_bb
;
10192 * Link the finally bblock with the target, since it will
10193 * conceptually branch there.
10195 GET_BBLOCK (cfg
, tblock
, cfg
->cil_start
+ clause
->handler_offset
+ clause
->handler_len
- 1);
10196 GET_BBLOCK (cfg
, target_bb
, target
);
10197 link_bblock (cfg
, tblock
, target_bb
);
10202 MONO_INST_NEW (cfg
, ins
, OP_BR
);
10203 MONO_ADD_INS (cfg
->cbb
, ins
);
10204 GET_BBLOCK (cfg
, tblock
, target
);
10205 link_bblock (cfg
, cfg
->cbb
, tblock
);
10206 ins
->inst_target_bb
= tblock
;
10208 start_new_bblock
= 1;
10213 * Mono specific opcodes
10216 case MONO_CEE_MONO_ICALL
: {
10217 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10218 const MonoJitICallId jit_icall_id
= (MonoJitICallId
)token
;
10219 MonoJitICallInfo
* const info
= mono_find_jit_icall_info (jit_icall_id
);
10221 CHECK_STACK (info
->sig
->param_count
);
10222 sp
-= info
->sig
->param_count
;
10224 if (token
== MONO_JIT_ICALL_mono_threads_attach_coop
) {
10226 MonoBasicBlock
*next_bb
;
10228 if (cfg
->compile_aot
) {
10230 * This is called on unattached threads, so it cannot go through the trampoline
10231 * infrastructure. Use an indirect call through a got slot initialized at load time
10234 EMIT_NEW_AOTCONST (cfg
, addr
, MONO_PATCH_INFO_JIT_ICALL_ADDR_NOCALL
, GUINT_TO_POINTER (jit_icall_id
));
10235 ins
= mini_emit_calli (cfg
, info
->sig
, sp
, addr
, NULL
, NULL
);
10237 ins
= mono_emit_jit_icall_id (cfg
, jit_icall_id
, sp
);
10241 * Parts of the initlocals code needs to come after this, since it might call methods like memset.
10243 init_localsbb2
= cfg
->cbb
;
10244 NEW_BBLOCK (cfg
, next_bb
);
10245 MONO_START_BB (cfg
, next_bb
);
10247 ins
= mono_emit_jit_icall_id (cfg
, jit_icall_id
, sp
);
10250 if (!MONO_TYPE_IS_VOID (info
->sig
->ret
))
10253 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10257 MonoJumpInfoType ldptr_type
;
10259 case MONO_CEE_MONO_LDPTR_CARD_TABLE
:
10260 ldptr_type
= MONO_PATCH_INFO_GC_CARD_TABLE_ADDR
;
10262 case MONO_CEE_MONO_LDPTR_NURSERY_START
:
10263 ldptr_type
= MONO_PATCH_INFO_GC_NURSERY_START
;
10265 case MONO_CEE_MONO_LDPTR_NURSERY_BITS
:
10266 ldptr_type
= MONO_PATCH_INFO_GC_NURSERY_BITS
;
10268 case MONO_CEE_MONO_LDPTR_INT_REQ_FLAG
:
10269 ldptr_type
= MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG
;
10271 case MONO_CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT
:
10272 ldptr_type
= MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT
;
10274 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10275 ins
= mini_emit_runtime_constant (cfg
, ldptr_type
, NULL
);
10277 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10280 case MONO_CEE_MONO_LDPTR
: {
10283 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10284 ptr
= mono_method_get_wrapper_data (method
, token
);
10285 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
10287 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10288 /* Can't embed random pointers into AOT code */
10292 case MONO_CEE_MONO_JIT_ICALL_ADDR
:
10293 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10294 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg
, ins
, GUINT_TO_POINTER (token
));
10296 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10299 case MONO_CEE_MONO_ICALL_ADDR
: {
10300 MonoMethod
*cmethod
;
10303 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10305 cmethod
= (MonoMethod
*)mono_method_get_wrapper_data (method
, token
);
10307 if (cfg
->compile_aot
) {
10308 if (cfg
->direct_pinvoke
&& ip
+ 6 < end
&& (ip
[6] == CEE_POP
)) {
10310 * This is generated by emit_native_wrapper () to resolve the pinvoke address
10311 * before the call, its not needed when using direct pinvoke.
10312 * This is not an optimization, but its used to avoid looking up pinvokes
10313 * on platforms which don't support dlopen ().
10315 EMIT_NEW_PCONST (cfg
, ins
, NULL
);
10317 EMIT_NEW_AOTCONST (cfg
, ins
, MONO_PATCH_INFO_ICALL_ADDR
, cmethod
);
10320 ptr
= mono_lookup_internal_call (cmethod
);
10322 EMIT_NEW_PCONST (cfg
, ins
, ptr
);
10327 case MONO_CEE_MONO_VTADDR
: {
10328 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10329 MonoInst
*src_var
, *src
;
10334 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
10335 EMIT_NEW_VARLOADA ((cfg
), (src
), src_var
, src_var
->inst_vtype
);
10339 case MONO_CEE_MONO_NEWOBJ
: {
10340 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10341 MonoInst
*iargs
[2];
10343 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10344 mono_class_init_internal (klass
);
10345 NEW_DOMAINCONST (cfg
, iargs
[0]);
10346 MONO_ADD_INS (cfg
->cbb
, iargs
[0]);
10347 NEW_CLASSCONST (cfg
, iargs
[1], klass
);
10348 MONO_ADD_INS (cfg
->cbb
, iargs
[1]);
10349 *sp
++ = mono_emit_jit_icall (cfg
, ves_icall_object_new
, iargs
);
10350 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10353 case MONO_CEE_MONO_OBJADDR
:
10354 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10356 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
10357 ins
->dreg
= alloc_ireg_mp (cfg
);
10358 ins
->sreg1
= sp
[0]->dreg
;
10359 ins
->type
= STACK_MP
;
10360 MONO_ADD_INS (cfg
->cbb
, ins
);
10363 case MONO_CEE_MONO_LDNATIVEOBJ
:
10365 * Similar to LDOBJ, but instead load the unmanaged
10366 * representation of the vtype to the stack.
10368 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10370 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10371 g_assert (m_class_is_valuetype (klass
));
10372 mono_class_init_internal (klass
);
10375 MonoInst
*src
, *dest
, *temp
;
10378 temp
= mono_compile_create_var (cfg
, m_class_get_byval_arg (klass
), OP_LOCAL
);
10379 temp
->backend
.is_pinvoke
= 1;
10380 EMIT_NEW_TEMPLOADA (cfg
, dest
, temp
->inst_c0
);
10381 mini_emit_memory_copy (cfg
, dest
, src
, klass
, TRUE
, 0);
10383 EMIT_NEW_TEMPLOAD (cfg
, dest
, temp
->inst_c0
);
10384 dest
->type
= STACK_VTYPE
;
10385 dest
->klass
= klass
;
10390 case MONO_CEE_MONO_RETOBJ
: {
10392 * Same as RET, but return the native representation of a vtype
10395 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10396 g_assert (cfg
->ret
);
10397 g_assert (mono_method_signature_internal (method
)->pinvoke
);
10400 klass
= (MonoClass
*)mono_method_get_wrapper_data (method
, token
);
10402 if (!cfg
->vret_addr
) {
10403 g_assert (cfg
->ret_var_is_local
);
10405 EMIT_NEW_VARLOADA (cfg
, ins
, cfg
->ret
, cfg
->ret
->inst_vtype
);
10407 EMIT_NEW_RETLOADA (cfg
, ins
);
10409 mini_emit_memory_copy (cfg
, ins
, sp
[0], klass
, TRUE
, 0);
10411 if (sp
!= stack_start
)
10414 mini_profiler_emit_leave (cfg
, sp
[0]);
10416 MONO_INST_NEW (cfg
, ins
, OP_BR
);
10417 ins
->inst_target_bb
= end_bblock
;
10418 MONO_ADD_INS (cfg
->cbb
, ins
);
10419 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
10420 start_new_bblock
= 1;
10423 case MONO_CEE_MONO_SAVE_LMF
:
10424 case MONO_CEE_MONO_RESTORE_LMF
:
10425 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10427 case MONO_CEE_MONO_CLASSCONST
:
10428 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10429 EMIT_NEW_CLASSCONST (cfg
, ins
, mono_method_get_wrapper_data (method
, token
));
10431 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10433 case MONO_CEE_MONO_NOT_TAKEN
:
10434 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10435 cfg
->cbb
->out_of_line
= TRUE
;
10437 case MONO_CEE_MONO_TLS
: {
10440 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10441 key
= (MonoTlsKey
)n
;
10442 g_assert (key
< TLS_KEY_NUM
);
10444 ins
= mono_create_tls_get (cfg
, key
);
10446 ins
->type
= STACK_PTR
;
10450 case MONO_CEE_MONO_DYN_CALL
: {
10451 MonoCallInst
*call
;
10453 /* It would be easier to call a trampoline, but that would put an
10454 * extra frame on the stack, confusing exception handling. So
10455 * implement it inline using an opcode for now.
10458 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10459 if (!cfg
->dyn_call_var
) {
10460 cfg
->dyn_call_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
10461 /* prevent it from being register allocated */
10462 cfg
->dyn_call_var
->flags
|= MONO_INST_VOLATILE
;
10465 /* Has to use a call inst since local regalloc expects it */
10466 MONO_INST_NEW_CALL (cfg
, call
, OP_DYN_CALL
);
10467 ins
= (MonoInst
*)call
;
10469 ins
->sreg1
= sp
[0]->dreg
;
10470 ins
->sreg2
= sp
[1]->dreg
;
10471 MONO_ADD_INS (cfg
->cbb
, ins
);
10473 cfg
->param_area
= MAX (cfg
->param_area
, cfg
->backend
->dyn_call_param_area
);
10474 /* OP_DYN_CALL might need to allocate a dynamically sized param area */
10475 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10477 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10480 case MONO_CEE_MONO_MEMORY_BARRIER
: {
10481 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10482 mini_emit_memory_barrier (cfg
, (int)n
);
10485 case MONO_CEE_MONO_ATOMIC_STORE_I4
: {
10486 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10487 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4
));
10491 MONO_INST_NEW (cfg
, ins
, OP_ATOMIC_STORE_I4
);
10492 ins
->dreg
= sp
[0]->dreg
;
10493 ins
->sreg1
= sp
[1]->dreg
;
10494 ins
->backend
.memory_barrier_kind
= (int)n
;
10495 MONO_ADD_INS (cfg
->cbb
, ins
);
10498 case MONO_CEE_MONO_LD_DELEGATE_METHOD_PTR
: {
10502 dreg
= alloc_preg (cfg
);
10503 EMIT_NEW_LOAD_MEMBASE (cfg
, ins
, OP_LOAD_MEMBASE
, dreg
, sp
[0]->dreg
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
10507 case MONO_CEE_MONO_CALLI_EXTRA_ARG
: {
10509 MonoMethodSignature
*fsig
;
10513 * This is the same as CEE_CALLI, but passes an additional argument
10514 * to the called method in llvmonly mode.
10515 * This is only used by delegate invoke wrappers to call the
10516 * actual delegate method.
10518 g_assert (method
->wrapper_type
== MONO_WRAPPER_DELEGATE_INVOKE
);
10526 fsig
= mini_get_signature (method
, token
, generic_context
, cfg
->error
);
10529 if (cfg
->llvm_only
)
10530 cfg
->signatures
= g_slist_prepend_mempool (cfg
->mempool
, cfg
->signatures
, fsig
);
10532 n
= fsig
->param_count
+ fsig
->hasthis
+ 1;
10539 if (cfg
->llvm_only
) {
10541 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
10542 * cconv. This is set by mono_init_delegate ().
10544 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_signature (fsig
)) {
10545 MonoInst
*callee
= addr
;
10546 MonoInst
*call
, *localloc_ins
;
10547 MonoBasicBlock
*is_gsharedvt_bb
, *end_bb
;
10548 int low_bit_reg
= alloc_preg (cfg
);
10550 NEW_BBLOCK (cfg
, is_gsharedvt_bb
);
10551 NEW_BBLOCK (cfg
, end_bb
);
10553 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PAND_IMM
, low_bit_reg
, arg
->dreg
, 1);
10554 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, low_bit_reg
, 0);
10555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, is_gsharedvt_bb
);
10557 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
10558 addr
= emit_get_rgctx_sig (cfg
, context_used
,
10559 fsig
, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI
);
10561 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
10563 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC_IMM
);
10564 ins
->dreg
= alloc_preg (cfg
);
10565 ins
->inst_imm
= 2 * TARGET_SIZEOF_VOID_P
;
10566 MONO_ADD_INS (cfg
->cbb
, ins
);
10567 localloc_ins
= ins
;
10568 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10569 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, localloc_ins
->dreg
, 0, callee
->dreg
);
10570 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, localloc_ins
->dreg
, TARGET_SIZEOF_VOID_P
, arg
->dreg
);
10572 call
= mini_emit_extra_arg_calli (cfg
, fsig
, sp
, localloc_ins
->dreg
, addr
);
10573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
10575 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
10576 MONO_START_BB (cfg
, is_gsharedvt_bb
);
10577 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PXOR_IMM
, arg
->dreg
, arg
->dreg
, 1);
10578 ins
= mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg
->dreg
, callee
);
10579 ins
->dreg
= call
->dreg
;
10581 MONO_START_BB (cfg
, end_bb
);
10583 /* Caller uses a normal calling conv */
10585 MonoInst
*callee
= addr
;
10586 MonoInst
*call
, *localloc_ins
;
10587 MonoBasicBlock
*is_gsharedvt_bb
, *end_bb
;
10588 int low_bit_reg
= alloc_preg (cfg
);
10590 NEW_BBLOCK (cfg
, is_gsharedvt_bb
);
10591 NEW_BBLOCK (cfg
, end_bb
);
10593 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PAND_IMM
, low_bit_reg
, arg
->dreg
, 1);
10594 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, low_bit_reg
, 0);
10595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, is_gsharedvt_bb
);
10597 /* Normal case: callee uses a normal cconv, no conversion is needed */
10598 call
= mini_emit_extra_arg_calli (cfg
, fsig
, sp
, arg
->dreg
, callee
);
10599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
10600 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
10601 MONO_START_BB (cfg
, is_gsharedvt_bb
);
10602 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_PXOR_IMM
, arg
->dreg
, arg
->dreg
, 1);
10603 NEW_AOTCONST (cfg
, addr
, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER
, fsig
);
10604 MONO_ADD_INS (cfg
->cbb
, addr
);
10606 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
10608 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC_IMM
);
10609 ins
->dreg
= alloc_preg (cfg
);
10610 ins
->inst_imm
= 2 * TARGET_SIZEOF_VOID_P
;
10611 MONO_ADD_INS (cfg
->cbb
, ins
);
10612 localloc_ins
= ins
;
10613 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10614 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, localloc_ins
->dreg
, 0, callee
->dreg
);
10615 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, localloc_ins
->dreg
, TARGET_SIZEOF_VOID_P
, arg
->dreg
);
10617 ins
= mini_emit_extra_arg_calli (cfg
, fsig
, sp
, localloc_ins
->dreg
, addr
);
10618 ins
->dreg
= call
->dreg
;
10619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
10621 MONO_START_BB (cfg
, end_bb
);
10624 /* Same as CEE_CALLI */
10625 if (cfg
->gsharedvt
&& mini_is_gsharedvt_signature (fsig
)) {
10627 * We pass the address to the gsharedvt trampoline in the rgctx reg
10629 MonoInst
*callee
= addr
;
10631 addr
= emit_get_rgctx_sig (cfg
, context_used
,
10632 fsig
, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI
);
10633 ins
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, callee
);
10635 ins
= (MonoInst
*)mini_emit_calli (cfg
, fsig
, sp
, addr
, NULL
, NULL
);
10639 if (!MONO_TYPE_IS_VOID (fsig
->ret
))
10640 *sp
++ = mono_emit_widen_call_res (cfg
, ins
, fsig
);
10642 CHECK_CFG_EXCEPTION
;
10645 constrained_class
= NULL
;
10648 case MONO_CEE_MONO_LDDOMAIN
:
10649 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10650 EMIT_NEW_PCONST (cfg
, ins
, cfg
->compile_aot
? NULL
: cfg
->domain
);
10653 case MONO_CEE_MONO_SAVE_LAST_ERROR
:
10654 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10656 // Just an IL prefix, setting this flag, picked up by call instructions.
10657 save_last_error
= TRUE
;
10659 case MONO_CEE_MONO_GET_RGCTX_ARG
:
10660 g_assert (method
->wrapper_type
!= MONO_WRAPPER_NONE
);
10662 mono_create_rgctx_var (cfg
);
10664 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
10665 ins
->dreg
= alloc_dreg (cfg
, STACK_PTR
);
10666 ins
->sreg1
= cfg
->rgctx_var
->dreg
;
10667 ins
->type
= STACK_PTR
;
10668 MONO_ADD_INS (cfg
->cbb
, ins
);
10673 case MONO_CEE_ARGLIST
: {
10674 /* somewhat similar to LDTOKEN */
10675 MonoInst
*addr
, *vtvar
;
10676 vtvar
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.argumenthandle_class
), OP_LOCAL
);
10678 EMIT_NEW_TEMPLOADA (cfg
, addr
, vtvar
->inst_c0
);
10679 EMIT_NEW_UNALU (cfg
, ins
, OP_ARGLIST
, -1, addr
->dreg
);
10681 EMIT_NEW_TEMPLOAD (cfg
, ins
, vtvar
->inst_c0
);
10682 ins
->type
= STACK_VTYPE
;
10683 ins
->klass
= mono_defaults
.argumenthandle_class
;
10689 case MONO_CEE_CGT_UN
:
10691 case MONO_CEE_CLT_UN
: {
10692 MonoInst
*cmp
, *arg1
, *arg2
;
10699 * The following transforms:
10700 * CEE_CEQ into OP_CEQ
10701 * CEE_CGT into OP_CGT
10702 * CEE_CGT_UN into OP_CGT_UN
10703 * CEE_CLT into OP_CLT
10704 * CEE_CLT_UN into OP_CLT_UN
10706 MONO_INST_NEW (cfg
, cmp
, (OP_CEQ
- CEE_CEQ
) + ip
[1]);
10708 MONO_INST_NEW (cfg
, ins
, cmp
->opcode
);
10709 cmp
->sreg1
= arg1
->dreg
;
10710 cmp
->sreg2
= arg2
->dreg
;
10711 type_from_op (cfg
, cmp
, arg1
, arg2
);
10713 add_widen_op (cfg
, cmp
, &arg1
, &arg2
);
10714 if ((arg1
->type
== STACK_I8
) || ((TARGET_SIZEOF_VOID_P
== 8) && ((arg1
->type
== STACK_PTR
) || (arg1
->type
== STACK_OBJ
) || (arg1
->type
== STACK_MP
))))
10715 cmp
->opcode
= OP_LCOMPARE
;
10716 else if (arg1
->type
== STACK_R4
)
10717 cmp
->opcode
= OP_RCOMPARE
;
10718 else if (arg1
->type
== STACK_R8
)
10719 cmp
->opcode
= OP_FCOMPARE
;
10721 cmp
->opcode
= OP_ICOMPARE
;
10722 MONO_ADD_INS (cfg
->cbb
, cmp
);
10723 ins
->type
= STACK_I4
;
10724 ins
->dreg
= alloc_dreg (cfg
, (MonoStackType
)ins
->type
);
10725 type_from_op (cfg
, ins
, arg1
, arg2
);
10727 if (cmp
->opcode
== OP_FCOMPARE
|| cmp
->opcode
== OP_RCOMPARE
) {
10729 * The backends expect the fceq opcodes to do the
10732 ins
->sreg1
= cmp
->sreg1
;
10733 ins
->sreg2
= cmp
->sreg2
;
10736 MONO_ADD_INS (cfg
->cbb
, ins
);
10740 case MONO_CEE_LDFTN
: {
10741 MonoInst
*argconst
;
10742 MonoMethod
*cil_method
;
10744 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10747 mono_class_init_internal (cmethod
->klass
);
10749 mono_save_token_info (cfg
, image
, n
, cmethod
);
10751 context_used
= mini_method_check_context_used (cfg
, cmethod
);
10753 cil_method
= cmethod
;
10754 if (!dont_verify
&& !cfg
->skip_visibility
&& !mono_method_can_access_method (method
, cmethod
))
10755 emit_method_access_failure (cfg
, method
, cil_method
);
10757 if (mono_security_core_clr_enabled ())
10758 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
);
10761 * Optimize the common case of ldftn+delegate creation
10763 if ((sp
> stack_start
) && (next_ip
+ 4 < end
) && ip_in_bb (cfg
, cfg
->cbb
, next_ip
) && (next_ip
[0] == CEE_NEWOBJ
)) {
10764 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (next_ip
+ 1), NULL
, generic_context
);
10765 if (ctor_method
&& (m_class_get_parent (ctor_method
->klass
) == mono_defaults
.multicastdelegate_class
)) {
10766 MonoInst
*target_ins
, *handle_ins
;
10767 MonoMethod
*invoke
;
10768 int invoke_context_used
;
10770 invoke
= mono_get_delegate_invoke_internal (ctor_method
->klass
);
10771 if (!invoke
|| !mono_method_signature_internal (invoke
))
10774 invoke_context_used
= mini_method_check_context_used (cfg
, invoke
);
10776 target_ins
= sp
[-1];
10778 if (mono_security_core_clr_enabled ())
10779 ensure_method_is_allowed_to_call_method (cfg
, method
, ctor_method
);
10781 if (!(cmethod
->flags
& METHOD_ATTRIBUTE_STATIC
)) {
10782 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10783 if (mono_method_signature_internal (invoke
)->param_count
== mono_method_signature_internal (cmethod
)->param_count
) {
10784 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, target_ins
->dreg
, 0);
10785 MONO_EMIT_NEW_COND_EXC (cfg
, EQ
, "ArgumentException");
10789 if ((invoke_context_used
== 0 || !cfg
->gsharedvt
) || cfg
->llvm_only
) {
10790 if (cfg
->verbose_level
> 3)
10791 g_print ("converting (in B%d: stack: %d) %s", cfg
->cbb
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
+ 6, NULL
));
10792 if ((handle_ins
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
, context_used
, invoke_context_used
, FALSE
))) {
10795 CHECK_CFG_EXCEPTION
;
10798 il_op
= MONO_CEE_NEWOBJ
;
10807 argconst
= emit_get_rgctx_method (cfg
, context_used
, cmethod
, MONO_RGCTX_INFO_METHOD
);
10808 ins
= mono_emit_jit_icall (cfg
, mono_ldftn
, &argconst
);
10811 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10814 case MONO_CEE_LDVIRTFTN
: {
10815 MonoInst
*args
[2];
10817 cmethod
= mini_get_method (cfg
, method
, n
, NULL
, generic_context
);
10820 mono_class_init_internal (cmethod
->klass
);
10822 context_used
= mini_method_check_context_used (cfg
, cmethod
);
10824 if (mono_security_core_clr_enabled ())
10825 ensure_method_is_allowed_to_call_method (cfg
, method
, cmethod
);
10828 * Optimize the common case of ldvirtftn+delegate creation
10830 if (previous_il_op
== MONO_CEE_DUP
&& (sp
> stack_start
) && (next_ip
+ 4 < end
) && ip_in_bb (cfg
, cfg
->cbb
, next_ip
) && (next_ip
[0] == CEE_NEWOBJ
)) {
10831 MonoMethod
*ctor_method
= mini_get_method (cfg
, method
, read32 (next_ip
+ 1), NULL
, generic_context
);
10832 if (ctor_method
&& (m_class_get_parent (ctor_method
->klass
) == mono_defaults
.multicastdelegate_class
)) {
10833 MonoInst
*target_ins
, *handle_ins
;
10834 MonoMethod
*invoke
;
10835 int invoke_context_used
;
10836 const gboolean is_virtual
= (cmethod
->flags
& METHOD_ATTRIBUTE_VIRTUAL
) != 0;
10838 invoke
= mono_get_delegate_invoke_internal (ctor_method
->klass
);
10839 if (!invoke
|| !mono_method_signature_internal (invoke
))
10842 invoke_context_used
= mini_method_check_context_used (cfg
, invoke
);
10844 target_ins
= sp
[-1];
10846 if (mono_security_core_clr_enabled ())
10847 ensure_method_is_allowed_to_call_method (cfg
, method
, ctor_method
);
10849 if (invoke_context_used
== 0 || !cfg
->gsharedvt
|| cfg
->llvm_only
) {
10850 if (cfg
->verbose_level
> 3)
10851 g_print ("converting (in B%d: stack: %d) %s", cfg
->cbb
->block_num
, (int)(sp
- stack_start
), mono_disasm_code_one (NULL
, method
, ip
+ 6, NULL
));
10852 if ((handle_ins
= handle_delegate_ctor (cfg
, ctor_method
->klass
, target_ins
, cmethod
, context_used
, invoke_context_used
, is_virtual
))) {
10855 CHECK_CFG_EXCEPTION
;
10857 previous_il_op
= MONO_CEE_NEWOBJ
;
10870 args
[1] = emit_get_rgctx_method (cfg
, context_used
,
10871 cmethod
, MONO_RGCTX_INFO_METHOD
);
10874 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn_gshared
, args
);
10876 *sp
++ = mono_emit_jit_icall (cfg
, mono_ldvirtfn
, args
);
10878 inline_costs
+= CALL_COST
* MIN(10, num_calls
++);
10881 case MONO_CEE_LOCALLOC
: {
10882 MonoBasicBlock
*non_zero_bb
, *end_bb
;
10883 int alloc_ptr
= alloc_preg (cfg
);
10885 if (sp
!= stack_start
)
10887 if (cfg
->method
!= method
)
10889 * Inlining this into a loop in a parent could lead to
10890 * stack overflows which is different behavior than the
10891 * non-inlined case, thus disable inlining in this case.
10893 INLINE_FAILURE("localloc");
10895 NEW_BBLOCK (cfg
, non_zero_bb
);
10896 NEW_BBLOCK (cfg
, end_bb
);
10898 /* if size != zero */
10899 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_COMPARE_IMM
, -1, sp
[0]->dreg
, 0);
10900 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_PBNE_UN
, non_zero_bb
);
10902 //size is zero, so result is NULL
10903 MONO_EMIT_NEW_PCONST (cfg
, alloc_ptr
, NULL
);
10904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg
, OP_BR
, end_bb
);
10906 MONO_START_BB (cfg
, non_zero_bb
);
10907 MONO_INST_NEW (cfg
, ins
, OP_LOCALLOC
);
10908 ins
->dreg
= alloc_ptr
;
10909 ins
->sreg1
= sp
[0]->dreg
;
10910 ins
->type
= STACK_PTR
;
10911 MONO_ADD_INS (cfg
->cbb
, ins
);
10913 cfg
->flags
|= MONO_CFG_HAS_ALLOCA
;
10915 ins
->flags
|= MONO_INST_INIT
;
10917 MONO_START_BB (cfg
, end_bb
);
10918 EMIT_NEW_UNALU (cfg
, ins
, OP_MOVE
, alloc_preg (cfg
), alloc_ptr
);
10919 ins
->type
= STACK_PTR
;
10924 case MONO_CEE_ENDFILTER
: {
10925 MonoExceptionClause
*clause
, *nearest
;
10929 if ((sp
!= stack_start
) || (sp
[0]->type
!= STACK_I4
))
10931 MONO_INST_NEW (cfg
, ins
, OP_ENDFILTER
);
10932 ins
->sreg1
= (*sp
)->dreg
;
10933 MONO_ADD_INS (cfg
->cbb
, ins
);
10934 start_new_bblock
= 1;
10937 for (cc
= 0; cc
< header
->num_clauses
; ++cc
) {
10938 clause
= &header
->clauses
[cc
];
10939 if ((clause
->flags
& MONO_EXCEPTION_CLAUSE_FILTER
) &&
10940 ((next_ip
- header
->code
) > clause
->data
.filter_offset
&& (next_ip
- header
->code
) <= clause
->handler_offset
) &&
10941 (!nearest
|| (clause
->data
.filter_offset
< nearest
->data
.filter_offset
)))
10944 g_assert (nearest
);
10945 if ((next_ip
- header
->code
) != nearest
->handler_offset
)
10950 case MONO_CEE_UNALIGNED_
:
10951 ins_flag
|= MONO_INST_UNALIGNED
;
10952 /* FIXME: record alignment? we can assume 1 for now */
10954 case MONO_CEE_VOLATILE_
:
10955 ins_flag
|= MONO_INST_VOLATILE
;
10957 case MONO_CEE_TAIL_
:
10958 ins_flag
|= MONO_INST_TAILCALL
;
10959 cfg
->flags
|= MONO_CFG_HAS_TAILCALL
;
10960 /* Can't inline tailcalls at this time */
10961 inline_costs
+= 100000;
10963 case MONO_CEE_INITOBJ
:
10965 klass
= mini_get_class (method
, token
, generic_context
);
10966 CHECK_TYPELOAD (klass
);
10967 if (mini_class_is_reference (klass
))
10968 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg
, OP_STORE_MEMBASE_IMM
, sp
[0]->dreg
, 0, 0);
10970 mini_emit_initobj (cfg
, *sp
, NULL
, klass
);
10973 case MONO_CEE_CONSTRAINED_
:
10974 constrained_class
= mini_get_class (method
, token
, generic_context
);
10975 CHECK_TYPELOAD (constrained_class
);
10977 case MONO_CEE_CPBLK
:
10979 mini_emit_memory_copy_bytes (cfg
, sp
[0], sp
[1], sp
[2], ins_flag
);
10983 case MONO_CEE_INITBLK
:
10985 mini_emit_memory_init_bytes (cfg
, sp
[0], sp
[1], sp
[2], ins_flag
);
10991 ins_flag
|= MONO_INST_NOTYPECHECK
;
10993 ins_flag
|= MONO_INST_NORANGECHECK
;
10994 /* we ignore the no-nullcheck for now since we
10995 * really do it explicitly only when doing callvirt->call
10998 case MONO_CEE_RETHROW
: {
11000 int handler_offset
= -1;
11002 for (i
= 0; i
< header
->num_clauses
; ++i
) {
11003 MonoExceptionClause
*clause
= &header
->clauses
[i
];
11004 if (MONO_OFFSET_IN_HANDLER (clause
, ip
- header
->code
) && !(clause
->flags
& MONO_EXCEPTION_CLAUSE_FINALLY
)) {
11005 handler_offset
= clause
->handler_offset
;
11010 cfg
->cbb
->flags
|= BB_EXCEPTION_UNSAFE
;
11012 if (handler_offset
== -1)
11015 EMIT_NEW_TEMPLOAD (cfg
, load
, mono_find_exvar_for_offset (cfg
, handler_offset
)->inst_c0
);
11016 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
11017 ins
->sreg1
= load
->dreg
;
11018 MONO_ADD_INS (cfg
->cbb
, ins
);
11020 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
11021 MONO_ADD_INS (cfg
->cbb
, ins
);
11024 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
11025 start_new_bblock
= 1;
11028 case MONO_CEE_MONO_RETHROW
: {
11029 if (sp
[-1]->type
!= STACK_OBJ
)
11032 MONO_INST_NEW (cfg
, ins
, OP_RETHROW
);
11034 ins
->sreg1
= sp
[0]->dreg
;
11035 cfg
->cbb
->out_of_line
= TRUE
;
11036 MONO_ADD_INS (cfg
->cbb
, ins
);
11037 MONO_INST_NEW (cfg
, ins
, OP_NOT_REACHED
);
11038 MONO_ADD_INS (cfg
->cbb
, ins
);
11041 link_bblock (cfg
, cfg
->cbb
, end_bblock
);
11042 start_new_bblock
= 1;
11043 /* This can complicate code generation for llvm since the return value might not be defined */
11044 if (COMPILE_LLVM (cfg
))
11045 INLINE_FAILURE ("mono_rethrow");
11048 case MONO_CEE_SIZEOF
: {
11052 if (mono_metadata_token_table (token
) == MONO_TABLE_TYPESPEC
&& !image_is_dynamic (m_class_get_image (method
->klass
)) && !generic_context
) {
11053 MonoType
*type
= mono_type_create_from_typespec_checked (image
, token
, cfg
->error
);
11056 val
= mono_type_size (type
, &ialign
);
11057 EMIT_NEW_ICONST (cfg
, ins
, val
);
11059 MonoClass
*klass
= mini_get_class (method
, token
, generic_context
);
11060 CHECK_TYPELOAD (klass
);
11062 if (mini_is_gsharedvt_klass (klass
)) {
11063 ins
= mini_emit_get_gsharedvt_info_klass (cfg
, klass
, MONO_RGCTX_INFO_CLASS_SIZEOF
);
11064 ins
->type
= STACK_I4
;
11066 val
= mono_type_size (m_class_get_byval_arg (klass
), &ialign
);
11067 EMIT_NEW_ICONST (cfg
, ins
, val
);
11074 case MONO_CEE_REFANYTYPE
: {
11075 MonoInst
*src_var
, *src
;
11077 GSHAREDVT_FAILURE (il_op
);
11082 src_var
= get_vreg_to_inst (cfg
, sp
[0]->dreg
);
11084 src_var
= mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (mono_defaults
.typed_reference_class
), OP_LOCAL
, sp
[0]->dreg
);
11085 EMIT_NEW_VARLOADA (cfg
, src
, src_var
, src_var
->inst_vtype
);
11086 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg
, ins
, m_class_get_byval_arg (mono_defaults
.typehandle_class
), src
->dreg
, MONO_STRUCT_OFFSET (MonoTypedRef
, type
));
11090 case MONO_CEE_READONLY_
:
11094 case MONO_CEE_UNUSED56
:
11095 case MONO_CEE_UNUSED57
:
11096 case MONO_CEE_UNUSED70
:
11097 case MONO_CEE_UNUSED
:
11098 case MONO_CEE_UNUSED99
:
11099 case MONO_CEE_UNUSED58
:
11100 case MONO_CEE_UNUSED1
:
11104 g_warning ("opcode 0x%02x not handled", il_op
);
11108 if (start_new_bblock
!= 1)
11111 cfg
->cbb
->cil_length
= ip
- cfg
->cbb
->cil_code
;
11112 if (cfg
->cbb
->next_bb
) {
11113 /* This could already be set because of inlining, #693905 */
11114 MonoBasicBlock
*bb
= cfg
->cbb
;
11116 while (bb
->next_bb
)
11118 bb
->next_bb
= end_bblock
;
11120 cfg
->cbb
->next_bb
= end_bblock
;
11123 if (cfg
->method
== method
&& cfg
->domainvar
) {
11125 MonoInst
*get_domain
;
11127 cfg
->cbb
= init_localsbb
;
11129 get_domain
= mono_create_tls_get (cfg
, TLS_KEY_DOMAIN
);
11130 NEW_TEMPSTORE (cfg
, store
, cfg
->domainvar
->inst_c0
, get_domain
);
11131 MONO_ADD_INS (cfg
->cbb
, store
);
11132 cfg
->domainvar_inited
= TRUE
;
11135 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11136 if (cfg
->compile_aot
)
11137 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11138 mono_get_got_var (cfg
);
11141 if (cfg
->method
== method
&& cfg
->got_var
)
11142 mono_emit_load_got_addr (cfg
);
11144 if (init_localsbb
) {
11145 cfg
->cbb
= init_localsbb
;
11147 for (i
= 0; i
< header
->num_locals
; ++i
) {
11149 * Vtype initialization might need to be done after CEE_JIT_ATTACH, since it can make calls to memset (),
11150 * which need the trampoline code to work.
11152 if (MONO_TYPE_ISSTRUCT (header
->locals
[i
]))
11153 cfg
->cbb
= init_localsbb2
;
11155 cfg
->cbb
= init_localsbb
;
11156 emit_init_local (cfg
, i
, header
->locals
[i
], init_locals
);
11160 if (cfg
->init_ref_vars
&& cfg
->method
== method
) {
11161 /* Emit initialization for ref vars */
11162 // FIXME: Avoid duplication initialization for IL locals.
11163 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11164 MonoInst
*ins
= cfg
->varinfo
[i
];
11166 if (ins
->opcode
== OP_LOCAL
&& ins
->type
== STACK_OBJ
)
11167 MONO_EMIT_NEW_PCONST (cfg
, ins
->dreg
, NULL
);
11171 if (cfg
->lmf_var
&& cfg
->method
== method
&& !cfg
->llvm_only
) {
11172 cfg
->cbb
= init_localsbb
;
11173 emit_push_lmf (cfg
);
11176 cfg
->cbb
= init_localsbb
;
11177 mini_profiler_emit_enter (cfg
);
11180 MonoBasicBlock
*bb
;
11183 * Make seq points at backward branch targets interruptable.
11185 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
11186 if (bb
->code
&& bb
->in_count
> 1 && bb
->code
->opcode
== OP_SEQ_POINT
)
11187 bb
->code
->flags
|= MONO_INST_SINGLE_STEP_LOC
;
11190 /* Add a sequence point for method entry/exit events */
11191 if (seq_points
&& cfg
->gen_sdb_seq_points
) {
11192 NEW_SEQ_POINT (cfg
, ins
, METHOD_ENTRY_IL_OFFSET
, FALSE
);
11193 MONO_ADD_INS (init_localsbb
, ins
);
11194 NEW_SEQ_POINT (cfg
, ins
, METHOD_EXIT_IL_OFFSET
, FALSE
);
11195 MONO_ADD_INS (cfg
->bb_exit
, ins
);
11199 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11200 * the code they refer to was dead (#11880).
11202 if (sym_seq_points
) {
11203 for (i
= 0; i
< header
->code_size
; ++i
) {
11204 if (mono_bitset_test_fast (seq_point_locs
, i
) && !mono_bitset_test_fast (seq_point_set_locs
, i
)) {
11207 NEW_SEQ_POINT (cfg
, ins
, i
, FALSE
);
11208 mono_add_seq_point (cfg
, NULL
, ins
, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE
);
11215 if (cfg
->method
== method
) {
11216 compute_bb_regions (cfg
);
11218 MonoBasicBlock
*bb
;
11219 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
11220 for (bb
= start_bblock
; bb
!= end_bblock
; bb
= bb
->next_bb
) {
11221 bb
->real_offset
= inline_offset
;
11225 if (inline_costs
< 0) {
11228 /* Method is too large */
11229 mname
= mono_method_full_name (method
, TRUE
);
11230 mono_cfg_set_exception_invalid_program (cfg
, g_strdup_printf ("Method %s is too complex.", mname
));
11234 if ((cfg
->verbose_level
> 2) && (cfg
->method
== method
))
11235 mono_print_code (cfg
, "AFTER METHOD-TO-IR");
11240 if (cfg
->verbose_level
> 3)
11241 g_print ("exiting due to error");
11243 g_assert (!is_ok (cfg
->error
));
11247 if (cfg
->verbose_level
> 3)
11248 g_print ("exiting due to exception");
11250 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_NONE
);
11254 if (cfg
->verbose_level
> 3)
11255 g_print ("exiting due to invalid il");
11257 set_exception_type_from_invalid_il (cfg
, method
, ip
);
11261 g_slist_free (class_inits
);
11262 mono_basic_block_free (original_bb
);
11263 cfg
->dont_inline
= g_list_remove (cfg
->dont_inline
, method
);
11264 if (cfg
->exception_type
)
11267 return inline_costs
;
11271 store_membase_reg_to_store_membase_imm (int opcode
)
11274 case OP_STORE_MEMBASE_REG
:
11275 return OP_STORE_MEMBASE_IMM
;
11276 case OP_STOREI1_MEMBASE_REG
:
11277 return OP_STOREI1_MEMBASE_IMM
;
11278 case OP_STOREI2_MEMBASE_REG
:
11279 return OP_STOREI2_MEMBASE_IMM
;
11280 case OP_STOREI4_MEMBASE_REG
:
11281 return OP_STOREI4_MEMBASE_IMM
;
11282 case OP_STOREI8_MEMBASE_REG
:
11283 return OP_STOREI8_MEMBASE_IMM
;
11285 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the register+immediate variant of a two-register opcode
 * (int/long ALU, shift, compare and store-membase ops), so a constant
 * operand can be folded into the instruction.
 * NOTE(review): the surrounding `switch (opcode)`, most `case` labels and
 * the final fallback return were lost when this file was extracted; only
 * the return statements survive below. Code left byte-identical — restore
 * from the full source before editing.
 */
11292 mono_op_to_op_imm (int opcode
)
/* 32-bit integer ALU and shift opcodes -> *_IMM forms */
11296 return OP_IADD_IMM
;
11298 return OP_ISUB_IMM
;
11300 return OP_IDIV_IMM
;
11302 return OP_IDIV_UN_IMM
;
11304 return OP_IREM_IMM
;
11306 return OP_IREM_UN_IMM
;
11308 return OP_IMUL_IMM
;
11310 return OP_IAND_IMM
;
11314 return OP_IXOR_IMM
;
11316 return OP_ISHL_IMM
;
11318 return OP_ISHR_IMM
;
11320 return OP_ISHR_UN_IMM
;
/* 64-bit (long) ALU and shift opcodes */
11323 return OP_LADD_IMM
;
11325 return OP_LSUB_IMM
;
11327 return OP_LAND_IMM
;
11331 return OP_LXOR_IMM
;
11333 return OP_LSHL_IMM
;
11335 return OP_LSHR_IMM
;
11337 return OP_LSHR_UN_IMM
;
/* long mul/rem only have immediate forms when longs fit in one register */
11338 #if SIZEOF_REGISTER == 8
11340 return OP_LMUL_IMM
;
11342 return OP_LREM_IMM
;
/* compare opcodes */
11346 return OP_COMPARE_IMM
;
11348 return OP_ICOMPARE_IMM
;
11350 return OP_LCOMPARE_IMM
;
/* store-membase with an immediate source value */
11352 case OP_STORE_MEMBASE_REG
:
11353 return OP_STORE_MEMBASE_IMM
;
11354 case OP_STOREI1_MEMBASE_REG
:
11355 return OP_STOREI1_MEMBASE_IMM
;
11356 case OP_STOREI2_MEMBASE_REG
:
11357 return OP_STOREI2_MEMBASE_IMM
;
11358 case OP_STOREI4_MEMBASE_REG
:
11359 return OP_STOREI4_MEMBASE_IMM
;
/* target-specific opcodes for the x86 family */
11361 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11363 return OP_X86_PUSH_IMM
;
11364 case OP_X86_COMPARE_MEMBASE_REG
:
11365 return OP_X86_COMPARE_MEMBASE_IMM
;
11367 #if defined(TARGET_AMD64)
11368 case OP_AMD64_ICOMPARE_MEMBASE_REG
:
11369 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* indirect calls with a constant target become direct calls */
11371 case OP_VOIDCALL_REG
:
11372 return OP_VOIDCALL
;
11380 return OP_LOCALLOC_IMM
;
11387 stind_to_store_membase (int opcode
)
11390 case MONO_CEE_STIND_I1
:
11391 return OP_STOREI1_MEMBASE_REG
;
11392 case MONO_CEE_STIND_I2
:
11393 return OP_STOREI2_MEMBASE_REG
;
11394 case MONO_CEE_STIND_I4
:
11395 return OP_STOREI4_MEMBASE_REG
;
11396 case MONO_CEE_STIND_I
:
11397 case MONO_CEE_STIND_REF
:
11398 return OP_STORE_MEMBASE_REG
;
11399 case MONO_CEE_STIND_I8
:
11400 return OP_STOREI8_MEMBASE_REG
;
11401 case MONO_CEE_STIND_R4
:
11402 return OP_STORER4_MEMBASE_REG
;
11403 case MONO_CEE_STIND_R8
:
11404 return OP_STORER8_MEMBASE_REG
;
11406 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert a load-through-base-register opcode into the equivalent
 * load-from-absolute-address (OP_*_MEM) opcode, on targets that have
 * such addressing modes. Returns -1 when no OP_*_MEM form exists for
 * OPCODE on the current target.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	/* 64-bit loads only exist when registers are 64 bit wide */
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif
	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Fuse an ALU opcode with the store (STORE_OPCODE) that consumes its
 * result, yielding an x86/amd64 read-modify-write instruction that
 * operates directly on the memory operand. Only pointer-sized (and, on
 * amd64, 4/8-byte) stores qualify, per the guards below.
 * NOTE(review): the `case` labels selecting which ALU opcode maps to
 * which *_MEMBASE_REG / *_MEMBASE_IMM form, and the fallback return,
 * were lost when this file was extracted. Code left byte-identical —
 * restore from the full source before editing.
 */
11439 op_to_op_dest_membase (int store_opcode
, int opcode
)
11441 #if defined(TARGET_X86)
/* only pointer-sized / 32-bit stores can be fused on x86 */
11442 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
)))
11447 return OP_X86_ADD_MEMBASE_REG
;
11449 return OP_X86_SUB_MEMBASE_REG
;
11451 return OP_X86_AND_MEMBASE_REG
;
11453 return OP_X86_OR_MEMBASE_REG
;
11455 return OP_X86_XOR_MEMBASE_REG
;
11458 return OP_X86_ADD_MEMBASE_IMM
;
11461 return OP_X86_SUB_MEMBASE_IMM
;
11464 return OP_X86_AND_MEMBASE_IMM
;
11467 return OP_X86_OR_MEMBASE_IMM
;
11470 return OP_X86_XOR_MEMBASE_IMM
;
11476 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8-byte stores */
11477 if (!((store_opcode
== OP_STORE_MEMBASE_REG
) || (store_opcode
== OP_STOREI4_MEMBASE_REG
) || (store_opcode
== OP_STOREI8_MEMBASE_REG
)))
11482 return OP_X86_ADD_MEMBASE_REG
;
11484 return OP_X86_SUB_MEMBASE_REG
;
11486 return OP_X86_AND_MEMBASE_REG
;
11488 return OP_X86_OR_MEMBASE_REG
;
11490 return OP_X86_XOR_MEMBASE_REG
;
11492 return OP_X86_ADD_MEMBASE_IMM
;
11494 return OP_X86_SUB_MEMBASE_IMM
;
11496 return OP_X86_AND_MEMBASE_IMM
;
11498 return OP_X86_OR_MEMBASE_IMM
;
11500 return OP_X86_XOR_MEMBASE_IMM
;
/* 64-bit (AMD64_*) variants below — presumably selected by the long
 * (OP_L*) ALU opcodes; TODO confirm against the full source */
11502 return OP_AMD64_ADD_MEMBASE_REG
;
11504 return OP_AMD64_SUB_MEMBASE_REG
;
11506 return OP_AMD64_AND_MEMBASE_REG
;
11508 return OP_AMD64_OR_MEMBASE_REG
;
11510 return OP_AMD64_XOR_MEMBASE_REG
;
11513 return OP_AMD64_ADD_MEMBASE_IMM
;
11516 return OP_AMD64_SUB_MEMBASE_IMM
;
11519 return OP_AMD64_AND_MEMBASE_IMM
;
11522 return OP_AMD64_OR_MEMBASE_IMM
;
11525 return OP_AMD64_XOR_MEMBASE_IMM
;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that consumes it
 * into an x86 SETcc-to-memory opcode. Only OP_STOREI1_MEMBASE_REG
 * stores qualify, since SETcc writes a single byte.
 * NOTE(review): the `case` labels naming the compare opcodes being
 * matched (and the fallback return) were lost when this file was
 * extracted; from the SETEQ/SETNE returns the two arms presumably
 * handle the equal and not-equal compare results — TODO confirm.
 * Code left byte-identical.
 */
11535 op_to_op_store_membase (int store_opcode
, int opcode
)
11537 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11540 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
11541 return OP_X86_SETEQ_MEMBASE
;
11543 if (store_opcode
== OP_STOREI1_MEMBASE_REG
)
11544 return OP_X86_SETNE_MEMBASE
;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the FIRST source operand of
 * OPCODE into a single instruction with a memory operand, on x86/amd64.
 * cfg->backend->ilp32 distinguishes the x32 ABI, where OP_LOAD_MEMBASE
 * is only 32 bits wide.
 * NOTE(review): the enclosing `switch` statements, several `case` labels
 * and the fallback return were lost when this file was extracted. Code
 * left byte-identical — restore from the full source before editing.
 */
11552 op_to_op_src1_membase (MonoCompile
*cfg
, int load_opcode
, int opcode
)
11555 /* FIXME: This has sign extension issues */
11557 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11558 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be folded */
11561 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
11566 return OP_X86_PUSH_MEMBASE
;
11567 case OP_COMPARE_IMM
:
11568 case OP_ICOMPARE_IMM
:
11569 return OP_X86_COMPARE_MEMBASE_IMM
;
11572 return OP_X86_COMPARE_MEMBASE_REG
;
11576 #ifdef TARGET_AMD64
11577 /* FIXME: This has sign extension issues */
11579 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11580 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push of a 64-bit value: plain loads only count when not x32 */
11585 if ((load_opcode
== OP_LOAD_MEMBASE
&& !cfg
->backend
->ilp32
) || (load_opcode
== OP_LOADI8_MEMBASE
))
11586 return OP_X86_PUSH_MEMBASE
;
11588 /* FIXME: This only works for 32 bit immediates
11589 case OP_COMPARE_IMM:
11590 case OP_LCOMPARE_IMM:
11591 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11592 return OP_AMD64_COMPARE_MEMBASE_IMM;
11594 case OP_ICOMPARE_IMM
:
11595 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
11596 return OP_AMD64_ICOMPARE_MEMBASE_IMM
;
/* register compare: pick the 32- or 64-bit form by load width */
11600 if (cfg
->backend
->ilp32
&& load_opcode
== OP_LOAD_MEMBASE
)
11601 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
11602 if ((load_opcode
== OP_LOAD_MEMBASE
&& !cfg
->backend
->ilp32
) || (load_opcode
== OP_LOADI8_MEMBASE
))
11603 return OP_AMD64_COMPARE_MEMBASE_REG
;
11606 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
))
11607 return OP_AMD64_ICOMPARE_MEMBASE_REG
;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load (LOAD_OPCODE) feeding the SECOND source operand
 * of OPCODE into a single reg,[mem] instruction on x86/amd64. On amd64
 * the load width (4-byte, or 8-byte / pointer when not x32) selects
 * between the 32-bit (X86_*/ICOMPARE) and 64-bit (AMD64_*) forms.
 * NOTE(review): the enclosing `switch` statements, the `case` labels
 * naming the ALU opcodes, and the fallback return were lost when this
 * file was extracted. Code left byte-identical — restore from the full
 * source before editing.
 */
11616 op_to_op_src2_membase (MonoCompile
*cfg
, int load_opcode
, int opcode
)
/* x86: only pointer-sized / 32-bit loads can be folded */
11619 if (!((load_opcode
== OP_LOAD_MEMBASE
) || (load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
)))
11625 return OP_X86_COMPARE_REG_MEMBASE
;
11627 return OP_X86_ADD_REG_MEMBASE
;
11629 return OP_X86_SUB_REG_MEMBASE
;
11631 return OP_X86_AND_REG_MEMBASE
;
11633 return OP_X86_OR_REG_MEMBASE
;
11635 return OP_X86_XOR_REG_MEMBASE
;
11639 #ifdef TARGET_AMD64
/* 32-bit-wide loads (or pointer loads under x32): 32-bit forms */
11640 if ((load_opcode
== OP_LOADI4_MEMBASE
) || (load_opcode
== OP_LOADU4_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
&& cfg
->backend
->ilp32
)) {
11643 return OP_AMD64_ICOMPARE_REG_MEMBASE
;
11645 return OP_X86_ADD_REG_MEMBASE
;
11647 return OP_X86_SUB_REG_MEMBASE
;
11649 return OP_X86_AND_REG_MEMBASE
;
11651 return OP_X86_OR_REG_MEMBASE
;
11653 return OP_X86_XOR_REG_MEMBASE
;
/* 64-bit-wide loads: AMD64_* forms */
11655 } else if ((load_opcode
== OP_LOADI8_MEMBASE
) || (load_opcode
== OP_LOAD_MEMBASE
&& !cfg
->backend
->ilp32
)) {
11659 return OP_AMD64_COMPARE_REG_MEMBASE
;
11661 return OP_AMD64_ADD_REG_MEMBASE
;
11663 return OP_AMD64_SUB_REG_MEMBASE
;
11665 return OP_AMD64_AND_REG_MEMBASE
;
11667 return OP_AMD64_OR_REG_MEMBASE
;
11669 return OP_AMD64_XOR_REG_MEMBASE
;
/*
 * mono_op_to_op_imm_noemul:
 *
 * Return the immediate variant of OPCODE, but only for opcodes that are not
 * emulated on this architecture; emulated long shifts / mul / div must keep
 * their register form so the emulation call can be emitted.  The #if blocks
 * filter out the emulated opcode groups (their case labels were dropped by
 * the extraction — embedded line numbers skip); everything else falls
 * through to mono_op_to_op_imm ().
 */
11678 mono_op_to_op_imm_noemul (int opcode
)
11681 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11687 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11694 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Not an emulated opcode: safe to use the generic immediate mapping. */
11699 return mono_op_to_op_imm (opcode
);
11704 * mono_handle_global_vregs:
11706 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass 1 records, per vreg, the single bblock that uses it (block_num + 1) or
 * -1 once a second bblock is seen; vregs seen in two bblocks get a MonoInst
 * variable created for them.  Pass 2 does the reverse: variables confined to
 * one bblock are demoted back to local vregs.  Finally the varinfo/vars
 * tables are compacted to drop dead entries.
 *
 * NOTE(review): many interior lines (declarations of vreg/prev_bb/i/pos,
 * braces, case labels) were dropped by the extraction — embedded line
 * numbers skip.  Code is kept byte-identical below.
 */
11710 mono_handle_global_vregs (MonoCompile
*cfg
)
11712 gint32
*vreg_to_bb
;
11713 MonoBasicBlock
*bb
;
/*
 * NOTE(review): element size here is sizeof (gint32 *) although the array
 * holds gint32, and the "+ 1" is outside the multiplication — over-allocates
 * (harmless) but looks unintended; verify against upstream.
 */
11716 vreg_to_bb
= (gint32
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gint32
*) * cfg
->next_vreg
+ 1);
11718 #ifdef MONO_ARCH_SIMD_INTRINSICS
11719 if (cfg
->uses_simd_intrinsics
& MONO_CFG_USES_SIMD_INTRINSICS_SIMPLIFY_INDIRECTION
)
11720 mono_simd_simplify_indirection (cfg
);
11723 /* Find local vregs used in more than one bb */
11724 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
11725 MonoInst
*ins
= bb
->code
;
11726 int block_num
= bb
->block_num
;
11728 if (cfg
->verbose_level
> 2)
11729 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb
->block_num
);
11732 for (; ins
; ins
= ins
->next
) {
11733 const char *spec
= INS_INFO (ins
->opcode
);
11734 int regtype
= 0, regindex
;
11737 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11738 mono_print_ins (ins
);
/* IR must already be lowered past the CIL opcodes at this point. */
11740 g_assert (ins
->opcode
>= MONO_CEE_LAST
);
/* Visit dreg (index 0) then the three source registers (1..3). */
11742 for (regindex
= 0; regindex
< 4; regindex
++) {
11745 if (regindex
== 0) {
11746 regtype
= spec
[MONO_INST_DEST
];
11747 if (regtype
== ' ')
11750 } else if (regindex
== 1) {
11751 regtype
= spec
[MONO_INST_SRC1
];
11752 if (regtype
== ' ')
11755 } else if (regindex
== 2) {
11756 regtype
= spec
[MONO_INST_SRC2
];
11757 if (regtype
== ' ')
11760 } else if (regindex
== 3) {
11761 regtype
= spec
[MONO_INST_SRC3
];
11762 if (regtype
== ' ')
11767 #if SIZEOF_REGISTER == 4
11768 /* In the LLVM case, the long opcodes are not decomposed */
11769 if (regtype
== 'l' && !COMPILE_LLVM (cfg
)) {
11771 * Since some instructions reference the original long vreg,
11772 * and some reference the two component vregs, it is quite hard
11773 * to determine when it needs to be global. So be conservative.
11775 if (!get_vreg_to_inst (cfg
, vreg
)) {
11776 mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (mono_defaults
.int64_class
), OP_LOCAL
, vreg
);
11778 if (cfg
->verbose_level
> 2)
11779 printf ("LONG VREG R%d made global.\n", vreg
);
11783 * Make the component vregs volatile since the optimizations can
11784 * get confused otherwise.
11786 get_vreg_to_inst (cfg
, MONO_LVREG_LS (vreg
))->flags
|= MONO_INST_VOLATILE
;
11787 get_vreg_to_inst (cfg
, MONO_LVREG_MS (vreg
))->flags
|= MONO_INST_VOLATILE
;
11791 g_assert (vreg
!= -1);
11793 prev_bb
= vreg_to_bb
[vreg
];
11794 if (prev_bb
== 0) {
11795 /* 0 is a valid block num */
11796 vreg_to_bb
[vreg
] = block_num
+ 1;
11797 } else if ((prev_bb
!= block_num
+ 1) && (prev_bb
!= -1)) {
/* Hardware registers are implicitly global; skip them. */
11798 if (((regtype
== 'i' && (vreg
< MONO_MAX_IREGS
))) || (regtype
== 'f' && (vreg
< MONO_MAX_FREGS
)))
11801 if (!get_vreg_to_inst (cfg
, vreg
)) {
11802 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11803 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg
, vreg_to_bb
[vreg
], block_num
);
/* Pick the variable type from the regtype / reference-ness. */
11807 if (vreg_is_ref (cfg
, vreg
))
11808 mono_compile_create_var_for_vreg (cfg
, mono_get_object_type (), OP_LOCAL
, vreg
);
11810 mono_compile_create_var_for_vreg (cfg
, mono_get_int_type (), OP_LOCAL
, vreg
);
11813 mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (mono_defaults
.int64_class
), OP_LOCAL
, vreg
);
11816 mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_LOCAL
, vreg
);
11820 mono_compile_create_var_for_vreg (cfg
, m_class_get_byval_arg (ins
->klass
), OP_LOCAL
, vreg
);
11823 g_assert_not_reached ();
11827 /* Flag as having been used in more than one bb */
11828 vreg_to_bb
[vreg
] = -1;
11834 /* If a variable is used in only one bblock, convert it into a local vreg */
11835 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
11836 MonoInst
*var
= cfg
->varinfo
[i
];
11837 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
11839 switch (var
->type
) {
11845 #if SIZEOF_REGISTER == 8
11848 #if !defined(TARGET_X86)
11849 /* Enabling this screws up the fp stack on x86 */
11852 if (mono_arch_is_soft_float ())
11856 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
11860 /* Arguments are implicitly global */
11861 /* Putting R4 vars into registers doesn't work currently */
11862 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
11863 if ((var
->opcode
!= OP_ARG
) && (var
!= cfg
->ret
) && !(var
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && (vreg_to_bb
[var
->dreg
] != -1) && (m_class_get_byval_arg (var
->klass
)->type
!= MONO_TYPE_R4
) && !cfg
->disable_vreg_to_lvreg
&& var
!= cfg
->gsharedvt_info_var
&& var
!= cfg
->gsharedvt_locals_var
&& var
!= cfg
->lmf_addr_var
) {
11865 * Make that the variable's liveness interval doesn't contain a call, since
11866 * that would cause the lvreg to be spilled, making the whole optimization
11869 /* This is too slow for JIT compilation */
11871 if (cfg
->compile_aot
&& vreg_to_bb
[var
->dreg
]) {
11873 int def_index
, call_index
, ins_index
;
11874 gboolean spilled
= FALSE
;
11879 for (ins
= vreg_to_bb
[var
->dreg
]->code
; ins
; ins
= ins
->next
) {
11880 const char *spec
= INS_INFO (ins
->opcode
);
11882 if ((spec
[MONO_INST_DEST
] != ' ') && (ins
->dreg
== var
->dreg
))
11883 def_index
= ins_index
;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * presumably should test MONO_INST_SRC2/sreg2; verify against upstream
 * before changing, as the surrounding code is incomplete in this view.
 */
11885 if (((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
)) ||
11886 ((spec
[MONO_INST_SRC1
] != ' ') && (ins
->sreg1
== var
->dreg
))) {
11887 if (call_index
> def_index
) {
11893 if (MONO_IS_CALL (ins
))
11894 call_index
= ins_index
;
/* Demote: mark the variable dead and drop the vreg -> var mapping. */
11904 if (G_UNLIKELY (cfg
->verbose_level
> 2))
11905 printf ("CONVERTED R%d(%d) TO VREG.\n", var
->dreg
, vmv
->idx
);
11906 var
->flags
|= MONO_INST_IS_DEAD
;
11907 cfg
->vreg_to_inst
[var
->dreg
] = NULL
;
11914 * Compress the varinfo and vars tables so the liveness computation is faster and
11915 * takes up less space.
11918 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11919 MonoInst
*var
= cfg
->varinfo
[i
];
11920 if (pos
< i
&& cfg
->locals_start
== i
)
11921 cfg
->locals_start
= pos
;
11922 if (!(var
->flags
& MONO_INST_IS_DEAD
)) {
11924 cfg
->varinfo
[pos
] = cfg
->varinfo
[i
];
11925 cfg
->varinfo
[pos
]->inst_c0
= pos
;
11926 memcpy (&cfg
->vars
[pos
], &cfg
->vars
[i
], sizeof (MonoMethodVar
));
11927 cfg
->vars
[pos
].idx
= pos
;
11928 #if SIZEOF_REGISTER == 4
11929 if (cfg
->varinfo
[pos
]->type
== STACK_I8
) {
11930 /* Modify the two component vars too */
11933 var1
= get_vreg_to_inst (cfg
, MONO_LVREG_LS (cfg
->varinfo
[pos
]->dreg
));
11934 var1
->inst_c0
= pos
;
11935 var1
= get_vreg_to_inst (cfg
, MONO_LVREG_MS (cfg
->varinfo
[pos
]->dreg
));
11936 var1
->inst_c0
= pos
;
11943 cfg
->num_varinfo
= pos
;
11944 if (cfg
->locals_start
> cfg
->num_varinfo
)
11945 cfg
->locals_start
= cfg
->num_varinfo
;
11949 * mono_allocate_gsharedvt_vars:
11951 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
11952 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
/*
 * Mapping convention (as written below): locals store idx + 1 so that 0 can
 * mean "no entry"; arguments store -1 and are handled via
 * OP_GSHAREDVT_ARG_REGOFFSET.
 */
11955 mono_allocate_gsharedvt_vars (MonoCompile
*cfg
)
/* One slot per vreg, zero-initialized (0 == no gsharedvt entry). */
11959 cfg
->gsharedvt_vreg_to_idx
= (int *)mono_mempool_alloc0 (cfg
->mempool
, sizeof (int) * cfg
->next_vreg
);
11961 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
11962 MonoInst
*ins
= cfg
->varinfo
[i
];
11965 if (mini_is_gsharedvt_variable_type (ins
->inst_vtype
)) {
/* i >= locals_start means this is a local, not an argument. */
11966 if (i
>= cfg
->locals_start
) {
11968 idx
= get_gsharedvt_info_slot (cfg
, ins
->inst_vtype
, MONO_RGCTX_INFO_LOCAL_OFFSET
);
11969 cfg
->gsharedvt_vreg_to_idx
[ins
->dreg
] = idx
+ 1;
11970 ins
->opcode
= OP_GSHAREDVT_LOCAL
;
11971 ins
->inst_imm
= idx
;
/* Argument: mark with -1; address is computed from its regoffset. */
11974 cfg
->gsharedvt_vreg_to_idx
[ins
->dreg
] = -1;
11975 ins
->opcode
= OP_GSHAREDVT_ARG_REGOFFSET
;
11982 * mono_spill_global_vars:
11984 * Generate spill code for variables which are not allocated to registers,
11985 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11986 * code is generated which could be optimized by the local optimization passes.
/*
 * Overall shape (per the code below): walk every bblock/instruction, rewrite
 * dregs/sregs that refer to stack-allocated variables into freshly allocated
 * lvregs plus explicit load/store membase instructions (fusing them into the
 * instruction when a *_membase variant exists), cache variable->lvreg
 * mappings in vreg_to_lvreg (invalidated at calls), and record instruction-
 * precise live ranges for LIVERANGE_START/END emission.
 *
 * NOTE(review): the extraction dropped many interior lines (declarations of
 * lvreg/lvregs/store_opcode/pos, braces, case labels — the embedded source
 * line numbers skip).  Code is kept byte-identical below.
 */
11989 mono_spill_global_vars (MonoCompile
*cfg
, gboolean
*need_local_opts
)
11991 MonoBasicBlock
*bb
;
11993 int orig_next_vreg
;
11994 guint32
*vreg_to_lvreg
;
11996 guint32 i
, lvregs_len
, lvregs_size
;
11997 gboolean dest_has_lvreg
= FALSE
;
11998 MonoStackType stacktypes
[128];
11999 MonoInst
**live_range_start
, **live_range_end
;
12000 MonoBasicBlock
**live_range_start_bb
, **live_range_end_bb
;
12002 *need_local_opts
= FALSE
;
12004 memset (spec2
, 0, sizeof (spec2
));
12006 /* FIXME: Move this function to mini.c */
/* Map ins spec regtype characters to stack types for alloc_dreg. */
12007 stacktypes
[(int)'i'] = STACK_PTR
;
12008 stacktypes
[(int)'l'] = STACK_I8
;
12009 stacktypes
[(int)'f'] = STACK_R8
;
12010 #ifdef MONO_ARCH_SIMD_INTRINSICS
12011 stacktypes
[(int)'x'] = STACK_VTYPE
;
12014 #if SIZEOF_REGISTER == 4
12015 /* Create MonoInsts for longs */
12016 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12017 MonoInst
*ins
= cfg
->varinfo
[i
];
12019 if ((ins
->opcode
!= OP_REGVAR
) && !(ins
->flags
& MONO_INST_IS_DEAD
)) {
12020 switch (ins
->type
) {
12025 if (ins
->type
== STACK_R8
&& !COMPILE_SOFT_FLOAT (cfg
))
12028 g_assert (ins
->opcode
== OP_REGOFFSET
);
/* Point the low/high component vars at the two halves of the slot. */
12030 tree
= get_vreg_to_inst (cfg
, MONO_LVREG_LS (ins
->dreg
));
12032 tree
->opcode
= OP_REGOFFSET
;
12033 tree
->inst_basereg
= ins
->inst_basereg
;
12034 tree
->inst_offset
= ins
->inst_offset
+ MINI_LS_WORD_OFFSET
;
12036 tree
= get_vreg_to_inst (cfg
, MONO_LVREG_MS (ins
->dreg
));
12038 tree
->opcode
= OP_REGOFFSET
;
12039 tree
->inst_basereg
= ins
->inst_basereg
;
12040 tree
->inst_offset
= ins
->inst_offset
+ MINI_MS_WORD_OFFSET
;
12050 if (cfg
->compute_gc_maps
) {
12051 /* registers need liveness info even for !non refs */
12052 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12053 MonoInst
*ins
= cfg
->varinfo
[i
];
12055 if (ins
->opcode
== OP_REGVAR
)
12056 ins
->flags
|= MONO_INST_GC_TRACK
;
12060 /* FIXME: widening and truncation */
12063 * As an optimization, when a variable allocated to the stack is first loaded into
12064 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12065 * the variable again.
12067 orig_next_vreg
= cfg
->next_vreg
;
12068 vreg_to_lvreg
= (guint32
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * cfg
->next_vreg
);
12069 lvregs_size
= 1024;
12070 lvregs
= (guint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (guint32
) * lvregs_size
);
12074 * These arrays contain the first and last instructions accessing a given
12076 * Since we emit bblocks in the same order we process them here, and we
12077 * don't split live ranges, these will precisely describe the live range of
12078 * the variable, i.e. the instruction range where a valid value can be found
12079 * in the variables location.
12080 * The live range is computed using the liveness info computed by the liveness pass.
12081 * We can't use vmv->range, since that is an abstract live range, and we need
12082 * one which is instruction precise.
12083 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12085 /* FIXME: Only do this if debugging info is requested */
12086 live_range_start
= g_new0 (MonoInst
*, cfg
->next_vreg
);
12087 live_range_end
= g_new0 (MonoInst
*, cfg
->next_vreg
);
12088 live_range_start_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
12089 live_range_end_bb
= g_new (MonoBasicBlock
*, cfg
->next_vreg
);
12091 /* Add spill loads/stores */
12092 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
12095 if (cfg
->verbose_level
> 2)
12096 printf ("\nSPILL BLOCK %d:\n", bb
->block_num
);
12098 /* Clear vreg_to_lvreg array */
12099 for (i
= 0; i
< lvregs_len
; i
++)
12100 vreg_to_lvreg
[lvregs
[i
]] = 0;
12104 MONO_BB_FOR_EACH_INS (bb
, ins
) {
12105 const char *spec
= INS_INFO (ins
->opcode
);
12106 int regtype
, srcindex
, sreg
, tmp_reg
, prev_dreg
, num_sregs
;
12107 gboolean store
, no_lvreg
;
12108 int sregs
[MONO_MAX_SRC_REGS
];
12110 if (G_UNLIKELY (cfg
->verbose_level
> 2))
12111 mono_print_ins (ins
);
12113 if (ins
->opcode
== OP_NOP
)
12117 * We handle LDADDR here as well, since it can only be decomposed
12118 * when variable addresses are known.
12120 if (ins
->opcode
== OP_LDADDR
) {
12121 MonoInst
*var
= (MonoInst
*)ins
->inst_p0
;
12123 if (var
->opcode
== OP_VTARG_ADDR
) {
12124 /* Happens on SPARC/S390 where vtypes are passed by reference */
12125 MonoInst
*vtaddr
= var
->inst_left
;
12126 if (vtaddr
->opcode
== OP_REGVAR
) {
12127 ins
->opcode
= OP_MOVE
;
12128 ins
->sreg1
= vtaddr
->dreg
;
12130 else if (var
->inst_left
->opcode
== OP_REGOFFSET
) {
12131 ins
->opcode
= OP_LOAD_MEMBASE
;
12132 ins
->inst_basereg
= vtaddr
->inst_basereg
;
12133 ins
->inst_offset
= vtaddr
->inst_offset
;
/* Negative idx == gsharedvt argument (see mono_allocate_gsharedvt_vars). */
12136 } else if (cfg
->gsharedvt
&& cfg
->gsharedvt_vreg_to_idx
[var
->dreg
] < 0) {
12137 /* gsharedvt arg passed by ref */
12138 g_assert (var
->opcode
== OP_GSHAREDVT_ARG_REGOFFSET
);
12140 ins
->opcode
= OP_LOAD_MEMBASE
;
12141 ins
->inst_basereg
= var
->inst_basereg
;
12142 ins
->inst_offset
= var
->inst_offset
;
12143 } else if (cfg
->gsharedvt
&& cfg
->gsharedvt_vreg_to_idx
[var
->dreg
]) {
12144 MonoInst
*load
, *load2
, *load3
;
/* Stored as idx + 1; undo the offset here. */
12145 int idx
= cfg
->gsharedvt_vreg_to_idx
[var
->dreg
] - 1;
12146 int reg1
, reg2
, reg3
;
12147 MonoInst
*info_var
= cfg
->gsharedvt_info_var
;
12148 MonoInst
*locals_var
= cfg
->gsharedvt_locals_var
;
12152 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12155 g_assert (var
->opcode
== OP_GSHAREDVT_LOCAL
);
12157 g_assert (info_var
);
12158 g_assert (locals_var
);
12160 /* Mark the instruction used to compute the locals var as used */
12161 cfg
->gsharedvt_locals_var_ins
= NULL
;
12163 /* Load the offset */
12164 if (info_var
->opcode
== OP_REGOFFSET
) {
12165 reg1
= alloc_ireg (cfg
);
12166 NEW_LOAD_MEMBASE (cfg
, load
, OP_LOAD_MEMBASE
, reg1
, info_var
->inst_basereg
, info_var
->inst_offset
);
12167 } else if (info_var
->opcode
== OP_REGVAR
) {
12169 reg1
= info_var
->dreg
;
12171 g_assert_not_reached ();
12173 reg2
= alloc_ireg (cfg
);
12174 NEW_LOAD_MEMBASE (cfg
, load2
, OP_LOADI4_MEMBASE
, reg2
, reg1
, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo
, entries
) + (idx
* TARGET_SIZEOF_VOID_P
));
12175 /* Load the locals area address */
12176 reg3
= alloc_ireg (cfg
);
12177 if (locals_var
->opcode
== OP_REGOFFSET
) {
12178 NEW_LOAD_MEMBASE (cfg
, load3
, OP_LOAD_MEMBASE
, reg3
, locals_var
->inst_basereg
, locals_var
->inst_offset
);
12179 } else if (locals_var
->opcode
== OP_REGVAR
) {
12180 NEW_UNALU (cfg
, load3
, OP_MOVE
, reg3
, locals_var
->dreg
);
12182 g_assert_not_reached ();
12184 /* Compute the address */
12185 ins
->opcode
= OP_PADD
;
/* Insert the three loads in dependency order before the PADD. */
12189 mono_bblock_insert_before_ins (bb
, ins
, load3
);
12190 mono_bblock_insert_before_ins (bb
, load3
, load2
);
12192 mono_bblock_insert_before_ins (bb
, load2
, load
);
12194 g_assert (var
->opcode
== OP_REGOFFSET
);
/* Plain local: LDADDR becomes basereg + offset. */
12196 ins
->opcode
= OP_ADD_IMM
;
12197 ins
->sreg1
= var
->inst_basereg
;
12198 ins
->inst_imm
= var
->inst_offset
;
12201 *need_local_opts
= TRUE
;
12202 spec
= INS_INFO (ins
->opcode
);
12205 if (ins
->opcode
< MONO_CEE_LAST
) {
12206 mono_print_ins (ins
);
12207 g_assert_not_reached ();
12211 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... a source; temporarily swap dreg/sreg2 and use a patched spec2. */
12215 if (MONO_IS_STORE_MEMBASE (ins
)) {
12216 tmp_reg
= ins
->dreg
;
12217 ins
->dreg
= ins
->sreg2
;
12218 ins
->sreg2
= tmp_reg
;
12221 spec2
[MONO_INST_DEST
] = ' ';
12222 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
12223 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
12224 spec2
[MONO_INST_SRC3
] = ' ';
12226 } else if (MONO_IS_STORE_MEMINDEX (ins
))
12227 g_assert_not_reached ();
12232 if (G_UNLIKELY (cfg
->verbose_level
> 2)) {
12233 printf ("\t %.3s %d", spec
, ins
->dreg
);
12234 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
12235 for (srcindex
= 0; srcindex
< num_sregs
; ++srcindex
)
12236 printf (" %d", sregs
[srcindex
]);
/***************/
/* DREG */
/***************/
12243 regtype
= spec
[MONO_INST_DEST
];
12244 g_assert (((ins
->dreg
== -1) && (regtype
== ' ')) || ((ins
->dreg
!= -1) && (regtype
!= ' ')));
12246 int dreg_using_dest_to_membase_op
= -1;
12248 if ((ins
->dreg
!= -1) && get_vreg_to_inst (cfg
, ins
->dreg
)) {
12249 MonoInst
*var
= get_vreg_to_inst (cfg
, ins
->dreg
);
12250 MonoInst
*store_ins
;
12252 MonoInst
*def_ins
= ins
;
12253 int dreg
= ins
->dreg
; /* The original vreg */
12255 store_opcode
= mono_type_to_store_membase (cfg
, var
->inst_vtype
);
12257 if (var
->opcode
== OP_REGVAR
) {
12258 ins
->dreg
= var
->dreg
;
12259 } else if ((ins
->dreg
== ins
->sreg1
) && (spec
[MONO_INST_DEST
] == 'i') && (spec
[MONO_INST_SRC1
] == 'i') && !vreg_to_lvreg
[ins
->dreg
] && (op_to_op_dest_membase (store_opcode
, ins
->opcode
) != -1)) {
12261 * Instead of emitting a load+store, use a _membase opcode.
12263 g_assert (var
->opcode
== OP_REGOFFSET
);
12264 if (ins
->opcode
== OP_MOVE
) {
12268 dreg_using_dest_to_membase_op
= ins
->dreg
;
12269 ins
->opcode
= op_to_op_dest_membase (store_opcode
, ins
->opcode
);
12270 ins
->inst_basereg
= var
->inst_basereg
;
12271 ins
->inst_offset
= var
->inst_offset
;
12274 spec
= INS_INFO (ins
->opcode
);
12278 g_assert (var
->opcode
== OP_REGOFFSET
);
12280 prev_dreg
= ins
->dreg
;
12282 /* Invalidate any previous lvreg for this vreg */
12283 vreg_to_lvreg
[ins
->dreg
] = 0;
12287 if (COMPILE_SOFT_FLOAT (cfg
) && store_opcode
== OP_STORER8_MEMBASE_REG
) {
12289 store_opcode
= OP_STOREI8_MEMBASE_REG
;
/* Redirect the def into a fresh lvreg; a store will follow it. */
12292 ins
->dreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
12294 #if SIZEOF_REGISTER != 8
12295 if (regtype
== 'l') {
12296 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
, MONO_LVREG_LS (ins
->dreg
));
12297 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12298 NEW_STORE_MEMBASE (cfg
, store_ins
, OP_STOREI4_MEMBASE_REG
, var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
, MONO_LVREG_MS (ins
->dreg
));
12299 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12300 def_ins
= store_ins
;
12305 g_assert (store_opcode
!= OP_STOREV_MEMBASE
);
12307 /* Try to fuse the store into the instruction itself */
12308 /* FIXME: Add more instructions */
12309 if (!lvreg
&& ((ins
->opcode
== OP_ICONST
) || ((ins
->opcode
== OP_I8CONST
) && (ins
->inst_c0
== 0)))) {
12310 ins
->opcode
= store_membase_reg_to_store_membase_imm (store_opcode
);
12311 ins
->inst_imm
= ins
->inst_c0
;
12312 ins
->inst_destbasereg
= var
->inst_basereg
;
12313 ins
->inst_offset
= var
->inst_offset
;
12314 spec
= INS_INFO (ins
->opcode
);
12315 } else if (!lvreg
&& ((ins
->opcode
== OP_MOVE
) || (ins
->opcode
== OP_FMOVE
) || (ins
->opcode
== OP_LMOVE
) || (ins
->opcode
== OP_RMOVE
))) {
12316 ins
->opcode
= store_opcode
;
12317 ins
->inst_destbasereg
= var
->inst_basereg
;
12318 ins
->inst_offset
= var
->inst_offset
;
/* The instruction is now a store: swap into store layout (see 12215). */
12322 tmp_reg
= ins
->dreg
;
12323 ins
->dreg
= ins
->sreg2
;
12324 ins
->sreg2
= tmp_reg
;
12327 spec2
[MONO_INST_DEST
] = ' ';
12328 spec2
[MONO_INST_SRC1
] = spec
[MONO_INST_SRC1
];
12329 spec2
[MONO_INST_SRC2
] = spec
[MONO_INST_DEST
];
12330 spec2
[MONO_INST_SRC3
] = ' ';
12332 } else if (!lvreg
&& (op_to_op_store_membase (store_opcode
, ins
->opcode
) != -1)) {
12333 // FIXME: The backends expect the base reg to be in inst_basereg
12334 ins
->opcode
= op_to_op_store_membase (store_opcode
, ins
->opcode
);
12336 ins
->inst_basereg
= var
->inst_basereg
;
12337 ins
->inst_offset
= var
->inst_offset
;
12338 spec
= INS_INFO (ins
->opcode
);
12340 /* printf ("INS: "); mono_print_ins (ins); */
12341 /* Create a store instruction */
12342 NEW_STORE_MEMBASE (cfg
, store_ins
, store_opcode
, var
->inst_basereg
, var
->inst_offset
, ins
->dreg
);
12344 /* Insert it after the instruction */
12345 mono_bblock_insert_after_ins (bb
, ins
, store_ins
);
12347 def_ins
= store_ins
;
12350 * We can't assign ins->dreg to var->dreg here, since the
12351 * sregs could use it. So set a flag, and do it after
12354 if ((!cfg
->backend
->use_fpstack
|| ((store_opcode
!= OP_STORER8_MEMBASE_REG
) && (store_opcode
!= OP_STORER4_MEMBASE_REG
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)))
12355 dest_has_lvreg
= TRUE
;
12360 if (def_ins
&& !live_range_start
[dreg
]) {
12361 live_range_start
[dreg
] = def_ins
;
12362 live_range_start_bb
[dreg
] = bb
;
12365 if (cfg
->compute_gc_maps
&& def_ins
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12368 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_DEF
);
12369 tmp
->inst_c1
= dreg
;
12370 mono_bblock_insert_after_ins (bb
, def_ins
, tmp
);
/************/
/* SREGS */
/************/
12377 num_sregs
= mono_inst_get_src_registers (ins
, sregs
);
12378 for (srcindex
= 0; srcindex
< 3; ++srcindex
) {
12379 regtype
= spec
[MONO_INST_SRC1
+ srcindex
];
12380 sreg
= sregs
[srcindex
];
12382 g_assert (((sreg
== -1) && (regtype
== ' ')) || ((sreg
!= -1) && (regtype
!= ' ')));
12383 if ((sreg
!= -1) && get_vreg_to_inst (cfg
, sreg
)) {
12384 MonoInst
*var
= get_vreg_to_inst (cfg
, sreg
);
12385 MonoInst
*use_ins
= ins
;
12386 MonoInst
*load_ins
;
12387 guint32 load_opcode
;
12389 if (var
->opcode
== OP_REGVAR
) {
12390 sregs
[srcindex
] = var
->dreg
;
12391 //mono_inst_set_src_registers (ins, sregs);
12392 live_range_end
[sreg
] = use_ins
;
12393 live_range_end_bb
[sreg
] = bb
;
12395 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12398 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
12399 /* var->dreg is a hreg */
12400 tmp
->inst_c1
= sreg
;
12401 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
12407 g_assert (var
->opcode
== OP_REGOFFSET
);
12409 load_opcode
= mono_type_to_load_membase (cfg
, var
->inst_vtype
);
12411 g_assert (load_opcode
!= OP_LOADV_MEMBASE
);
12413 if (vreg_to_lvreg
[sreg
]) {
12414 g_assert (vreg_to_lvreg
[sreg
] != -1);
12416 /* The variable is already loaded to an lvreg */
12417 if (G_UNLIKELY (cfg
->verbose_level
> 2))
12418 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg
[sreg
], sreg
);
12419 sregs
[srcindex
] = vreg_to_lvreg
[sreg
];
12420 //mono_inst_set_src_registers (ins, sregs);
12424 /* Try to fuse the load into the instruction */
12425 if ((srcindex
== 0) && (op_to_op_src1_membase (cfg
, load_opcode
, ins
->opcode
) != -1)) {
12426 ins
->opcode
= op_to_op_src1_membase (cfg
, load_opcode
, ins
->opcode
);
12427 sregs
[0] = var
->inst_basereg
;
12428 //mono_inst_set_src_registers (ins, sregs);
12429 ins
->inst_offset
= var
->inst_offset
;
12430 } else if ((srcindex
== 1) && (op_to_op_src2_membase (cfg
, load_opcode
, ins
->opcode
) != -1)) {
12431 ins
->opcode
= op_to_op_src2_membase (cfg
, load_opcode
, ins
->opcode
);
12432 sregs
[1] = var
->inst_basereg
;
12433 //mono_inst_set_src_registers (ins, sregs);
12434 ins
->inst_offset
= var
->inst_offset
;
12436 if (MONO_IS_REAL_MOVE (ins
)) {
12437 ins
->opcode
= OP_NOP
;
12440 //printf ("%d ", srcindex); mono_print_ins (ins);
12442 sreg
= alloc_dreg (cfg
, stacktypes
[regtype
]);
12444 if ((!cfg
->backend
->use_fpstack
|| ((load_opcode
!= OP_LOADR8_MEMBASE
) && (load_opcode
!= OP_LOADR4_MEMBASE
))) && !((var
)->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
)) && !no_lvreg
) {
12445 if (var
->dreg
== prev_dreg
) {
12447 * sreg refers to the value loaded by the load
12448 * emitted below, but we need to use ins->dreg
12449 * since it refers to the store emitted earlier.
12453 g_assert (sreg
!= -1);
12454 if (var
->dreg
== dreg_using_dest_to_membase_op
) {
12455 if (cfg
->verbose_level
> 2)
12456 printf ("\tCan't cache R%d because it's part of a dreg dest_membase optimization\n", var
->dreg
);
12458 vreg_to_lvreg
[var
->dreg
] = sreg
;
/* Grow the lvregs array (doubling) when full. */
12460 if (lvregs_len
>= lvregs_size
) {
12461 guint32
*new_lvregs
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * lvregs_size
* 2);
12462 memcpy (new_lvregs
, lvregs
, sizeof (guint32
) * lvregs_size
);
12463 lvregs
= new_lvregs
;
12466 lvregs
[lvregs_len
++] = var
->dreg
;
12470 sregs
[srcindex
] = sreg
;
12471 //mono_inst_set_src_registers (ins, sregs);
12473 #if SIZEOF_REGISTER != 8
12474 if (regtype
== 'l') {
12475 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, MONO_LVREG_MS (sreg
), var
->inst_basereg
, var
->inst_offset
+ MINI_MS_WORD_OFFSET
);
12476 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12477 NEW_LOAD_MEMBASE (cfg
, load_ins
, OP_LOADI4_MEMBASE
, MONO_LVREG_LS (sreg
), var
->inst_basereg
, var
->inst_offset
+ MINI_LS_WORD_OFFSET
);
12478 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12479 use_ins
= load_ins
;
12484 #if SIZEOF_REGISTER == 4
12485 g_assert (load_opcode
!= OP_LOADI8_MEMBASE
);
12487 NEW_LOAD_MEMBASE (cfg
, load_ins
, load_opcode
, sreg
, var
->inst_basereg
, var
->inst_offset
);
12488 mono_bblock_insert_before_ins (bb
, ins
, load_ins
);
12489 use_ins
= load_ins
;
12493 if (var
->dreg
< orig_next_vreg
) {
12494 live_range_end
[var
->dreg
] = use_ins
;
12495 live_range_end_bb
[var
->dreg
] = bb
;
12498 if (cfg
->compute_gc_maps
&& var
->dreg
< orig_next_vreg
&& (var
->flags
& MONO_INST_GC_TRACK
)) {
12501 MONO_INST_NEW (cfg
, tmp
, OP_GC_LIVENESS_USE
);
12502 tmp
->inst_c1
= var
->dreg
;
12503 mono_bblock_insert_after_ins (bb
, ins
, tmp
);
12507 mono_inst_set_src_registers (ins
, sregs
);
/* Deferred from 12354: now the sregs are processed, publish the lvreg. */
12509 if (dest_has_lvreg
) {
12510 g_assert (ins
->dreg
!= -1);
12511 vreg_to_lvreg
[prev_dreg
] = ins
->dreg
;
12512 if (lvregs_len
>= lvregs_size
) {
12513 guint32
*new_lvregs
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint32
) * lvregs_size
* 2);
12514 memcpy (new_lvregs
, lvregs
, sizeof (guint32
) * lvregs_size
);
12515 lvregs
= new_lvregs
;
12518 lvregs
[lvregs_len
++] = prev_dreg
;
12519 dest_has_lvreg
= FALSE
;
/* Undo the temporary dreg/sreg2 swap done for store opcodes (12216). */
12523 tmp_reg
= ins
->dreg
;
12524 ins
->dreg
= ins
->sreg2
;
12525 ins
->sreg2
= tmp_reg
;
/* Calls clobber everything: drop all cached lvregs. */
12528 if (MONO_IS_CALL (ins
)) {
12529 /* Clear vreg_to_lvreg array */
12530 for (i
= 0; i
< lvregs_len
; i
++)
12531 vreg_to_lvreg
[lvregs
[i
]] = 0;
12533 } else if (ins
->opcode
== OP_NOP
) {
12535 MONO_INST_NULLIFY_SREGS (ins
);
12538 if (cfg
->verbose_level
> 2)
12539 mono_print_ins_index (1, ins
);
12542 /* Extend the live range based on the liveness info */
12543 if (cfg
->compute_precise_live_ranges
&& bb
->live_out_set
&& bb
->code
) {
12544 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
12545 MonoMethodVar
*vi
= MONO_VARINFO (cfg
, i
);
12547 if (vreg_is_volatile (cfg
, vi
->vreg
))
12548 /* The liveness info is incomplete */
12551 if (mono_bitset_test_fast (bb
->live_in_set
, i
) && !live_range_start
[vi
->vreg
]) {
12552 /* Live from at least the first ins of this bb */
12553 live_range_start
[vi
->vreg
] = bb
->code
;
12554 live_range_start_bb
[vi
->vreg
] = bb
;
12557 if (mono_bitset_test_fast (bb
->live_out_set
, i
)) {
12558 /* Live at least until the last ins of this bb */
12559 live_range_end
[vi
->vreg
] = bb
->last_ins
;
12560 live_range_end_bb
[vi
->vreg
] = bb
;
12567 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12568 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12570 if (cfg
->compute_precise_live_ranges
&& cfg
->comp_done
& MONO_COMP_LIVENESS
) {
12571 for (i
= 0; i
< cfg
->num_varinfo
; ++i
) {
12572 int vreg
= MONO_VARINFO (cfg
, i
)->vreg
;
12575 if (live_range_start
[vreg
]) {
12576 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_START
);
12578 ins
->inst_c1
= vreg
;
12579 mono_bblock_insert_after_ins (live_range_start_bb
[vreg
], live_range_start
[vreg
], ins
);
12581 if (live_range_end
[vreg
]) {
12582 MONO_INST_NEW (cfg
, ins
, OP_LIVERANGE_END
);
12584 ins
->inst_c1
= vreg
;
12585 if (live_range_end
[vreg
] == live_range_end_bb
[vreg
]->last_ins
)
12586 mono_add_ins_to_end (live_range_end_bb
[vreg
], ins
);
12588 mono_bblock_insert_after_ins (live_range_end_bb
[vreg
], live_range_end
[vreg
], ins
);
12593 if (cfg
->gsharedvt_locals_var_ins
) {
12594 /* Nullify if unused */
12595 cfg
->gsharedvt_locals_var_ins
->opcode
= OP_PCONST
;
12596 cfg
->gsharedvt_locals_var_ins
->inst_imm
= 0;
12599 g_free (live_range_start
);
12600 g_free (live_range_end
);
12601 g_free (live_range_start_bb
);
12602 g_free (live_range_end_bb
);
12607 * - use 'iadd' instead of 'int_add'
12608 * - handling ovf opcodes: decompose in method_to_ir.
12609 * - unify iregs/fregs
12610 * -> partly done, the missing parts are:
12611 * - a more complete unification would involve unifying the hregs as well, so
12612 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12613 * would no longer map to the machine hregs, so the code generators would need to
12614 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12615 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12616 * fp/non-fp branches speeds it up by about 15%.
12617 * - use sext/zext opcodes instead of shifts
12619 * - get rid of TEMPLOADs if possible and use vregs instead
12620 * - clean up usage of OP_P/OP_ opcodes
12621 * - cleanup usage of DUMMY_USE
12622 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12624 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12625 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12626 * - make sure handle_stack_args () is called before the branch is emitted
12627 * - when the new IR is done, get rid of all unused stuff
12628 * - COMPARE/BEQ as separate instructions or unify them ?
12629 * - keeping them separate allows specialized compare instructions like
12630 * compare_imm, compare_membase
12631 * - most back ends unify fp compare+branch, fp compare+ceq
12632 * - integrate mono_save_args into inline_method
12633 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
12634 * - handle long shift opts on 32 bit platforms somehow: they require
12635 * 3 sregs (2 for arg1 and 1 for arg2)
12636 * - make byref a 'normal' type.
12637 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12638 * variable if needed.
12639 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12640 * like inline_method.
12641 * - remove inlining restrictions
12642 * - fix LNEG and enable cfold of INEG
12643 * - generalize x86 optimizations like ldelema as a peephole optimization
12644 * - add store_mem_imm for amd64
12645 * - optimize the loading of the interruption flag in the managed->native wrappers
12646 * - avoid special handling of OP_NOP in passes
12647 * - move code inserting instructions into one function/macro.
12648 * - try a coalescing phase after liveness analysis
12649 * - add float -> vreg conversion + local optimizations on !x86
12650 * - figure out how to handle decomposed branches during optimizations, ie.
12651 * compare+branch, op_jump_table+op_br etc.
12652 * - promote RuntimeXHandles to vregs
12653 * - vtype cleanups:
12654 * - add a NEW_VARLOADA_VREG macro
12655 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12656 * accessing vtype fields.
12657 * - get rid of I8CONST on 64 bit platforms
12658 * - dealing with the increase in code size due to branches created during opcode
12660 * - use extended basic blocks
12661 * - all parts of the JIT
12662 * - handle_global_vregs () && local regalloc
12663 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12664 * - sources of increase in code size:
12667 * - isinst and castclass
12668 * - lvregs not allocated to global registers even if used multiple times
12669 * - call cctors outside the JIT, to make -v output more readable and JIT timings more accurate.
12671 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12672 * - add all micro optimizations from the old JIT
12673 * - put tree optimizations into the deadce pass
12674 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12675 * specific function.
12676 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12677 * fcompare + branchCC.
12678 * - create a helper function for allocating a stack slot, taking into account
12679 * MONO_CFG_HAS_SPILLUP.
12681 * - optimize mono_regstate2_alloc_int/float.
12682 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12683 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12684 * parts of the tree could be separated by other instructions, killing the tree
12685 * arguments, or stores killing loads etc. Also, should we fold loads into other
12686 * instructions if the result of the load is used multiple times ?
12687 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12688 * - LAST MERGE: 108395.
12689 * - when returning vtypes in registers, generate IR and append it to the end of the
12690 * last bb instead of doing it in the epilog.
12691 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12699 - When to decompose opcodes:
12700 - earlier: this makes some optimizations hard to implement, since the low level IR
12701 no longer contains the necessary information. But it is easier to do.
12702 - later: harder to implement, enables more optimizations.
12703 - Branches inside bblocks:
12704 - created when decomposing complex opcodes.
12705 - branches to another bblock: harmless, but not tracked by the branch
12706 optimizations, so need to branch to a label at the start of the bblock.
12707 - branches to inside the same bblock: very problematic, trips up the local
12708 reg allocator. Can be fixed by splitting the current bblock, but that is a
12709 complex operation, since some local vregs can become global vregs etc.
12710 - Local/global vregs:
12711 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12712 local register allocator.
12713 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12714 structure, created by mono_create_var (). Assigned to hregs or the stack by
12715 the global register allocator.
12716 - When to do optimizations like alu->alu_imm:
12717 - earlier -> saves work later on since the IR will be smaller/simpler
12718 - later -> can work on more instructions
12719 - Handling of valuetypes:
12720 - When a vtype is pushed on the stack, a new temporary is created, an
12721 instruction computing its address (LDADDR) is emitted and pushed on
12722 the stack. Need to optimize cases when the vtype is used immediately as in
12723 argument passing, stloc etc.
12724 - Instead of the to_end stuff in the old JIT, simply call the function handling
12725 the values on the stack before emitting the last instruction of the bb.
12727 #else /* !DISABLE_JIT */
/* Emit a dummy symbol so this translation unit is never empty when the JIT is compiled out. */
12729 MONO_EMPTY_SOURCE_FILE (method_to_ir);
12730 #endif /* !DISABLE_JIT */