3 * The new Mono code generator.
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
23 #ifdef HAVE_SYS_TIME_H
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
64 #include <mono/utils/mono-time.h>
67 #include "seq-points.h"
75 #include "jit-icalls.h"
78 #include "debugger-agent.h"
79 #include "llvm-runtime.h"
80 #include "mini-llvm.h"
82 #include "aot-runtime.h"
83 #include "mini-runtime.h"
85 MonoCallSpec
*mono_jit_trace_calls
;
86 MonoMethodDesc
*mono_inject_async_exc_method
;
87 int mono_inject_async_exc_pos
;
88 MonoMethodDesc
*mono_break_at_bb_method
;
89 int mono_break_at_bb_bb_num
;
90 gboolean mono_do_x86_stack_align
= TRUE
;
91 gboolean mono_using_xdebug
;
94 static guint32 discarded_code
;
95 static gint64 discarded_jit_time
;
96 static guint32 jinfo_try_holes_size
;
98 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
99 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
100 static mono_mutex_t jit_mutex
;
102 static MonoBackend
*current_backend
;
107 mono_realloc_native_code (MonoCompile
*cfg
)
109 return g_realloc (cfg
->native_code
, cfg
->code_size
);
113 MonoExceptionClause
*clause
;
114 MonoBasicBlock
*basic_block
;
119 * mono_emit_unwind_op:
121 * Add an unwind op with the given parameters for the list of unwind ops stored in
125 mono_emit_unwind_op (MonoCompile
*cfg
, int when
, int tag
, int reg
, int val
)
127 MonoUnwindOp
*op
= (MonoUnwindOp
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoUnwindOp
));
134 cfg
->unwind_ops
= g_slist_append_mempool (cfg
->mempool
, cfg
->unwind_ops
, op
);
135 if (cfg
->verbose_level
> 1) {
138 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when
, mono_arch_regname (reg
), val
);
140 case DW_CFA_def_cfa_register
:
141 printf ("CFA: [%x] def_cfa_reg: %s\n", when
, mono_arch_regname (reg
));
143 case DW_CFA_def_cfa_offset
:
144 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when
, val
);
147 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when
, mono_arch_regname (reg
), -val
);
154 * mono_unlink_bblock:
156 * Unlink two basic blocks.
159 mono_unlink_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
165 for (i
= 0; i
< from
->out_count
; ++i
) {
166 if (to
== from
->out_bb
[i
]) {
173 for (i
= 0; i
< from
->out_count
; ++i
) {
174 if (from
->out_bb
[i
] != to
)
175 from
->out_bb
[pos
++] = from
->out_bb
[i
];
177 g_assert (pos
== from
->out_count
- 1);
182 for (i
= 0; i
< to
->in_count
; ++i
) {
183 if (from
== to
->in_bb
[i
]) {
190 for (i
= 0; i
< to
->in_count
; ++i
) {
191 if (to
->in_bb
[i
] != from
)
192 to
->in_bb
[pos
++] = to
->in_bb
[i
];
194 g_assert (pos
== to
->in_count
- 1);
200 * mono_bblocks_linked:
202 * Return whenever BB1 and BB2 are linked in the CFG.
205 mono_bblocks_linked (MonoBasicBlock
*bb1
, MonoBasicBlock
*bb2
)
209 for (i
= 0; i
< bb1
->out_count
; ++i
) {
210 if (bb1
->out_bb
[i
] == bb2
)
218 mono_find_block_region_notry (MonoCompile
*cfg
, int offset
)
220 MonoMethodHeader
*header
= cfg
->header
;
221 MonoExceptionClause
*clause
;
224 for (i
= 0; i
< header
->num_clauses
; ++i
) {
225 clause
= &header
->clauses
[i
];
226 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
227 (offset
< (clause
->handler_offset
)))
228 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
230 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
231 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
232 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
233 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
234 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
236 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
244 * mono_get_block_region_notry:
246 * Return the region corresponding to REGION, ignoring try clauses nested inside
250 mono_get_block_region_notry (MonoCompile
*cfg
, int region
)
252 if ((region
& (0xf << 4)) == MONO_REGION_TRY
) {
253 MonoMethodHeader
*header
= cfg
->header
;
256 * This can happen if a try clause is nested inside a finally clause.
258 int clause_index
= (region
>> 8) - 1;
259 g_assert (clause_index
>= 0 && clause_index
< header
->num_clauses
);
261 region
= mono_find_block_region_notry (cfg
, header
->clauses
[clause_index
].try_offset
);
268 mono_find_spvar_for_region (MonoCompile
*cfg
, int region
)
270 region
= mono_get_block_region_notry (cfg
, region
);
272 return (MonoInst
*)g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
276 df_visit (MonoBasicBlock
*start
, int *dfn
, MonoBasicBlock
**array
)
280 array
[*dfn
] = start
;
281 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
282 for (i
= 0; i
< start
->out_count
; ++i
) {
283 if (start
->out_bb
[i
]->dfn
)
286 start
->out_bb
[i
]->dfn
= *dfn
;
287 start
->out_bb
[i
]->df_parent
= start
;
288 array
[*dfn
] = start
->out_bb
[i
];
289 df_visit (start
->out_bb
[i
], dfn
, array
);
294 mono_reverse_branch_op (guint32 opcode
)
296 static const int reverse_map
[] = {
297 CEE_BNE_UN
, CEE_BLT
, CEE_BLE
, CEE_BGT
, CEE_BGE
,
298 CEE_BEQ
, CEE_BLT_UN
, CEE_BLE_UN
, CEE_BGT_UN
, CEE_BGE_UN
300 static const int reverse_fmap
[] = {
301 OP_FBNE_UN
, OP_FBLT
, OP_FBLE
, OP_FBGT
, OP_FBGE
,
302 OP_FBEQ
, OP_FBLT_UN
, OP_FBLE_UN
, OP_FBGT_UN
, OP_FBGE_UN
304 static const int reverse_lmap
[] = {
305 OP_LBNE_UN
, OP_LBLT
, OP_LBLE
, OP_LBGT
, OP_LBGE
,
306 OP_LBEQ
, OP_LBLT_UN
, OP_LBLE_UN
, OP_LBGT_UN
, OP_LBGE_UN
308 static const int reverse_imap
[] = {
309 OP_IBNE_UN
, OP_IBLT
, OP_IBLE
, OP_IBGT
, OP_IBGE
,
310 OP_IBEQ
, OP_IBLT_UN
, OP_IBLE_UN
, OP_IBGT_UN
, OP_IBGE_UN
313 if (opcode
>= CEE_BEQ
&& opcode
<= CEE_BLT_UN
) {
314 opcode
= reverse_map
[opcode
- CEE_BEQ
];
315 } else if (opcode
>= OP_FBEQ
&& opcode
<= OP_FBLT_UN
) {
316 opcode
= reverse_fmap
[opcode
- OP_FBEQ
];
317 } else if (opcode
>= OP_LBEQ
&& opcode
<= OP_LBLT_UN
) {
318 opcode
= reverse_lmap
[opcode
- OP_LBEQ
];
319 } else if (opcode
>= OP_IBEQ
&& opcode
<= OP_IBLT_UN
) {
320 opcode
= reverse_imap
[opcode
- OP_IBEQ
];
322 g_assert_not_reached ();
328 mono_type_to_store_membase (MonoCompile
*cfg
, MonoType
*type
)
330 type
= mini_get_underlying_type (type
);
333 switch (type
->type
) {
336 return OP_STOREI1_MEMBASE_REG
;
339 return OP_STOREI2_MEMBASE_REG
;
342 return OP_STOREI4_MEMBASE_REG
;
346 case MONO_TYPE_FNPTR
:
347 return OP_STORE_MEMBASE_REG
;
348 case MONO_TYPE_CLASS
:
349 case MONO_TYPE_STRING
:
350 case MONO_TYPE_OBJECT
:
351 case MONO_TYPE_SZARRAY
:
352 case MONO_TYPE_ARRAY
:
353 return OP_STORE_MEMBASE_REG
;
356 return OP_STOREI8_MEMBASE_REG
;
358 return OP_STORER4_MEMBASE_REG
;
360 return OP_STORER8_MEMBASE_REG
;
361 case MONO_TYPE_VALUETYPE
:
362 if (m_class_is_enumtype (type
->data
.klass
)) {
363 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
366 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
367 return OP_STOREX_MEMBASE
;
368 return OP_STOREV_MEMBASE
;
369 case MONO_TYPE_TYPEDBYREF
:
370 return OP_STOREV_MEMBASE
;
371 case MONO_TYPE_GENERICINST
:
372 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
373 return OP_STOREX_MEMBASE
;
374 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
378 g_assert (mini_type_var_is_vt (type
));
379 return OP_STOREV_MEMBASE
;
381 g_error ("unknown type 0x%02x in type_to_store_membase", type
->type
);
387 mono_type_to_load_membase (MonoCompile
*cfg
, MonoType
*type
)
389 type
= mini_get_underlying_type (type
);
391 switch (type
->type
) {
393 return OP_LOADI1_MEMBASE
;
395 return OP_LOADU1_MEMBASE
;
397 return OP_LOADI2_MEMBASE
;
399 return OP_LOADU2_MEMBASE
;
401 return OP_LOADI4_MEMBASE
;
403 return OP_LOADU4_MEMBASE
;
407 case MONO_TYPE_FNPTR
:
408 return OP_LOAD_MEMBASE
;
409 case MONO_TYPE_CLASS
:
410 case MONO_TYPE_STRING
:
411 case MONO_TYPE_OBJECT
:
412 case MONO_TYPE_SZARRAY
:
413 case MONO_TYPE_ARRAY
:
414 return OP_LOAD_MEMBASE
;
417 return OP_LOADI8_MEMBASE
;
419 return OP_LOADR4_MEMBASE
;
421 return OP_LOADR8_MEMBASE
;
422 case MONO_TYPE_VALUETYPE
:
423 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
424 return OP_LOADX_MEMBASE
;
425 case MONO_TYPE_TYPEDBYREF
:
426 return OP_LOADV_MEMBASE
;
427 case MONO_TYPE_GENERICINST
:
428 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
429 return OP_LOADX_MEMBASE
;
430 if (mono_type_generic_inst_is_valuetype (type
))
431 return OP_LOADV_MEMBASE
;
433 return OP_LOAD_MEMBASE
;
437 g_assert (cfg
->gshared
);
438 g_assert (mini_type_var_is_vt (type
));
439 return OP_LOADV_MEMBASE
;
441 g_error ("unknown type 0x%02x in type_to_load_membase", type
->type
);
447 mini_type_to_stind (MonoCompile
* cfg
, MonoType
*type
)
449 type
= mini_get_underlying_type (type
);
450 if (cfg
->gshared
&& !type
->byref
&& (type
->type
== MONO_TYPE_VAR
|| type
->type
== MONO_TYPE_MVAR
)) {
451 g_assert (mini_type_var_is_vt (type
));
454 return mono_type_to_stind (type
);
458 mono_op_imm_to_op (int opcode
)
462 #if SIZEOF_REGISTER == 4
480 #if SIZEOF_REGISTER == 4
486 #if SIZEOF_REGISTER == 4
492 #if SIZEOF_REGISTER == 4
538 #if SIZEOF_REGISTER == 4
544 #if SIZEOF_REGISTER == 4
563 case OP_ICOMPARE_IMM
:
565 case OP_LOCALLOC_IMM
:
573 * mono_decompose_op_imm:
575 * Replace the OP_.._IMM INS with its non IMM variant.
578 mono_decompose_op_imm (MonoCompile
*cfg
, MonoBasicBlock
*bb
, MonoInst
*ins
)
580 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
583 const char *spec
= INS_INFO (ins
->opcode
);
585 if (spec
[MONO_INST_SRC2
] == 'l') {
586 dreg
= mono_alloc_lreg (cfg
);
588 /* Load the 64bit constant using decomposed ops */
589 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
590 temp
->inst_c0
= ins_get_l_low (ins
);
591 temp
->dreg
= MONO_LVREG_LS (dreg
);
592 mono_bblock_insert_before_ins (bb
, ins
, temp
);
594 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
595 temp
->inst_c0
= ins_get_l_high (ins
);
596 temp
->dreg
= MONO_LVREG_MS (dreg
);
598 dreg
= mono_alloc_ireg (cfg
);
600 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
601 temp
->inst_c0
= ins
->inst_imm
;
605 mono_bblock_insert_before_ins (bb
, ins
, temp
);
608 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
609 ins
->opcode
= opcode2
;
611 if (ins
->opcode
== OP_LOCALLOC
)
616 bb
->max_vreg
= MAX (bb
->max_vreg
, cfg
->next_vreg
);
620 set_vreg_to_inst (MonoCompile
*cfg
, int vreg
, MonoInst
*inst
)
622 if (vreg
>= cfg
->vreg_to_inst_len
) {
623 MonoInst
**tmp
= cfg
->vreg_to_inst
;
624 int size
= cfg
->vreg_to_inst_len
;
626 while (vreg
>= cfg
->vreg_to_inst_len
)
627 cfg
->vreg_to_inst_len
= cfg
->vreg_to_inst_len
? cfg
->vreg_to_inst_len
* 2 : 32;
628 cfg
->vreg_to_inst
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * cfg
->vreg_to_inst_len
);
630 memcpy (cfg
->vreg_to_inst
, tmp
, size
* sizeof (MonoInst
*));
632 cfg
->vreg_to_inst
[vreg
] = inst
;
635 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
636 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
639 mono_compile_create_var_for_vreg (MonoCompile
*cfg
, MonoType
*type
, int opcode
, int vreg
)
642 int num
= cfg
->num_varinfo
;
645 type
= mini_get_underlying_type (type
);
647 if ((num
+ 1) >= cfg
->varinfo_count
) {
648 int orig_count
= cfg
->varinfo_count
;
649 cfg
->varinfo_count
= cfg
->varinfo_count
? (cfg
->varinfo_count
* 2) : 32;
650 cfg
->varinfo
= (MonoInst
**)g_realloc (cfg
->varinfo
, sizeof (MonoInst
*) * cfg
->varinfo_count
);
651 cfg
->vars
= (MonoMethodVar
*)g_realloc (cfg
->vars
, sizeof (MonoMethodVar
) * cfg
->varinfo_count
);
652 memset (&cfg
->vars
[orig_count
], 0, (cfg
->varinfo_count
- orig_count
) * sizeof (MonoMethodVar
));
655 cfg
->stat_allocate_var
++;
657 MONO_INST_NEW (cfg
, inst
, opcode
);
659 inst
->inst_vtype
= type
;
660 inst
->klass
= mono_class_from_mono_type_internal (type
);
661 mini_type_to_eval_stack_type (cfg
, type
, inst
);
662 /* if set to 1 the variable is native */
663 inst
->backend
.is_pinvoke
= 0;
666 if (mono_class_has_failure (inst
->klass
))
667 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
669 if (cfg
->compute_gc_maps
) {
671 mono_mark_vreg_as_mp (cfg
, vreg
);
673 if ((MONO_TYPE_ISSTRUCT (type
) && m_class_has_references (inst
->klass
)) || mini_type_is_reference (type
)) {
674 inst
->flags
|= MONO_INST_GC_TRACK
;
675 mono_mark_vreg_as_ref (cfg
, vreg
);
680 cfg
->varinfo
[num
] = inst
;
682 cfg
->vars
[num
].idx
= num
;
683 cfg
->vars
[num
].vreg
= vreg
;
684 cfg
->vars
[num
].range
.first_use
.pos
.bid
= 0xffff;
685 cfg
->vars
[num
].reg
= -1;
688 set_vreg_to_inst (cfg
, vreg
, inst
);
690 #if SIZEOF_REGISTER == 4
691 if (mono_arch_is_soft_float ()) {
692 regpair
= mono_type_is_long (type
) || mono_type_is_float (type
);
694 regpair
= mono_type_is_long (type
);
704 * These two cannot be allocated using create_var_for_vreg since that would
705 * put it into the cfg->varinfo array, confusing many parts of the JIT.
709 * Set flags to VOLATILE so SSA skips it.
712 if (cfg
->verbose_level
>= 4) {
713 printf (" Create LVAR R%d (R%d, R%d)\n", inst
->dreg
, MONO_LVREG_LS (inst
->dreg
), MONO_LVREG_MS (inst
->dreg
));
716 if (mono_arch_is_soft_float () && cfg
->opt
& MONO_OPT_SSA
) {
717 if (mono_type_is_float (type
))
718 inst
->flags
= MONO_INST_VOLATILE
;
721 /* Allocate a dummy MonoInst for the first vreg */
722 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
723 tree
->dreg
= MONO_LVREG_LS (inst
->dreg
);
724 if (cfg
->opt
& MONO_OPT_SSA
)
725 tree
->flags
= MONO_INST_VOLATILE
;
727 tree
->type
= STACK_I4
;
728 tree
->inst_vtype
= mono_get_int32_type ();
729 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
731 set_vreg_to_inst (cfg
, MONO_LVREG_LS (inst
->dreg
), tree
);
733 /* Allocate a dummy MonoInst for the second vreg */
734 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
735 tree
->dreg
= MONO_LVREG_MS (inst
->dreg
);
736 if (cfg
->opt
& MONO_OPT_SSA
)
737 tree
->flags
= MONO_INST_VOLATILE
;
739 tree
->type
= STACK_I4
;
740 tree
->inst_vtype
= mono_get_int32_type ();
741 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
743 set_vreg_to_inst (cfg
, MONO_LVREG_MS (inst
->dreg
), tree
);
747 if (cfg
->verbose_level
> 2)
748 g_print ("created temp %d (R%d) of type %s\n", num
, vreg
, mono_type_get_name (type
));
754 mono_compile_create_var (MonoCompile
*cfg
, MonoType
*type
, int opcode
)
758 #ifdef ENABLE_NETCORE
759 if (type
->type
== MONO_TYPE_VALUETYPE
&& !type
->byref
) {
760 MonoClass
*klass
= mono_class_from_mono_type_internal (type
);
761 if (m_class_is_enumtype (klass
) && m_class_get_image (klass
) == mono_get_corlib () && !strcmp (m_class_get_name (klass
), "StackCrawlMark")) {
762 if (!(cfg
->method
->flags
& METHOD_ATTRIBUTE_REQSECOBJ
))
763 g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg
->method
));
768 type
= mini_get_underlying_type (type
);
770 if (mono_type_is_long (type
))
771 dreg
= mono_alloc_dreg (cfg
, STACK_I8
);
772 else if (mono_arch_is_soft_float () && mono_type_is_float (type
))
773 dreg
= mono_alloc_dreg (cfg
, STACK_R8
);
775 /* All the others are unified */
776 dreg
= mono_alloc_preg (cfg
);
778 return mono_compile_create_var_for_vreg (cfg
, type
, opcode
, dreg
);
782 mini_get_int_to_float_spill_area (MonoCompile
*cfg
)
785 if (!cfg
->iconv_raw_var
) {
786 cfg
->iconv_raw_var
= mono_compile_create_var (cfg
, mono_get_int32_type (), OP_LOCAL
);
787 cfg
->iconv_raw_var
->flags
|= MONO_INST_VOLATILE
; /*FIXME, use the don't regalloc flag*/
789 return cfg
->iconv_raw_var
;
796 mono_mark_vreg_as_ref (MonoCompile
*cfg
, int vreg
)
798 if (vreg
>= cfg
->vreg_is_ref_len
) {
799 gboolean
*tmp
= cfg
->vreg_is_ref
;
800 int size
= cfg
->vreg_is_ref_len
;
802 while (vreg
>= cfg
->vreg_is_ref_len
)
803 cfg
->vreg_is_ref_len
= cfg
->vreg_is_ref_len
? cfg
->vreg_is_ref_len
* 2 : 32;
804 cfg
->vreg_is_ref
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_ref_len
);
806 memcpy (cfg
->vreg_is_ref
, tmp
, size
* sizeof (gboolean
));
808 cfg
->vreg_is_ref
[vreg
] = TRUE
;
812 mono_mark_vreg_as_mp (MonoCompile
*cfg
, int vreg
)
814 if (vreg
>= cfg
->vreg_is_mp_len
) {
815 gboolean
*tmp
= cfg
->vreg_is_mp
;
816 int size
= cfg
->vreg_is_mp_len
;
818 while (vreg
>= cfg
->vreg_is_mp_len
)
819 cfg
->vreg_is_mp_len
= cfg
->vreg_is_mp_len
? cfg
->vreg_is_mp_len
* 2 : 32;
820 cfg
->vreg_is_mp
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_mp_len
);
822 memcpy (cfg
->vreg_is_mp
, tmp
, size
* sizeof (gboolean
));
824 cfg
->vreg_is_mp
[vreg
] = TRUE
;
828 type_from_stack_type (MonoInst
*ins
)
831 case STACK_I4
: return mono_get_int32_type ();
832 case STACK_I8
: return m_class_get_byval_arg (mono_defaults
.int64_class
);
833 case STACK_PTR
: return mono_get_int_type ();
834 case STACK_R8
: return m_class_get_byval_arg (mono_defaults
.double_class
);
837 * this if used to be commented without any specific reason, but
838 * it breaks #80235 when commented
841 return m_class_get_this_arg (ins
->klass
);
843 return m_class_get_this_arg (mono_defaults
.object_class
);
845 /* ins->klass may not be set for ldnull.
846 * Also, if we have a boxed valuetype, we want an object lass,
847 * not the valuetype class
849 if (ins
->klass
&& !m_class_is_valuetype (ins
->klass
))
850 return m_class_get_byval_arg (ins
->klass
);
851 return mono_get_object_type ();
852 case STACK_VTYPE
: return m_class_get_byval_arg (ins
->klass
);
854 g_error ("stack type %d to montype not handled\n", ins
->type
);
860 mono_type_from_stack_type (MonoInst
*ins
)
862 return type_from_stack_type (ins
);
866 * mono_add_ins_to_end:
868 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
871 mono_add_ins_to_end (MonoBasicBlock
*bb
, MonoInst
*inst
)
876 MONO_ADD_INS (bb
, inst
);
880 switch (bb
->last_ins
->opcode
) {
894 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
897 if (MONO_IS_COND_BRANCH_OP (bb
->last_ins
)) {
898 /* Need to insert the ins before the compare */
899 if (bb
->code
== bb
->last_ins
) {
900 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
904 if (bb
->code
->next
== bb
->last_ins
) {
905 /* Only two instructions */
906 opcode
= bb
->code
->opcode
;
908 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
910 mono_bblock_insert_before_ins (bb
, bb
->code
, inst
);
912 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
915 opcode
= bb
->last_ins
->prev
->opcode
;
917 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
919 mono_bblock_insert_before_ins (bb
, bb
->last_ins
->prev
, inst
);
921 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
926 MONO_ADD_INS (bb
, inst
);
932 mono_create_jump_table (MonoCompile
*cfg
, MonoInst
*label
, MonoBasicBlock
**bbs
, int num_blocks
)
934 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
935 MonoJumpInfoBBTable
*table
;
937 table
= (MonoJumpInfoBBTable
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
939 table
->table_size
= num_blocks
;
941 ji
->ip
.label
= label
;
942 ji
->type
= MONO_PATCH_INFO_SWITCH
;
943 ji
->data
.table
= table
;
944 ji
->next
= cfg
->patch_info
;
945 cfg
->patch_info
= ji
;
949 mini_assembly_can_skip_verification (MonoDomain
*domain
, MonoMethod
*method
)
951 MonoAssembly
*assembly
= m_class_get_image (method
->klass
)->assembly
;
952 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
&& method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
)
954 if (assembly
->in_gac
|| assembly
->image
== mono_defaults
.corlib
)
956 return mono_assembly_has_skip_verification (assembly
);
960 * mini_method_verify:
962 * Verify the method using the verfier.
964 * Returns true if the method is invalid.
967 mini_method_verify (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
970 gboolean is_fulltrust
;
972 if (method
->verification_success
)
975 if (!mono_verifier_is_enabled_for_method (method
))
978 /*skip verification implies the assembly must be */
979 is_fulltrust
= mono_verifier_is_method_full_trust (method
) || mini_assembly_can_skip_verification (cfg
->domain
, method
);
981 res
= mono_method_verify_with_current_settings (method
, cfg
->skip_visibility
, is_fulltrust
);
984 for (tmp
= res
; tmp
; tmp
= tmp
->next
) {
985 MonoVerifyInfoExtended
*info
= (MonoVerifyInfoExtended
*)tmp
->data
;
986 if (info
->info
.status
== MONO_VERIFY_ERROR
) {
988 char *method_name
= mono_method_full_name (method
, TRUE
);
989 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
990 cfg
->exception_message
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
991 g_free (method_name
);
993 mono_free_verify_list (res
);
996 if (info
->info
.status
== MONO_VERIFY_NOT_VERIFIABLE
&& (!is_fulltrust
|| info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
|| info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)) {
998 char *method_name
= mono_method_full_name (method
, TRUE
);
999 char *msg
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
1001 if (info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
)
1002 mono_error_set_generic_error (&cfg
->error
, "System", "MethodAccessException", "%s", msg
);
1003 else if (info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)
1004 mono_error_set_generic_error (&cfg
->error
, "System", "FieldAccessException", "%s", msg
);
1005 else if (info
->exception_type
== MONO_EXCEPTION_UNVERIFIABLE_IL
)
1006 mono_error_set_generic_error (&cfg
->error
, "System.Security", "VerificationException", "%s", msg
);
1007 if (!mono_error_ok (&cfg
->error
)) {
1008 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
1011 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
1012 cfg
->exception_message
= msg
;
1014 g_free (method_name
);
1016 mono_free_verify_list (res
);
1020 mono_free_verify_list (res
);
1022 method
->verification_success
= 1;
1026 /*Returns true if something went wrong*/
1028 mono_compile_is_broken (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
1030 MonoMethod
*method_definition
= method
;
1031 gboolean dont_verify
= m_class_get_image (method
->klass
)->assembly
->corlib_internal
;
1033 while (method_definition
->is_inflated
) {
1034 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
1035 method_definition
= imethod
->declaring
;
1038 return !dont_verify
&& mini_method_verify (cfg
, method_definition
, fail_compile
);
1042 mono_dynamic_code_hash_insert (MonoDomain
*domain
, MonoMethod
*method
, MonoJitDynamicMethodInfo
*ji
)
1044 if (!domain_jit_info (domain
)->dynamic_code_hash
)
1045 domain_jit_info (domain
)->dynamic_code_hash
= g_hash_table_new (NULL
, NULL
);
1046 g_hash_table_insert (domain_jit_info (domain
)->dynamic_code_hash
, method
, ji
);
1049 static MonoJitDynamicMethodInfo
*
1050 mono_dynamic_code_hash_lookup (MonoDomain
*domain
, MonoMethod
*method
)
1052 MonoJitDynamicMethodInfo
*res
;
1054 if (domain_jit_info (domain
)->dynamic_code_hash
)
1055 res
= (MonoJitDynamicMethodInfo
*)g_hash_table_lookup (domain_jit_info (domain
)->dynamic_code_hash
, method
);
1063 GList
*active
, *inactive
;
1068 compare_by_interval_start_pos_func (gconstpointer a
, gconstpointer b
)
1070 MonoMethodVar
*v1
= (MonoMethodVar
*)a
;
1071 MonoMethodVar
*v2
= (MonoMethodVar
*)b
;
1075 else if (v1
->interval
->range
&& v2
->interval
->range
)
1076 return v1
->interval
->range
->from
- v2
->interval
->range
->from
;
1077 else if (v1
->interval
->range
)
1084 #define LSCAN_DEBUG(a) do { a; } while (0)
1086 #define LSCAN_DEBUG(a)
1090 mono_allocate_stack_slots2 (MonoCompile
*cfg
, gboolean backward
, guint32
*stack_size
, guint32
*stack_align
)
1092 int i
, slot
, offset
, size
;
1097 GList
*vars
= NULL
, *l
, *unhandled
;
1098 StackSlotInfo
*scalar_stack_slots
, *vtype_stack_slots
, *slot_info
;
1101 int vtype_stack_slots_size
= 256;
1102 gboolean reuse_slot
;
1104 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg
->method
, TRUE
)));
1106 scalar_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * MONO_TYPE_PINNED
);
1107 vtype_stack_slots
= NULL
;
1110 offsets
= (gint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (gint32
) * cfg
->num_varinfo
);
1111 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
1114 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1115 inst
= cfg
->varinfo
[i
];
1116 vmv
= MONO_VARINFO (cfg
, i
);
1118 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
|| inst
->opcode
== OP_REGOFFSET
)
1121 vars
= g_list_prepend (vars
, vmv
);
1124 vars
= g_list_sort (vars
, compare_by_interval_start_pos_func
);
1129 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1130 MonoMethodVar *current = unhandled->data;
1132 if (current->interval->range) {
1133 g_assert (current->interval->range->from >= i);
1134 i = current->interval->range->from;
1141 for (unhandled
= vars
; unhandled
; unhandled
= unhandled
->next
) {
1142 MonoMethodVar
*current
= (MonoMethodVar
*)unhandled
->data
;
1145 inst
= cfg
->varinfo
[vmv
->idx
];
1147 t
= mono_type_get_underlying_type (inst
->inst_vtype
);
1148 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1151 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1152 * pinvoke wrappers when they call functions returning structures */
1153 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1154 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1159 size
= mini_type_stack_size (t
, &ialign
);
1162 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (t
)))
1167 if (cfg
->disable_reuse_stack_slots
)
1170 t
= mini_get_underlying_type (t
);
1172 case MONO_TYPE_GENERICINST
:
1173 if (!mono_type_generic_inst_is_valuetype (t
)) {
1174 slot_info
= &scalar_stack_slots
[t
->type
];
1178 case MONO_TYPE_VALUETYPE
:
1179 if (!vtype_stack_slots
)
1180 vtype_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1181 for (i
= 0; i
< nvtypes
; ++i
)
1182 if (t
->data
.klass
== vtype_stack_slots
[i
].vtype
)
1185 slot_info
= &vtype_stack_slots
[i
];
1187 if (nvtypes
== vtype_stack_slots_size
) {
1188 int new_slots_size
= vtype_stack_slots_size
* 2;
1189 StackSlotInfo
* new_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * new_slots_size
);
1191 memcpy (new_slots
, vtype_stack_slots
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1193 vtype_stack_slots
= new_slots
;
1194 vtype_stack_slots_size
= new_slots_size
;
1196 vtype_stack_slots
[nvtypes
].vtype
= t
->data
.klass
;
1197 slot_info
= &vtype_stack_slots
[nvtypes
];
1200 if (cfg
->disable_reuse_ref_stack_slots
)
1207 #if TARGET_SIZEOF_VOID_P == 4
1212 if (cfg
->disable_ref_noref_stack_slot_share
) {
1213 slot_info
= &scalar_stack_slots
[MONO_TYPE_I
];
1218 case MONO_TYPE_CLASS
:
1219 case MONO_TYPE_OBJECT
:
1220 case MONO_TYPE_ARRAY
:
1221 case MONO_TYPE_SZARRAY
:
1222 case MONO_TYPE_STRING
:
1223 /* Share non-float stack slots of the same size */
1224 slot_info
= &scalar_stack_slots
[MONO_TYPE_CLASS
];
1225 if (cfg
->disable_reuse_ref_stack_slots
)
1230 slot_info
= &scalar_stack_slots
[t
->type
];
1234 if (cfg
->comp_done
& MONO_COMP_LIVENESS
) {
1238 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1240 if (!current
->interval
->range
) {
1241 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
1245 inst
->flags
|= MONO_INST_IS_DEAD
;
1250 pos
= current
->interval
->range
->from
;
1252 LSCAN_DEBUG (printf ("process R%d ", inst
->dreg
));
1253 if (current
->interval
->range
)
1254 LSCAN_DEBUG (mono_linterval_print (current
->interval
));
1255 LSCAN_DEBUG (printf ("\n"));
1257 /* Check for intervals in active which expired or inactive */
1259 /* FIXME: Optimize this */
1262 for (l
= slot_info
->active
; l
!= NULL
; l
= l
->next
) {
1263 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1265 if (v
->interval
->last_range
->to
< pos
) {
1266 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1267 slot_info
->slots
= g_slist_prepend_mempool (cfg
->mempool
, slot_info
->slots
, GINT_TO_POINTER (offsets
[v
->idx
]));
1268 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1272 else if (!mono_linterval_covers (v
->interval
, pos
)) {
1273 slot_info
->inactive
= g_list_append (slot_info
->inactive
, v
);
1274 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1275 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg
->varinfo
[v
->idx
]->dreg
));
1282 /* Check for intervals in inactive which expired or active */
1284 /* FIXME: Optimize this */
1287 for (l
= slot_info
->inactive
; l
!= NULL
; l
= l
->next
) {
1288 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1290 if (v
->interval
->last_range
->to
< pos
) {
1291 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1292 // FIXME: Enabling this seems to cause impossible to debug crashes
1293 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1294 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1298 else if (mono_linterval_covers (v
->interval
, pos
)) {
1299 slot_info
->active
= g_list_append (slot_info
->active
, v
);
1300 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1301 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg
->varinfo
[v
->idx
]->dreg
));
1309 * This also handles the case when the variable is used in an
1310 * exception region, as liveness info is not computed there.
1313 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1316 if (! (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))) {
1317 if (slot_info
->slots
) {
1318 slot
= GPOINTER_TO_INT (slot_info
->slots
->data
);
1320 slot_info
->slots
= slot_info
->slots
->next
;
1323 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1325 slot_info
->active
= mono_varlist_insert_sorted (cfg
, slot_info
->active
, vmv
, TRUE
);
1331 static int count
= 0;
1334 if (count
== atoi (g_getenv ("COUNT3")))
1335 printf ("LAST: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
1336 if (count
> atoi (g_getenv ("COUNT3")))
1339 mono_print_ins (inst
);
1343 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst
->dreg
, mono_type_full_name (t
), slot
));
1345 if (inst
->flags
& MONO_INST_LMF
) {
1346 size
= MONO_ABI_SIZEOF (MonoLMF
);
1347 align
= sizeof (target_mgreg_t
);
1354 if (slot
== 0xffffff) {
1356 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
1357 * efficient copying (and to work around the fact that OP_MEMCPY
1358 * and OP_MEMSET ignores alignment).
1360 if (MONO_TYPE_ISSTRUCT (t
)) {
1361 align
= MAX (align
, sizeof (target_mgreg_t
));
1362 align
= MAX (align
, mono_class_min_align (mono_class_from_mono_type_internal (t
)));
1367 offset
+= align
- 1;
1368 offset
&= ~(align
- 1);
1372 offset
+= align
- 1;
1373 offset
&= ~(align
- 1);
1378 if (*stack_align
== 0)
1379 *stack_align
= align
;
1382 offsets
[vmv
->idx
] = slot
;
1385 for (i
= 0; i
< MONO_TYPE_PINNED
; ++i
) {
1386 if (scalar_stack_slots
[i
].active
)
1387 g_list_free (scalar_stack_slots
[i
].active
);
1389 for (i
= 0; i
< nvtypes
; ++i
) {
1390 if (vtype_stack_slots
[i
].active
)
1391 g_list_free (vtype_stack_slots
[i
].active
);
1394 cfg
->stat_locals_stack_size
+= offset
;
1396 *stack_size
= offset
;
1401 * mono_allocate_stack_slots:
1403 * Allocate stack slots for all non register allocated variables using a
1404 * linear scan algorithm.
1405 * Returns: an array of stack offsets.
1406 * STACK_SIZE is set to the amount of stack space needed.
1407 * STACK_ALIGN is set to the alignment needed by the locals area.
1410 mono_allocate_stack_slots (MonoCompile
*cfg
, gboolean backward
, guint32
*stack_size
, guint32
*stack_align
)
1412 int i
, slot
, offset
, size
;
1417 GList
*vars
= NULL
, *l
;
1418 StackSlotInfo
*scalar_stack_slots
, *vtype_stack_slots
, *slot_info
;
1421 int vtype_stack_slots_size
= 256;
1422 gboolean reuse_slot
;
1424 if ((cfg
->num_varinfo
> 0) && MONO_VARINFO (cfg
, 0)->interval
)
1425 return mono_allocate_stack_slots2 (cfg
, backward
, stack_size
, stack_align
);
1427 scalar_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * MONO_TYPE_PINNED
);
1428 vtype_stack_slots
= NULL
;
1431 offsets
= (gint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (gint32
) * cfg
->num_varinfo
);
1432 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
1435 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1436 inst
= cfg
->varinfo
[i
];
1437 vmv
= MONO_VARINFO (cfg
, i
);
1439 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
|| inst
->opcode
== OP_REGOFFSET
)
1442 vars
= g_list_prepend (vars
, vmv
);
1445 vars
= mono_varlist_sort (cfg
, vars
, 0);
1447 *stack_align
= sizeof (target_mgreg_t
);
1448 for (l
= vars
; l
; l
= l
->next
) {
1449 vmv
= (MonoMethodVar
*)l
->data
;
1450 inst
= cfg
->varinfo
[vmv
->idx
];
1452 t
= mono_type_get_underlying_type (inst
->inst_vtype
);
1453 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1456 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1457 * pinvoke wrappers when they call functions returning structures */
1458 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1459 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1463 size
= mini_type_stack_size (t
, &ialign
);
1466 if (mono_class_has_failure (mono_class_from_mono_type_internal (t
)))
1467 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
1469 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (t
)))
1474 if (cfg
->disable_reuse_stack_slots
)
1477 t
= mini_get_underlying_type (t
);
1479 case MONO_TYPE_GENERICINST
:
1480 if (!mono_type_generic_inst_is_valuetype (t
)) {
1481 slot_info
= &scalar_stack_slots
[t
->type
];
1485 case MONO_TYPE_VALUETYPE
:
1486 if (!vtype_stack_slots
)
1487 vtype_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1488 for (i
= 0; i
< nvtypes
; ++i
)
1489 if (t
->data
.klass
== vtype_stack_slots
[i
].vtype
)
1492 slot_info
= &vtype_stack_slots
[i
];
1494 if (nvtypes
== vtype_stack_slots_size
) {
1495 int new_slots_size
= vtype_stack_slots_size
* 2;
1496 StackSlotInfo
* new_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * new_slots_size
);
1498 memcpy (new_slots
, vtype_stack_slots
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1500 vtype_stack_slots
= new_slots
;
1501 vtype_stack_slots_size
= new_slots_size
;
1503 vtype_stack_slots
[nvtypes
].vtype
= t
->data
.klass
;
1504 slot_info
= &vtype_stack_slots
[nvtypes
];
1507 if (cfg
->disable_reuse_ref_stack_slots
)
1514 #if TARGET_SIZEOF_VOID_P == 4
1519 if (cfg
->disable_ref_noref_stack_slot_share
) {
1520 slot_info
= &scalar_stack_slots
[MONO_TYPE_I
];
1525 case MONO_TYPE_CLASS
:
1526 case MONO_TYPE_OBJECT
:
1527 case MONO_TYPE_ARRAY
:
1528 case MONO_TYPE_SZARRAY
:
1529 case MONO_TYPE_STRING
:
1530 /* Share non-float stack slots of the same size */
1531 slot_info
= &scalar_stack_slots
[MONO_TYPE_CLASS
];
1532 if (cfg
->disable_reuse_ref_stack_slots
)
1536 case MONO_TYPE_MVAR
:
1537 slot_info
= &scalar_stack_slots
[t
->type
];
1540 slot_info
= &scalar_stack_slots
[t
->type
];
1545 if (cfg
->comp_done
& MONO_COMP_LIVENESS
) {
1546 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1548 /* expire old intervals in active */
1549 while (slot_info
->active
) {
1550 MonoMethodVar
*amv
= (MonoMethodVar
*)slot_info
->active
->data
;
1552 if (amv
->range
.last_use
.abs_pos
> vmv
->range
.first_use
.abs_pos
)
1555 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
1557 slot_info
->active
= g_list_delete_link (slot_info
->active
, slot_info
->active
);
1558 slot_info
->slots
= g_slist_prepend_mempool (cfg
->mempool
, slot_info
->slots
, GINT_TO_POINTER (offsets
[amv
->idx
]));
1562 * This also handles the case when the variable is used in an
1563 * exception region, as liveness info is not computed there.
1566 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1569 if (! (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))) {
1570 if (slot_info
->slots
) {
1571 slot
= GPOINTER_TO_INT (slot_info
->slots
->data
);
1573 slot_info
->slots
= slot_info
->slots
->next
;
1576 slot_info
->active
= mono_varlist_insert_sorted (cfg
, slot_info
->active
, vmv
, TRUE
);
1582 static int count
= 0;
1585 if (count
== atoi (g_getenv ("COUNT")))
1586 printf ("LAST: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
1587 if (count
> atoi (g_getenv ("COUNT")))
1590 mono_print_ins (inst
);
1594 if (inst
->flags
& MONO_INST_LMF
) {
1596 * This variable represents a MonoLMF structure, which has no corresponding
1597 * CLR type, so hard-code its size/alignment.
1599 size
= MONO_ABI_SIZEOF (MonoLMF
);
1600 align
= sizeof (target_mgreg_t
);
1607 if (slot
== 0xffffff) {
1609 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
1610 * efficient copying (and to work around the fact that OP_MEMCPY
1611 * and OP_MEMSET ignores alignment).
1613 if (MONO_TYPE_ISSTRUCT (t
)) {
1614 align
= MAX (align
, sizeof (target_mgreg_t
));
1615 align
= MAX (align
, mono_class_min_align (mono_class_from_mono_type_internal (t
)));
1617 * Align the size too so the code generated for passing vtypes in
1618 * registers doesn't overwrite random locals.
1620 size
= (size
+ (align
- 1)) & ~(align
-1);
1625 offset
+= align
- 1;
1626 offset
&= ~(align
- 1);
1630 offset
+= align
- 1;
1631 offset
&= ~(align
- 1);
1636 *stack_align
= MAX (*stack_align
, align
);
1639 offsets
[vmv
->idx
] = slot
;
1642 for (i
= 0; i
< MONO_TYPE_PINNED
; ++i
) {
1643 if (scalar_stack_slots
[i
].active
)
1644 g_list_free (scalar_stack_slots
[i
].active
);
1646 for (i
= 0; i
< nvtypes
; ++i
) {
1647 if (vtype_stack_slots
[i
].active
)
1648 g_list_free (vtype_stack_slots
[i
].active
);
1651 cfg
->stat_locals_stack_size
+= offset
;
1653 *stack_size
= offset
;
1657 #define EMUL_HIT_SHIFT 3
1658 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1659 /* small hit bitmap cache */
1660 static mono_byte emul_opcode_hit_cache
[(OP_LAST
>>EMUL_HIT_SHIFT
) + 1] = {0};
1661 static short emul_opcode_num
= 0;
1662 static short emul_opcode_alloced
= 0;
1663 static short *emul_opcode_opcodes
;
1664 static MonoJitICallInfo
**emul_opcode_map
;
1667 mono_find_jit_opcode_emulation (int opcode
)
1669 g_assert (opcode
>= 0 && opcode
<= OP_LAST
);
1670 if (emul_opcode_hit_cache
[opcode
>> (EMUL_HIT_SHIFT
+ 3)] & (1 << (opcode
& EMUL_HIT_MASK
))) {
1672 for (i
= 0; i
< emul_opcode_num
; ++i
) {
1673 if (emul_opcode_opcodes
[i
] == opcode
)
1674 return emul_opcode_map
[i
];
1681 mini_register_opcode_emulation (int opcode
, MonoJitICallInfo
*info
, const char *name
, MonoMethodSignature
*sig
, gpointer func
, const char *symbol
, gboolean no_wrapper
)
1684 g_assert (!sig
->hasthis
);
1685 g_assert (sig
->param_count
< 3);
1687 mono_register_jit_icall_info (info
, func
, name
, sig
, no_wrapper
, symbol
);
1689 if (emul_opcode_num
>= emul_opcode_alloced
) {
1690 int incr
= emul_opcode_alloced
? emul_opcode_alloced
/2: 16;
1691 emul_opcode_alloced
+= incr
;
1692 emul_opcode_map
= (MonoJitICallInfo
**)g_realloc (emul_opcode_map
, sizeof (emul_opcode_map
[0]) * emul_opcode_alloced
);
1693 emul_opcode_opcodes
= (short *)g_realloc (emul_opcode_opcodes
, sizeof (emul_opcode_opcodes
[0]) * emul_opcode_alloced
);
1695 emul_opcode_map
[emul_opcode_num
] = info
;
1696 emul_opcode_opcodes
[emul_opcode_num
] = opcode
;
1698 emul_opcode_hit_cache
[opcode
>> (EMUL_HIT_SHIFT
+ 3)] |= (1 << (opcode
& EMUL_HIT_MASK
));
1702 print_dfn (MonoCompile
*cfg
)
1710 char *method_name
= mono_method_full_name (cfg
->method
, TRUE
);
1711 g_print ("IR code for method %s\n", method_name
);
1712 g_free (method_name
);
1715 for (i
= 0; i
< cfg
->num_bblocks
; ++i
) {
1716 bb
= cfg
->bblocks
[i
];
1717 /*if (bb->cil_code) {
1718 char* code1, *code2;
1719 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1720 if (bb->last_ins->cil_code)
1721 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1723 code2 = g_strdup ("");
1725 code1 [strlen (code1) - 1] = 0;
1726 code = g_strdup_printf ("%s -> %s", code1, code2);
1730 code
= g_strdup ("\n");
1731 g_print ("\nBB%d (%d) (len: %d): %s", bb
->block_num
, i
, bb
->cil_length
, code
);
1732 MONO_BB_FOR_EACH_INS (bb
, c
) {
1733 mono_print_ins_index (-1, c
);
1736 g_print ("\tprev:");
1737 for (j
= 0; j
< bb
->in_count
; ++j
) {
1738 g_print (" BB%d", bb
->in_bb
[j
]->block_num
);
1740 g_print ("\t\tsucc:");
1741 for (j
= 0; j
< bb
->out_count
; ++j
) {
1742 g_print (" BB%d", bb
->out_bb
[j
]->block_num
);
1744 g_print ("\n\tidom: BB%d\n", bb
->idom
? bb
->idom
->block_num
: -1);
1747 g_assert (mono_bitset_test_fast (bb
->dominators
, bb
->idom
->dfn
));
1750 mono_blockset_print (cfg
, bb
->dominators
, "\tdominators", bb
->idom
? bb
->idom
->dfn
: -1);
1752 mono_blockset_print (cfg
, bb
->dfrontier
, "\tdfrontier", -1);
1760 mono_bblock_add_inst (MonoBasicBlock
*bb
, MonoInst
*inst
)
1762 MONO_ADD_INS (bb
, inst
);
1766 mono_bblock_insert_after_ins (MonoBasicBlock
*bb
, MonoInst
*ins
, MonoInst
*ins_to_insert
)
1770 bb
->code
= ins_to_insert
;
1772 /* Link with next */
1773 ins_to_insert
->next
= ins
;
1775 ins
->prev
= ins_to_insert
;
1777 if (bb
->last_ins
== NULL
)
1778 bb
->last_ins
= ins_to_insert
;
1780 /* Link with next */
1781 ins_to_insert
->next
= ins
->next
;
1783 ins
->next
->prev
= ins_to_insert
;
1785 /* Link with previous */
1786 ins
->next
= ins_to_insert
;
1787 ins_to_insert
->prev
= ins
;
1789 if (bb
->last_ins
== ins
)
1790 bb
->last_ins
= ins_to_insert
;
1795 mono_bblock_insert_before_ins (MonoBasicBlock
*bb
, MonoInst
*ins
, MonoInst
*ins_to_insert
)
1800 ins
->prev
= ins_to_insert
;
1801 bb
->code
= ins_to_insert
;
1802 ins_to_insert
->next
= ins
;
1803 if (bb
->last_ins
== NULL
)
1804 bb
->last_ins
= ins_to_insert
;
1806 /* Link with previous */
1808 ins
->prev
->next
= ins_to_insert
;
1809 ins_to_insert
->prev
= ins
->prev
;
1811 /* Link with next */
1812 ins
->prev
= ins_to_insert
;
1813 ins_to_insert
->next
= ins
;
1815 if (bb
->code
== ins
)
1816 bb
->code
= ins_to_insert
;
1821 * mono_verify_bblock:
1823 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1826 mono_verify_bblock (MonoBasicBlock
*bb
)
1828 MonoInst
*ins
, *prev
;
1831 for (ins
= bb
->code
; ins
; ins
= ins
->next
) {
1832 g_assert (ins
->prev
== prev
);
1836 g_assert (!bb
->last_ins
->next
);
1842 * Perform consistency checks on the JIT data structures and the IR
1845 mono_verify_cfg (MonoCompile
*cfg
)
1849 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
1850 mono_verify_bblock (bb
);
1853 // This will free many fields in cfg to save
1854 // memory. Note that this must be safe to call
1855 // multiple times. It must be idempotent.
1857 mono_empty_compile (MonoCompile
*cfg
)
1859 mono_free_loop_info (cfg
);
1861 // These live in the mempool, and so must be freed
1863 for (GSList
*l
= cfg
->headers_to_free
; l
; l
= l
->next
) {
1864 mono_metadata_free_mh ((MonoMethodHeader
*)l
->data
);
1866 cfg
->headers_to_free
= NULL
;
1869 //mono_mempool_stats (cfg->mempool);
1870 mono_mempool_destroy (cfg
->mempool
);
1871 cfg
->mempool
= NULL
;
1874 g_free (cfg
->varinfo
);
1875 cfg
->varinfo
= NULL
;
1881 mono_regstate_free (cfg
->rs
);
1887 mono_destroy_compile (MonoCompile
*cfg
)
1889 mono_empty_compile (cfg
);
1891 mono_metadata_free_mh (cfg
->header
);
1893 g_hash_table_destroy (cfg
->spvars
);
1894 g_hash_table_destroy (cfg
->exvars
);
1895 g_list_free (cfg
->ldstr_list
);
1896 g_hash_table_destroy (cfg
->token_info_hash
);
1897 g_hash_table_destroy (cfg
->abs_patches
);
1899 mono_debug_free_method (cfg
);
1901 g_free (cfg
->varinfo
);
1903 g_free (cfg
->exception_message
);
1908 mono_add_patch_info (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
1910 if (type
== MONO_PATCH_INFO_NONE
)
1913 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfo
));
1917 ji
->data
.target
= target
;
1918 ji
->next
= cfg
->patch_info
;
1920 cfg
->patch_info
= ji
;
1924 mono_add_patch_info_rel (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
, int relocation
)
1926 if (type
== MONO_PATCH_INFO_NONE
)
1929 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfo
));
1933 ji
->relocation
= relocation
;
1934 ji
->data
.target
= target
;
1935 ji
->next
= cfg
->patch_info
;
1937 cfg
->patch_info
= ji
;
1941 mono_remove_patch_info (MonoCompile
*cfg
, int ip
)
1943 MonoJumpInfo
**ji
= &cfg
->patch_info
;
1946 if ((*ji
)->ip
.i
== ip
)
1949 ji
= &((*ji
)->next
);
1954 mono_add_seq_point (MonoCompile
*cfg
, MonoBasicBlock
*bb
, MonoInst
*ins
, int native_offset
)
1956 ins
->inst_offset
= native_offset
;
1957 g_ptr_array_add (cfg
->seq_points
, ins
);
1959 bb
->seq_points
= g_slist_prepend_mempool (cfg
->mempool
, bb
->seq_points
, ins
);
1960 bb
->last_seq_point
= ins
;
1965 mono_add_var_location (MonoCompile
*cfg
, MonoInst
*var
, gboolean is_reg
, int reg
, int offset
, int from
, int to
)
1967 MonoDwarfLocListEntry
*entry
= (MonoDwarfLocListEntry
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoDwarfLocListEntry
));
1970 g_assert (offset
== 0);
1972 entry
->is_reg
= is_reg
;
1974 entry
->offset
= offset
;
1978 if (var
== cfg
->args
[0])
1979 cfg
->this_loclist
= g_slist_append_mempool (cfg
->mempool
, cfg
->this_loclist
, entry
);
1980 else if (var
== cfg
->rgctx_var
)
1981 cfg
->rgctx_loclist
= g_slist_append_mempool (cfg
->mempool
, cfg
->rgctx_loclist
, entry
);
1985 mono_apply_volatile (MonoInst
*inst
, MonoBitSet
*set
, gsize index
)
1987 inst
->flags
|= mono_bitset_test_safe (set
, index
) ? MONO_INST_VOLATILE
: 0;
1991 mono_compile_create_vars (MonoCompile
*cfg
)
1993 MonoMethodSignature
*sig
;
1994 MonoMethodHeader
*header
;
1997 header
= cfg
->header
;
1999 sig
= mono_method_signature_internal (cfg
->method
);
2001 if (!MONO_TYPE_IS_VOID (sig
->ret
)) {
2002 cfg
->ret
= mono_compile_create_var (cfg
, sig
->ret
, OP_ARG
);
2003 /* Inhibit optimizations */
2004 cfg
->ret
->flags
|= MONO_INST_VOLATILE
;
2006 if (cfg
->verbose_level
> 2)
2007 g_print ("creating vars\n");
2009 cfg
->args
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, (sig
->param_count
+ sig
->hasthis
) * sizeof (MonoInst
*));
2012 MonoInst
* arg
= mono_compile_create_var (cfg
, m_class_get_this_arg (cfg
->method
->klass
), OP_ARG
);
2013 mono_apply_volatile (arg
, header
->volatile_args
, 0);
2014 cfg
->args
[0] = arg
;
2015 cfg
->this_arg
= arg
;
2018 for (i
= 0; i
< sig
->param_count
; ++i
) {
2019 MonoInst
* arg
= mono_compile_create_var (cfg
, sig
->params
[i
], OP_ARG
);
2020 mono_apply_volatile (arg
, header
->volatile_args
, i
+ sig
->hasthis
);
2021 cfg
->args
[i
+ sig
->hasthis
] = arg
;
2024 if (cfg
->verbose_level
> 2) {
2026 printf ("\treturn : ");
2027 mono_print_ins (cfg
->ret
);
2031 printf ("\tthis: ");
2032 mono_print_ins (cfg
->args
[0]);
2035 for (i
= 0; i
< sig
->param_count
; ++i
) {
2036 printf ("\targ [%d]: ", i
);
2037 mono_print_ins (cfg
->args
[i
+ sig
->hasthis
]);
2041 cfg
->locals_start
= cfg
->num_varinfo
;
2042 cfg
->locals
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, header
->num_locals
* sizeof (MonoInst
*));
2044 if (cfg
->verbose_level
> 2)
2045 g_print ("creating locals\n");
2047 for (i
= 0; i
< header
->num_locals
; ++i
) {
2048 if (cfg
->verbose_level
> 2)
2049 g_print ("\tlocal [%d]: ", i
);
2050 cfg
->locals
[i
] = mono_compile_create_var (cfg
, header
->locals
[i
], OP_LOCAL
);
2051 mono_apply_volatile (cfg
->locals
[i
], header
->volatile_locals
, i
);
2054 if (cfg
->verbose_level
> 2)
2055 g_print ("locals done\n");
2058 if (COMPILE_LLVM (cfg
))
2059 mono_llvm_create_vars (cfg
);
2061 mono_arch_create_vars (cfg
);
2063 mono_arch_create_vars (cfg
);
2066 if (cfg
->method
->save_lmf
&& cfg
->create_lmf_var
) {
2067 MonoInst
*lmf_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2068 lmf_var
->flags
|= MONO_INST_VOLATILE
;
2069 lmf_var
->flags
|= MONO_INST_LMF
;
2070 cfg
->lmf_var
= lmf_var
;
2075 mono_print_code (MonoCompile
*cfg
, const char* msg
)
2079 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2080 mono_print_bb (bb
, msg
);
2084 mono_postprocess_patches (MonoCompile
*cfg
)
2086 MonoJumpInfo
*patch_info
;
2089 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
2090 switch (patch_info
->type
) {
2091 case MONO_PATCH_INFO_ABS
: {
2093 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2096 if (cfg
->abs_patches
) {
2097 MonoJumpInfo
*abs_ji
= (MonoJumpInfo
*)g_hash_table_lookup (cfg
->abs_patches
, patch_info
->data
.target
);
2099 patch_info
->type
= abs_ji
->type
;
2100 patch_info
->data
.target
= abs_ji
->data
.target
;
2105 case MONO_PATCH_INFO_SWITCH
: {
2107 if (cfg
->method
->dynamic
) {
2108 table
= (void **)mono_code_manager_reserve (cfg
->dynamic_info
->code_mp
, sizeof (gpointer
) * patch_info
->data
.table
->table_size
);
2110 table
= (void **)mono_domain_code_reserve (cfg
->domain
, sizeof (gpointer
) * patch_info
->data
.table
->table_size
);
2113 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++) {
2114 /* Might be NULL if the switch is eliminated */
2115 if (patch_info
->data
.table
->table
[i
]) {
2116 g_assert (patch_info
->data
.table
->table
[i
]->native_offset
);
2117 table
[i
] = GINT_TO_POINTER (patch_info
->data
.table
->table
[i
]->native_offset
);
2122 patch_info
->data
.table
->table
= (MonoBasicBlock
**)table
;
2125 case MONO_PATCH_INFO_METHOD_JUMP
: {
2126 unsigned char *ip
= cfg
->native_code
+ patch_info
->ip
.i
;
2128 mini_register_jump_site (cfg
->domain
, patch_info
->data
.method
, ip
);
2139 mono_codegen (MonoCompile
*cfg
)
2142 int max_epilog_size
;
2144 MonoDomain
*code_domain
;
2145 guint unwindlen
= 0;
2147 if (mono_using_xdebug
)
2149 * Recent gdb versions have trouble processing symbol files containing
2150 * overlapping address ranges, so allocate all code from the code manager
2151 * of the root domain. (#666152).
2153 code_domain
= mono_get_root_domain ();
2155 code_domain
= cfg
->domain
;
2157 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2158 cfg
->spill_count
= 0;
2159 /* we reuse dfn here */
2160 /* bb->dfn = bb_count++; */
2162 mono_arch_lowering_pass (cfg
, bb
);
2164 if (cfg
->opt
& MONO_OPT_PEEPHOLE
)
2165 mono_arch_peephole_pass_1 (cfg
, bb
);
2167 mono_local_regalloc (cfg
, bb
);
2169 if (cfg
->opt
& MONO_OPT_PEEPHOLE
)
2170 mono_arch_peephole_pass_2 (cfg
, bb
);
2172 if (cfg
->gen_seq_points
&& !cfg
->gen_sdb_seq_points
)
2173 mono_bb_deduplicate_op_il_seq_points (cfg
, bb
);
2176 code
= mono_arch_emit_prolog (cfg
);
2178 set_code_cursor (cfg
, code
);
2179 cfg
->prolog_end
= cfg
->code_len
;
2180 cfg
->cfa_reg
= cfg
->cur_cfa_reg
;
2181 cfg
->cfa_offset
= cfg
->cur_cfa_offset
;
2183 mono_debug_open_method (cfg
);
2185 /* emit code all basic blocks */
2186 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2187 bb
->native_offset
= cfg
->code_len
;
2188 bb
->real_native_offset
= cfg
->code_len
;
2189 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2190 mono_arch_output_basic_block (cfg
, bb
);
2191 bb
->native_length
= cfg
->code_len
- bb
->native_offset
;
2193 if (bb
== cfg
->bb_exit
) {
2194 cfg
->epilog_begin
= cfg
->code_len
;
2195 mono_arch_emit_epilog (cfg
);
2196 cfg
->epilog_end
= cfg
->code_len
;
2199 if (bb
->clause_holes
) {
2201 for (tmp
= bb
->clause_holes
; tmp
; tmp
= tmp
->prev
)
2202 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, cfg
->native_code
+ bb
->native_offset
, bb
);
2206 mono_arch_emit_exceptions (cfg
);
2208 max_epilog_size
= 0;
2210 /* we always allocate code in cfg->domain->code_mp to increase locality */
2211 cfg
->code_size
= cfg
->code_len
+ max_epilog_size
;
2213 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2215 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2216 if (!cfg
->compile_aot
)
2217 unwindlen
= mono_arch_unwindinfo_init_method_unwind_info (cfg
);
2220 if (cfg
->method
->dynamic
) {
2221 /* Allocate the code into a separate memory pool so it can be freed */
2222 cfg
->dynamic_info
= g_new0 (MonoJitDynamicMethodInfo
, 1);
2223 cfg
->dynamic_info
->code_mp
= mono_code_manager_new_dynamic ();
2224 mono_domain_lock (cfg
->domain
);
2225 mono_dynamic_code_hash_insert (cfg
->domain
, cfg
->method
, cfg
->dynamic_info
);
2226 mono_domain_unlock (cfg
->domain
);
2228 if (mono_using_xdebug
)
2229 /* See the comment for cfg->code_domain */
2230 code
= (guint8
*)mono_domain_code_reserve (code_domain
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2232 code
= (guint8
*)mono_code_manager_reserve (cfg
->dynamic_info
->code_mp
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2234 code
= (guint8
*)mono_domain_code_reserve (code_domain
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2237 if (cfg
->thunk_area
) {
2238 cfg
->thunks_offset
= cfg
->code_size
+ unwindlen
;
2239 cfg
->thunks
= code
+ cfg
->thunks_offset
;
2240 memset (cfg
->thunks
, 0, cfg
->thunk_area
);
2244 memcpy (code
, cfg
->native_code
, cfg
->code_len
);
2245 g_free (cfg
->native_code
);
2246 cfg
->native_code
= code
;
2247 code
= cfg
->native_code
+ cfg
->code_len
;
2249 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2250 mono_postprocess_patches (cfg
);
2252 #ifdef VALGRIND_JIT_REGISTER_MAP
2253 if (valgrind_register
){
2254 char* nm
= mono_method_full_name (cfg
->method
, TRUE
);
2255 VALGRIND_JIT_REGISTER_MAP (nm
, cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
);
2260 if (cfg
->verbose_level
> 0) {
2261 char* nm
= mono_method_get_full_name (cfg
->method
);
2262 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2264 cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
, cfg
->code_len
, cfg
->domain
->friendly_name
);
2269 gboolean is_generic
= FALSE
;
2271 if (cfg
->method
->is_inflated
|| mono_method_get_generic_container (cfg
->method
) ||
2272 mono_class_is_gtd (cfg
->method
->klass
) || mono_class_is_ginst (cfg
->method
->klass
)) {
2277 g_assert (is_generic
);
2280 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2281 mono_arch_save_unwind_info (cfg
);
2284 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2289 for (ji
= cfg
->patch_info
; ji
; ji
= ji
->next
) {
2290 if (cfg
->compile_aot
) {
2292 case MONO_PATCH_INFO_BB
:
2293 case MONO_PATCH_INFO_LABEL
:
2296 /* No need to patch these */
2301 if (ji
->type
== MONO_PATCH_INFO_NONE
)
2304 target
= mono_resolve_patch_target (cfg
->method
, cfg
->domain
, cfg
->native_code
, ji
, cfg
->run_cctors
, &cfg
->error
);
2305 if (!mono_error_ok (&cfg
->error
)) {
2306 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2309 mono_arch_patch_code_new (cfg
, cfg
->domain
, cfg
->native_code
, ji
, target
);
2313 mono_arch_patch_code (cfg
, cfg
->method
, cfg
->domain
, cfg
->native_code
, cfg
->patch_info
, cfg
->run_cctors
, &cfg
->error
);
2314 if (!is_ok (&cfg
->error
)) {
2315 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2320 if (cfg
->method
->dynamic
) {
2321 if (mono_using_xdebug
)
2322 mono_domain_code_commit (code_domain
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2324 mono_code_manager_commit (cfg
->dynamic_info
->code_mp
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2326 mono_domain_code_commit (code_domain
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2328 MONO_PROFILER_RAISE (jit_code_buffer
, (cfg
->native_code
, cfg
->code_len
, MONO_PROFILER_CODE_BUFFER_METHOD
, cfg
->method
));
2330 mono_arch_flush_icache (cfg
->native_code
, cfg
->code_len
);
2332 mono_debug_close_method (cfg
);
2334 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2335 if (!cfg
->compile_aot
)
2336 mono_arch_unwindinfo_install_method_unwind_info (&cfg
->arch
.unwindinfo
, cfg
->native_code
, cfg
->code_len
);
2341 compute_reachable (MonoBasicBlock
*bb
)
2345 if (!(bb
->flags
& BB_VISITED
)) {
2346 bb
->flags
|= BB_VISITED
;
2347 for (i
= 0; i
< bb
->out_count
; ++i
)
2348 compute_reachable (bb
->out_bb
[i
]);
2352 static void mono_bb_ordering (MonoCompile
*cfg
)
2355 /* Depth-first ordering on basic blocks */
2356 cfg
->bblocks
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * (cfg
->num_bblocks
+ 1));
2358 cfg
->max_block_num
= cfg
->num_bblocks
;
2360 df_visit (cfg
->bb_entry
, &dfn
, cfg
->bblocks
);
2362 #if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
2363 /* workaround for an AMD specific issue that only happens on GCC 7 so far,
2364 * for more information see https://github.com/mono/mono/issues/9298 */
2365 mono_memory_barrier ();
2367 g_assertf (cfg
->num_bblocks
>= dfn
, "cfg->num_bblocks=%d, dfn=%d\n", cfg
->num_bblocks
, dfn
);
2369 if (cfg
->num_bblocks
!= dfn
+ 1) {
2372 cfg
->num_bblocks
= dfn
+ 1;
2374 /* remove unreachable code, because the code in them may be
2375 * inconsistent (access to dead variables for example) */
2376 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2377 bb
->flags
&= ~BB_VISITED
;
2378 compute_reachable (cfg
->bb_entry
);
2379 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2380 if (bb
->flags
& BB_EXCEPTION_HANDLER
)
2381 compute_reachable (bb
);
2382 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2383 if (!(bb
->flags
& BB_VISITED
)) {
2384 if (cfg
->verbose_level
> 1)
2385 g_print ("found unreachable code in BB%d\n", bb
->block_num
);
2386 bb
->code
= bb
->last_ins
= NULL
;
2387 while (bb
->out_count
)
2388 mono_unlink_bblock (cfg
, bb
, bb
->out_bb
[0]);
2391 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2392 bb
->flags
&= ~BB_VISITED
;
2397 mono_handle_out_of_line_bblock (MonoCompile
*cfg
)
2400 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2401 if (bb
->next_bb
&& bb
->next_bb
->out_of_line
&& bb
->last_ins
&& !MONO_IS_BRANCH_OP (bb
->last_ins
)) {
2403 MONO_INST_NEW (cfg
, ins
, OP_BR
);
2404 MONO_ADD_INS (bb
, ins
);
2405 ins
->inst_target_bb
= bb
->next_bb
;
2411 create_jit_info (MonoCompile
*cfg
, MonoMethod
*method_to_compile
)
2414 MonoMethodHeader
*header
;
2416 MonoJitInfoFlags flags
= JIT_INFO_NONE
;
2417 int num_clauses
, num_holes
= 0;
2418 guint32 stack_size
= 0;
2420 g_assert (method_to_compile
== cfg
->method
);
2421 header
= cfg
->header
;
2424 flags
|= JIT_INFO_HAS_GENERIC_JIT_INFO
;
2426 if (cfg
->arch_eh_jit_info
) {
2427 MonoJitArgumentInfo
*arg_info
;
2428 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method_to_register
);
2431 * This cannot be computed during stack walking, as
2432 * mono_arch_get_argument_info () is not signal safe.
2434 arg_info
= g_newa (MonoJitArgumentInfo
, sig
->param_count
+ 1);
2435 stack_size
= mono_arch_get_argument_info (sig
, sig
->param_count
, arg_info
);
2438 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2441 if (cfg
->has_unwind_info_for_epilog
&& !(flags
& JIT_INFO_HAS_ARCH_EH_INFO
))
2442 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2444 if (cfg
->thunk_area
)
2445 flags
|= JIT_INFO_HAS_THUNK_INFO
;
2447 if (cfg
->try_block_holes
) {
2448 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2449 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2450 MonoExceptionClause
*ec
= hole
->clause
;
2451 int hole_end
= hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
;
2452 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2453 g_assert (clause_last_bb
);
2455 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2456 if (clause_last_bb
->native_offset
!= hole_end
)
2460 flags
|= JIT_INFO_HAS_TRY_BLOCK_HOLES
;
2461 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2462 printf ("Number of try block holes %d\n", num_holes
);
2465 if (COMPILE_LLVM (cfg
))
2466 num_clauses
= cfg
->llvm_ex_info_len
;
2468 num_clauses
= header
->num_clauses
;
2470 if (cfg
->method
->dynamic
)
2471 jinfo
= (MonoJitInfo
*)g_malloc0 (mono_jit_info_size (flags
, num_clauses
, num_holes
));
2473 jinfo
= (MonoJitInfo
*)mono_domain_alloc0 (cfg
->domain
, mono_jit_info_size (flags
, num_clauses
, num_holes
));
2474 jinfo_try_holes_size
+= num_holes
* sizeof (MonoTryBlockHoleJitInfo
);
2476 mono_jit_info_init (jinfo
, cfg
->method_to_register
, cfg
->native_code
, cfg
->code_len
, flags
, num_clauses
, num_holes
);
2477 jinfo
->domain_neutral
= (cfg
->opt
& MONO_OPT_SHARED
) != 0;
2479 if (COMPILE_LLVM (cfg
))
2480 jinfo
->from_llvm
= TRUE
;
2484 MonoGenericJitInfo
*gi
;
2485 GSList
*loclist
= NULL
;
2487 gi
= mono_jit_info_get_generic_jit_info (jinfo
);
2490 if (cfg
->method
->dynamic
)
2491 gi
->generic_sharing_context
= g_new0 (MonoGenericSharingContext
, 1);
2493 gi
->generic_sharing_context
= (MonoGenericSharingContext
*)mono_domain_alloc0 (cfg
->domain
, sizeof (MonoGenericSharingContext
));
2494 mini_init_gsctx (cfg
->method
->dynamic
? NULL
: cfg
->domain
, NULL
, cfg
->gsctx_context
, gi
->generic_sharing_context
);
2496 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2497 mini_method_get_context (method_to_compile
)->method_inst
||
2498 m_class_is_valuetype (method_to_compile
->klass
)) {
2499 g_assert (cfg
->rgctx_var
);
2504 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2505 mini_method_get_context (method_to_compile
)->method_inst
||
2506 m_class_is_valuetype (method_to_compile
->klass
)) {
2507 inst
= cfg
->rgctx_var
;
2508 if (!COMPILE_LLVM (cfg
))
2509 g_assert (inst
->opcode
== OP_REGOFFSET
);
2510 loclist
= cfg
->rgctx_loclist
;
2512 inst
= cfg
->args
[0];
2513 loclist
= cfg
->this_loclist
;
2517 /* Needed to handle async exceptions */
2521 gi
->nlocs
= g_slist_length (loclist
);
2522 if (cfg
->method
->dynamic
)
2523 gi
->locations
= (MonoDwarfLocListEntry
*)g_malloc0 (gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2525 gi
->locations
= (MonoDwarfLocListEntry
*)mono_domain_alloc0 (cfg
->domain
, gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2527 for (l
= loclist
; l
; l
= l
->next
) {
2528 memcpy (&(gi
->locations
[i
]), l
->data
, sizeof (MonoDwarfLocListEntry
));
2533 if (COMPILE_LLVM (cfg
)) {
2534 g_assert (cfg
->llvm_this_reg
!= -1);
2535 gi
->this_in_reg
= 0;
2536 gi
->this_reg
= cfg
->llvm_this_reg
;
2537 gi
->this_offset
= cfg
->llvm_this_offset
;
2538 } else if (inst
->opcode
== OP_REGVAR
) {
2539 gi
->this_in_reg
= 1;
2540 gi
->this_reg
= inst
->dreg
;
2542 g_assert (inst
->opcode
== OP_REGOFFSET
);
2544 g_assert (inst
->inst_basereg
== X86_EBP
);
2545 #elif defined(TARGET_AMD64)
2546 g_assert (inst
->inst_basereg
== X86_EBP
|| inst
->inst_basereg
== X86_ESP
);
2548 g_assert (inst
->inst_offset
>= G_MININT32
&& inst
->inst_offset
<= G_MAXINT32
);
2550 gi
->this_in_reg
= 0;
2551 gi
->this_reg
= inst
->inst_basereg
;
2552 gi
->this_offset
= inst
->inst_offset
;
2557 MonoTryBlockHoleTableJitInfo
*table
;
2560 table
= mono_jit_info_get_try_block_hole_table_info (jinfo
);
2561 table
->num_holes
= (guint16
)num_holes
;
2563 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2564 guint32 start_bb_offset
;
2565 MonoTryBlockHoleJitInfo
*hole
;
2566 TryBlockHole
*hole_data
= (TryBlockHole
*)tmp
->data
;
2567 MonoExceptionClause
*ec
= hole_data
->clause
;
2568 int hole_end
= hole_data
->basic_block
->native_offset
+ hole_data
->basic_block
->native_length
;
2569 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2570 g_assert (clause_last_bb
);
2572 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2573 if (clause_last_bb
->native_offset
== hole_end
)
2576 start_bb_offset
= hole_data
->start_offset
- hole_data
->basic_block
->native_offset
;
2577 hole
= &table
->holes
[i
++];
2578 hole
->clause
= hole_data
->clause
- &header
->clauses
[0];
2579 hole
->offset
= (guint32
)hole_data
->start_offset
;
2580 hole
->length
= (guint16
)(hole_data
->basic_block
->native_length
- start_bb_offset
);
2582 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2583 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole
->clause
, hole
->offset
, hole
->length
);
2585 g_assert (i
== num_holes
);
2588 if (jinfo
->has_arch_eh_info
) {
2589 MonoArchEHJitInfo
*info
;
2591 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2593 info
->stack_size
= stack_size
;
2596 if (cfg
->thunk_area
) {
2597 MonoThunkJitInfo
*info
;
2599 info
= mono_jit_info_get_thunk_info (jinfo
);
2600 info
->thunks_offset
= cfg
->thunks_offset
;
2601 info
->thunks_size
= cfg
->thunk_area
;
2604 if (COMPILE_LLVM (cfg
)) {
2606 memcpy (&jinfo
->clauses
[0], &cfg
->llvm_ex_info
[0], num_clauses
* sizeof (MonoJitExceptionInfo
));
2607 } else if (header
->num_clauses
) {
2610 for (i
= 0; i
< header
->num_clauses
; i
++) {
2611 MonoExceptionClause
*ec
= &header
->clauses
[i
];
2612 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2613 MonoBasicBlock
*tblock
;
2616 ei
->flags
= ec
->flags
;
2618 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2619 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec
->try_offset
, ec
->try_offset
+ ec
->try_len
, ec
->handler_offset
, ec
->handler_offset
+ ec
->handler_len
, ec
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
? ec
->data
.filter_offset
: 0);
2621 exvar
= mono_find_exvar_for_offset (cfg
, ec
->handler_offset
);
2622 ei
->exvar_offset
= exvar
? exvar
->inst_offset
: 0;
2624 if (ei
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
2625 tblock
= cfg
->cil_offset_to_bb
[ec
->data
.filter_offset
];
2627 ei
->data
.filter
= cfg
->native_code
+ tblock
->native_offset
;
2629 ei
->data
.catch_class
= ec
->data
.catch_class
;
2632 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
];
2634 g_assert (tblock
->native_offset
);
2635 ei
->try_start
= cfg
->native_code
+ tblock
->native_offset
;
2636 if (tblock
->extend_try_block
) {
2638 * Extend the try block backwards to include parts of the previous call
2641 ei
->try_start
= (guint8
*)ei
->try_start
- cfg
->backend
->monitor_enter_adjustment
;
2643 if (ec
->try_offset
+ ec
->try_len
< header
->code_size
)
2644 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2646 tblock
= cfg
->bb_exit
;
2647 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2648 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec
->try_offset
, ec
->try_len
, tblock
, header
->code_size
);
2650 if (!tblock
->native_offset
) {
2652 for (j
= ec
->try_offset
+ ec
->try_len
, end
= ec
->try_offset
; j
>= end
; --j
) {
2653 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2654 if (bb
&& bb
->native_offset
) {
2660 ei
->try_end
= cfg
->native_code
+ tblock
->native_offset
;
2661 g_assert (tblock
->native_offset
);
2662 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
];
2664 ei
->handler_start
= cfg
->native_code
+ tblock
->native_offset
;
2666 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2667 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2668 gpointer hole_end
= cfg
->native_code
+ (hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
);
2669 if (hole
->clause
== ec
&& hole_end
== ei
->try_end
) {
2670 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2671 printf ("\tShortening try block %d from %x to %x\n", i
, (int)((guint8
*)ei
->try_end
- cfg
->native_code
), hole
->start_offset
);
2673 ei
->try_end
= cfg
->native_code
+ hole
->start_offset
;
2678 if (ec
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
) {
2680 if (ec
->handler_offset
+ ec
->handler_len
< header
->code_size
) {
2681 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
+ ec
->handler_len
];
2682 if (tblock
->native_offset
) {
2683 end_offset
= tblock
->native_offset
;
2687 for (j
= ec
->handler_offset
+ ec
->handler_len
, end
= ec
->handler_offset
; j
>= end
; --j
) {
2688 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2689 if (bb
&& bb
->native_offset
) {
2694 end_offset
= tblock
->native_offset
+ tblock
->native_length
;
2697 end_offset
= cfg
->epilog_begin
;
2699 ei
->data
.handler_end
= cfg
->native_code
+ end_offset
;
2704 if (G_UNLIKELY (cfg
->verbose_level
>= 4)) {
2706 for (i
= 0; i
< jinfo
->num_clauses
; i
++) {
2707 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2708 int start
= (guint8
*)ei
->try_start
- cfg
->native_code
;
2709 int end
= (guint8
*)ei
->try_end
- cfg
->native_code
;
2710 int handler
= (guint8
*)ei
->handler_start
- cfg
->native_code
;
2711 int handler_end
= (guint8
*)ei
->data
.handler_end
- cfg
->native_code
;
2713 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i
, ei
->flags
, start
, end
, handler
, handler_end
);
2717 if (cfg
->encoded_unwind_ops
) {
2718 /* Generated by LLVM */
2719 jinfo
->unwind_info
= mono_cache_unwind_info (cfg
->encoded_unwind_ops
, cfg
->encoded_unwind_ops_len
);
2720 g_free (cfg
->encoded_unwind_ops
);
2721 } else if (cfg
->unwind_ops
) {
2723 guint8
*unwind_info
= mono_unwind_ops_encode (cfg
->unwind_ops
, &info_len
);
2724 guint32 unwind_desc
;
2726 unwind_desc
= mono_cache_unwind_info (unwind_info
, info_len
);
2728 if (cfg
->has_unwind_info_for_epilog
) {
2729 MonoArchEHJitInfo
*info
;
2731 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2733 info
->epilog_size
= cfg
->code_len
- cfg
->epilog_begin
;
2735 jinfo
->unwind_info
= unwind_desc
;
2736 g_free (unwind_info
);
2738 jinfo
->unwind_info
= cfg
->used_int_regs
;
2744 /* Return whenever METHOD is a gsharedvt method */
2746 is_gsharedvt_method (MonoMethod
*method
)
2748 MonoGenericContext
*context
;
2749 MonoGenericInst
*inst
;
2752 if (!method
->is_inflated
)
2754 context
= mono_method_get_context (method
);
2755 inst
= context
->class_inst
;
2757 for (i
= 0; i
< inst
->type_argc
; ++i
)
2758 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2761 inst
= context
->method_inst
;
2763 for (i
= 0; i
< inst
->type_argc
; ++i
)
2764 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2771 is_open_method (MonoMethod
*method
)
2773 MonoGenericContext
*context
;
2775 if (!method
->is_inflated
)
2777 context
= mono_method_get_context (method
);
2778 if (context
->class_inst
&& context
->class_inst
->is_open
)
2780 if (context
->method_inst
&& context
->method_inst
->is_open
)
2786 mono_insert_nop_in_empty_bb (MonoCompile
*cfg
)
2789 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2793 MONO_INST_NEW (cfg
, nop
, OP_NOP
);
2794 MONO_ADD_INS (bb
, nop
);
2798 insert_safepoint (MonoCompile
*cfg
, MonoBasicBlock
*bblock
)
2800 MonoInst
*poll_addr
, *ins
;
2802 if (cfg
->disable_gc_safe_points
)
2805 if (cfg
->verbose_level
> 1)
2806 printf ("ADDING SAFE POINT TO BB %d\n", bblock
->block_num
);
2808 g_assert (mini_safepoints_enabled ());
2809 NEW_AOTCONST (cfg
, poll_addr
, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG
, (gpointer
)&mono_polling_required
);
2811 MONO_INST_NEW (cfg
, ins
, OP_GC_SAFE_POINT
);
2812 ins
->sreg1
= poll_addr
->dreg
;
2814 if (bblock
->flags
& BB_EXCEPTION_HANDLER
) {
2815 MonoInst
*eh_op
= bblock
->code
;
2817 if (eh_op
&& eh_op
->opcode
!= OP_START_HANDLER
&& eh_op
->opcode
!= OP_GET_EX_OBJ
) {
2820 MonoInst
*next_eh_op
= eh_op
? eh_op
->next
: NULL
;
2821 // skip all EH relateds ops
2822 while (next_eh_op
&& (next_eh_op
->opcode
== OP_START_HANDLER
|| next_eh_op
->opcode
== OP_GET_EX_OBJ
)) {
2824 next_eh_op
= eh_op
->next
;
2828 mono_bblock_insert_after_ins (bblock
, eh_op
, poll_addr
);
2829 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2830 } else if (bblock
== cfg
->bb_entry
) {
2831 mono_bblock_insert_after_ins (bblock
, bblock
->last_ins
, poll_addr
);
2832 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2835 mono_bblock_insert_before_ins (bblock
, NULL
, poll_addr
);
2836 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
/*
 * This code inserts safepoints into managed code at important code paths.
 * Those are:
 *
 * - the first basic block
 * - landing BBs for exception handlers
 * - loop body starts
 */
2850 insert_safepoints (MonoCompile
*cfg
)
2854 g_assert (mini_safepoints_enabled ());
2856 if (COMPILE_LLVM (cfg
)) {
2857 if (!cfg
->llvm_only
&& cfg
->compile_aot
) {
2858 /* We rely on LLVM's safepoints insertion capabilities. */
2859 if (cfg
->verbose_level
> 1)
2860 printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
2865 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
2866 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2867 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2868 if (info
&& info
->subtype
== WRAPPER_SUBTYPE_ICALL_WRAPPER
&&
2869 (info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_threads_state_poll
||
2870 info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_thread_interruption_checkpoint
||
2871 info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_threads_exit_gc_safe_region_unbalanced
)) {
2872 if (cfg
->verbose_level
> 1)
2873 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2878 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
2879 if (cfg
->verbose_level
> 1)
2880 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2884 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
2885 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2887 if (info
&& (info
->subtype
== WRAPPER_SUBTYPE_INTERP_IN
|| info
->subtype
== WRAPPER_SUBTYPE_INTERP_LMF
)) {
2888 /* These wrappers shouldn't do any icalls */
2889 if (cfg
->verbose_level
> 1)
2890 printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
2895 if (cfg
->verbose_level
> 1)
2896 printf ("INSERTING SAFEPOINTS\n");
2897 if (cfg
->verbose_level
> 2)
2898 mono_print_code (cfg
, "BEFORE SAFEPOINTS");
2900 /* if the method doesn't contain
2901 * (1) a call (so it's a leaf method)
2903 * we can skip the GC safepoint on method entry. */
2904 gboolean requires_safepoint
= cfg
->has_calls
;
2906 for (bb
= cfg
->bb_entry
->next_bb
; bb
; bb
= bb
->next_bb
) {
2907 if (bb
->loop_body_start
|| (bb
->flags
& BB_EXCEPTION_HANDLER
)) {
2908 requires_safepoint
= TRUE
;
2909 insert_safepoint (cfg
, bb
);
2913 if (requires_safepoint
)
2914 insert_safepoint (cfg
, cfg
->bb_entry
);
2916 if (cfg
->verbose_level
> 2)
2917 mono_print_code (cfg
, "AFTER SAFEPOINTS");
2923 mono_insert_branches_between_bblocks (MonoCompile
*cfg
)
2927 /* Add branches between non-consecutive bblocks */
2928 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2929 if (bb
->last_ins
&& MONO_IS_COND_BRANCH_OP (bb
->last_ins
) &&
2930 bb
->last_ins
->inst_false_bb
&& bb
->next_bb
!= bb
->last_ins
->inst_false_bb
) {
2931 /* we are careful when inverting, since bugs like #59580
2932 * could show up when dealing with NaNs.
2934 if (MONO_IS_COND_BRANCH_NOFP(bb
->last_ins
) && bb
->next_bb
== bb
->last_ins
->inst_true_bb
) {
2935 MonoBasicBlock
*tmp
= bb
->last_ins
->inst_true_bb
;
2936 bb
->last_ins
->inst_true_bb
= bb
->last_ins
->inst_false_bb
;
2937 bb
->last_ins
->inst_false_bb
= tmp
;
2939 bb
->last_ins
->opcode
= mono_reverse_branch_op (bb
->last_ins
->opcode
);
2941 MonoInst
*inst
= (MonoInst
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
));
2942 inst
->opcode
= OP_BR
;
2943 inst
->inst_target_bb
= bb
->last_ins
->inst_false_bb
;
2944 mono_bblock_add_inst (bb
, inst
);
2949 if (cfg
->verbose_level
>= 4) {
2950 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2951 MonoInst
*tree
= bb
->code
;
2952 g_print ("DUMP BLOCK %d:\n", bb
->block_num
);
2955 for (; tree
; tree
= tree
->next
) {
2956 mono_print_ins_index (-1, tree
);
2962 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2963 bb
->max_vreg
= cfg
->next_vreg
;
2968 init_backend (MonoBackend
*backend
)
2970 #ifdef MONO_ARCH_NEED_GOT_VAR
2971 backend
->need_got_var
= 1;
2973 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2974 backend
->have_card_table_wb
= 1;
2976 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
2977 backend
->have_op_generic_class_init
= 1;
2979 #ifdef MONO_ARCH_EMULATE_MUL_DIV
2980 backend
->emulate_mul_div
= 1;
2982 #ifdef MONO_ARCH_EMULATE_DIV
2983 backend
->emulate_div
= 1;
2985 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
2986 backend
->emulate_long_shift_opts
= 1;
2988 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
2989 backend
->have_objc_get_selector
= 1;
2991 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
2992 backend
->have_generalized_imt_trampoline
= 1;
2994 #ifdef MONO_ARCH_GSHARED_SUPPORTED
2995 backend
->gshared_supported
= 1;
2997 if (MONO_ARCH_USE_FPSTACK
)
2998 backend
->use_fpstack
= 1;
2999 // Does the ABI have a volatile non-parameter register, so tailcall
3000 // can pass context to generics or interfaces?
3001 backend
->have_volatile_non_param_register
= MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER
;
3002 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
3003 backend
->have_op_tailcall_membase
= 1;
3005 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
3006 backend
->have_op_tailcall_reg
= 1;
3008 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3009 backend
->monitor_enter_adjustment
= 1;
3011 backend
->monitor_enter_adjustment
= MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
;
3013 #if defined(MONO_ARCH_ILP32)
3016 #ifdef MONO_ARCH_NEED_DIV_CHECK
3017 backend
->need_div_check
= 1;
3019 #ifdef NO_UNALIGNED_ACCESS
3020 backend
->no_unaligned_access
= 1;
3022 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3023 backend
->dyn_call_param_area
= MONO_ARCH_DYN_CALL_PARAM_AREA
;
3025 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3026 backend
->disable_div_with_mul
= 1;
3028 #ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
3029 backend
->explicit_null_checks
= 1;
3031 #ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
3032 backend
->optimized_div
= 1;
3037 * mini_method_compile:
3038 * @method: the method to compile
3039 * @opts: the optimization flags to use
3040 * @domain: the domain where the method will be compiled in
3041 * @flags: compilation flags
3042 * @parts: debug flag
3044 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3045 * field in the returned struct to see if compilation succeded.
3048 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
3050 MonoMethodHeader
*header
;
3051 MonoMethodSignature
*sig
;
3054 gboolean try_generic_shared
, try_llvm
= FALSE
;
3055 MonoMethod
*method_to_compile
, *method_to_register
;
3056 gboolean method_is_gshared
= FALSE
;
3057 gboolean run_cctors
= (flags
& JIT_FLAG_RUN_CCTORS
) ? 1 : 0;
3058 gboolean compile_aot
= (flags
& JIT_FLAG_AOT
) ? 1 : 0;
3059 gboolean full_aot
= (flags
& JIT_FLAG_FULL_AOT
) ? 1 : 0;
3060 gboolean disable_direct_icalls
= (flags
& JIT_FLAG_NO_DIRECT_ICALLS
) ? 1 : 0;
3061 gboolean gsharedvt_method
= FALSE
;
3063 gboolean llvm
= (flags
& JIT_FLAG_LLVM
) ? 1 : 0;
3065 static gboolean verbose_method_inited
;
3066 static char **verbose_method_names
;
3068 mono_atomic_inc_i32 (&mono_jit_stats
.methods_compiled
);
3069 MONO_PROFILER_RAISE (jit_begin
, (method
));
3070 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3071 MONO_PROBE_METHOD_COMPILE_BEGIN (method
);
3073 gsharedvt_method
= is_gsharedvt_method (method
);
3076 * In AOT mode, method can be the following:
3077 * - a gsharedvt method.
3078 * - a method inflated with type parameters. This is for ref/partial sharing.
3079 * - a method inflated with concrete types.
3082 if (is_open_method (method
)) {
3083 try_generic_shared
= TRUE
;
3084 method_is_gshared
= TRUE
;
3086 try_generic_shared
= FALSE
;
3088 g_assert (opts
& MONO_OPT_GSHARED
);
3090 try_generic_shared
= mono_class_generic_sharing_enabled (method
->klass
) &&
3091 (opts
& MONO_OPT_GSHARED
) && mono_method_is_generic_sharable_full (method
, FALSE
, FALSE
, FALSE
);
3092 if (mini_is_gsharedvt_sharable_method (method
)) {
3094 if (!mono_debug_count ())
3095 try_generic_shared = FALSE;
3101 if (try_generic_shared && !mono_debug_count ())
3102 try_generic_shared = FALSE;
3105 if (opts
& MONO_OPT_GSHARED
) {
3106 if (try_generic_shared
)
3107 mono_atomic_inc_i32 (&mono_stats
.generics_sharable_methods
);
3108 else if (mono_method_is_generic_impl (method
))
3109 mono_atomic_inc_i32 (&mono_stats
.generics_unsharable_methods
);
3113 try_llvm
= mono_use_llvm
|| llvm
;
3116 #ifndef MONO_ARCH_FLOAT32_SUPPORTED
3117 opts
&= ~MONO_OPT_FLOAT32
;
3121 if (method_is_gshared
) {
3122 method_to_compile
= method
;
3124 if (try_generic_shared
) {
3126 method_to_compile
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
3127 mono_error_assert_ok (error
);
3129 method_to_compile
= method
;
3133 cfg
= g_new0 (MonoCompile
, 1);
3134 cfg
->method
= method_to_compile
;
3135 cfg
->mempool
= mono_mempool_new ();
3137 cfg
->run_cctors
= run_cctors
;
3138 cfg
->domain
= domain
;
3139 cfg
->verbose_level
= mini_verbose
;
3140 cfg
->compile_aot
= compile_aot
;
3141 cfg
->full_aot
= full_aot
;
3142 cfg
->disable_omit_fp
= mini_debug_options
.disable_omit_fp
;
3143 cfg
->skip_visibility
= method
->skip_visibility
;
3144 cfg
->orig_method
= method
;
3145 cfg
->gen_seq_points
= !mini_debug_options
.no_seq_points_compact_data
|| mini_debug_options
.gen_sdb_seq_points
;
3146 cfg
->gen_sdb_seq_points
= mini_debug_options
.gen_sdb_seq_points
;
3147 cfg
->llvm_only
= (flags
& JIT_FLAG_LLVM_ONLY
) != 0;
3148 cfg
->interp
= (flags
& JIT_FLAG_INTERP
) != 0;
3149 cfg
->backend
= current_backend
;
3152 if (cfg
->method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
3153 /* FIXME: Why is this needed */
3154 cfg
->gen_seq_points
= FALSE
;
3155 cfg
->gen_sdb_seq_points
= FALSE
;
3158 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_ALLOC
) {
3159 /* We can't have seq points inside gc critical regions */
3160 cfg
->gen_seq_points
= FALSE
;
3161 cfg
->gen_sdb_seq_points
= FALSE
;
3163 /* coop requires loop detection to happen */
3164 if (mini_safepoints_enabled ())
3165 cfg
->opt
|= MONO_OPT_LOOP
;
3166 if (cfg
->backend
->explicit_null_checks
) {
3167 /* some platforms have null pages, so we can't SIGSEGV */
3168 cfg
->explicit_null_checks
= TRUE
;
3170 cfg
->explicit_null_checks
= mini_debug_options
.explicit_null_checks
|| (flags
& JIT_FLAG_EXPLICIT_NULL_CHECKS
);
3172 cfg
->soft_breakpoints
= mini_debug_options
.soft_breakpoints
;
3173 cfg
->check_pinvoke_callconv
= mini_debug_options
.check_pinvoke_callconv
;
3174 cfg
->disable_direct_icalls
= disable_direct_icalls
;
3175 cfg
->direct_pinvoke
= (flags
& JIT_FLAG_DIRECT_PINVOKE
) != 0;
3176 if (try_generic_shared
)
3177 cfg
->gshared
= TRUE
;
3178 cfg
->compile_llvm
= try_llvm
;
3179 cfg
->token_info_hash
= g_hash_table_new (NULL
, NULL
);
3180 if (cfg
->compile_aot
)
3181 cfg
->method_index
= aot_method_index
;
3184 if (!mono_debug_count ())
3185 cfg->opt &= ~MONO_OPT_FLOAT32;
3188 cfg
->opt
&= ~MONO_OPT_SIMD
;
3189 cfg
->r4fp
= (cfg
->opt
& MONO_OPT_FLOAT32
) ? 1 : 0;
3190 cfg
->r4_stack_type
= cfg
->r4fp
? STACK_R4
: STACK_R8
;
3192 if (cfg
->gen_seq_points
)
3193 cfg
->seq_points
= g_ptr_array_new ();
3194 error_init (&cfg
->error
);
3196 if (cfg
->compile_aot
&& !try_generic_shared
&& (method
->is_generic
|| mono_class_is_gtd (method
->klass
) || method_is_gshared
)) {
3197 cfg
->exception_type
= MONO_EXCEPTION_GENERIC_SHARING_FAILED
;
3201 if (cfg
->gshared
&& (gsharedvt_method
|| mini_is_gsharedvt_sharable_method (method
))) {
3202 MonoMethodInflated
*inflated
;
3203 MonoGenericContext
*context
;
3205 if (gsharedvt_method
) {
3206 g_assert (method
->is_inflated
);
3207 inflated
= (MonoMethodInflated
*)method
;
3208 context
= &inflated
->context
;
3210 /* We are compiling a gsharedvt method directly */
3211 g_assert (compile_aot
);
3213 g_assert (method_to_compile
->is_inflated
);
3214 inflated
= (MonoMethodInflated
*)method_to_compile
;
3215 context
= &inflated
->context
;
3218 mini_init_gsctx (NULL
, cfg
->mempool
, context
, &cfg
->gsctx
);
3219 cfg
->gsctx_context
= context
;
3221 cfg
->gsharedvt
= TRUE
;
3222 if (!cfg
->llvm_only
) {
3223 cfg
->disable_llvm
= TRUE
;
3224 cfg
->exception_message
= g_strdup ("gsharedvt");
3229 method_to_register
= method_to_compile
;
3231 g_assert (method
== method_to_compile
);
3232 method_to_register
= method
;
3234 cfg
->method_to_register
= method_to_register
;
3237 sig
= mono_method_signature_checked (cfg
->method
, err
);
3239 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3240 cfg
->exception_message
= g_strdup (mono_error_get_message (err
));
3241 mono_error_cleanup (err
);
3242 if (MONO_METHOD_COMPILE_END_ENABLED ())
3243 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3247 header
= cfg
->header
= mono_method_get_header_checked (cfg
->method
, &cfg
->error
);
3249 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3250 if (MONO_METHOD_COMPILE_END_ENABLED ())
3251 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3257 static gboolean inited
;
3263 * Check for methods which cannot be compiled by LLVM early, to avoid
3264 * the extra compilation pass.
3266 if (COMPILE_LLVM (cfg
)) {
3267 mono_llvm_check_method_supported (cfg
);
3268 if (cfg
->disable_llvm
) {
3269 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3270 //nm = mono_method_full_name (cfg->method, TRUE);
3271 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3274 if (cfg
->llvm_only
) {
3275 g_free (cfg
->exception_message
);
3276 cfg
->disable_aot
= TRUE
;
3279 mono_destroy_compile (cfg
);
3281 goto restart_compile
;
3287 cfg
->prof_flags
= mono_profiler_get_call_instrumentation_flags (cfg
->method
);
3288 cfg
->prof_coverage
= mono_profiler_coverage_instrumentation_enabled (cfg
->method
);
3290 gboolean trace
= mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
);
3292 cfg
->prof_flags
= (MonoProfilerCallInstrumentationFlags
)(
3293 MONO_PROFILER_CALL_INSTRUMENTATION_ENTER
| MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT
|
3294 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE
| MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT
);
3296 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3297 if (mini_debug_options
.mdb_optimizations
|| MONO_CFG_PROFILE_CALL_CONTEXT (cfg
)) {
3298 cfg
->disable_reuse_registers
= TRUE
;
3299 cfg
->disable_reuse_stack_slots
= TRUE
;
3301 * This decreases the change the debugger will read registers/stack slots which are
3302 * not yet initialized.
3304 cfg
->disable_initlocals_opt
= TRUE
;
3306 cfg
->extend_live_ranges
= TRUE
;
3308 /* The debugger needs all locals to be on the stack or in a global register */
3309 cfg
->disable_vreg_to_lvreg
= TRUE
;
3311 /* Don't remove unused variables when running inside the debugger since the user
3312 * may still want to view them. */
3313 cfg
->disable_deadce_vars
= TRUE
;
3315 cfg
->opt
&= ~MONO_OPT_DEADCE
;
3316 cfg
->opt
&= ~MONO_OPT_INLINE
;
3317 cfg
->opt
&= ~MONO_OPT_COPYPROP
;
3318 cfg
->opt
&= ~MONO_OPT_CONSPROP
;
3320 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3321 cfg
->disable_out_of_line_bblocks
= TRUE
;
3324 if (mono_using_xdebug
) {
3326 * Make each variable use its own register/stack slot and extend
3327 * their liveness to cover the whole method, making them displayable
3328 * in gdb even after they are dead.
3330 cfg
->disable_reuse_registers
= TRUE
;
3331 cfg
->disable_reuse_stack_slots
= TRUE
;
3332 cfg
->extend_live_ranges
= TRUE
;
3333 cfg
->compute_precise_live_ranges
= TRUE
;
3336 mini_gc_init_cfg (cfg
);
3338 if (method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
3339 WrapperInfo
*info
= mono_marshal_get_wrapper_info (method
);
3341 if ((info
&& (info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG
|| info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG
))) {
3342 cfg
->disable_gc_safe_points
= TRUE
;
3343 /* This is safe, these wrappers only store to the stack */
3344 cfg
->gen_write_barriers
= FALSE
;
3348 if (COMPILE_LLVM (cfg
)) {
3349 cfg
->opt
|= MONO_OPT_ABCREM
;
3352 if (!verbose_method_inited
) {
3353 char *env
= g_getenv ("MONO_VERBOSE_METHOD");
3355 verbose_method_names
= g_strsplit (env
, ";", -1);
3357 verbose_method_inited
= TRUE
;
3359 if (verbose_method_names
) {
3362 for (i
= 0; verbose_method_names
[i
] != NULL
; i
++){
3363 const char *name
= verbose_method_names
[i
];
3365 if ((strchr (name
, '.') > name
) || strchr (name
, ':')) {
3366 MonoMethodDesc
*desc
;
3368 desc
= mono_method_desc_new (name
, TRUE
);
3370 if (mono_method_desc_full_match (desc
, cfg
->method
)) {
3371 cfg
->verbose_level
= 4;
3373 mono_method_desc_free (desc
);
3376 if (strcmp (cfg
->method
->name
, name
) == 0)
3377 cfg
->verbose_level
= 4;
3382 cfg
->intvars
= (guint16
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint16
) * STACK_MAX
* header
->max_stack
);
3384 if (cfg
->verbose_level
> 0) {
3387 method_name
= mono_method_get_full_name (method
);
3388 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg
) ? "llvm " : "", cfg
->gsharedvt
? "gsharedvt " : "", (cfg
->gshared
&& !cfg
->gsharedvt
) ? "gshared " : "", method_name
);
3390 if (COMPILE_LLVM (cfg))
3391 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3392 else if (cfg->gsharedvt)
3393 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3394 else if (cfg->gshared)
3395 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3397 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3399 g_free (method_name
);
3402 if (cfg
->opt
& MONO_OPT_ABCREM
)
3403 cfg
->opt
|= MONO_OPT_SSA
;
3405 cfg
->rs
= mono_regstate_new ();
3406 cfg
->next_vreg
= cfg
->rs
->next_vreg
;
3408 /* FIXME: Fix SSA to handle branches inside bblocks */
3409 if (cfg
->opt
& MONO_OPT_SSA
)
3410 cfg
->enable_extended_bblocks
= FALSE
;
3413 * FIXME: This confuses liveness analysis because variables which are assigned after
3414 * a branch inside a bblock become part of the kill set, even though the assignment
3415 * might not get executed. This causes the optimize_initlocals pass to delete some
3416 * assignments which are needed.
3417 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3420 //cfg->enable_extended_bblocks = TRUE;
3422 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3423 if (mono_compile_is_broken (cfg
, cfg
->method
, TRUE
)) {
3424 if (mini_get_debug_options ()->break_on_unverified
)
3430 * create MonoInst* which represents arguments and local variables
3432 mono_compile_create_vars (cfg
);
3434 mono_cfg_dump_create_context (cfg
);
3435 mono_cfg_dump_begin_group (cfg
);
3437 MONO_TIME_TRACK (mono_jit_stats
.jit_method_to_ir
, i
= mono_method_to_ir (cfg
, method_to_compile
, NULL
, NULL
, NULL
, NULL
, 0, FALSE
));
3438 mono_cfg_dump_ir (cfg
, "method-to-ir");
3440 if (cfg
->gdump_ctx
!= NULL
) {
3441 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3442 mono_insert_nop_in_empty_bb (cfg
);
3443 mono_cfg_dump_ir (cfg
, "mono_insert_nop_in_empty_bb");
3447 if (try_generic_shared
&& cfg
->exception_type
== MONO_EXCEPTION_GENERIC_SHARING_FAILED
) {
3449 if (MONO_METHOD_COMPILE_END_ENABLED ())
3450 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3453 mono_destroy_compile (cfg
);
3454 try_generic_shared
= FALSE
;
3455 goto restart_compile
;
3457 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_GENERIC_SHARING_FAILED
);
3459 if (MONO_METHOD_COMPILE_END_ENABLED ())
3460 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3461 /* cfg contains the details of the failure, so let the caller cleanup */
3465 cfg
->stat_basic_blocks
+= cfg
->num_bblocks
;
3467 if (COMPILE_LLVM (cfg
)) {
3470 /* The IR has to be in SSA form for LLVM */
3471 cfg
->opt
|= MONO_OPT_SSA
;
3475 // Allow SSA on the result value
3476 cfg
->ret
->flags
&= ~MONO_INST_VOLATILE
;
3478 // Add an explicit return instruction referencing the return value
3479 MONO_INST_NEW (cfg
, ins
, OP_SETRET
);
3480 ins
->sreg1
= cfg
->ret
->dreg
;
3482 MONO_ADD_INS (cfg
->bb_exit
, ins
);
3485 cfg
->opt
&= ~MONO_OPT_LINEARS
;
3488 cfg
->opt
&= ~MONO_OPT_BRANCH
;
3491 /* todo: remove code when we have verified that the liveness for try/catch blocks
3495 * Currently, this can't be commented out since exception blocks are not
3496 * processed during liveness analysis.
3497 * It is also needed, because otherwise the local optimization passes would
3498 * delete assignments in cases like this:
3500 * <something which throws>
3502 * This also allows SSA to be run on methods containing exception clauses, since
3503 * SSA will ignore variables marked VOLATILE.
3505 MONO_TIME_TRACK (mono_jit_stats
.jit_liveness_handle_exception_clauses
, mono_liveness_handle_exception_clauses (cfg
));
3506 mono_cfg_dump_ir (cfg
, "liveness_handle_exception_clauses");
3508 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_out_of_line_bblock
, mono_handle_out_of_line_bblock (cfg
));
3509 mono_cfg_dump_ir (cfg
, "handle_out_of_line_bblock");
3511 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3513 if (!COMPILE_LLVM (cfg
)) {
3514 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_long_opts
, mono_decompose_long_opts (cfg
));
3515 mono_cfg_dump_ir (cfg
, "decompose_long_opts");
3518 /* Should be done before branch opts */
3519 if (cfg
->opt
& (MONO_OPT_CONSPROP
| MONO_OPT_COPYPROP
)) {
3520 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop
, mono_local_cprop (cfg
));
3521 mono_cfg_dump_ir (cfg
, "local_cprop");
3524 if (cfg
->flags
& MONO_CFG_HAS_TYPE_CHECK
) {
3525 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_typechecks
, mono_decompose_typechecks (cfg
));
3526 if (cfg
->gdump_ctx
!= NULL
) {
3527 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3528 mono_insert_nop_in_empty_bb (cfg
);
3530 mono_cfg_dump_ir (cfg
, "decompose_typechecks");
3534 * Should be done after cprop which can do strength reduction on
3535 * some of these ops, after propagating immediates.
3537 if (cfg
->has_emulated_ops
) {
3538 MONO_TIME_TRACK (mono_jit_stats
.jit_local_emulate_ops
, mono_local_emulate_ops (cfg
));
3539 mono_cfg_dump_ir (cfg
, "local_emulate_ops");
3542 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3543 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches
, mono_optimize_branches (cfg
));
3544 mono_cfg_dump_ir (cfg
, "optimize_branches");
3547 /* This must be done _before_ global reg alloc and _after_ decompose */
3548 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs
, mono_handle_global_vregs (cfg
));
3549 mono_cfg_dump_ir (cfg
, "handle_global_vregs");
3550 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3551 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce
, mono_local_deadce (cfg
));
3552 mono_cfg_dump_ir (cfg
, "local_deadce");
3554 if (cfg
->opt
& MONO_OPT_ALIAS_ANALYSIS
) {
3555 MONO_TIME_TRACK (mono_jit_stats
.jit_local_alias_analysis
, mono_local_alias_analysis (cfg
));
3556 mono_cfg_dump_ir (cfg
, "local_alias_analysis");
3558 /* Disable this for LLVM to make the IR easier to handle */
3559 if (!COMPILE_LLVM (cfg
)) {
3560 MONO_TIME_TRACK (mono_jit_stats
.jit_if_conversion
, mono_if_conversion (cfg
));
3561 mono_cfg_dump_ir (cfg
, "if_conversion");
3564 mono_threads_safepoint ();
3566 MONO_TIME_TRACK (mono_jit_stats
.jit_bb_ordering
, mono_bb_ordering (cfg
));
3567 mono_cfg_dump_ir (cfg
, "bb_ordering");
3569 if (((cfg
->num_varinfo
> 2000) || (cfg
->num_bblocks
> 1000)) && !cfg
->compile_aot
) {
3571 * we disable some optimizations if there are too many variables
3572 * because JIT time may become too expensive. The actual number needs
3573 * to be tweaked and eventually the non-linear algorithms should be fixed.
3575 cfg
->opt
&= ~ (MONO_OPT_LINEARS
| MONO_OPT_COPYPROP
| MONO_OPT_CONSPROP
);
3576 cfg
->disable_ssa
= TRUE
;
3579 if (cfg
->num_varinfo
> 10000 && !cfg
->llvm_only
)
3580 /* Disable llvm for overly complex methods */
3581 cfg
->disable_ssa
= TRUE
;
3583 if (cfg
->opt
& MONO_OPT_LOOP
) {
3584 MONO_TIME_TRACK (mono_jit_stats
.jit_compile_dominator_info
, mono_compile_dominator_info (cfg
, MONO_COMP_DOM
| MONO_COMP_IDOM
));
3585 MONO_TIME_TRACK (mono_jit_stats
.jit_compute_natural_loops
, mono_compute_natural_loops (cfg
));
3588 if (mono_threads_are_safepoints_enabled ()) {
3589 MONO_TIME_TRACK (mono_jit_stats
.jit_insert_safepoints
, insert_safepoints (cfg
));
3590 mono_cfg_dump_ir (cfg
, "insert_safepoints");
3593 /* after method_to_ir */
3595 if (MONO_METHOD_COMPILE_END_ENABLED ())
3596 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3601 if (header->num_clauses)
3602 cfg->disable_ssa = TRUE;
3605 //#define DEBUGSSA "logic_run"
3606 //#define DEBUGSSA_CLASS "Tests"
3609 if (!cfg
->disable_ssa
) {
3610 mono_local_cprop (cfg
);
3613 mono_ssa_compute (cfg
);
3617 if (cfg
->opt
& MONO_OPT_SSA
) {
3618 if (!(cfg
->comp_done
& MONO_COMP_SSA
) && !cfg
->disable_ssa
) {
3620 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_compute
, mono_ssa_compute (cfg
));
3621 mono_cfg_dump_ir (cfg
, "ssa_compute");
3624 if (cfg
->verbose_level
>= 2) {
3631 /* after SSA translation */
3633 if (MONO_METHOD_COMPILE_END_ENABLED ())
3634 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3638 if ((cfg
->opt
& MONO_OPT_CONSPROP
) || (cfg
->opt
& MONO_OPT_COPYPROP
)) {
3639 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3641 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_cprop
, mono_ssa_cprop (cfg
));
3642 mono_cfg_dump_ir (cfg
, "ssa_cprop");
3648 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3649 //mono_ssa_strength_reduction (cfg);
3651 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3652 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_deadce
, mono_ssa_deadce (cfg
));
3653 mono_cfg_dump_ir (cfg
, "ssa_deadce");
3656 if ((cfg
->flags
& (MONO_CFG_HAS_LDELEMA
|MONO_CFG_HAS_CHECK_THIS
)) && (cfg
->opt
& MONO_OPT_ABCREM
)) {
3657 MONO_TIME_TRACK (mono_jit_stats
.jit_perform_abc_removal
, mono_perform_abc_removal (cfg
));
3658 mono_cfg_dump_ir (cfg
, "perform_abc_removal");
3661 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_remove
, mono_ssa_remove (cfg
));
3662 mono_cfg_dump_ir (cfg
, "ssa_remove");
3663 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop2
, mono_local_cprop (cfg
));
3664 mono_cfg_dump_ir (cfg
, "local_cprop2");
3665 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs2
, mono_handle_global_vregs (cfg
));
3666 mono_cfg_dump_ir (cfg
, "handle_global_vregs2");
3667 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3668 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce2
, mono_local_deadce (cfg
));
3669 mono_cfg_dump_ir (cfg
, "local_deadce2");
3672 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3673 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches2
, mono_optimize_branches (cfg
));
3674 mono_cfg_dump_ir (cfg
, "optimize_branches2");
3679 if (cfg
->comp_done
& MONO_COMP_SSA
&& COMPILE_LLVM (cfg
)) {
3680 mono_ssa_loop_invariant_code_motion (cfg
);
3681 mono_cfg_dump_ir (cfg
, "loop_invariant_code_motion");
3682 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3683 if (cfg
->opt
& MONO_OPT_ABCREM
) {
3684 mono_perform_abc_removal (cfg
);
3685 mono_cfg_dump_ir (cfg
, "abc_removal");
3689 /* after SSA removal */
3691 if (MONO_METHOD_COMPILE_END_ENABLED ())
3692 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3696 if (cfg
->llvm_only
&& cfg
->gsharedvt
)
3697 mono_ssa_remove_gsharedvt (cfg
);
3699 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3700 if (COMPILE_SOFT_FLOAT (cfg
))
3701 mono_decompose_soft_float (cfg
);
3703 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_vtype_opts
, mono_decompose_vtype_opts (cfg
));
3704 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
) {
3705 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_array_access_opts
, mono_decompose_array_access_opts (cfg
));
3706 mono_cfg_dump_ir (cfg
, "decompose_array_access_opts");
3710 #ifndef MONO_ARCH_GOT_REG
3715 g_assert (cfg
->got_var_allocated
);
3718 * Allways allocate the GOT var to a register, because keeping it
3719 * in memory will increase the number of live temporaries in some
3720 * code created by inssel.brg, leading to the well known spills+
3721 * branches problem. Testcase: mcs crash in
3722 * System.MonoCustomAttrs:GetCustomAttributes.
3724 #ifdef MONO_ARCH_GOT_REG
3725 got_reg
= MONO_ARCH_GOT_REG
;
3727 regs
= mono_arch_get_global_int_regs (cfg
);
3729 got_reg
= GPOINTER_TO_INT (regs
->data
);
3732 cfg
->got_var
->opcode
= OP_REGVAR
;
3733 cfg
->got_var
->dreg
= got_reg
;
3734 cfg
->used_int_regs
|= 1LL << cfg
->got_var
->dreg
;
3738 * Have to call this again to process variables added since the first call.
3740 MONO_TIME_TRACK(mono_jit_stats
.jit_liveness_handle_exception_clauses2
, mono_liveness_handle_exception_clauses (cfg
));
3742 if (cfg
->opt
& MONO_OPT_LINEARS
) {
3743 GList
*vars
, *regs
, *l
;
3745 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3746 cfg
->comp_done
&= ~MONO_COMP_LIVENESS
;
3747 if (!(cfg
->comp_done
& MONO_COMP_LIVENESS
))
3748 MONO_TIME_TRACK (mono_jit_stats
.jit_analyze_liveness
, mono_analyze_liveness (cfg
));
3750 if ((vars
= mono_arch_get_allocatable_int_vars (cfg
))) {
3751 regs
= mono_arch_get_global_int_regs (cfg
);
3752 /* Remove the reg reserved for holding the GOT address */
3754 for (l
= regs
; l
; l
= l
->next
) {
3755 if (GPOINTER_TO_UINT (l
->data
) == cfg
->got_var
->dreg
) {
3756 regs
= g_list_delete_link (regs
, l
);
3761 MONO_TIME_TRACK (mono_jit_stats
.jit_linear_scan
, mono_linear_scan (cfg
, vars
, regs
, &cfg
->used_int_regs
));
3762 mono_cfg_dump_ir (cfg
, "linear_scan");
3766 //mono_print_code (cfg, "");
3770 /* variables are allocated after decompose, since decompose could create temps */
3771 if (!COMPILE_LLVM (cfg
)) {
3772 MONO_TIME_TRACK (mono_jit_stats
.jit_arch_allocate_vars
, mono_arch_allocate_vars (cfg
));
3773 mono_cfg_dump_ir (cfg
, "arch_allocate_vars");
3774 if (cfg
->exception_type
)
3779 mono_allocate_gsharedvt_vars (cfg
);
3781 if (!COMPILE_LLVM (cfg
)) {
3782 gboolean need_local_opts
;
3783 MONO_TIME_TRACK (mono_jit_stats
.jit_spill_global_vars
, mono_spill_global_vars (cfg
, &need_local_opts
));
3784 mono_cfg_dump_ir (cfg
, "spill_global_vars");
3786 if (need_local_opts
|| cfg
->compile_aot
) {
3787 /* To optimize code created by spill_global_vars */
3788 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop3
, mono_local_cprop (cfg
));
3789 if (cfg
->opt
& MONO_OPT_DEADCE
)
3790 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce3
, mono_local_deadce (cfg
));
3791 mono_cfg_dump_ir (cfg
, "needs_local_opts");
3795 mono_insert_branches_between_bblocks (cfg
);
3797 if (COMPILE_LLVM (cfg
)) {
3801 /* The IR has to be in SSA form for LLVM */
3802 if (!(cfg
->comp_done
& MONO_COMP_SSA
)) {
3803 cfg
->exception_message
= g_strdup ("SSA disabled.");
3804 cfg
->disable_llvm
= TRUE
;
3807 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
)
3808 mono_decompose_array_access_opts (cfg
);
3810 if (!cfg
->disable_llvm
)
3811 mono_llvm_emit_method (cfg
);
3812 if (cfg
->disable_llvm
) {
3813 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3814 //nm = mono_method_full_name (cfg->method, TRUE);
3815 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3818 if (cfg
->llvm_only
) {
3819 cfg
->disable_aot
= TRUE
;
3822 mono_destroy_compile (cfg
);
3824 goto restart_compile
;
3827 if (cfg
->verbose_level
> 0 && !cfg
->compile_aot
) {
3828 nm
= mono_method_get_full_name (cfg
->method
);
3829 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3831 cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
, cfg
->code_len
, cfg
->domain
->friendly_name
);
3836 MONO_TIME_TRACK (mono_jit_stats
.jit_codegen
, mono_codegen (cfg
));
3837 mono_cfg_dump_ir (cfg
, "codegen");
3838 if (cfg
->exception_type
)
3842 if (COMPILE_LLVM (cfg
))
3843 mono_atomic_inc_i32 (&mono_jit_stats
.methods_with_llvm
);
3845 mono_atomic_inc_i32 (&mono_jit_stats
.methods_without_llvm
);
3847 MONO_TIME_TRACK (mono_jit_stats
.jit_create_jit_info
, cfg
->jit_info
= create_jit_info (cfg
, method_to_compile
));
3849 if (cfg
->extend_live_ranges
) {
3850 /* Extend live ranges to cover the whole method */
3851 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
3852 MONO_VARINFO (cfg
, i
)->live_range_end
= cfg
->code_len
;
3855 MONO_TIME_TRACK (mono_jit_stats
.jit_gc_create_gc_map
, mini_gc_create_gc_map (cfg
));
3856 MONO_TIME_TRACK (mono_jit_stats
.jit_save_seq_point_info
, mono_save_seq_point_info (cfg
, cfg
->jit_info
));
3858 if (!cfg
->compile_aot
) {
3859 mono_save_xdebug_info (cfg
);
3860 mono_lldb_save_method_info (cfg
);
3863 if (cfg
->verbose_level
>= 2) {
3864 char *id
= mono_method_full_name (cfg
->method
, FALSE
);
3865 mono_disassemble_code (cfg
, cfg
->native_code
, cfg
->code_len
, id
+ 3);
3869 if (!cfg
->compile_aot
&& !(flags
& JIT_FLAG_DISCARD_RESULTS
)) {
3870 mono_domain_lock (cfg
->domain
);
3871 mono_jit_info_table_add (cfg
->domain
, cfg
->jit_info
);
3873 if (cfg
->method
->dynamic
)
3874 mono_dynamic_code_hash_lookup (cfg
->domain
, cfg
->method
)->ji
= cfg
->jit_info
;
3875 mono_domain_unlock (cfg
->domain
);
3880 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
3883 /* collect statistics */
3884 #ifndef DISABLE_PERFCOUNTERS
3885 mono_atomic_inc_i32 (&mono_perfcounters
->jit_methods
);
3886 mono_atomic_fetch_add_i32 (&mono_perfcounters
->jit_bytes
, header
->code_size
);
3888 gint32 code_size_ratio
= cfg
->code_len
;
3889 mono_atomic_fetch_add_i32 (&mono_jit_stats
.allocated_code_size
, code_size_ratio
);
3890 mono_atomic_fetch_add_i32 (&mono_jit_stats
.native_code_size
, code_size_ratio
);
3891 /* FIXME: use an explicit function to read booleans */
3892 if ((gboolean
)mono_atomic_load_i32 ((gint32
*)&mono_jit_stats
.enabled
)) {
3893 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.biggest_method_size
)) {
3894 mono_atomic_store_i32 (&mono_jit_stats
.biggest_method_size
, code_size_ratio
);
3895 char *biggest_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3896 biggest_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.biggest_method
, biggest_method
);
3897 g_free (biggest_method
);
3899 code_size_ratio
= (code_size_ratio
* 100) / header
->code_size
;
3900 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.max_code_size_ratio
)) {
3901 mono_atomic_store_i32 (&mono_jit_stats
.max_code_size_ratio
, code_size_ratio
);
3902 char *max_ratio_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3903 max_ratio_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.max_ratio_method
, max_ratio_method
);
3904 g_free (max_ratio_method
);
3908 if (MONO_METHOD_COMPILE_END_ENABLED ())
3909 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3911 mono_cfg_dump_close_group (cfg
);
3917 mini_class_has_reference_variant_generic_argument (MonoCompile
*cfg
, MonoClass
*klass
, int context_used
)
3920 MonoGenericContainer
*container
;
3921 MonoGenericInst
*ginst
;
3923 if (mono_class_is_ginst (klass
)) {
3924 container
= mono_class_get_generic_container (mono_class_get_generic_class (klass
)->container_class
);
3925 ginst
= mono_class_get_generic_class (klass
)->context
.class_inst
;
3926 } else if (mono_class_is_gtd (klass
) && context_used
) {
3927 container
= mono_class_get_generic_container (klass
);
3928 ginst
= container
->context
.class_inst
;
3933 for (i
= 0; i
< container
->type_argc
; ++i
) {
3935 if (!(mono_generic_container_get_param_info (container
, i
)->flags
& (MONO_GEN_PARAM_VARIANT
|MONO_GEN_PARAM_COVARIANT
)))
3937 type
= ginst
->type_argv
[i
];
3938 if (mini_type_is_reference (type
))
3945 mono_cfg_add_try_hole (MonoCompile
*cfg
, MonoExceptionClause
*clause
, guint8
*start
, MonoBasicBlock
*bb
)
3947 TryBlockHole
*hole
= (TryBlockHole
*)mono_mempool_alloc (cfg
->mempool
, sizeof (TryBlockHole
));
3948 hole
->clause
= clause
;
3949 hole
->start_offset
= start
- cfg
->native_code
;
3950 hole
->basic_block
= bb
;
3952 cfg
->try_block_holes
= g_slist_append_mempool (cfg
->mempool
, cfg
->try_block_holes
, hole
);
3956 mono_cfg_set_exception (MonoCompile
*cfg
, MonoExceptionType type
)
3958 cfg
->exception_type
= type
;
3961 /* Assumes ownership of the MSG argument */
3963 mono_cfg_set_exception_invalid_program (MonoCompile
*cfg
, char *msg
)
3965 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3966 mono_error_set_generic_error (&cfg
->error
, "System", "InvalidProgramException", "%s", msg
);
3969 #endif /* DISABLE_JIT */
3971 gint64
mono_time_track_start ()
3973 return mono_100ns_ticks ();
3977 * mono_time_track_end:
3979 * Uses UnlockedAddDouble () to update \param time.
3981 void mono_time_track_end (gint64
*time
, gint64 start
)
3983 UnlockedAdd64 (time
, mono_100ns_ticks () - start
);
3987 * mono_update_jit_stats:
3989 * Only call this function in locked environments to avoid data races.
3991 MONO_NO_SANITIZE_THREAD
3993 mono_update_jit_stats (MonoCompile
*cfg
)
3995 mono_jit_stats
.allocate_var
+= cfg
->stat_allocate_var
;
3996 mono_jit_stats
.locals_stack_size
+= cfg
->stat_locals_stack_size
;
3997 mono_jit_stats
.basic_blocks
+= cfg
->stat_basic_blocks
;
3998 mono_jit_stats
.max_basic_blocks
= MAX (cfg
->stat_basic_blocks
, mono_jit_stats
.max_basic_blocks
);
3999 mono_jit_stats
.cil_code_size
+= cfg
->stat_cil_code_size
;
4000 mono_jit_stats
.regvars
+= cfg
->stat_n_regvars
;
4001 mono_jit_stats
.inlineable_methods
+= cfg
->stat_inlineable_methods
;
4002 mono_jit_stats
.inlined_methods
+= cfg
->stat_inlined_methods
;
4003 mono_jit_stats
.code_reallocs
+= cfg
->stat_code_reallocs
;
4007 * mono_jit_compile_method_inner:
4009 * Main entry point for the JIT.
4012 mono_jit_compile_method_inner (MonoMethod
*method
, MonoDomain
*target_domain
, int opt
, MonoError
*error
)
4015 gpointer code
= NULL
;
4016 MonoJitInfo
*jinfo
, *info
;
4018 MonoException
*ex
= NULL
;
4020 MonoMethod
*prof_method
, *shared
;
4024 start
= mono_time_track_start ();
4025 cfg
= mini_method_compile (method
, opt
, target_domain
, JIT_FLAG_RUN_CCTORS
, 0, -1);
4026 gint64 jit_time
= 0.0;
4027 mono_time_track_end (&jit_time
, start
);
4028 UnlockedAdd64 (&mono_jit_stats
.jit_time
, jit_time
);
4030 prof_method
= cfg
->method
;
4032 switch (cfg
->exception_type
) {
4033 case MONO_EXCEPTION_NONE
:
4035 case MONO_EXCEPTION_TYPE_LOAD
:
4036 case MONO_EXCEPTION_MISSING_FIELD
:
4037 case MONO_EXCEPTION_MISSING_METHOD
:
4038 case MONO_EXCEPTION_FILE_NOT_FOUND
:
4039 case MONO_EXCEPTION_BAD_IMAGE
:
4040 case MONO_EXCEPTION_INVALID_PROGRAM
: {
4041 /* Throw a type load exception if needed */
4042 if (cfg
->exception_ptr
) {
4043 ex
= mono_class_get_exception_for_failure ((MonoClass
*)cfg
->exception_ptr
);
4045 if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_FIELD
)
4046 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingFieldException", cfg
->exception_message
);
4047 else if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_METHOD
)
4048 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingMethodException", cfg
->exception_message
);
4049 else if (cfg
->exception_type
== MONO_EXCEPTION_TYPE_LOAD
)
4050 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "TypeLoadException", cfg
->exception_message
);
4051 else if (cfg
->exception_type
== MONO_EXCEPTION_FILE_NOT_FOUND
)
4052 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System.IO", "FileNotFoundException", cfg
->exception_message
);
4053 else if (cfg
->exception_type
== MONO_EXCEPTION_BAD_IMAGE
)
4054 ex
= mono_get_exception_bad_image_format (cfg
->exception_message
);
4055 else if (cfg
->exception_type
== MONO_EXCEPTION_INVALID_PROGRAM
)
4056 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "InvalidProgramException", cfg
->exception_message
);
4058 g_assert_not_reached ();
4062 case MONO_EXCEPTION_MONO_ERROR
:
4063 // FIXME: MonoError has no copy ctor
4064 g_assert (!mono_error_ok (&cfg
->error
));
4065 ex
= mono_error_convert_to_exception (&cfg
->error
);
4068 g_assert_not_reached ();
4072 MONO_PROFILER_RAISE (jit_failed
, (method
));
4074 mono_destroy_compile (cfg
);
4075 mono_error_set_exception_instance (error
, ex
);
4080 if (mono_method_is_generic_sharable (method
, FALSE
)) {
4081 shared
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
4082 if (!is_ok (error
)) {
4083 MONO_PROFILER_RAISE (jit_failed
, (method
));
4084 mono_destroy_compile (cfg
);
4091 mono_domain_lock (target_domain
);
4093 /* Check if some other thread already did the job. In this case, we can
4094 discard the code this thread generated. */
4096 info
= mini_lookup_method (target_domain
, method
, shared
);
4098 /* We can't use a domain specific method in another domain */
4099 if ((target_domain
== mono_domain_get ()) || info
->domain_neutral
) {
4100 code
= info
->code_start
;
4102 discarded_jit_time
+= jit_time
;
4106 /* The lookup + insert is atomic since this is done inside the domain lock */
4107 mono_domain_jit_code_hash_lock (target_domain
);
4108 mono_internal_hash_table_insert (&target_domain
->jit_code_hash
, cfg
->jit_info
->d
.method
, cfg
->jit_info
);
4109 mono_domain_jit_code_hash_unlock (target_domain
);
4111 code
= cfg
->native_code
;
4113 if (cfg
->gshared
&& mono_method_is_generic_sharable (method
, FALSE
))
4114 mono_atomic_inc_i32 (&mono_stats
.generics_shared_methods
);
4116 mono_atomic_inc_i32 (&mono_stats
.gsharedvt_methods
);
4119 jinfo
= cfg
->jit_info
;
4122 * Update global stats while holding a lock, instead of doing many
4123 * mono_atomic_inc_i32 operations during JITting.
4125 mono_update_jit_stats (cfg
);
4127 mono_destroy_compile (cfg
);
4129 mini_patch_llvm_jit_callees (target_domain
, method
, code
);
4131 mono_emit_jit_map (jinfo
);
4133 mono_domain_unlock (target_domain
);
4135 if (!mono_error_ok (error
))
4138 vtable
= mono_class_vtable_checked (target_domain
, method
->klass
, error
);
4139 return_val_if_nok (error
, NULL
);
4141 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
4142 if (mono_marshal_method_from_wrapper (method
)) {
4143 /* Native func wrappers have no method */
4144 /* The profiler doesn't know about wrappers, so pass the original icall method */
4145 MONO_PROFILER_RAISE (jit_done
, (mono_marshal_method_from_wrapper (method
), jinfo
));
4148 MONO_PROFILER_RAISE (jit_done
, (method
, jinfo
));
4149 if (prof_method
!= method
)
4150 MONO_PROFILER_RAISE (jit_done
, (prof_method
, jinfo
));
4152 if (!(method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE
||
4153 method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
||
4154 method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
)) {
4155 if (!mono_runtime_class_init_full (vtable
, error
))
4162 * mini_get_underlying_type:
4164 * Return the type the JIT will use during compilation.
4165 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4166 * For gsharedvt types, it will return the original VAR/MVAR.
4169 mini_get_underlying_type (MonoType
*type
)
4171 return mini_type_get_underlying_type (type
);
4175 mini_jit_init (void)
4177 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &discarded_code
);
4178 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT
| MONO_COUNTER_LONG
| MONO_COUNTER_TIME
, &discarded_jit_time
);
4179 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &jinfo_try_holes_size
);
4181 mono_os_mutex_init_recursive (&jit_mutex
);
4183 current_backend
= g_new0 (MonoBackend
, 1);
4184 init_backend (current_backend
);
4189 mini_jit_cleanup (void)
4192 g_free (emul_opcode_map
);
4193 g_free (emul_opcode_opcodes
);
4199 mono_llvm_emit_aot_file_info (MonoAotFileInfo
*info
, gboolean has_jitted_code
)
4201 g_assert_not_reached ();
4204 void mono_llvm_emit_aot_data (const char *symbol
, guint8
*data
, int data_len
)
4206 g_assert_not_reached ();
4211 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/* Fallback when neither ENABLE_LLVM nor ENABLE_LLVM_RUNTIME is set. */
void
mono_llvm_cpp_throw_exception (void)
{
	g_assert_not_reached ();
}
4220 mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb
, gpointer arg
, gboolean
*out_thrown
)
4222 g_assert_not_reached ();
4230 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
4232 g_assert_not_reached ();
4237 mono_destroy_compile (MonoCompile
*cfg
)
4239 g_assert_not_reached ();
4243 mono_add_patch_info (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
4245 g_assert_not_reached ();
4248 #else // DISABLE_JIT
4251 mini_realloc_code_slow (MonoCompile
*cfg
, int size
)
4253 const int EXTRA_CODE_SPACE
= 16;
4255 if (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
)) {
4256 while (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
))
4257 cfg
->code_size
= cfg
->code_size
* 2 + EXTRA_CODE_SPACE
;
4258 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4259 cfg
->stat_code_reallocs
++;
4261 return cfg
->native_code
+ cfg
->code_len
;
4264 #endif /* DISABLE_JIT */
4267 mini_class_is_system_array (MonoClass
*klass
)
4269 return m_class_get_parent (klass
) == mono_defaults
.array_class
;
4273 * mono_target_pagesize:
4275 * query pagesize used to determine if an implicit NRE can be used
4278 mono_target_pagesize (void)
4280 /* We could query the system's pagesize via mono_pagesize (), however there
4281 * are pitfalls: sysconf (3) is called on some posix like systems, and per
4282 * POSIX.1-2008 this function doesn't have to be async-safe. Since this
4283 * function can be called from a signal handler, we simplify things by
4284 * using 4k on all targets. Implicit null-checks with an offset larger than
4285 * 4k are _very_ uncommon, so we don't mind emitting an explicit null-check