3 * The new Mono code generator.
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
23 #ifdef HAVE_SYS_TIME_H
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
64 #include <mono/utils/mono-time.h>
67 #include "seq-points.h"
75 #include "jit-icalls.h"
78 #include "debugger-agent.h"
79 #include "llvm-runtime.h"
80 #include "mini-llvm.h"
82 #include "aot-runtime.h"
83 #include "mini-runtime.h"
85 MonoCallSpec
*mono_jit_trace_calls
;
86 MonoMethodDesc
*mono_inject_async_exc_method
;
87 int mono_inject_async_exc_pos
;
88 MonoMethodDesc
*mono_break_at_bb_method
;
89 int mono_break_at_bb_bb_num
;
90 gboolean mono_do_x86_stack_align
= TRUE
;
91 gboolean mono_using_xdebug
;
94 static guint32 discarded_code
;
95 static gint64 discarded_jit_time
;
96 static guint32 jinfo_try_holes_size
;
98 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
99 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
100 static mono_mutex_t jit_mutex
;
103 static MonoBackend
*current_backend
;
106 mono_realloc_native_code (MonoCompile
*cfg
)
108 return g_realloc (cfg
->native_code
, cfg
->code_size
);
112 MonoExceptionClause
*clause
;
113 MonoBasicBlock
*basic_block
;
118 * mono_emit_unwind_op:
120 * Add an unwind op with the given parameters for the list of unwind ops stored in
124 mono_emit_unwind_op (MonoCompile
*cfg
, int when
, int tag
, int reg
, int val
)
126 MonoUnwindOp
*op
= (MonoUnwindOp
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoUnwindOp
));
133 cfg
->unwind_ops
= g_slist_append_mempool (cfg
->mempool
, cfg
->unwind_ops
, op
);
134 if (cfg
->verbose_level
> 1) {
137 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when
, mono_arch_regname (reg
), val
);
139 case DW_CFA_def_cfa_register
:
140 printf ("CFA: [%x] def_cfa_reg: %s\n", when
, mono_arch_regname (reg
));
142 case DW_CFA_def_cfa_offset
:
143 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when
, val
);
146 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when
, mono_arch_regname (reg
), -val
);
153 * mono_unlink_bblock:
155 * Unlink two basic blocks.
158 mono_unlink_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
164 for (i
= 0; i
< from
->out_count
; ++i
) {
165 if (to
== from
->out_bb
[i
]) {
172 for (i
= 0; i
< from
->out_count
; ++i
) {
173 if (from
->out_bb
[i
] != to
)
174 from
->out_bb
[pos
++] = from
->out_bb
[i
];
176 g_assert (pos
== from
->out_count
- 1);
181 for (i
= 0; i
< to
->in_count
; ++i
) {
182 if (from
== to
->in_bb
[i
]) {
189 for (i
= 0; i
< to
->in_count
; ++i
) {
190 if (to
->in_bb
[i
] != from
)
191 to
->in_bb
[pos
++] = to
->in_bb
[i
];
193 g_assert (pos
== to
->in_count
- 1);
199 * mono_bblocks_linked:
201 * Return whenever BB1 and BB2 are linked in the CFG.
204 mono_bblocks_linked (MonoBasicBlock
*bb1
, MonoBasicBlock
*bb2
)
208 for (i
= 0; i
< bb1
->out_count
; ++i
) {
209 if (bb1
->out_bb
[i
] == bb2
)
217 mono_find_block_region_notry (MonoCompile
*cfg
, int offset
)
219 MonoMethodHeader
*header
= cfg
->header
;
220 MonoExceptionClause
*clause
;
223 for (i
= 0; i
< header
->num_clauses
; ++i
) {
224 clause
= &header
->clauses
[i
];
225 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
226 (offset
< (clause
->handler_offset
)))
227 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
229 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
230 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
231 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
232 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
233 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
235 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
243 * mono_get_block_region_notry:
245 * Return the region corresponding to REGION, ignoring try clauses nested inside
249 mono_get_block_region_notry (MonoCompile
*cfg
, int region
)
251 if ((region
& (0xf << 4)) == MONO_REGION_TRY
) {
252 MonoMethodHeader
*header
= cfg
->header
;
255 * This can happen if a try clause is nested inside a finally clause.
257 int clause_index
= (region
>> 8) - 1;
258 g_assert (clause_index
>= 0 && clause_index
< header
->num_clauses
);
260 region
= mono_find_block_region_notry (cfg
, header
->clauses
[clause_index
].try_offset
);
267 mono_find_spvar_for_region (MonoCompile
*cfg
, int region
)
269 region
= mono_get_block_region_notry (cfg
, region
);
271 return (MonoInst
*)g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
275 df_visit (MonoBasicBlock
*start
, int *dfn
, MonoBasicBlock
**array
)
279 array
[*dfn
] = start
;
280 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
281 for (i
= 0; i
< start
->out_count
; ++i
) {
282 if (start
->out_bb
[i
]->dfn
)
285 start
->out_bb
[i
]->dfn
= *dfn
;
286 start
->out_bb
[i
]->df_parent
= start
;
287 array
[*dfn
] = start
->out_bb
[i
];
288 df_visit (start
->out_bb
[i
], dfn
, array
);
293 mono_reverse_branch_op (guint32 opcode
)
295 static const int reverse_map
[] = {
296 CEE_BNE_UN
, CEE_BLT
, CEE_BLE
, CEE_BGT
, CEE_BGE
,
297 CEE_BEQ
, CEE_BLT_UN
, CEE_BLE_UN
, CEE_BGT_UN
, CEE_BGE_UN
299 static const int reverse_fmap
[] = {
300 OP_FBNE_UN
, OP_FBLT
, OP_FBLE
, OP_FBGT
, OP_FBGE
,
301 OP_FBEQ
, OP_FBLT_UN
, OP_FBLE_UN
, OP_FBGT_UN
, OP_FBGE_UN
303 static const int reverse_lmap
[] = {
304 OP_LBNE_UN
, OP_LBLT
, OP_LBLE
, OP_LBGT
, OP_LBGE
,
305 OP_LBEQ
, OP_LBLT_UN
, OP_LBLE_UN
, OP_LBGT_UN
, OP_LBGE_UN
307 static const int reverse_imap
[] = {
308 OP_IBNE_UN
, OP_IBLT
, OP_IBLE
, OP_IBGT
, OP_IBGE
,
309 OP_IBEQ
, OP_IBLT_UN
, OP_IBLE_UN
, OP_IBGT_UN
, OP_IBGE_UN
312 if (opcode
>= CEE_BEQ
&& opcode
<= CEE_BLT_UN
) {
313 opcode
= reverse_map
[opcode
- CEE_BEQ
];
314 } else if (opcode
>= OP_FBEQ
&& opcode
<= OP_FBLT_UN
) {
315 opcode
= reverse_fmap
[opcode
- OP_FBEQ
];
316 } else if (opcode
>= OP_LBEQ
&& opcode
<= OP_LBLT_UN
) {
317 opcode
= reverse_lmap
[opcode
- OP_LBEQ
];
318 } else if (opcode
>= OP_IBEQ
&& opcode
<= OP_IBLT_UN
) {
319 opcode
= reverse_imap
[opcode
- OP_IBEQ
];
321 g_assert_not_reached ();
327 mono_type_to_store_membase (MonoCompile
*cfg
, MonoType
*type
)
329 type
= mini_get_underlying_type (type
);
332 switch (type
->type
) {
335 return OP_STOREI1_MEMBASE_REG
;
338 return OP_STOREI2_MEMBASE_REG
;
341 return OP_STOREI4_MEMBASE_REG
;
345 case MONO_TYPE_FNPTR
:
346 return OP_STORE_MEMBASE_REG
;
347 case MONO_TYPE_CLASS
:
348 case MONO_TYPE_STRING
:
349 case MONO_TYPE_OBJECT
:
350 case MONO_TYPE_SZARRAY
:
351 case MONO_TYPE_ARRAY
:
352 return OP_STORE_MEMBASE_REG
;
355 return OP_STOREI8_MEMBASE_REG
;
357 return OP_STORER4_MEMBASE_REG
;
359 return OP_STORER8_MEMBASE_REG
;
360 case MONO_TYPE_VALUETYPE
:
361 if (m_class_is_enumtype (type
->data
.klass
)) {
362 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
365 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
366 return OP_STOREX_MEMBASE
;
367 return OP_STOREV_MEMBASE
;
368 case MONO_TYPE_TYPEDBYREF
:
369 return OP_STOREV_MEMBASE
;
370 case MONO_TYPE_GENERICINST
:
371 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
372 return OP_STOREX_MEMBASE
;
373 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
377 g_assert (mini_type_var_is_vt (type
));
378 return OP_STOREV_MEMBASE
;
380 g_error ("unknown type 0x%02x in type_to_store_membase", type
->type
);
386 mono_type_to_load_membase (MonoCompile
*cfg
, MonoType
*type
)
388 type
= mini_get_underlying_type (type
);
390 switch (type
->type
) {
392 return OP_LOADI1_MEMBASE
;
394 return OP_LOADU1_MEMBASE
;
396 return OP_LOADI2_MEMBASE
;
398 return OP_LOADU2_MEMBASE
;
400 return OP_LOADI4_MEMBASE
;
402 return OP_LOADU4_MEMBASE
;
406 case MONO_TYPE_FNPTR
:
407 return OP_LOAD_MEMBASE
;
408 case MONO_TYPE_CLASS
:
409 case MONO_TYPE_STRING
:
410 case MONO_TYPE_OBJECT
:
411 case MONO_TYPE_SZARRAY
:
412 case MONO_TYPE_ARRAY
:
413 return OP_LOAD_MEMBASE
;
416 return OP_LOADI8_MEMBASE
;
418 return OP_LOADR4_MEMBASE
;
420 return OP_LOADR8_MEMBASE
;
421 case MONO_TYPE_VALUETYPE
:
422 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
423 return OP_LOADX_MEMBASE
;
424 case MONO_TYPE_TYPEDBYREF
:
425 return OP_LOADV_MEMBASE
;
426 case MONO_TYPE_GENERICINST
:
427 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
428 return OP_LOADX_MEMBASE
;
429 if (mono_type_generic_inst_is_valuetype (type
))
430 return OP_LOADV_MEMBASE
;
432 return OP_LOAD_MEMBASE
;
436 g_assert (cfg
->gshared
);
437 g_assert (mini_type_var_is_vt (type
));
438 return OP_LOADV_MEMBASE
;
440 g_error ("unknown type 0x%02x in type_to_load_membase", type
->type
);
446 mini_type_to_stind (MonoCompile
* cfg
, MonoType
*type
)
448 type
= mini_get_underlying_type (type
);
449 if (cfg
->gshared
&& !type
->byref
&& (type
->type
== MONO_TYPE_VAR
|| type
->type
== MONO_TYPE_MVAR
)) {
450 g_assert (mini_type_var_is_vt (type
));
453 return mono_type_to_stind (type
);
457 mono_op_imm_to_op (int opcode
)
461 #if SIZEOF_REGISTER == 4
479 #if SIZEOF_REGISTER == 4
485 #if SIZEOF_REGISTER == 4
491 #if SIZEOF_REGISTER == 4
537 #if SIZEOF_REGISTER == 4
543 #if SIZEOF_REGISTER == 4
562 case OP_ICOMPARE_IMM
:
564 case OP_LOCALLOC_IMM
:
572 * mono_decompose_op_imm:
574 * Replace the OP_.._IMM INS with its non IMM variant.
577 mono_decompose_op_imm (MonoCompile
*cfg
, MonoBasicBlock
*bb
, MonoInst
*ins
)
579 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
582 const char *spec
= INS_INFO (ins
->opcode
);
584 if (spec
[MONO_INST_SRC2
] == 'l') {
585 dreg
= mono_alloc_lreg (cfg
);
587 /* Load the 64bit constant using decomposed ops */
588 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
589 temp
->inst_c0
= ins_get_l_low (ins
);
590 temp
->dreg
= MONO_LVREG_LS (dreg
);
591 mono_bblock_insert_before_ins (bb
, ins
, temp
);
593 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
594 temp
->inst_c0
= ins_get_l_high (ins
);
595 temp
->dreg
= MONO_LVREG_MS (dreg
);
597 dreg
= mono_alloc_ireg (cfg
);
599 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
600 temp
->inst_c0
= ins
->inst_imm
;
604 mono_bblock_insert_before_ins (bb
, ins
, temp
);
607 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
608 ins
->opcode
= opcode2
;
610 if (ins
->opcode
== OP_LOCALLOC
)
615 bb
->max_vreg
= MAX (bb
->max_vreg
, cfg
->next_vreg
);
619 set_vreg_to_inst (MonoCompile
*cfg
, int vreg
, MonoInst
*inst
)
621 if (vreg
>= cfg
->vreg_to_inst_len
) {
622 MonoInst
**tmp
= cfg
->vreg_to_inst
;
623 int size
= cfg
->vreg_to_inst_len
;
625 while (vreg
>= cfg
->vreg_to_inst_len
)
626 cfg
->vreg_to_inst_len
= cfg
->vreg_to_inst_len
? cfg
->vreg_to_inst_len
* 2 : 32;
627 cfg
->vreg_to_inst
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * cfg
->vreg_to_inst_len
);
629 memcpy (cfg
->vreg_to_inst
, tmp
, size
* sizeof (MonoInst
*));
631 cfg
->vreg_to_inst
[vreg
] = inst
;
634 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
635 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
638 mono_compile_create_var_for_vreg (MonoCompile
*cfg
, MonoType
*type
, int opcode
, int vreg
)
641 int num
= cfg
->num_varinfo
;
644 type
= mini_get_underlying_type (type
);
646 if ((num
+ 1) >= cfg
->varinfo_count
) {
647 int orig_count
= cfg
->varinfo_count
;
648 cfg
->varinfo_count
= cfg
->varinfo_count
? (cfg
->varinfo_count
* 2) : 32;
649 cfg
->varinfo
= (MonoInst
**)g_realloc (cfg
->varinfo
, sizeof (MonoInst
*) * cfg
->varinfo_count
);
650 cfg
->vars
= (MonoMethodVar
*)g_realloc (cfg
->vars
, sizeof (MonoMethodVar
) * cfg
->varinfo_count
);
651 memset (&cfg
->vars
[orig_count
], 0, (cfg
->varinfo_count
- orig_count
) * sizeof (MonoMethodVar
));
654 cfg
->stat_allocate_var
++;
656 MONO_INST_NEW (cfg
, inst
, opcode
);
658 inst
->inst_vtype
= type
;
659 inst
->klass
= mono_class_from_mono_type_internal (type
);
660 mini_type_to_eval_stack_type (cfg
, type
, inst
);
661 /* if set to 1 the variable is native */
662 inst
->backend
.is_pinvoke
= 0;
665 if (mono_class_has_failure (inst
->klass
))
666 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
668 if (cfg
->compute_gc_maps
) {
670 mono_mark_vreg_as_mp (cfg
, vreg
);
672 if ((MONO_TYPE_ISSTRUCT (type
) && m_class_has_references (inst
->klass
)) || mini_type_is_reference (type
)) {
673 inst
->flags
|= MONO_INST_GC_TRACK
;
674 mono_mark_vreg_as_ref (cfg
, vreg
);
679 cfg
->varinfo
[num
] = inst
;
681 cfg
->vars
[num
].idx
= num
;
682 cfg
->vars
[num
].vreg
= vreg
;
683 cfg
->vars
[num
].range
.first_use
.pos
.bid
= 0xffff;
684 cfg
->vars
[num
].reg
= -1;
687 set_vreg_to_inst (cfg
, vreg
, inst
);
689 #if SIZEOF_REGISTER == 4
690 if (mono_arch_is_soft_float ()) {
691 regpair
= mono_type_is_long (type
) || mono_type_is_float (type
);
693 regpair
= mono_type_is_long (type
);
703 * These two cannot be allocated using create_var_for_vreg since that would
704 * put it into the cfg->varinfo array, confusing many parts of the JIT.
708 * Set flags to VOLATILE so SSA skips it.
711 if (cfg
->verbose_level
>= 4) {
712 printf (" Create LVAR R%d (R%d, R%d)\n", inst
->dreg
, MONO_LVREG_LS (inst
->dreg
), MONO_LVREG_MS (inst
->dreg
));
715 if (mono_arch_is_soft_float () && cfg
->opt
& MONO_OPT_SSA
) {
716 if (mono_type_is_float (type
))
717 inst
->flags
= MONO_INST_VOLATILE
;
720 /* Allocate a dummy MonoInst for the first vreg */
721 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
722 tree
->dreg
= MONO_LVREG_LS (inst
->dreg
);
723 if (cfg
->opt
& MONO_OPT_SSA
)
724 tree
->flags
= MONO_INST_VOLATILE
;
726 tree
->type
= STACK_I4
;
727 tree
->inst_vtype
= mono_get_int32_type ();
728 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
730 set_vreg_to_inst (cfg
, MONO_LVREG_LS (inst
->dreg
), tree
);
732 /* Allocate a dummy MonoInst for the second vreg */
733 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
734 tree
->dreg
= MONO_LVREG_MS (inst
->dreg
);
735 if (cfg
->opt
& MONO_OPT_SSA
)
736 tree
->flags
= MONO_INST_VOLATILE
;
738 tree
->type
= STACK_I4
;
739 tree
->inst_vtype
= mono_get_int32_type ();
740 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
742 set_vreg_to_inst (cfg
, MONO_LVREG_MS (inst
->dreg
), tree
);
746 if (cfg
->verbose_level
> 2)
747 g_print ("created temp %d (R%d) of type %s\n", num
, vreg
, mono_type_get_name (type
));
753 mono_compile_create_var (MonoCompile
*cfg
, MonoType
*type
, int opcode
)
757 #ifdef ENABLE_NETCORE
758 if (type
->type
== MONO_TYPE_VALUETYPE
&& !type
->byref
) {
759 MonoClass
*klass
= mono_class_from_mono_type_internal (type
);
760 if (m_class_is_enumtype (klass
) && m_class_get_image (klass
) == mono_get_corlib () && !strcmp (m_class_get_name (klass
), "StackCrawlMark")) {
761 if (!(cfg
->method
->flags
& METHOD_ATTRIBUTE_REQSECOBJ
))
762 g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg
->method
));
767 type
= mini_get_underlying_type (type
);
769 if (mono_type_is_long (type
))
770 dreg
= mono_alloc_dreg (cfg
, STACK_I8
);
771 else if (mono_arch_is_soft_float () && mono_type_is_float (type
))
772 dreg
= mono_alloc_dreg (cfg
, STACK_R8
);
774 /* All the others are unified */
775 dreg
= mono_alloc_preg (cfg
);
777 return mono_compile_create_var_for_vreg (cfg
, type
, opcode
, dreg
);
781 mini_get_int_to_float_spill_area (MonoCompile
*cfg
)
784 if (!cfg
->iconv_raw_var
) {
785 cfg
->iconv_raw_var
= mono_compile_create_var (cfg
, mono_get_int32_type (), OP_LOCAL
);
786 cfg
->iconv_raw_var
->flags
|= MONO_INST_VOLATILE
; /*FIXME, use the don't regalloc flag*/
788 return cfg
->iconv_raw_var
;
795 mono_mark_vreg_as_ref (MonoCompile
*cfg
, int vreg
)
797 if (vreg
>= cfg
->vreg_is_ref_len
) {
798 gboolean
*tmp
= cfg
->vreg_is_ref
;
799 int size
= cfg
->vreg_is_ref_len
;
801 while (vreg
>= cfg
->vreg_is_ref_len
)
802 cfg
->vreg_is_ref_len
= cfg
->vreg_is_ref_len
? cfg
->vreg_is_ref_len
* 2 : 32;
803 cfg
->vreg_is_ref
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_ref_len
);
805 memcpy (cfg
->vreg_is_ref
, tmp
, size
* sizeof (gboolean
));
807 cfg
->vreg_is_ref
[vreg
] = TRUE
;
811 mono_mark_vreg_as_mp (MonoCompile
*cfg
, int vreg
)
813 if (vreg
>= cfg
->vreg_is_mp_len
) {
814 gboolean
*tmp
= cfg
->vreg_is_mp
;
815 int size
= cfg
->vreg_is_mp_len
;
817 while (vreg
>= cfg
->vreg_is_mp_len
)
818 cfg
->vreg_is_mp_len
= cfg
->vreg_is_mp_len
? cfg
->vreg_is_mp_len
* 2 : 32;
819 cfg
->vreg_is_mp
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_mp_len
);
821 memcpy (cfg
->vreg_is_mp
, tmp
, size
* sizeof (gboolean
));
823 cfg
->vreg_is_mp
[vreg
] = TRUE
;
827 type_from_stack_type (MonoInst
*ins
)
830 case STACK_I4
: return mono_get_int32_type ();
831 case STACK_I8
: return m_class_get_byval_arg (mono_defaults
.int64_class
);
832 case STACK_PTR
: return mono_get_int_type ();
833 case STACK_R8
: return m_class_get_byval_arg (mono_defaults
.double_class
);
836 * this if used to be commented without any specific reason, but
837 * it breaks #80235 when commented
840 return m_class_get_this_arg (ins
->klass
);
842 return m_class_get_this_arg (mono_defaults
.object_class
);
844 /* ins->klass may not be set for ldnull.
845 * Also, if we have a boxed valuetype, we want an object lass,
846 * not the valuetype class
848 if (ins
->klass
&& !m_class_is_valuetype (ins
->klass
))
849 return m_class_get_byval_arg (ins
->klass
);
850 return mono_get_object_type ();
851 case STACK_VTYPE
: return m_class_get_byval_arg (ins
->klass
);
853 g_error ("stack type %d to montype not handled\n", ins
->type
);
859 mono_type_from_stack_type (MonoInst
*ins
)
861 return type_from_stack_type (ins
);
865 * mono_add_ins_to_end:
867 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
870 mono_add_ins_to_end (MonoBasicBlock
*bb
, MonoInst
*inst
)
875 MONO_ADD_INS (bb
, inst
);
879 switch (bb
->last_ins
->opcode
) {
893 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
896 if (MONO_IS_COND_BRANCH_OP (bb
->last_ins
)) {
897 /* Need to insert the ins before the compare */
898 if (bb
->code
== bb
->last_ins
) {
899 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
903 if (bb
->code
->next
== bb
->last_ins
) {
904 /* Only two instructions */
905 opcode
= bb
->code
->opcode
;
907 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
909 mono_bblock_insert_before_ins (bb
, bb
->code
, inst
);
911 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
914 opcode
= bb
->last_ins
->prev
->opcode
;
916 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
918 mono_bblock_insert_before_ins (bb
, bb
->last_ins
->prev
, inst
);
920 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
925 MONO_ADD_INS (bb
, inst
);
931 mono_create_jump_table (MonoCompile
*cfg
, MonoInst
*label
, MonoBasicBlock
**bbs
, int num_blocks
)
933 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
934 MonoJumpInfoBBTable
*table
;
936 table
= (MonoJumpInfoBBTable
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
938 table
->table_size
= num_blocks
;
940 ji
->ip
.label
= label
;
941 ji
->type
= MONO_PATCH_INFO_SWITCH
;
942 ji
->data
.table
= table
;
943 ji
->next
= cfg
->patch_info
;
944 cfg
->patch_info
= ji
;
948 mini_assembly_can_skip_verification (MonoDomain
*domain
, MonoMethod
*method
)
950 MonoAssembly
*assembly
= m_class_get_image (method
->klass
)->assembly
;
951 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
&& method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
)
953 if (assembly
->in_gac
|| assembly
->image
== mono_defaults
.corlib
)
955 return mono_assembly_has_skip_verification (assembly
);
959 * mini_method_verify:
961 * Verify the method using the verfier.
963 * Returns true if the method is invalid.
966 mini_method_verify (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
969 gboolean is_fulltrust
;
971 if (method
->verification_success
)
974 if (!mono_verifier_is_enabled_for_method (method
))
977 /*skip verification implies the assembly must be */
978 is_fulltrust
= mono_verifier_is_method_full_trust (method
) || mini_assembly_can_skip_verification (cfg
->domain
, method
);
980 res
= mono_method_verify_with_current_settings (method
, cfg
->skip_visibility
, is_fulltrust
);
983 for (tmp
= res
; tmp
; tmp
= tmp
->next
) {
984 MonoVerifyInfoExtended
*info
= (MonoVerifyInfoExtended
*)tmp
->data
;
985 if (info
->info
.status
== MONO_VERIFY_ERROR
) {
987 char *method_name
= mono_method_full_name (method
, TRUE
);
988 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
989 cfg
->exception_message
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
990 g_free (method_name
);
992 mono_free_verify_list (res
);
995 if (info
->info
.status
== MONO_VERIFY_NOT_VERIFIABLE
&& (!is_fulltrust
|| info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
|| info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)) {
997 char *method_name
= mono_method_full_name (method
, TRUE
);
998 char *msg
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
1000 if (info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
)
1001 mono_error_set_generic_error (cfg
->error
, "System", "MethodAccessException", "%s", msg
);
1002 else if (info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)
1003 mono_error_set_generic_error (cfg
->error
, "System", "FieldAccessException", "%s", msg
);
1004 else if (info
->exception_type
== MONO_EXCEPTION_UNVERIFIABLE_IL
)
1005 mono_error_set_generic_error (cfg
->error
, "System.Security", "VerificationException", "%s", msg
);
1006 if (!is_ok (cfg
->error
)) {
1007 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
1010 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
1011 cfg
->exception_message
= msg
;
1013 g_free (method_name
);
1015 mono_free_verify_list (res
);
1019 mono_free_verify_list (res
);
1021 method
->verification_success
= 1;
1025 /*Returns true if something went wrong*/
1027 mono_compile_is_broken (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
1029 MonoMethod
*method_definition
= method
;
1030 gboolean dont_verify
= m_class_get_image (method
->klass
)->assembly
->corlib_internal
;
1032 while (method_definition
->is_inflated
) {
1033 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
1034 method_definition
= imethod
->declaring
;
1037 return !dont_verify
&& mini_method_verify (cfg
, method_definition
, fail_compile
);
1041 mono_dynamic_code_hash_insert (MonoDomain
*domain
, MonoMethod
*method
, MonoJitDynamicMethodInfo
*ji
)
1043 if (!domain_jit_info (domain
)->dynamic_code_hash
)
1044 domain_jit_info (domain
)->dynamic_code_hash
= g_hash_table_new (NULL
, NULL
);
1045 g_hash_table_insert (domain_jit_info (domain
)->dynamic_code_hash
, method
, ji
);
1048 static MonoJitDynamicMethodInfo
*
1049 mono_dynamic_code_hash_lookup (MonoDomain
*domain
, MonoMethod
*method
)
1051 MonoJitDynamicMethodInfo
*res
;
1053 if (domain_jit_info (domain
)->dynamic_code_hash
)
1054 res
= (MonoJitDynamicMethodInfo
*)g_hash_table_lookup (domain_jit_info (domain
)->dynamic_code_hash
, method
);
1062 GList
*active
, *inactive
;
1067 compare_by_interval_start_pos_func (gconstpointer a
, gconstpointer b
)
1069 MonoMethodVar
*v1
= (MonoMethodVar
*)a
;
1070 MonoMethodVar
*v2
= (MonoMethodVar
*)b
;
1074 else if (v1
->interval
->range
&& v2
->interval
->range
)
1075 return v1
->interval
->range
->from
- v2
->interval
->range
->from
;
1076 else if (v1
->interval
->range
)
1083 #define LSCAN_DEBUG(a) do { a; } while (0)
1085 #define LSCAN_DEBUG(a)
1089 mono_allocate_stack_slots2 (MonoCompile
*cfg
, gboolean backward
, guint32
*stack_size
, guint32
*stack_align
)
1091 int i
, slot
, offset
, size
;
1096 GList
*vars
= NULL
, *l
, *unhandled
;
1097 StackSlotInfo
*scalar_stack_slots
, *vtype_stack_slots
, *slot_info
;
1100 int vtype_stack_slots_size
= 256;
1101 gboolean reuse_slot
;
1103 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg
->method
, TRUE
)));
1105 scalar_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * MONO_TYPE_PINNED
);
1106 vtype_stack_slots
= NULL
;
1109 offsets
= (gint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (gint32
) * cfg
->num_varinfo
);
1110 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
1113 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1114 inst
= cfg
->varinfo
[i
];
1115 vmv
= MONO_VARINFO (cfg
, i
);
1117 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
|| inst
->opcode
== OP_REGOFFSET
)
1120 vars
= g_list_prepend (vars
, vmv
);
1123 vars
= g_list_sort (vars
, compare_by_interval_start_pos_func
);
1128 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1129 MonoMethodVar *current = unhandled->data;
1131 if (current->interval->range) {
1132 g_assert (current->interval->range->from >= i);
1133 i = current->interval->range->from;
1140 for (unhandled
= vars
; unhandled
; unhandled
= unhandled
->next
) {
1141 MonoMethodVar
*current
= (MonoMethodVar
*)unhandled
->data
;
1144 inst
= cfg
->varinfo
[vmv
->idx
];
1146 t
= mono_type_get_underlying_type (inst
->inst_vtype
);
1147 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1150 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1151 * pinvoke wrappers when they call functions returning structures */
1152 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1153 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1158 size
= mini_type_stack_size (t
, &ialign
);
1161 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (t
)))
1166 if (cfg
->disable_reuse_stack_slots
)
1169 t
= mini_get_underlying_type (t
);
1171 case MONO_TYPE_GENERICINST
:
1172 if (!mono_type_generic_inst_is_valuetype (t
)) {
1173 slot_info
= &scalar_stack_slots
[t
->type
];
1177 case MONO_TYPE_VALUETYPE
:
1178 if (!vtype_stack_slots
)
1179 vtype_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1180 for (i
= 0; i
< nvtypes
; ++i
)
1181 if (t
->data
.klass
== vtype_stack_slots
[i
].vtype
)
1184 slot_info
= &vtype_stack_slots
[i
];
1186 if (nvtypes
== vtype_stack_slots_size
) {
1187 int new_slots_size
= vtype_stack_slots_size
* 2;
1188 StackSlotInfo
* new_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * new_slots_size
);
1190 memcpy (new_slots
, vtype_stack_slots
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1192 vtype_stack_slots
= new_slots
;
1193 vtype_stack_slots_size
= new_slots_size
;
1195 vtype_stack_slots
[nvtypes
].vtype
= t
->data
.klass
;
1196 slot_info
= &vtype_stack_slots
[nvtypes
];
1199 if (cfg
->disable_reuse_ref_stack_slots
)
1206 #if TARGET_SIZEOF_VOID_P == 4
1211 if (cfg
->disable_ref_noref_stack_slot_share
) {
1212 slot_info
= &scalar_stack_slots
[MONO_TYPE_I
];
1217 case MONO_TYPE_CLASS
:
1218 case MONO_TYPE_OBJECT
:
1219 case MONO_TYPE_ARRAY
:
1220 case MONO_TYPE_SZARRAY
:
1221 case MONO_TYPE_STRING
:
1222 /* Share non-float stack slots of the same size */
1223 slot_info
= &scalar_stack_slots
[MONO_TYPE_CLASS
];
1224 if (cfg
->disable_reuse_ref_stack_slots
)
1229 slot_info
= &scalar_stack_slots
[t
->type
];
1233 if (cfg
->comp_done
& MONO_COMP_LIVENESS
) {
1237 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1239 if (!current
->interval
->range
) {
1240 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
1244 inst
->flags
|= MONO_INST_IS_DEAD
;
1249 pos
= current
->interval
->range
->from
;
1251 LSCAN_DEBUG (printf ("process R%d ", inst
->dreg
));
1252 if (current
->interval
->range
)
1253 LSCAN_DEBUG (mono_linterval_print (current
->interval
));
1254 LSCAN_DEBUG (printf ("\n"));
1256 /* Check for intervals in active which expired or inactive */
1258 /* FIXME: Optimize this */
1261 for (l
= slot_info
->active
; l
!= NULL
; l
= l
->next
) {
1262 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1264 if (v
->interval
->last_range
->to
< pos
) {
1265 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1266 slot_info
->slots
= g_slist_prepend_mempool (cfg
->mempool
, slot_info
->slots
, GINT_TO_POINTER (offsets
[v
->idx
]));
1267 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1271 else if (!mono_linterval_covers (v
->interval
, pos
)) {
1272 slot_info
->inactive
= g_list_append (slot_info
->inactive
, v
);
1273 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1274 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg
->varinfo
[v
->idx
]->dreg
));
1281 /* Check for intervals in inactive which expired or active */
1283 /* FIXME: Optimize this */
1286 for (l
= slot_info
->inactive
; l
!= NULL
; l
= l
->next
) {
1287 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1289 if (v
->interval
->last_range
->to
< pos
) {
1290 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1291 // FIXME: Enabling this seems to cause impossible to debug crashes
1292 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1293 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1297 else if (mono_linterval_covers (v
->interval
, pos
)) {
1298 slot_info
->active
= g_list_append (slot_info
->active
, v
);
1299 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1300 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg
->varinfo
[v
->idx
]->dreg
));
1308 * This also handles the case when the variable is used in an
1309 * exception region, as liveness info is not computed there.
1312 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1315 if (! (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))) {
1316 if (slot_info
->slots
) {
1317 slot
= GPOINTER_TO_INT (slot_info
->slots
->data
);
1319 slot_info
->slots
= slot_info
->slots
->next
;
1322 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1324 slot_info
->active
= mono_varlist_insert_sorted (cfg
, slot_info
->active
, vmv
, TRUE
);
1330 static int count
= 0;
1333 if (count
== atoi (g_getenv ("COUNT3")))
1334 printf ("LAST: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
1335 if (count
> atoi (g_getenv ("COUNT3")))
1338 mono_print_ins (inst
);
1342 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst
->dreg
, mono_type_full_name (t
), slot
));
1344 if (inst
->flags
& MONO_INST_LMF
) {
1345 size
= MONO_ABI_SIZEOF (MonoLMF
);
1346 align
= sizeof (target_mgreg_t
);
1353 if (slot
== 0xffffff) {
1355 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
1356 * efficient copying (and to work around the fact that OP_MEMCPY
1357 * and OP_MEMSET ignores alignment).
1359 if (MONO_TYPE_ISSTRUCT (t
)) {
1360 align
= MAX (align
, sizeof (target_mgreg_t
));
1361 align
= MAX (align
, mono_class_min_align (mono_class_from_mono_type_internal (t
)));
1366 offset
+= align
- 1;
1367 offset
&= ~(align
- 1);
1371 offset
+= align
- 1;
1372 offset
&= ~(align
- 1);
1377 if (*stack_align
== 0)
1378 *stack_align
= align
;
1381 offsets
[vmv
->idx
] = slot
;
1384 for (i
= 0; i
< MONO_TYPE_PINNED
; ++i
) {
1385 if (scalar_stack_slots
[i
].active
)
1386 g_list_free (scalar_stack_slots
[i
].active
);
1388 for (i
= 0; i
< nvtypes
; ++i
) {
1389 if (vtype_stack_slots
[i
].active
)
1390 g_list_free (vtype_stack_slots
[i
].active
);
1393 cfg
->stat_locals_stack_size
+= offset
;
1395 *stack_size
= offset
;
/*
 * mono_allocate_stack_slots:
 *
 *  Allocate stack slots for all non register allocated variables using a
 * linear scan algorithm.
 * Returns: an array of stack offsets.
 * STACK_SIZE is set to the amount of stack space needed.
 * STACK_ALIGN is set to the alignment needed by the locals area.
 */
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	int i, slot, offset, size;
	guint32 align;
	MonoMethodVar *vmv;
	MonoInst *inst;
	gint32 *offsets;
	GList *vars = NULL, *l;
	/* Per-type free-slot tracking: scalars indexed by MONO_TYPE_*, vtypes by class. */
	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
	MonoType *t;
	int nvtypes;
	int vtype_stack_slots_size = 256;
	gboolean reuse_slot;

	/* When precise liveness intervals are available, use the interval-based allocator instead. */
	if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
		return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);

	scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
	vtype_stack_slots = NULL;
	nvtypes = 0;

	/* -1 marks "no slot assigned". */
	offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
	for (i = 0; i < cfg->num_varinfo; ++i)
		offsets [i] = -1;

	/* Collect the locals which actually need a stack slot. */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		inst = cfg->varinfo [i];
		vmv = MONO_VARINFO (cfg, i);

		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
			continue;

		vars = g_list_prepend (vars, vmv);
	}

	vars = mono_varlist_sort (cfg, vars, 0);
	offset = 0;
	*stack_align = sizeof (target_mgreg_t);
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
		inst = cfg->varinfo [vmv->idx];

		t = mono_type_get_underlying_type (inst->inst_vtype);
		/* gsharedvt variables get their slots elsewhere — TODO confirm against caller */
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
		}
		else {
			int ialign;

			size = mini_type_stack_size (t, &ialign);
			align = ialign;

			if (mono_class_has_failure (mono_class_from_mono_type_internal (t)))
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);

			/* SIMD types need 16 byte alignment — NOTE(review): reconstructed, confirm constant */
			if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
				align = 16;
		}

		reuse_slot = TRUE;
		if (cfg->disable_reuse_stack_slots)
			reuse_slot = FALSE;

		t = mini_get_underlying_type (t);
		/* Pick the free-slot pool this variable may share with. */
		switch (t->type) {
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				slot_info = &scalar_stack_slots [t->type];
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
			if (!vtype_stack_slots)
				vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
			/* Linear search for an existing pool for this exact class. */
			for (i = 0; i < nvtypes; ++i)
				if (t->data.klass == vtype_stack_slots [i].vtype)
					break;
			if (i < nvtypes)
				slot_info = &vtype_stack_slots [i];
			else {
				/* Grow the vtype pool array by doubling when full. */
				if (nvtypes == vtype_stack_slots_size) {
					int new_slots_size = vtype_stack_slots_size * 2;
					StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);

					memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);

					vtype_stack_slots = new_slots;
					vtype_stack_slots_size = new_slots_size;
				}
				vtype_stack_slots [nvtypes].vtype = t->data.klass;
				slot_info = &vtype_stack_slots [nvtypes];
				nvtypes ++;
			}
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;

		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
#if TARGET_SIZEOF_VOID_P == 4
		case MONO_TYPE_I4:
#else
		case MONO_TYPE_I8:
#endif
			if (cfg->disable_ref_noref_stack_slot_share) {
				slot_info = &scalar_stack_slots [MONO_TYPE_I];
				break;
			}
			/* Fall through */

		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_STRING:
			/* Share non-float stack slots of the same size */
			slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;
		case MONO_TYPE_VAR:
		case MONO_TYPE_MVAR:
			slot_info = &scalar_stack_slots [t->type];
			break;
		default:
			slot_info = &scalar_stack_slots [t->type];
			break;
		}

		/* 0xffffff == "no reusable slot found yet"; a fresh slot is carved below. */
		slot = 0xffffff;
		if (cfg->comp_done & MONO_COMP_LIVENESS) {
			//printf ("START  %2d %08x %08x\n",  vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);

			/* expire old intervals in active */
			while (slot_info->active) {
				MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;

				if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
					break;

				//printf ("EXPIR  %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);

				slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
				slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
			}

			/*
			 * This also handles the case when the variable is used in an
			 * exception region, as liveness info is not computed there.
			 */
			/*
			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
			 * opcodes.
			 */
			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
				if (slot_info->slots) {
					slot = GPOINTER_TO_INT (slot_info->slots->data);

					slot_info->slots = slot_info->slots->next;
				}

				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
			}
		}

#if 0
		{
			static int count = 0;
			count ++;

			if (count == atoi (g_getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (g_getenv ("COUNT")))
				slot = 0xffffff;
			else
				mono_print_ins (inst);
		}
#endif

		if (inst->flags & MONO_INST_LMF) {
			/*
			 * This variable represents a MonoLMF structure, which has no corresponding
			 * CLR type, so hard-code its size/alignment.
			 */
			size = MONO_ABI_SIZEOF (MonoLMF);
			align = sizeof (target_mgreg_t);
			reuse_slot = FALSE;
		}

		if (!reuse_slot)
			slot = 0xffffff;

		if (slot == 0xffffff) {
			/*
			 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignores alignment).
			 */
			if (MONO_TYPE_ISSTRUCT (t)) {
				align = MAX (align, sizeof (target_mgreg_t));
				align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
				/*
				 * Align the size too so the code generated for passing vtypes in
				 * registers doesn't overwrite random locals.
				 */
				size = (size + (align - 1)) & ~(align -1);
			}

			if (backward) {
				offset += size;
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
			}
			else {
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
				offset += size;
			}

			*stack_align = MAX (*stack_align, align);
		}

		offsets [vmv->idx] = slot;
	}
	g_list_free (vars);
	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
		if (scalar_stack_slots [i].active)
			g_list_free (scalar_stack_slots [i].active);
	}
	for (i = 0; i < nvtypes; ++i) {
		if (vtype_stack_slots [i].active)
			g_list_free (vtype_stack_slots [i].active);
	}

	cfg->stat_locals_stack_size += offset;

	*stack_size = offset;
	return offsets;
}
/*
 * Opcode-emulation registry: maps opcodes that have no native implementation
 * on the current architecture to the JIT icall that emulates them.
 */
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache */
/* One bit per (approximate) opcode; a set bit means "an emulation MIGHT be registered",
 * so lookups can skip the linear scan in the common miss case. */
static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
/* Number of registered emulations. */
static short emul_opcode_num = 0;
/* Capacity of the two parallel arrays below. */
static short emul_opcode_alloced = 0;
/* Parallel arrays: emul_opcode_opcodes [i] is emulated by emul_opcode_map [i]. */
static short *emul_opcode_opcodes;
static MonoJitICallInfo **emul_opcode_map;
/*
 * mono_find_jit_opcode_emulation:
 *
 *   Return the registered emulation icall for OPCODE, or NULL if none exists.
 * The bitmap is consulted first as a fast reject; since multiple opcodes can
 * map to the same bit, a hit only means "maybe", and the linear scan below
 * gives the definitive answer (false positives are harmless, false negatives
 * cannot happen because registration sets the same bit).
 */
MonoJitICallInfo *
mono_find_jit_opcode_emulation (int opcode)
{
	g_assert (opcode >= 0 && opcode <= OP_LAST);
	if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
		int i;
		for (i = 0; i < emul_opcode_num; ++i) {
			if (emul_opcode_opcodes [i] == opcode)
				return emul_opcode_map [i];
		}
	}
	return NULL;
}
/*
 * mini_register_opcode_emulation:
 *
 *   Register INFO/FUNC as the emulation icall for OPCODE. The signature must
 * be static (no this) with fewer than 3 parameters. Grows the parallel
 * opcode/icall arrays as needed and marks the hit-cache bit so lookups for
 * this opcode take the slow path. Not thread safe — NOTE(review): appears to
 * assume registration happens during single-threaded startup; confirm.
 */
void
mini_register_opcode_emulation (int opcode, MonoJitICallInfo *info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper)
{
	g_assert (!sig->hasthis);
	g_assert (sig->param_count < 3);

	mono_register_jit_icall_info (info, func, name, sig, no_wrapper, symbol);

	if (emul_opcode_num >= emul_opcode_alloced) {
		/* Grow by 50% (first allocation: 16 entries). */
		int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
		emul_opcode_alloced += incr;
		emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
		emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
	}
	emul_opcode_map [emul_opcode_num] = info;
	emul_opcode_opcodes [emul_opcode_num] = opcode;
	emul_opcode_num++;
	/* Same indexing as the lookup side, so the cache can never give a false negative. */
	emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
}
/*
 * print_dfn:
 *
 *   Debug helper: dump the IR of every basic block in depth-first order,
 * including predecessor/successor lists, the immediate dominator, and
 * (when computed) the dominator set and dominance frontier.
 */
static void
print_dfn (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	MonoInst *c;
	char *code;
	int i, j;

	{
		char *method_name = mono_method_full_name (cfg->method, TRUE);
		g_print ("IR code for method %s\n", method_name);
		g_free (method_name);
	}

	for (i = 0; i < cfg->num_bblocks; ++i) {
		bb = cfg->bblocks [i];
		/*if (bb->cil_code) {
			char* code1, *code2;
			code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
			if (bb->last_ins->cil_code)
				code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
			else
				code2 = g_strdup ("");

			code1 [strlen (code1) - 1] = 0;
			code = g_strdup_printf ("%s -> %s", code1, code2);
			g_free (code1);
			g_free (code2);
		} else*/
			code = g_strdup ("\n");
		g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
		MONO_BB_FOR_EACH_INS (bb, c) {
			mono_print_ins_index (-1, c);
		}

		g_print ("\tprev:");
		for (j = 0; j < bb->in_count; ++j) {
			g_print (" BB%d", bb->in_bb [j]->block_num);
		}
		g_print ("\t\tsucc:");
		for (j = 0; j < bb->out_count; ++j) {
			g_print (" BB%d", bb->out_bb [j]->block_num);
		}
		g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);

		/* Sanity check: the immediate dominator must be in the dominator set. */
		if (bb->idom)
			g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));

		if (bb->dominators)
			mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
		if (bb->dfrontier)
			mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);

		g_free (code);
	}

	g_print ("\n");
}
/*
 * mono_bblock_add_inst:
 *
 *   Append INST at the end of BB's instruction list (thin wrapper over MONO_ADD_INS).
 */
void
mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
{
	MONO_ADD_INS (bb, inst);
}
/*
 * mono_bblock_insert_after_ins:
 *
 *   Insert INS_TO_INSERT into BB's doubly-linked instruction list right after INS.
 * If INS is NULL, insert at the head of the list. bb->last_ins is updated when
 * the insertion becomes the new tail.
 */
void
mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
	if (ins == NULL) {
		/* Insert at the head: the old head becomes the successor. */
		ins = bb->code;
		bb->code = ins_to_insert;

		/* Link with next */
		ins_to_insert->next = ins;

		if (ins)
			ins->prev = ins_to_insert;

		if (bb->last_ins == NULL)
			bb->last_ins = ins_to_insert;
	} else {
		/* Link with next */
		ins_to_insert->next = ins->next;

		if (ins->next)
			ins->next->prev = ins_to_insert;

		/* Link with previous */
		ins->next = ins_to_insert;
		ins_to_insert->prev = ins;

		if (bb->last_ins == ins)
			bb->last_ins = ins_to_insert;
	}
}
/*
 * mono_bblock_insert_before_ins:
 *
 *   Insert INS_TO_INSERT into BB's doubly-linked instruction list right before INS.
 * If INS is NULL, insert at the head of the list. bb->code / bb->last_ins are
 * updated when the head or tail changes.
 */
void
mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
{
	if (ins == NULL) {
		/* Empty position: insert before the current head. */
		ins = bb->code;
		if (ins)
			ins->prev = ins_to_insert;
		bb->code = ins_to_insert;
		ins_to_insert->next = ins;
		if (bb->last_ins == NULL)
			bb->last_ins = ins_to_insert;
	} else {
		/* Link with previous */
		if (ins->prev)
			ins->prev->next = ins_to_insert;
		ins_to_insert->prev = ins->prev;

		/* Link with next */
		ins->prev = ins_to_insert;
		ins_to_insert->next = ins;

		if (bb->code == ins)
			bb->code = ins_to_insert;
	}
}
/*
 * mono_verify_bblock:
 *
 *   Verify that the next and prev pointers are consistent inside the instructions in BB.
 */
void
mono_verify_bblock (MonoBasicBlock *bb)
{
	MonoInst *ins, *prev;

	prev = NULL;
	/* Walk forward; every node's prev must point at the node we just left. */
	for (ins = bb->code; ins; ins = ins->next) {
		g_assert (ins->prev == prev);
		prev = ins;
	}
	/* The recorded tail must really be the last node. */
	if (bb->last_ins)
		g_assert (!bb->last_ins->next);
}
/*
 * mono_verify_cfg:
 *
 *   Perform consistency checks on the JIT data structures and the IR
 * by verifying every basic block's instruction list.
 */
void
mono_verify_cfg (MonoCompile *cfg)
{
	MonoBasicBlock *bb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
		mono_verify_bblock (bb);
}
// This will free many fields in cfg to save
// memory. Note that this must be safe to call
// multiple times. It must be idempotent.
void
mono_empty_compile (MonoCompile *cfg)
{
	mono_free_loop_info (cfg);

	// These live in the mempool, and so must be freed
	// before the mempool is destroyed below
	for (GSList *l = cfg->headers_to_free; l; l = l->next) {
		mono_metadata_free_mh ((MonoMethodHeader *)l->data);
	}
	cfg->headers_to_free = NULL;

	/* NULL-ing each pointer after freeing keeps repeated calls safe (idempotence). */
	if (cfg->mempool) {
		//mono_mempool_stats (cfg->mempool);
		mono_mempool_destroy (cfg->mempool);
		cfg->mempool = NULL;
	}

	g_free (cfg->varinfo);
	cfg->varinfo = NULL;

	if (cfg->rs) {
		mono_regstate_free (cfg->rs);
		cfg->rs = NULL;
	}
}
/*
 * mono_destroy_compile:
 *
 *   Free CFG and everything it owns. First releases the bulk of the memory via
 * mono_empty_compile (safe because that function is idempotent), then frees
 * the remaining per-compile tables and the cfg itself.
 */
void
mono_destroy_compile (MonoCompile *cfg)
{
	mono_empty_compile (cfg);

	mono_metadata_free_mh (cfg->header);

	g_hash_table_destroy (cfg->spvars);
	g_hash_table_destroy (cfg->exvars);
	g_list_free (cfg->ldstr_list);
	g_hash_table_destroy (cfg->token_info_hash);
	g_hash_table_destroy (cfg->abs_patches);

	mono_debug_free_method (cfg);

	/* varinfo was already freed and NULL-ed by mono_empty_compile; this is a no-op then. */
	g_free (cfg->varinfo);
	g_free (cfg->exception_message);
	g_free (cfg);
}
/*
 * mono_add_patch_info:
 *
 *   Prepend a new patch (relocation) entry of TYPE for native offset IP with
 * TARGET as its data to cfg->patch_info. MONO_PATCH_INFO_NONE entries are
 * silently ignored.
 */
void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
	if (type == MONO_PATCH_INFO_NONE)
		return;

	/* Allocated from the cfg mempool: freed together with the compile. */
	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));

	ji->ip.i = ip;
	ji->type = type;
	ji->data.target = target;
	ji->next = cfg->patch_info;

	cfg->patch_info = ji;
}
/*
 * mono_add_patch_info_rel:
 *
 *   Same as mono_add_patch_info, but also records the RELOCATION kind to use
 * when the patch is applied.
 */
void
mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
{
	if (type == MONO_PATCH_INFO_NONE)
		return;

	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));

	ji->ip.i = ip;
	ji->type = type;
	ji->relocation = relocation;
	ji->data.target = target;
	ji->next = cfg->patch_info;

	cfg->patch_info = ji;
}
/*
 * mono_remove_patch_info:
 *
 *   Unlink every patch entry recorded for native offset IP from cfg->patch_info.
 * The entries themselves live in the cfg mempool, so no explicit free is needed.
 */
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
	/* Pointer-to-pointer walk: removal is a single store, no special head case. */
	MonoJumpInfo **ji = &cfg->patch_info;

	while (*ji) {
		if ((*ji)->ip.i == ip)
			*ji = (*ji)->next;
		else
			ji = &((*ji)->next);
	}
}
/*
 * mono_add_seq_point:
 *
 *   Record INS as a sequence point located at NATIVE_OFFSET. The instruction is
 * added to the per-method list (cfg->seq_points) and, when BB is non-NULL, to
 * the block's own list, also remembering it as the block's last seq point.
 */
void
mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
{
	ins->inst_offset = native_offset;
	g_ptr_array_add (cfg->seq_points, ins);
	if (bb) {
		bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
		bb->last_seq_point = ins;
	}
}
/*
 * mono_add_var_location:
 *
 *   Record a DWARF-style location-list entry for VAR: either a register (IS_REG,
 * REG) or a stack location (OFFSET), valid for the native code range [FROM, TO).
 * Only the 'this' argument and the rgctx variable are tracked; entries for other
 * variables are silently dropped.
 */
void
mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
{
	MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));

	/* A register location cannot also carry a stack offset. */
	if (is_reg)
		g_assert (offset == 0);

	entry->is_reg = is_reg;
	entry->reg = reg;
	entry->offset = offset;
	entry->from = from;
	entry->to = to;

	if (var == cfg->args [0])
		cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
	else if (var == cfg->rgctx_var)
		cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
}
1984 mono_apply_volatile (MonoInst
*inst
, MonoBitSet
*set
, gsize index
)
1986 inst
->flags
|= mono_bitset_test_safe (set
, index
) ? MONO_INST_VOLATILE
: 0;
/*
 * mono_compile_create_vars:
 *
 *   Create the MonoInst variables for the method being compiled: the return
 * value, the arguments (including 'this'), and the IL locals. Volatile flags
 * are propagated from the method header, then architecture/LLVM specific
 * variables and (optionally) the LMF variable are created.
 */
void
mono_compile_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i;

	header = cfg->header;

	sig = mono_method_signature_internal (cfg->method);

	if (!MONO_TYPE_IS_VOID (sig->ret)) {
		cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
		/* Inhibit optimizations */
		cfg->ret->flags |= MONO_INST_VOLATILE;
	}
	if (cfg->verbose_level > 2)
		g_print ("creating vars\n");

	cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));

	if (sig->hasthis) {
		MonoInst* arg = mono_compile_create_var (cfg, m_class_get_this_arg (cfg->method->klass), OP_ARG);
		mono_apply_volatile (arg, header->volatile_args, 0);
		cfg->args [0] = arg;
		cfg->this_arg = arg;
	}

	for (i = 0; i < sig->param_count; ++i) {
		MonoInst* arg = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
		/* Argument slots are shifted by one when there is an implicit 'this'. */
		mono_apply_volatile (arg, header->volatile_args, i + sig->hasthis);
		cfg->args [i + sig->hasthis] = arg;
	}

	if (cfg->verbose_level > 2) {
		if (cfg->ret) {
			printf ("\treturn : ");
			mono_print_ins (cfg->ret);
		}

		if (sig->hasthis) {
			printf ("\tthis: ");
			mono_print_ins (cfg->args [0]);
		}

		for (i = 0; i < sig->param_count; ++i) {
			printf ("\targ [%d]: ", i);
			mono_print_ins (cfg->args [i + sig->hasthis]);
		}
	}

	cfg->locals_start = cfg->num_varinfo;
	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));

	if (cfg->verbose_level > 2)
		g_print ("creating locals\n");

	for (i = 0; i < header->num_locals; ++i) {
		if (cfg->verbose_level > 2)
			g_print ("\tlocal [%d]: ", i);
		cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
		mono_apply_volatile (cfg->locals [i], header->volatile_locals, i);
	}

	if (cfg->verbose_level > 2)
		g_print ("locals done\n");

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_create_vars (cfg);
	else
		mono_arch_create_vars (cfg);
#else
	mono_arch_create_vars (cfg);
#endif

	if (cfg->method->save_lmf && cfg->create_lmf_var) {
		MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
		/* Keep the LMF variable pinned to the stack and identifiable by the stack allocator. */
		lmf_var->flags |= MONO_INST_VOLATILE;
		lmf_var->flags |= MONO_INST_LMF;
		cfg->lmf_var = lmf_var;
	}
}
/*
 * mono_print_code:
 *
 *   Debug helper: print the IR of every basic block, prefixing output with MSG.
 */
void
mono_print_code (MonoCompile *cfg, const char* msg)
{
	MonoBasicBlock *bb;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
		mono_print_bb (bb, msg);
}
/*
 * mono_postprocess_patches:
 *
 *   Fix up patch entries after code emission:
 * - MONO_PATCH_INFO_ABS entries are resolved through cfg->abs_patches into
 *   their real patch type/target.
 * - MONO_PATCH_INFO_SWITCH entries get a jump table allocated from code memory
 *   (the dynamic-method pool or the domain's code manager) filled with the
 *   native offsets of the target blocks.
 */
static void
mono_postprocess_patches (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_ABS: {
			/*
			 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
			 * absolute address of the icall function.
			 */
			if (cfg->abs_patches) {
				MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
				if (abs_ji) {
					patch_info->type = abs_ji->type;
					patch_info->data.target = abs_ji->data.target;
				}
			}
			break;
		}
		case MONO_PATCH_INFO_SWITCH: {
			gpointer *table;
			if (cfg->method->dynamic) {
				table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				table = (void **)mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}

			for (i = 0; i < patch_info->data.table->table_size; i++) {
				/* Might be NULL if the switch is eliminated */
				if (patch_info->data.table->table [i]) {
					g_assert (patch_info->data.table->table [i]->native_offset);
					table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
				} else {
					table [i] = NULL;
				}
			}
			/* Reuse the table field to carry the native-offset table from now on. */
			patch_info->data.table->table = (MonoBasicBlock**)table;
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}
}
/* Those patches require the JitInfo of the compiled method already be in place when used */
static void
mono_postprocess_patches_after_ji_publish (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_METHOD_JUMP: {
			/* Register the jump site so it can be re-patched if the target is recompiled. */
			unsigned char *ip = cfg->native_code + patch_info->ip.i;

			mini_register_jump_site (cfg->domain, patch_info->data.method, ip);
			break;
		}
		default:
			break;
		}
	}
}
/*
 * mono_codegen:
 *
 *   Back-end code generation driver. Runs the per-block lowering, peephole and
 * local register allocation passes, emits prolog/blocks/epilog/exception code
 * into the temporary buffer, copies the result into its final executable
 * location (domain code manager, or a private pool for dynamic methods),
 * applies all patches, commits the code and flushes the icache. On patch
 * resolution failure the cfg exception is set and the function returns early.
 */
static void
mono_codegen (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	int max_epilog_size;
	guint8 *code;
	MonoDomain *code_domain;
	guint unwindlen = 0;

	if (mono_using_xdebug)
		/*
		 * Recent gdb versions have trouble processing symbol files containing
		 * overlapping address ranges, so allocate all code from the code manager
		 * of the root domain. (#666152).
		 */
		code_domain = mono_get_root_domain ();
	else
		code_domain = cfg->domain;

	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		cfg->spill_count = 0;
		/* we reuse dfn here */
		/* bb->dfn = bb_count++; */

		mono_arch_lowering_pass (cfg, bb);

		if (cfg->opt & MONO_OPT_PEEPHOLE)
			mono_arch_peephole_pass_1 (cfg, bb);

		mono_local_regalloc (cfg, bb);

		if (cfg->opt & MONO_OPT_PEEPHOLE)
			mono_arch_peephole_pass_2 (cfg, bb);

		if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
			mono_bb_deduplicate_op_il_seq_points (cfg, bb);
	}

	code = mono_arch_emit_prolog (cfg);

	set_code_cursor (cfg, code);
	cfg->prolog_end = cfg->code_len;
	cfg->cfa_reg = cfg->cur_cfa_reg;
	cfg->cfa_offset = cfg->cur_cfa_offset;

	mono_debug_open_method (cfg);

	/* emit code all basic blocks */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		bb->native_offset = cfg->code_len;
		bb->real_native_offset = cfg->code_len;
		//if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
			mono_arch_output_basic_block (cfg, bb);
		bb->native_length = cfg->code_len - bb->native_offset;

		if (bb == cfg->bb_exit) {
			cfg->epilog_begin = cfg->code_len;
			mono_arch_emit_epilog (cfg);
			cfg->epilog_end = cfg->code_len;
		}

		if (bb->clause_holes) {
			GList *tmp;
			for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
				mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, cfg->native_code + bb->native_offset, bb);
		}
	}

	mono_arch_emit_exceptions (cfg);

	max_epilog_size = 0;

	/* we always allocate code in cfg->domain->code_mp to increase locality */
	cfg->code_size = cfg->code_len + max_epilog_size;

	/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */

#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
	if (!cfg->compile_aot)
		unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
#endif

	if (cfg->method->dynamic) {
		/* Allocate the code into a separate memory pool so it can be freed */
		cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
		cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
		mono_domain_lock (cfg->domain);
		mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
		mono_domain_unlock (cfg->domain);

		if (mono_using_xdebug)
			/* See the comment for cfg->code_domain */
			code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
		else
			code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
	} else {
		code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
	}

	if (cfg->thunk_area) {
		/* The thunk area lives after the code and the unwind info. */
		cfg->thunks_offset = cfg->code_size + unwindlen;
		cfg->thunks = code + cfg->thunks_offset;
		memset (cfg->thunks, 0, cfg->thunk_area);
	}

	g_assert (code);
	/* Relocate the emitted code from the temporary buffer into its final home. */
	memcpy (code, cfg->native_code, cfg->code_len);
	g_free (cfg->native_code);
	cfg->native_code = code;
	code = cfg->native_code + cfg->code_len;

	/* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
	mono_postprocess_patches (cfg);

#ifdef VALGRIND_JIT_REGISTER_MAP
	if (valgrind_register){
		char* nm = mono_method_full_name (cfg->method, TRUE);
		VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
		g_free (nm);
	}
#endif

	if (cfg->verbose_level > 0) {
		char* nm = mono_method_get_full_name (cfg->method);
		g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
				 nm,
				 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
		g_free (nm);
	}

	{
		gboolean is_generic = FALSE;

		if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
				mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
			is_generic = TRUE;
		}

		/* Shared code must only be produced for generic methods. */
		if (cfg->gshared)
			g_assert (is_generic);
	}

#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
	mono_arch_save_unwind_info (cfg);
#endif

#ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
	{
		MonoJumpInfo *ji;
		gpointer target;

		for (ji = cfg->patch_info; ji; ji = ji->next) {
			if (cfg->compile_aot) {
				switch (ji->type) {
				case MONO_PATCH_INFO_BB:
				case MONO_PATCH_INFO_LABEL:
					break;
				default:
					/* No need to patch these */
					continue;
				}
			}

			if (ji->type == MONO_PATCH_INFO_NONE)
				continue;

			target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, cfg->error);
			if (!is_ok (cfg->error)) {
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
				return;
			}
			mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
		}
	}
#else
	mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors, cfg->error);
	if (!is_ok (cfg->error)) {
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
		return;
	}
#endif

	if (cfg->method->dynamic) {
		if (mono_using_xdebug)
			mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
		else
			mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
	} else {
		mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
	}

	MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));

	mono_arch_flush_icache (cfg->native_code, cfg->code_len);

	mono_debug_close_method (cfg);

#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
	if (!cfg->compile_aot)
		mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
#endif
}
2355 compute_reachable (MonoBasicBlock
*bb
)
2359 if (!(bb
->flags
& BB_VISITED
)) {
2360 bb
->flags
|= BB_VISITED
;
2361 for (i
= 0; i
< bb
->out_count
; ++i
)
2362 compute_reachable (bb
->out_bb
[i
]);
/*
 * mono_bb_ordering:
 *
 *   Assign depth-first numbers to the basic blocks and fill cfg->bblocks with
 * them in DFS order. Blocks not reached by the DFS (and not kept alive as
 * exception handlers) are unreachable: their code is dropped and their
 * outgoing edges are unlinked.
 */
static void mono_bb_ordering (MonoCompile *cfg)
{
	int dfn = 0;
	/* Depth-first ordering on basic blocks */
	cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));

	cfg->max_block_num = cfg->num_bblocks;

	df_visit (cfg->bb_entry, &dfn, cfg->bblocks);

#if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
	/* workaround for an AMD specific issue that only happens on GCC 7 so far,
	 * for more information see https://github.com/mono/mono/issues/9298 */
	mono_memory_barrier ();
#endif
	g_assertf (cfg->num_bblocks >= dfn, "cfg->num_bblocks=%d, dfn=%d\n", cfg->num_bblocks, dfn);

	/* A dfn count lower than the block count means some blocks were never visited. */
	if (cfg->num_bblocks != dfn + 1) {
		MonoBasicBlock *bb;

		cfg->num_bblocks = dfn + 1;

		/* remove unreachable code, because the code in them may be
		 * inconsistent  (access to dead variables for example) */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
		compute_reachable (cfg->bb_entry);
		/* Exception handlers are reachable through EH edges, not normal flow. */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			if (bb->flags & BB_EXCEPTION_HANDLER)
				compute_reachable (bb);
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			if (!(bb->flags & BB_VISITED)) {
				if (cfg->verbose_level > 1)
					g_print ("found unreachable code in BB%d\n", bb->block_num);
				bb->code = bb->last_ins = NULL;
				while (bb->out_count)
					mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
			}
		}
		/* Leave the visited flags clean for later passes. */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
	}
}
/*
 * mono_handle_out_of_line_bblock:
 *
 *   When a block falls through into a successor which will be emitted
 * out of line, the fall-through would be broken at emission time, so append
 * an explicit OP_BR to the next block.
 */
static void
mono_handle_out_of_line_bblock (MonoCompile *cfg)
{
	MonoBasicBlock *bb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_BR);
			MONO_ADD_INS (bb, ins);
			ins->inst_target_bb = bb->next_bb;
		}
	}
}
2425 create_jit_info (MonoCompile
*cfg
, MonoMethod
*method_to_compile
)
2428 MonoMethodHeader
*header
;
2430 MonoJitInfoFlags flags
= JIT_INFO_NONE
;
2431 int num_clauses
, num_holes
= 0;
2432 guint32 stack_size
= 0;
2434 g_assert (method_to_compile
== cfg
->method
);
2435 header
= cfg
->header
;
2438 flags
|= JIT_INFO_HAS_GENERIC_JIT_INFO
;
2440 if (cfg
->arch_eh_jit_info
) {
2441 MonoJitArgumentInfo
*arg_info
;
2442 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method_to_register
);
2445 * This cannot be computed during stack walking, as
2446 * mono_arch_get_argument_info () is not signal safe.
2448 arg_info
= g_newa (MonoJitArgumentInfo
, sig
->param_count
+ 1);
2449 stack_size
= mono_arch_get_argument_info (sig
, sig
->param_count
, arg_info
);
2452 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2455 if (cfg
->has_unwind_info_for_epilog
&& !(flags
& JIT_INFO_HAS_ARCH_EH_INFO
))
2456 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2458 if (cfg
->thunk_area
)
2459 flags
|= JIT_INFO_HAS_THUNK_INFO
;
2461 if (cfg
->try_block_holes
) {
2462 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2463 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2464 MonoExceptionClause
*ec
= hole
->clause
;
2465 int hole_end
= hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
;
2466 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2467 g_assert (clause_last_bb
);
2469 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2470 if (clause_last_bb
->native_offset
!= hole_end
)
2474 flags
|= JIT_INFO_HAS_TRY_BLOCK_HOLES
;
2475 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2476 printf ("Number of try block holes %d\n", num_holes
);
2479 if (COMPILE_LLVM (cfg
))
2480 num_clauses
= cfg
->llvm_ex_info_len
;
2482 num_clauses
= header
->num_clauses
;
2484 if (cfg
->method
->dynamic
)
2485 jinfo
= (MonoJitInfo
*)g_malloc0 (mono_jit_info_size (flags
, num_clauses
, num_holes
));
2487 jinfo
= (MonoJitInfo
*)mono_domain_alloc0 (cfg
->domain
, mono_jit_info_size (flags
, num_clauses
, num_holes
));
2488 jinfo_try_holes_size
+= num_holes
* sizeof (MonoTryBlockHoleJitInfo
);
2490 mono_jit_info_init (jinfo
, cfg
->method_to_register
, cfg
->native_code
, cfg
->code_len
, flags
, num_clauses
, num_holes
);
2491 jinfo
->domain_neutral
= (cfg
->opt
& MONO_OPT_SHARED
) != 0;
2493 if (COMPILE_LLVM (cfg
))
2494 jinfo
->from_llvm
= TRUE
;
2498 MonoGenericJitInfo
*gi
;
2499 GSList
*loclist
= NULL
;
2501 gi
= mono_jit_info_get_generic_jit_info (jinfo
);
2504 if (cfg
->method
->dynamic
)
2505 gi
->generic_sharing_context
= g_new0 (MonoGenericSharingContext
, 1);
2507 gi
->generic_sharing_context
= (MonoGenericSharingContext
*)mono_domain_alloc0 (cfg
->domain
, sizeof (MonoGenericSharingContext
));
2508 mini_init_gsctx (cfg
->method
->dynamic
? NULL
: cfg
->domain
, NULL
, cfg
->gsctx_context
, gi
->generic_sharing_context
);
2510 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2511 mini_method_get_context (method_to_compile
)->method_inst
||
2512 m_class_is_valuetype (method_to_compile
->klass
)) {
2513 g_assert (cfg
->rgctx_var
);
2518 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2519 mini_method_get_context (method_to_compile
)->method_inst
||
2520 m_class_is_valuetype (method_to_compile
->klass
)) {
2521 inst
= cfg
->rgctx_var
;
2522 if (!COMPILE_LLVM (cfg
))
2523 g_assert (inst
->opcode
== OP_REGOFFSET
);
2524 loclist
= cfg
->rgctx_loclist
;
2526 inst
= cfg
->args
[0];
2527 loclist
= cfg
->this_loclist
;
2531 /* Needed to handle async exceptions */
2535 gi
->nlocs
= g_slist_length (loclist
);
2536 if (cfg
->method
->dynamic
)
2537 gi
->locations
= (MonoDwarfLocListEntry
*)g_malloc0 (gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2539 gi
->locations
= (MonoDwarfLocListEntry
*)mono_domain_alloc0 (cfg
->domain
, gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2541 for (l
= loclist
; l
; l
= l
->next
) {
2542 memcpy (&(gi
->locations
[i
]), l
->data
, sizeof (MonoDwarfLocListEntry
));
2547 if (COMPILE_LLVM (cfg
)) {
2548 g_assert (cfg
->llvm_this_reg
!= -1);
2549 gi
->this_in_reg
= 0;
2550 gi
->this_reg
= cfg
->llvm_this_reg
;
2551 gi
->this_offset
= cfg
->llvm_this_offset
;
2552 } else if (inst
->opcode
== OP_REGVAR
) {
2553 gi
->this_in_reg
= 1;
2554 gi
->this_reg
= inst
->dreg
;
2556 g_assert (inst
->opcode
== OP_REGOFFSET
);
2558 g_assert (inst
->inst_basereg
== X86_EBP
);
2559 #elif defined(TARGET_AMD64)
2560 g_assert (inst
->inst_basereg
== X86_EBP
|| inst
->inst_basereg
== X86_ESP
);
2562 g_assert (inst
->inst_offset
>= G_MININT32
&& inst
->inst_offset
<= G_MAXINT32
);
2564 gi
->this_in_reg
= 0;
2565 gi
->this_reg
= inst
->inst_basereg
;
2566 gi
->this_offset
= inst
->inst_offset
;
2571 MonoTryBlockHoleTableJitInfo
*table
;
2574 table
= mono_jit_info_get_try_block_hole_table_info (jinfo
);
2575 table
->num_holes
= (guint16
)num_holes
;
2577 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2578 guint32 start_bb_offset
;
2579 MonoTryBlockHoleJitInfo
*hole
;
2580 TryBlockHole
*hole_data
= (TryBlockHole
*)tmp
->data
;
2581 MonoExceptionClause
*ec
= hole_data
->clause
;
2582 int hole_end
= hole_data
->basic_block
->native_offset
+ hole_data
->basic_block
->native_length
;
2583 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2584 g_assert (clause_last_bb
);
2586 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2587 if (clause_last_bb
->native_offset
== hole_end
)
2590 start_bb_offset
= hole_data
->start_offset
- hole_data
->basic_block
->native_offset
;
2591 hole
= &table
->holes
[i
++];
2592 hole
->clause
= hole_data
->clause
- &header
->clauses
[0];
2593 hole
->offset
= (guint32
)hole_data
->start_offset
;
2594 hole
->length
= (guint16
)(hole_data
->basic_block
->native_length
- start_bb_offset
);
2596 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2597 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole
->clause
, hole
->offset
, hole
->length
);
2599 g_assert (i
== num_holes
);
2602 if (jinfo
->has_arch_eh_info
) {
2603 MonoArchEHJitInfo
*info
;
2605 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2607 info
->stack_size
= stack_size
;
2610 if (cfg
->thunk_area
) {
2611 MonoThunkJitInfo
*info
;
2613 info
= mono_jit_info_get_thunk_info (jinfo
);
2614 info
->thunks_offset
= cfg
->thunks_offset
;
2615 info
->thunks_size
= cfg
->thunk_area
;
2618 if (COMPILE_LLVM (cfg
)) {
2620 memcpy (&jinfo
->clauses
[0], &cfg
->llvm_ex_info
[0], num_clauses
* sizeof (MonoJitExceptionInfo
));
2621 } else if (header
->num_clauses
) {
2624 for (i
= 0; i
< header
->num_clauses
; i
++) {
2625 MonoExceptionClause
*ec
= &header
->clauses
[i
];
2626 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2627 MonoBasicBlock
*tblock
;
2630 ei
->flags
= ec
->flags
;
2632 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2633 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec
->try_offset
, ec
->try_offset
+ ec
->try_len
, ec
->handler_offset
, ec
->handler_offset
+ ec
->handler_len
, ec
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
? ec
->data
.filter_offset
: 0);
2635 exvar
= mono_find_exvar_for_offset (cfg
, ec
->handler_offset
);
2636 ei
->exvar_offset
= exvar
? exvar
->inst_offset
: 0;
2638 if (ei
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
2639 tblock
= cfg
->cil_offset_to_bb
[ec
->data
.filter_offset
];
2641 ei
->data
.filter
= cfg
->native_code
+ tblock
->native_offset
;
2643 ei
->data
.catch_class
= ec
->data
.catch_class
;
2646 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
];
2648 g_assert (tblock
->native_offset
);
2649 ei
->try_start
= cfg
->native_code
+ tblock
->native_offset
;
2650 if (tblock
->extend_try_block
) {
2652 * Extend the try block backwards to include parts of the previous call
2655 ei
->try_start
= (guint8
*)ei
->try_start
- cfg
->backend
->monitor_enter_adjustment
;
2657 if (ec
->try_offset
+ ec
->try_len
< header
->code_size
)
2658 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2660 tblock
= cfg
->bb_exit
;
2661 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2662 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec
->try_offset
, ec
->try_len
, tblock
, header
->code_size
);
2664 if (!tblock
->native_offset
) {
2666 for (j
= ec
->try_offset
+ ec
->try_len
, end
= ec
->try_offset
; j
>= end
; --j
) {
2667 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2668 if (bb
&& bb
->native_offset
) {
2674 ei
->try_end
= cfg
->native_code
+ tblock
->native_offset
;
2675 g_assert (tblock
->native_offset
);
2676 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
];
2678 ei
->handler_start
= cfg
->native_code
+ tblock
->native_offset
;
2680 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2681 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2682 gpointer hole_end
= cfg
->native_code
+ (hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
);
2683 if (hole
->clause
== ec
&& hole_end
== ei
->try_end
) {
2684 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2685 printf ("\tShortening try block %d from %x to %x\n", i
, (int)((guint8
*)ei
->try_end
- cfg
->native_code
), hole
->start_offset
);
2687 ei
->try_end
= cfg
->native_code
+ hole
->start_offset
;
2692 if (ec
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
) {
2694 if (ec
->handler_offset
+ ec
->handler_len
< header
->code_size
) {
2695 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
+ ec
->handler_len
];
2696 if (tblock
->native_offset
) {
2697 end_offset
= tblock
->native_offset
;
2701 for (j
= ec
->handler_offset
+ ec
->handler_len
, end
= ec
->handler_offset
; j
>= end
; --j
) {
2702 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2703 if (bb
&& bb
->native_offset
) {
2708 end_offset
= tblock
->native_offset
+ tblock
->native_length
;
2711 end_offset
= cfg
->epilog_begin
;
2713 ei
->data
.handler_end
= cfg
->native_code
+ end_offset
;
2718 if (G_UNLIKELY (cfg
->verbose_level
>= 4)) {
2720 for (i
= 0; i
< jinfo
->num_clauses
; i
++) {
2721 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2722 int start
= (guint8
*)ei
->try_start
- cfg
->native_code
;
2723 int end
= (guint8
*)ei
->try_end
- cfg
->native_code
;
2724 int handler
= (guint8
*)ei
->handler_start
- cfg
->native_code
;
2725 int handler_end
= (guint8
*)ei
->data
.handler_end
- cfg
->native_code
;
2727 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i
, ei
->flags
, start
, end
, handler
, handler_end
);
2731 if (cfg
->encoded_unwind_ops
) {
2732 /* Generated by LLVM */
2733 jinfo
->unwind_info
= mono_cache_unwind_info (cfg
->encoded_unwind_ops
, cfg
->encoded_unwind_ops_len
);
2734 g_free (cfg
->encoded_unwind_ops
);
2735 } else if (cfg
->unwind_ops
) {
2737 guint8
*unwind_info
= mono_unwind_ops_encode (cfg
->unwind_ops
, &info_len
);
2738 guint32 unwind_desc
;
2740 unwind_desc
= mono_cache_unwind_info (unwind_info
, info_len
);
2742 if (cfg
->has_unwind_info_for_epilog
) {
2743 MonoArchEHJitInfo
*info
;
2745 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2747 info
->epilog_size
= cfg
->code_len
- cfg
->epilog_begin
;
2749 jinfo
->unwind_info
= unwind_desc
;
2750 g_free (unwind_info
);
2752 jinfo
->unwind_info
= cfg
->used_int_regs
;
2758 /* Return whenever METHOD is a gsharedvt method */
2760 is_gsharedvt_method (MonoMethod
*method
)
2762 MonoGenericContext
*context
;
2763 MonoGenericInst
*inst
;
2766 if (!method
->is_inflated
)
2768 context
= mono_method_get_context (method
);
2769 inst
= context
->class_inst
;
2771 for (i
= 0; i
< inst
->type_argc
; ++i
)
2772 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2775 inst
= context
->method_inst
;
2777 for (i
= 0; i
< inst
->type_argc
; ++i
)
2778 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2785 is_open_method (MonoMethod
*method
)
2787 MonoGenericContext
*context
;
2789 if (!method
->is_inflated
)
2791 context
= mono_method_get_context (method
);
2792 if (context
->class_inst
&& context
->class_inst
->is_open
)
2794 if (context
->method_inst
&& context
->method_inst
->is_open
)
2800 mono_insert_nop_in_empty_bb (MonoCompile
*cfg
)
2803 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2807 MONO_INST_NEW (cfg
, nop
, OP_NOP
);
2808 MONO_ADD_INS (bb
, nop
);
2812 insert_safepoint (MonoCompile
*cfg
, MonoBasicBlock
*bblock
)
2814 MonoInst
*poll_addr
, *ins
;
2816 if (cfg
->disable_gc_safe_points
)
2819 if (cfg
->verbose_level
> 1)
2820 printf ("ADDING SAFE POINT TO BB %d\n", bblock
->block_num
);
2822 g_assert (mini_safepoints_enabled ());
2823 NEW_AOTCONST (cfg
, poll_addr
, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG
, (gpointer
)&mono_polling_required
);
2825 MONO_INST_NEW (cfg
, ins
, OP_GC_SAFE_POINT
);
2826 ins
->sreg1
= poll_addr
->dreg
;
2828 if (bblock
->flags
& BB_EXCEPTION_HANDLER
) {
2829 MonoInst
*eh_op
= bblock
->code
;
2831 if (eh_op
&& eh_op
->opcode
!= OP_START_HANDLER
&& eh_op
->opcode
!= OP_GET_EX_OBJ
) {
2834 MonoInst
*next_eh_op
= eh_op
? eh_op
->next
: NULL
;
2835 // skip all EH relateds ops
2836 while (next_eh_op
&& (next_eh_op
->opcode
== OP_START_HANDLER
|| next_eh_op
->opcode
== OP_GET_EX_OBJ
)) {
2838 next_eh_op
= eh_op
->next
;
2842 mono_bblock_insert_after_ins (bblock
, eh_op
, poll_addr
);
2843 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2844 } else if (bblock
== cfg
->bb_entry
) {
2845 mono_bblock_insert_after_ins (bblock
, bblock
->last_ins
, poll_addr
);
2846 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2849 mono_bblock_insert_before_ins (bblock
, NULL
, poll_addr
);
2850 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2855 This code inserts safepoints into managed code at important code paths.
2858 -the first basic block
2859 -landing BB for exception handlers
2864 insert_safepoints (MonoCompile
*cfg
)
2868 g_assert (mini_safepoints_enabled ());
2870 if (COMPILE_LLVM (cfg
)) {
2871 if (!cfg
->llvm_only
&& cfg
->compile_aot
) {
2872 /* We rely on LLVM's safepoints insertion capabilities. */
2873 if (cfg
->verbose_level
> 1)
2874 printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
2879 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
2880 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2881 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2882 if (info
&& info
->subtype
== WRAPPER_SUBTYPE_ICALL_WRAPPER
&&
2883 (info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_threads_state_poll
||
2884 info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_thread_interruption_checkpoint
||
2885 info
->d
.icall
.jit_icall_id
== MONO_JIT_ICALL_mono_threads_exit_gc_safe_region_unbalanced
)) {
2886 if (cfg
->verbose_level
> 1)
2887 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2892 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
2893 if (cfg
->verbose_level
> 1)
2894 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2898 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
2899 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2901 if (info
&& (info
->subtype
== WRAPPER_SUBTYPE_INTERP_IN
|| info
->subtype
== WRAPPER_SUBTYPE_INTERP_LMF
)) {
2902 /* These wrappers shouldn't do any icalls */
2903 if (cfg
->verbose_level
> 1)
2904 printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
2909 if (cfg
->verbose_level
> 1)
2910 printf ("INSERTING SAFEPOINTS\n");
2911 if (cfg
->verbose_level
> 2)
2912 mono_print_code (cfg
, "BEFORE SAFEPOINTS");
2914 /* if the method doesn't contain
2915 * (1) a call (so it's a leaf method)
2917 * we can skip the GC safepoint on method entry. */
2918 gboolean requires_safepoint
= cfg
->has_calls
;
2920 for (bb
= cfg
->bb_entry
->next_bb
; bb
; bb
= bb
->next_bb
) {
2921 if (bb
->loop_body_start
|| (bb
->flags
& BB_EXCEPTION_HANDLER
)) {
2922 requires_safepoint
= TRUE
;
2923 insert_safepoint (cfg
, bb
);
2927 if (requires_safepoint
)
2928 insert_safepoint (cfg
, cfg
->bb_entry
);
2930 if (cfg
->verbose_level
> 2)
2931 mono_print_code (cfg
, "AFTER SAFEPOINTS");
2937 mono_insert_branches_between_bblocks (MonoCompile
*cfg
)
2941 /* Add branches between non-consecutive bblocks */
2942 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2943 if (bb
->last_ins
&& MONO_IS_COND_BRANCH_OP (bb
->last_ins
) &&
2944 bb
->last_ins
->inst_false_bb
&& bb
->next_bb
!= bb
->last_ins
->inst_false_bb
) {
2945 /* we are careful when inverting, since bugs like #59580
2946 * could show up when dealing with NaNs.
2948 if (MONO_IS_COND_BRANCH_NOFP(bb
->last_ins
) && bb
->next_bb
== bb
->last_ins
->inst_true_bb
) {
2949 MonoBasicBlock
*tmp
= bb
->last_ins
->inst_true_bb
;
2950 bb
->last_ins
->inst_true_bb
= bb
->last_ins
->inst_false_bb
;
2951 bb
->last_ins
->inst_false_bb
= tmp
;
2953 bb
->last_ins
->opcode
= mono_reverse_branch_op (bb
->last_ins
->opcode
);
2955 MonoInst
*inst
= (MonoInst
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
));
2956 inst
->opcode
= OP_BR
;
2957 inst
->inst_target_bb
= bb
->last_ins
->inst_false_bb
;
2958 mono_bblock_add_inst (bb
, inst
);
2963 if (cfg
->verbose_level
>= 4) {
2964 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2965 MonoInst
*tree
= bb
->code
;
2966 g_print ("DUMP BLOCK %d:\n", bb
->block_num
);
2969 for (; tree
; tree
= tree
->next
) {
2970 mono_print_ins_index (-1, tree
);
2976 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2977 bb
->max_vreg
= cfg
->next_vreg
;
2982 init_backend (MonoBackend
*backend
)
2984 #ifdef MONO_ARCH_NEED_GOT_VAR
2985 backend
->need_got_var
= 1;
2987 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2988 backend
->have_card_table_wb
= 1;
2990 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
2991 backend
->have_op_generic_class_init
= 1;
2993 #ifdef MONO_ARCH_EMULATE_MUL_DIV
2994 backend
->emulate_mul_div
= 1;
2996 #ifdef MONO_ARCH_EMULATE_DIV
2997 backend
->emulate_div
= 1;
2999 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
3000 backend
->emulate_long_shift_opts
= 1;
3002 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
3003 backend
->have_objc_get_selector
= 1;
3005 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
3006 backend
->have_generalized_imt_trampoline
= 1;
3008 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3009 backend
->gshared_supported
= 1;
3011 if (MONO_ARCH_USE_FPSTACK
)
3012 backend
->use_fpstack
= 1;
3013 // Does the ABI have a volatile non-parameter register, so tailcall
3014 // can pass context to generics or interfaces?
3015 backend
->have_volatile_non_param_register
= MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER
;
3016 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
3017 backend
->have_op_tailcall_membase
= 1;
3019 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
3020 backend
->have_op_tailcall_reg
= 1;
3022 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3023 backend
->monitor_enter_adjustment
= 1;
3025 backend
->monitor_enter_adjustment
= MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
;
3027 #if defined(MONO_ARCH_ILP32)
3030 #ifdef MONO_ARCH_NEED_DIV_CHECK
3031 backend
->need_div_check
= 1;
3033 #ifdef NO_UNALIGNED_ACCESS
3034 backend
->no_unaligned_access
= 1;
3036 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3037 backend
->dyn_call_param_area
= MONO_ARCH_DYN_CALL_PARAM_AREA
;
3039 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3040 backend
->disable_div_with_mul
= 1;
3042 #ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
3043 backend
->explicit_null_checks
= 1;
3045 #ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
3046 backend
->optimized_div
= 1;
3051 * mini_method_compile:
3052 * @method: the method to compile
3053 * @opts: the optimization flags to use
3054 * @domain: the domain where the method will be compiled in
3055 * @flags: compilation flags
3056 * @parts: debug flag
3058 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3059 * field in the returned struct to see if compilation succeded.
3062 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
3064 MonoMethodHeader
*header
;
3065 MonoMethodSignature
*sig
;
3068 gboolean try_generic_shared
, try_llvm
= FALSE
;
3069 MonoMethod
*method_to_compile
, *method_to_register
;
3070 gboolean method_is_gshared
= FALSE
;
3071 gboolean run_cctors
= (flags
& JIT_FLAG_RUN_CCTORS
) ? 1 : 0;
3072 gboolean compile_aot
= (flags
& JIT_FLAG_AOT
) ? 1 : 0;
3073 gboolean full_aot
= (flags
& JIT_FLAG_FULL_AOT
) ? 1 : 0;
3074 gboolean disable_direct_icalls
= (flags
& JIT_FLAG_NO_DIRECT_ICALLS
) ? 1 : 0;
3075 gboolean gsharedvt_method
= FALSE
;
3077 gboolean llvm
= (flags
& JIT_FLAG_LLVM
) ? 1 : 0;
3079 static gboolean verbose_method_inited
;
3080 static char **verbose_method_names
;
3082 mono_atomic_inc_i32 (&mono_jit_stats
.methods_compiled
);
3083 MONO_PROFILER_RAISE (jit_begin
, (method
));
3084 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3085 MONO_PROBE_METHOD_COMPILE_BEGIN (method
);
3087 gsharedvt_method
= is_gsharedvt_method (method
);
3090 * In AOT mode, method can be the following:
3091 * - a gsharedvt method.
3092 * - a method inflated with type parameters. This is for ref/partial sharing.
3093 * - a method inflated with concrete types.
3096 if (is_open_method (method
)) {
3097 try_generic_shared
= TRUE
;
3098 method_is_gshared
= TRUE
;
3100 try_generic_shared
= FALSE
;
3102 g_assert (opts
& MONO_OPT_GSHARED
);
3104 try_generic_shared
= mono_class_generic_sharing_enabled (method
->klass
) &&
3105 (opts
& MONO_OPT_GSHARED
) && mono_method_is_generic_sharable_full (method
, FALSE
, FALSE
, FALSE
);
3106 if (mini_is_gsharedvt_sharable_method (method
)) {
3108 if (!mono_debug_count ())
3109 try_generic_shared = FALSE;
3115 if (try_generic_shared && !mono_debug_count ())
3116 try_generic_shared = FALSE;
3119 if (opts
& MONO_OPT_GSHARED
) {
3120 if (try_generic_shared
)
3121 mono_atomic_inc_i32 (&mono_stats
.generics_sharable_methods
);
3122 else if (mono_method_is_generic_impl (method
))
3123 mono_atomic_inc_i32 (&mono_stats
.generics_unsharable_methods
);
3127 try_llvm
= mono_use_llvm
|| llvm
;
3130 #ifndef MONO_ARCH_FLOAT32_SUPPORTED
3131 opts
&= ~MONO_OPT_FLOAT32
;
3135 if (method_is_gshared
) {
3136 method_to_compile
= method
;
3138 if (try_generic_shared
) {
3140 method_to_compile
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
3141 mono_error_assert_ok (error
);
3143 method_to_compile
= method
;
3147 cfg
= g_new0 (MonoCompile
, 1);
3148 cfg
->method
= method_to_compile
;
3149 cfg
->mempool
= mono_mempool_new ();
3151 cfg
->run_cctors
= run_cctors
;
3152 cfg
->domain
= domain
;
3153 cfg
->verbose_level
= mini_verbose
;
3154 cfg
->compile_aot
= compile_aot
;
3155 cfg
->full_aot
= full_aot
;
3156 cfg
->disable_omit_fp
= mini_debug_options
.disable_omit_fp
;
3157 cfg
->skip_visibility
= method
->skip_visibility
;
3158 cfg
->orig_method
= method
;
3159 cfg
->gen_seq_points
= !mini_debug_options
.no_seq_points_compact_data
|| mini_debug_options
.gen_sdb_seq_points
;
3160 cfg
->gen_sdb_seq_points
= mini_debug_options
.gen_sdb_seq_points
;
3161 cfg
->llvm_only
= (flags
& JIT_FLAG_LLVM_ONLY
) != 0;
3162 cfg
->interp
= (flags
& JIT_FLAG_INTERP
) != 0;
3163 cfg
->use_current_cpu
= (flags
& JIT_FLAG_USE_CURRENT_CPU
) != 0;
3164 cfg
->backend
= current_backend
;
3166 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_ALLOC
) {
3167 /* We can't have seq points inside gc critical regions */
3168 cfg
->gen_seq_points
= FALSE
;
3169 cfg
->gen_sdb_seq_points
= FALSE
;
3171 /* coop requires loop detection to happen */
3172 if (mini_safepoints_enabled ())
3173 cfg
->opt
|= MONO_OPT_LOOP
;
3174 if (cfg
->backend
->explicit_null_checks
) {
3175 /* some platforms have null pages, so we can't SIGSEGV */
3176 cfg
->explicit_null_checks
= TRUE
;
3178 cfg
->explicit_null_checks
= mini_debug_options
.explicit_null_checks
|| (flags
& JIT_FLAG_EXPLICIT_NULL_CHECKS
);
3180 cfg
->soft_breakpoints
= mini_debug_options
.soft_breakpoints
;
3181 cfg
->check_pinvoke_callconv
= mini_debug_options
.check_pinvoke_callconv
;
3182 cfg
->disable_direct_icalls
= disable_direct_icalls
;
3183 cfg
->direct_pinvoke
= (flags
& JIT_FLAG_DIRECT_PINVOKE
) != 0;
3184 if (try_generic_shared
)
3185 cfg
->gshared
= TRUE
;
3186 cfg
->compile_llvm
= try_llvm
;
3187 cfg
->token_info_hash
= g_hash_table_new (NULL
, NULL
);
3188 if (cfg
->compile_aot
)
3189 cfg
->method_index
= aot_method_index
;
3192 if (!mono_debug_count ())
3193 cfg->opt &= ~MONO_OPT_FLOAT32;
3196 cfg
->opt
&= ~MONO_OPT_SIMD
;
3197 cfg
->r4fp
= (cfg
->opt
& MONO_OPT_FLOAT32
) ? 1 : 0;
3198 cfg
->r4_stack_type
= cfg
->r4fp
? STACK_R4
: STACK_R8
;
3200 if (cfg
->gen_seq_points
)
3201 cfg
->seq_points
= g_ptr_array_new ();
3202 cfg
->error
= (MonoError
*)&cfg
->error_value
;
3203 error_init (cfg
->error
);
3205 if (cfg
->compile_aot
&& !try_generic_shared
&& (method
->is_generic
|| mono_class_is_gtd (method
->klass
) || method_is_gshared
)) {
3206 cfg
->exception_type
= MONO_EXCEPTION_GENERIC_SHARING_FAILED
;
3210 if (cfg
->gshared
&& (gsharedvt_method
|| mini_is_gsharedvt_sharable_method (method
))) {
3211 MonoMethodInflated
*inflated
;
3212 MonoGenericContext
*context
;
3214 if (gsharedvt_method
) {
3215 g_assert (method
->is_inflated
);
3216 inflated
= (MonoMethodInflated
*)method
;
3217 context
= &inflated
->context
;
3219 /* We are compiling a gsharedvt method directly */
3220 g_assert (compile_aot
);
3222 g_assert (method_to_compile
->is_inflated
);
3223 inflated
= (MonoMethodInflated
*)method_to_compile
;
3224 context
= &inflated
->context
;
3227 mini_init_gsctx (NULL
, cfg
->mempool
, context
, &cfg
->gsctx
);
3228 cfg
->gsctx_context
= context
;
3230 cfg
->gsharedvt
= TRUE
;
3231 if (!cfg
->llvm_only
) {
3232 cfg
->disable_llvm
= TRUE
;
3233 cfg
->exception_message
= g_strdup ("gsharedvt");
3238 method_to_register
= method_to_compile
;
3240 g_assert (method
== method_to_compile
);
3241 method_to_register
= method
;
3243 cfg
->method_to_register
= method_to_register
;
3246 sig
= mono_method_signature_checked (cfg
->method
, err
);
3248 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3249 cfg
->exception_message
= g_strdup (mono_error_get_message (err
));
3250 mono_error_cleanup (err
);
3251 if (MONO_METHOD_COMPILE_END_ENABLED ())
3252 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3256 header
= cfg
->header
= mono_method_get_header_checked (cfg
->method
, cfg
->error
);
3258 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3259 if (MONO_METHOD_COMPILE_END_ENABLED ())
3260 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3266 static gboolean inited
;
3272 * Check for methods which cannot be compiled by LLVM early, to avoid
3273 * the extra compilation pass.
3275 if (COMPILE_LLVM (cfg
)) {
3276 mono_llvm_check_method_supported (cfg
);
3277 if (cfg
->disable_llvm
) {
3278 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3279 //nm = mono_method_full_name (cfg->method, TRUE);
3280 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3283 if (cfg
->llvm_only
) {
3284 g_free (cfg
->exception_message
);
3285 cfg
->disable_aot
= TRUE
;
3288 mono_destroy_compile (cfg
);
3290 goto restart_compile
;
3296 cfg
->prof_flags
= mono_profiler_get_call_instrumentation_flags (cfg
->method
);
3297 cfg
->prof_coverage
= mono_profiler_coverage_instrumentation_enabled (cfg
->method
);
3299 gboolean trace
= mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
);
3301 cfg
->prof_flags
= (MonoProfilerCallInstrumentationFlags
)(
3302 MONO_PROFILER_CALL_INSTRUMENTATION_ENTER
| MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT
|
3303 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE
| MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT
);
3305 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3306 if (mini_debug_options
.mdb_optimizations
|| MONO_CFG_PROFILE_CALL_CONTEXT (cfg
)) {
3307 cfg
->disable_reuse_registers
= TRUE
;
3308 cfg
->disable_reuse_stack_slots
= TRUE
;
3310 * This decreases the change the debugger will read registers/stack slots which are
3311 * not yet initialized.
3313 cfg
->disable_initlocals_opt
= TRUE
;
3315 cfg
->extend_live_ranges
= TRUE
;
3317 /* The debugger needs all locals to be on the stack or in a global register */
3318 cfg
->disable_vreg_to_lvreg
= TRUE
;
3320 /* Don't remove unused variables when running inside the debugger since the user
3321 * may still want to view them. */
3322 cfg
->disable_deadce_vars
= TRUE
;
3324 cfg
->opt
&= ~MONO_OPT_DEADCE
;
3325 cfg
->opt
&= ~MONO_OPT_INLINE
;
3326 cfg
->opt
&= ~MONO_OPT_COPYPROP
;
3327 cfg
->opt
&= ~MONO_OPT_CONSPROP
;
3329 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3330 cfg
->disable_out_of_line_bblocks
= TRUE
;
3333 if (mono_using_xdebug
) {
3335 * Make each variable use its own register/stack slot and extend
3336 * their liveness to cover the whole method, making them displayable
3337 * in gdb even after they are dead.
3339 cfg
->disable_reuse_registers
= TRUE
;
3340 cfg
->disable_reuse_stack_slots
= TRUE
;
3341 cfg
->extend_live_ranges
= TRUE
;
3342 cfg
->compute_precise_live_ranges
= TRUE
;
3345 mini_gc_init_cfg (cfg
);
3347 if (method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
3348 WrapperInfo
*info
= mono_marshal_get_wrapper_info (method
);
3350 if ((info
&& (info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG
|| info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG
))) {
3351 cfg
->disable_gc_safe_points
= TRUE
;
3352 /* This is safe, these wrappers only store to the stack */
3353 cfg
->gen_write_barriers
= FALSE
;
3357 if (COMPILE_LLVM (cfg
)) {
3358 cfg
->opt
|= MONO_OPT_ABCREM
;
3361 if (!verbose_method_inited
) {
3362 char *env
= g_getenv ("MONO_VERBOSE_METHOD");
3364 verbose_method_names
= g_strsplit (env
, ";", -1);
3366 verbose_method_inited
= TRUE
;
3368 if (verbose_method_names
) {
3371 for (i
= 0; verbose_method_names
[i
] != NULL
; i
++){
3372 const char *name
= verbose_method_names
[i
];
3374 if ((strchr (name
, '.') > name
) || strchr (name
, ':')) {
3375 MonoMethodDesc
*desc
;
3377 desc
= mono_method_desc_new (name
, TRUE
);
3379 if (mono_method_desc_full_match (desc
, cfg
->method
)) {
3380 cfg
->verbose_level
= 4;
3382 mono_method_desc_free (desc
);
3385 if (strcmp (cfg
->method
->name
, name
) == 0)
3386 cfg
->verbose_level
= 4;
3391 cfg
->intvars
= (guint16
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint16
) * STACK_MAX
* header
->max_stack
);
3393 if (cfg
->verbose_level
> 0) {
3396 method_name
= mono_method_get_full_name (method
);
3397 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg
) ? "llvm " : "", cfg
->gsharedvt
? "gsharedvt " : "", (cfg
->gshared
&& !cfg
->gsharedvt
) ? "gshared " : "", method_name
);
3399 if (COMPILE_LLVM (cfg))
3400 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3401 else if (cfg->gsharedvt)
3402 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3403 else if (cfg->gshared)
3404 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3406 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3408 g_free (method_name
);
3411 if (cfg
->opt
& MONO_OPT_ABCREM
)
3412 cfg
->opt
|= MONO_OPT_SSA
;
3414 cfg
->rs
= mono_regstate_new ();
3415 cfg
->next_vreg
= cfg
->rs
->next_vreg
;
3417 /* FIXME: Fix SSA to handle branches inside bblocks */
3418 if (cfg
->opt
& MONO_OPT_SSA
)
3419 cfg
->enable_extended_bblocks
= FALSE
;
3422 * FIXME: This confuses liveness analysis because variables which are assigned after
3423 * a branch inside a bblock become part of the kill set, even though the assignment
3424 * might not get executed. This causes the optimize_initlocals pass to delete some
3425 * assignments which are needed.
3426 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3429 //cfg->enable_extended_bblocks = TRUE;
3431 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3432 if (mono_compile_is_broken (cfg
, cfg
->method
, TRUE
)) {
3433 if (mini_debug_options
.break_on_unverified
)
3439 * create MonoInst* which represents arguments and local variables
3441 mono_compile_create_vars (cfg
);
3443 mono_cfg_dump_create_context (cfg
);
3444 mono_cfg_dump_begin_group (cfg
);
3446 MONO_TIME_TRACK (mono_jit_stats
.jit_method_to_ir
, i
= mono_method_to_ir (cfg
, method_to_compile
, NULL
, NULL
, NULL
, NULL
, 0, FALSE
));
3447 mono_cfg_dump_ir (cfg
, "method-to-ir");
3449 if (cfg
->gdump_ctx
!= NULL
) {
3450 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3451 mono_insert_nop_in_empty_bb (cfg
);
3452 mono_cfg_dump_ir (cfg
, "mono_insert_nop_in_empty_bb");
3456 if (try_generic_shared
&& cfg
->exception_type
== MONO_EXCEPTION_GENERIC_SHARING_FAILED
) {
3458 if (MONO_METHOD_COMPILE_END_ENABLED ())
3459 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3462 mono_destroy_compile (cfg
);
3463 try_generic_shared
= FALSE
;
3464 goto restart_compile
;
3466 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_GENERIC_SHARING_FAILED
);
3468 if (MONO_METHOD_COMPILE_END_ENABLED ())
3469 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3470 /* cfg contains the details of the failure, so let the caller cleanup */
3474 cfg
->stat_basic_blocks
+= cfg
->num_bblocks
;
3476 if (COMPILE_LLVM (cfg
)) {
3479 /* The IR has to be in SSA form for LLVM */
3480 cfg
->opt
|= MONO_OPT_SSA
;
3484 // Allow SSA on the result value
3485 cfg
->ret
->flags
&= ~MONO_INST_VOLATILE
;
3487 // Add an explicit return instruction referencing the return value
3488 MONO_INST_NEW (cfg
, ins
, OP_SETRET
);
3489 ins
->sreg1
= cfg
->ret
->dreg
;
3491 MONO_ADD_INS (cfg
->bb_exit
, ins
);
3494 cfg
->opt
&= ~MONO_OPT_LINEARS
;
3497 cfg
->opt
&= ~MONO_OPT_BRANCH
;
3500 /* todo: remove code when we have verified that the liveness for try/catch blocks
3504 * Currently, this can't be commented out since exception blocks are not
3505 * processed during liveness analysis.
3506 * It is also needed, because otherwise the local optimization passes would
3507 * delete assignments in cases like this:
3509 * <something which throws>
3511 * This also allows SSA to be run on methods containing exception clauses, since
3512 * SSA will ignore variables marked VOLATILE.
3514 MONO_TIME_TRACK (mono_jit_stats
.jit_liveness_handle_exception_clauses
, mono_liveness_handle_exception_clauses (cfg
));
3515 mono_cfg_dump_ir (cfg
, "liveness_handle_exception_clauses");
3517 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_out_of_line_bblock
, mono_handle_out_of_line_bblock (cfg
));
3518 mono_cfg_dump_ir (cfg
, "handle_out_of_line_bblock");
3520 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3522 if (!COMPILE_LLVM (cfg
)) {
3523 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_long_opts
, mono_decompose_long_opts (cfg
));
3524 mono_cfg_dump_ir (cfg
, "decompose_long_opts");
3527 /* Should be done before branch opts */
3528 if (cfg
->opt
& (MONO_OPT_CONSPROP
| MONO_OPT_COPYPROP
)) {
3529 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop
, mono_local_cprop (cfg
));
3530 mono_cfg_dump_ir (cfg
, "local_cprop");
3533 if (cfg
->flags
& MONO_CFG_HAS_TYPE_CHECK
) {
3534 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_typechecks
, mono_decompose_typechecks (cfg
));
3535 if (cfg
->gdump_ctx
!= NULL
) {
3536 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3537 mono_insert_nop_in_empty_bb (cfg
);
3539 mono_cfg_dump_ir (cfg
, "decompose_typechecks");
3543 * Should be done after cprop which can do strength reduction on
3544 * some of these ops, after propagating immediates.
3546 if (cfg
->has_emulated_ops
) {
3547 MONO_TIME_TRACK (mono_jit_stats
.jit_local_emulate_ops
, mono_local_emulate_ops (cfg
));
3548 mono_cfg_dump_ir (cfg
, "local_emulate_ops");
3551 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3552 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches
, mono_optimize_branches (cfg
));
3553 mono_cfg_dump_ir (cfg
, "optimize_branches");
3556 /* This must be done _before_ global reg alloc and _after_ decompose */
3557 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs
, mono_handle_global_vregs (cfg
));
3558 mono_cfg_dump_ir (cfg
, "handle_global_vregs");
3559 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3560 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce
, mono_local_deadce (cfg
));
3561 mono_cfg_dump_ir (cfg
, "local_deadce");
3563 if (cfg
->opt
& MONO_OPT_ALIAS_ANALYSIS
) {
3564 MONO_TIME_TRACK (mono_jit_stats
.jit_local_alias_analysis
, mono_local_alias_analysis (cfg
));
3565 mono_cfg_dump_ir (cfg
, "local_alias_analysis");
3567 /* Disable this for LLVM to make the IR easier to handle */
3568 if (!COMPILE_LLVM (cfg
)) {
3569 MONO_TIME_TRACK (mono_jit_stats
.jit_if_conversion
, mono_if_conversion (cfg
));
3570 mono_cfg_dump_ir (cfg
, "if_conversion");
3573 mono_threads_safepoint ();
3575 MONO_TIME_TRACK (mono_jit_stats
.jit_bb_ordering
, mono_bb_ordering (cfg
));
3576 mono_cfg_dump_ir (cfg
, "bb_ordering");
3578 if (((cfg
->num_varinfo
> 2000) || (cfg
->num_bblocks
> 1000)) && !cfg
->compile_aot
) {
3580 * we disable some optimizations if there are too many variables
3581 * because JIT time may become too expensive. The actual number needs
3582 * to be tweaked and eventually the non-linear algorithms should be fixed.
3584 cfg
->opt
&= ~ (MONO_OPT_LINEARS
| MONO_OPT_COPYPROP
| MONO_OPT_CONSPROP
);
3585 cfg
->disable_ssa
= TRUE
;
3588 if (cfg
->num_varinfo
> 10000 && !cfg
->llvm_only
)
3589 /* Disable llvm for overly complex methods */
3590 cfg
->disable_ssa
= TRUE
;
3592 if (cfg
->opt
& MONO_OPT_LOOP
) {
3593 MONO_TIME_TRACK (mono_jit_stats
.jit_compile_dominator_info
, mono_compile_dominator_info (cfg
, MONO_COMP_DOM
| MONO_COMP_IDOM
));
3594 MONO_TIME_TRACK (mono_jit_stats
.jit_compute_natural_loops
, mono_compute_natural_loops (cfg
));
3597 if (mono_threads_are_safepoints_enabled ()) {
3598 MONO_TIME_TRACK (mono_jit_stats
.jit_insert_safepoints
, insert_safepoints (cfg
));
3599 mono_cfg_dump_ir (cfg
, "insert_safepoints");
3602 /* after method_to_ir */
3604 if (MONO_METHOD_COMPILE_END_ENABLED ())
3605 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3610 if (header->num_clauses)
3611 cfg->disable_ssa = TRUE;
3614 //#define DEBUGSSA "logic_run"
3615 //#define DEBUGSSA_CLASS "Tests"
3618 if (!cfg
->disable_ssa
) {
3619 mono_local_cprop (cfg
);
3622 mono_ssa_compute (cfg
);
3626 if (cfg
->opt
& MONO_OPT_SSA
) {
3627 if (!(cfg
->comp_done
& MONO_COMP_SSA
) && !cfg
->disable_ssa
) {
3629 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_compute
, mono_ssa_compute (cfg
));
3630 mono_cfg_dump_ir (cfg
, "ssa_compute");
3633 if (cfg
->verbose_level
>= 2) {
3640 /* after SSA translation */
3642 if (MONO_METHOD_COMPILE_END_ENABLED ())
3643 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3647 if ((cfg
->opt
& MONO_OPT_CONSPROP
) || (cfg
->opt
& MONO_OPT_COPYPROP
)) {
3648 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3650 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_cprop
, mono_ssa_cprop (cfg
));
3651 mono_cfg_dump_ir (cfg
, "ssa_cprop");
3657 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3658 //mono_ssa_strength_reduction (cfg);
3660 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3661 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_deadce
, mono_ssa_deadce (cfg
));
3662 mono_cfg_dump_ir (cfg
, "ssa_deadce");
3665 if ((cfg
->flags
& (MONO_CFG_HAS_LDELEMA
|MONO_CFG_HAS_CHECK_THIS
)) && (cfg
->opt
& MONO_OPT_ABCREM
)) {
3666 MONO_TIME_TRACK (mono_jit_stats
.jit_perform_abc_removal
, mono_perform_abc_removal (cfg
));
3667 mono_cfg_dump_ir (cfg
, "perform_abc_removal");
3670 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_remove
, mono_ssa_remove (cfg
));
3671 mono_cfg_dump_ir (cfg
, "ssa_remove");
3672 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop2
, mono_local_cprop (cfg
));
3673 mono_cfg_dump_ir (cfg
, "local_cprop2");
3674 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs2
, mono_handle_global_vregs (cfg
));
3675 mono_cfg_dump_ir (cfg
, "handle_global_vregs2");
3676 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3677 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce2
, mono_local_deadce (cfg
));
3678 mono_cfg_dump_ir (cfg
, "local_deadce2");
3681 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3682 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches2
, mono_optimize_branches (cfg
));
3683 mono_cfg_dump_ir (cfg
, "optimize_branches2");
3688 if (cfg
->comp_done
& MONO_COMP_SSA
&& COMPILE_LLVM (cfg
)) {
3689 mono_ssa_loop_invariant_code_motion (cfg
);
3690 mono_cfg_dump_ir (cfg
, "loop_invariant_code_motion");
3691 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3692 if (cfg
->opt
& MONO_OPT_ABCREM
) {
3693 mono_perform_abc_removal (cfg
);
3694 mono_cfg_dump_ir (cfg
, "abc_removal");
3698 /* after SSA removal */
3700 if (MONO_METHOD_COMPILE_END_ENABLED ())
3701 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3705 if (cfg
->llvm_only
&& cfg
->gsharedvt
)
3706 mono_ssa_remove_gsharedvt (cfg
);
3708 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3709 if (COMPILE_SOFT_FLOAT (cfg
))
3710 mono_decompose_soft_float (cfg
);
3712 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_vtype_opts
, mono_decompose_vtype_opts (cfg
));
3713 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
) {
3714 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_array_access_opts
, mono_decompose_array_access_opts (cfg
));
3715 mono_cfg_dump_ir (cfg
, "decompose_array_access_opts");
3719 #ifndef MONO_ARCH_GOT_REG
3724 g_assert (cfg
->got_var_allocated
);
3727 * Allways allocate the GOT var to a register, because keeping it
3728 * in memory will increase the number of live temporaries in some
3729 * code created by inssel.brg, leading to the well known spills+
3730 * branches problem. Testcase: mcs crash in
3731 * System.MonoCustomAttrs:GetCustomAttributes.
3733 #ifdef MONO_ARCH_GOT_REG
3734 got_reg
= MONO_ARCH_GOT_REG
;
3736 regs
= mono_arch_get_global_int_regs (cfg
);
3738 got_reg
= GPOINTER_TO_INT (regs
->data
);
3741 cfg
->got_var
->opcode
= OP_REGVAR
;
3742 cfg
->got_var
->dreg
= got_reg
;
3743 cfg
->used_int_regs
|= 1LL << cfg
->got_var
->dreg
;
3747 * Have to call this again to process variables added since the first call.
3749 MONO_TIME_TRACK(mono_jit_stats
.jit_liveness_handle_exception_clauses2
, mono_liveness_handle_exception_clauses (cfg
));
3751 if (cfg
->opt
& MONO_OPT_LINEARS
) {
3752 GList
*vars
, *regs
, *l
;
3754 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3755 cfg
->comp_done
&= ~MONO_COMP_LIVENESS
;
3756 if (!(cfg
->comp_done
& MONO_COMP_LIVENESS
))
3757 MONO_TIME_TRACK (mono_jit_stats
.jit_analyze_liveness
, mono_analyze_liveness (cfg
));
3759 if ((vars
= mono_arch_get_allocatable_int_vars (cfg
))) {
3760 regs
= mono_arch_get_global_int_regs (cfg
);
3761 /* Remove the reg reserved for holding the GOT address */
3763 for (l
= regs
; l
; l
= l
->next
) {
3764 if (GPOINTER_TO_UINT (l
->data
) == cfg
->got_var
->dreg
) {
3765 regs
= g_list_delete_link (regs
, l
);
3770 MONO_TIME_TRACK (mono_jit_stats
.jit_linear_scan
, mono_linear_scan (cfg
, vars
, regs
, &cfg
->used_int_regs
));
3771 mono_cfg_dump_ir (cfg
, "linear_scan");
3775 //mono_print_code (cfg, "");
3779 /* variables are allocated after decompose, since decompose could create temps */
3780 if (!COMPILE_LLVM (cfg
)) {
3781 MONO_TIME_TRACK (mono_jit_stats
.jit_arch_allocate_vars
, mono_arch_allocate_vars (cfg
));
3782 mono_cfg_dump_ir (cfg
, "arch_allocate_vars");
3783 if (cfg
->exception_type
)
3788 mono_allocate_gsharedvt_vars (cfg
);
3790 if (!COMPILE_LLVM (cfg
)) {
3791 gboolean need_local_opts
;
3792 MONO_TIME_TRACK (mono_jit_stats
.jit_spill_global_vars
, mono_spill_global_vars (cfg
, &need_local_opts
));
3793 mono_cfg_dump_ir (cfg
, "spill_global_vars");
3795 if (need_local_opts
|| cfg
->compile_aot
) {
3796 /* To optimize code created by spill_global_vars */
3797 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop3
, mono_local_cprop (cfg
));
3798 if (cfg
->opt
& MONO_OPT_DEADCE
)
3799 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce3
, mono_local_deadce (cfg
));
3800 mono_cfg_dump_ir (cfg
, "needs_local_opts");
3804 mono_insert_branches_between_bblocks (cfg
);
3806 if (COMPILE_LLVM (cfg
)) {
3810 /* The IR has to be in SSA form for LLVM */
3811 if (!(cfg
->comp_done
& MONO_COMP_SSA
)) {
3812 cfg
->exception_message
= g_strdup ("SSA disabled.");
3813 cfg
->disable_llvm
= TRUE
;
3816 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
)
3817 mono_decompose_array_access_opts (cfg
);
3819 if (!cfg
->disable_llvm
)
3820 mono_llvm_emit_method (cfg
);
3821 if (cfg
->disable_llvm
) {
3822 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3823 //nm = mono_method_full_name (cfg->method, TRUE);
3824 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3827 if (cfg
->llvm_only
) {
3828 cfg
->disable_aot
= TRUE
;
3831 mono_destroy_compile (cfg
);
3833 goto restart_compile
;
3836 if (cfg
->verbose_level
> 0 && !cfg
->compile_aot
) {
3837 nm
= mono_method_get_full_name (cfg
->method
);
3838 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3840 cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
, cfg
->code_len
, cfg
->domain
->friendly_name
);
3845 MONO_TIME_TRACK (mono_jit_stats
.jit_codegen
, mono_codegen (cfg
));
3846 mono_cfg_dump_ir (cfg
, "codegen");
3847 if (cfg
->exception_type
)
3851 if (COMPILE_LLVM (cfg
))
3852 mono_atomic_inc_i32 (&mono_jit_stats
.methods_with_llvm
);
3854 mono_atomic_inc_i32 (&mono_jit_stats
.methods_without_llvm
);
3856 MONO_TIME_TRACK (mono_jit_stats
.jit_create_jit_info
, cfg
->jit_info
= create_jit_info (cfg
, method_to_compile
));
3858 if (cfg
->extend_live_ranges
) {
3859 /* Extend live ranges to cover the whole method */
3860 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
3861 MONO_VARINFO (cfg
, i
)->live_range_end
= cfg
->code_len
;
3864 MONO_TIME_TRACK (mono_jit_stats
.jit_gc_create_gc_map
, mini_gc_create_gc_map (cfg
));
3865 MONO_TIME_TRACK (mono_jit_stats
.jit_save_seq_point_info
, mono_save_seq_point_info (cfg
, cfg
->jit_info
));
3867 if (!cfg
->compile_aot
) {
3868 mono_save_xdebug_info (cfg
);
3869 mono_lldb_save_method_info (cfg
);
3872 if (cfg
->verbose_level
>= 2) {
3873 char *id
= mono_method_full_name (cfg
->method
, TRUE
);
3874 g_print ("\n*** ASM for %s ***\n", id
);
3875 mono_disassemble_code (cfg
, cfg
->native_code
, cfg
->code_len
, id
+ 3);
3876 g_print ("***\n\n");
3880 if (!cfg
->compile_aot
&& !(flags
& JIT_FLAG_DISCARD_RESULTS
)) {
3881 mono_domain_lock (cfg
->domain
);
3882 mono_jit_info_table_add (cfg
->domain
, cfg
->jit_info
);
3884 if (cfg
->method
->dynamic
)
3885 mono_dynamic_code_hash_lookup (cfg
->domain
, cfg
->method
)->ji
= cfg
->jit_info
;
3887 mono_postprocess_patches_after_ji_publish (cfg
);
3889 mono_domain_unlock (cfg
->domain
);
3894 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
3897 /* collect statistics */
3898 #ifndef DISABLE_PERFCOUNTERS
3899 mono_atomic_inc_i32 (&mono_perfcounters
->jit_methods
);
3900 mono_atomic_fetch_add_i32 (&mono_perfcounters
->jit_bytes
, header
->code_size
);
3902 gint32 code_size_ratio
= cfg
->code_len
;
3903 mono_atomic_fetch_add_i32 (&mono_jit_stats
.allocated_code_size
, code_size_ratio
);
3904 mono_atomic_fetch_add_i32 (&mono_jit_stats
.native_code_size
, code_size_ratio
);
3905 /* FIXME: use an explicit function to read booleans */
3906 if ((gboolean
)mono_atomic_load_i32 ((gint32
*)&mono_jit_stats
.enabled
)) {
3907 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.biggest_method_size
)) {
3908 mono_atomic_store_i32 (&mono_jit_stats
.biggest_method_size
, code_size_ratio
);
3909 char *biggest_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3910 biggest_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.biggest_method
, biggest_method
);
3911 g_free (biggest_method
);
3913 code_size_ratio
= (code_size_ratio
* 100) / header
->code_size
;
3914 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.max_code_size_ratio
)) {
3915 mono_atomic_store_i32 (&mono_jit_stats
.max_code_size_ratio
, code_size_ratio
);
3916 char *max_ratio_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3917 max_ratio_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.max_ratio_method
, max_ratio_method
);
3918 g_free (max_ratio_method
);
3922 if (MONO_METHOD_COMPILE_END_ENABLED ())
3923 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3925 mono_cfg_dump_close_group (cfg
);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Apparently answers whether KLASS has a variant (co-/contra-variant)
 * generic parameter instantiated with a reference type — TODO confirm: the
 * return statements were dropped in this extraction.
 *   NOTE(review): mangled extraction — original source line numbers are fused
 * into the text; the return type, declarations of `i' and `type', the
 * trailing else/return paths and closing braces were dropped. Restore from
 * the upstream file before compiling.
 */
3931 mini_class_has_reference_variant_generic_argument (MonoCompile
*cfg
, MonoClass
*klass
, int context_used
)
3934 MonoGenericContainer
*container
;
3935 MonoGenericInst
*ginst
;
/* Pick the generic container + instantiation: inflated class, or the generic
 * type definition when referenced from a shared method (context_used). */
3937 if (mono_class_is_ginst (klass
)) {
3938 container
= mono_class_get_generic_container (mono_class_get_generic_class (klass
)->container_class
);
3939 ginst
= mono_class_get_generic_class (klass
)->context
.class_inst
;
3940 } else if (mono_class_is_gtd (klass
) && context_used
) {
3941 container
= mono_class_get_generic_container (klass
);
3942 ginst
= container
->context
.class_inst
;
/* Walk the type arguments, considering only variant/covariant parameters. */
3947 for (i
= 0; i
< container
->type_argc
; ++i
) {
3949 if (!(mono_generic_container_get_param_info (container
, i
)->flags
& (MONO_GEN_PARAM_VARIANT
|MONO_GEN_PARAM_COVARIANT
)))
3951 type
= ginst
->type_argv
[i
];
3952 if (mini_type_is_reference (type
))
3959 mono_cfg_add_try_hole (MonoCompile
*cfg
, MonoExceptionClause
*clause
, guint8
*start
, MonoBasicBlock
*bb
)
3961 TryBlockHole
*hole
= (TryBlockHole
*)mono_mempool_alloc (cfg
->mempool
, sizeof (TryBlockHole
));
3962 hole
->clause
= clause
;
3963 hole
->start_offset
= start
- cfg
->native_code
;
3964 hole
->basic_block
= bb
;
3966 cfg
->try_block_holes
= g_slist_append_mempool (cfg
->mempool
, cfg
->try_block_holes
, hole
);
3970 mono_cfg_set_exception (MonoCompile
*cfg
, MonoExceptionType type
)
3972 cfg
->exception_type
= type
;
3975 /* Assumes ownership of the MSG argument */
3977 mono_cfg_set_exception_invalid_program (MonoCompile
*cfg
, char *msg
)
3979 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3980 mono_error_set_generic_error (cfg
->error
, "System", "InvalidProgramException", "%s", msg
);
3983 #endif /* DISABLE_JIT */
3985 gint64
mono_time_track_start ()
3987 return mono_100ns_ticks ();
3991 * mono_time_track_end:
3993 * Uses UnlockedAddDouble () to update \param time.
3995 void mono_time_track_end (gint64
*time
, gint64 start
)
3997 UnlockedAdd64 (time
, mono_100ns_ticks () - start
);
4001 * mono_update_jit_stats:
4003 * Only call this function in locked environments to avoid data races.
4005 MONO_NO_SANITIZE_THREAD
4007 mono_update_jit_stats (MonoCompile
*cfg
)
4009 mono_jit_stats
.allocate_var
+= cfg
->stat_allocate_var
;
4010 mono_jit_stats
.locals_stack_size
+= cfg
->stat_locals_stack_size
;
4011 mono_jit_stats
.basic_blocks
+= cfg
->stat_basic_blocks
;
4012 mono_jit_stats
.max_basic_blocks
= MAX (cfg
->stat_basic_blocks
, mono_jit_stats
.max_basic_blocks
);
4013 mono_jit_stats
.cil_code_size
+= cfg
->stat_cil_code_size
;
4014 mono_jit_stats
.regvars
+= cfg
->stat_n_regvars
;
4015 mono_jit_stats
.inlineable_methods
+= cfg
->stat_inlineable_methods
;
4016 mono_jit_stats
.inlined_methods
+= cfg
->stat_inlined_methods
;
4017 mono_jit_stats
.code_reallocs
+= cfg
->stat_code_reallocs
;
/*
4021 * mono_jit_compile_method_inner:
4023 * Main entry point for the JIT.
 *
 *   Compiles METHOD for TARGET_DOMAIN with optimization set OPT; on success
 * publishes the generated code in the domain, on failure reports a managed
 * exception through ERROR.
 *   NOTE(review): this block is a mangled extraction — original source line
 * numbers are fused into the text and many lines (the return type, local
 * declarations such as `cfg'/`start'/`vtable', labels, break/return
 * statements and braces) were dropped. Restore against the upstream file
 * before compiling.
 */
4026 mono_jit_compile_method_inner (MonoMethod
*method
, MonoDomain
*target_domain
, int opt
, MonoError
*error
)
4029 gpointer code
= NULL
;
4030 MonoJitInfo
*jinfo
, *info
;
4032 MonoException
*ex
= NULL
;
4034 MonoMethod
*prof_method
, *shared
;
/* Compile the method, tracking the wall-clock time spent in the JIT. */
4038 start
= mono_time_track_start ();
4039 cfg
= mini_method_compile (method
, opt
, target_domain
, JIT_FLAG_RUN_CCTORS
, 0, -1);
/* NOTE(review): gint64 initialized from the double literal 0.0 — harmless
 * after conversion, but should read 0. */
4040 gint64 jit_time
= 0.0;
4041 mono_time_track_end (&jit_time
, start
);
4042 UnlockedAdd64 (&mono_jit_stats
.jit_time
, jit_time
);
4044 prof_method
= cfg
->method
;
/* Map a failed compilation onto the managed exception reported to the caller. */
4046 switch (cfg
->exception_type
) {
4047 case MONO_EXCEPTION_NONE
:
4049 case MONO_EXCEPTION_TYPE_LOAD
:
4050 case MONO_EXCEPTION_MISSING_FIELD
:
4051 case MONO_EXCEPTION_MISSING_METHOD
:
4052 case MONO_EXCEPTION_FILE_NOT_FOUND
:
4053 case MONO_EXCEPTION_BAD_IMAGE
:
4054 case MONO_EXCEPTION_INVALID_PROGRAM
: {
4055 /* Throw a type load exception if needed */
4056 if (cfg
->exception_ptr
) {
4057 ex
= mono_class_get_exception_for_failure ((MonoClass
*)cfg
->exception_ptr
);
4059 if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_FIELD
)
4060 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingFieldException", cfg
->exception_message
);
4061 else if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_METHOD
)
4062 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingMethodException", cfg
->exception_message
);
4063 else if (cfg
->exception_type
== MONO_EXCEPTION_TYPE_LOAD
)
4064 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "TypeLoadException", cfg
->exception_message
);
4065 else if (cfg
->exception_type
== MONO_EXCEPTION_FILE_NOT_FOUND
)
4066 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System.IO", "FileNotFoundException", cfg
->exception_message
);
4067 else if (cfg
->exception_type
== MONO_EXCEPTION_BAD_IMAGE
)
4068 ex
= mono_get_exception_bad_image_format (cfg
->exception_message
);
4069 else if (cfg
->exception_type
== MONO_EXCEPTION_INVALID_PROGRAM
)
4070 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "InvalidProgramException", cfg
->exception_message
);
4072 g_assert_not_reached ();
4076 case MONO_EXCEPTION_MONO_ERROR
:
4077 // FIXME: MonoError has no copy ctor
4078 g_assert (!is_ok (cfg
->error
));
4079 ex
= mono_error_convert_to_exception (cfg
->error
);
4082 g_assert_not_reached ();
/* Failure path: notify the profiler, free the compile, hand EX to the caller. */
4086 MONO_PROFILER_RAISE (jit_failed
, (method
));
4088 mono_destroy_compile (cfg
);
4089 mono_error_set_exception_instance (error
, ex
);
/* Success path: generic-sharable methods are looked up via their shared form. */
4094 if (mono_method_is_generic_sharable (method
, FALSE
)) {
4095 shared
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
4096 if (!is_ok (error
)) {
4097 MONO_PROFILER_RAISE (jit_failed
, (method
));
4098 mono_destroy_compile (cfg
);
/* Publication of the generated code happens under the domain lock. */
4105 mono_domain_lock (target_domain
);
4107 /* Check if some other thread already did the job. In this case, we can
4108 discard the code this thread generated. */
4110 info
= mini_lookup_method (target_domain
, method
, shared
);
4112 /* We can't use a domain specific method in another domain */
4113 if ((target_domain
== mono_domain_get ()) || info
->domain_neutral
) {
4114 code
= info
->code_start
;
4116 discarded_jit_time
+= jit_time
;
4120 /* The lookup + insert is atomic since this is done inside the domain lock */
4121 mono_domain_jit_code_hash_lock (target_domain
);
4122 mono_internal_hash_table_insert (&target_domain
->jit_code_hash
, cfg
->jit_info
->d
.method
, cfg
->jit_info
);
4123 mono_domain_jit_code_hash_unlock (target_domain
);
4125 code
= cfg
->native_code
;
4127 if (cfg
->gshared
&& mono_method_is_generic_sharable (method
, FALSE
))
4128 mono_atomic_inc_i32 (&mono_stats
.generics_shared_methods
);
4130 mono_atomic_inc_i32 (&mono_stats
.gsharedvt_methods
);
4133 jinfo
= cfg
->jit_info
;
/*
4136 * Update global stats while holding a lock, instead of doing many
4137 * mono_atomic_inc_i32 operations during JITting.
 */
4139 mono_update_jit_stats (cfg
);
4141 mono_destroy_compile (cfg
);
4143 mini_patch_llvm_jit_callees (target_domain
, method
, code
);
4145 mono_emit_jit_map (jinfo
);
4147 mono_domain_unlock (target_domain
);
/* Outside the lock: fetch the vtable and notify the profiler. */
4152 vtable
= mono_class_vtable_checked (target_domain
, method
->klass
, error
);
4153 return_val_if_nok (error
, NULL
);
4155 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
4156 if (mono_marshal_method_from_wrapper (method
)) {
4157 /* Native func wrappers have no method */
4158 /* The profiler doesn't know about wrappers, so pass the original icall method */
4159 MONO_PROFILER_RAISE (jit_done
, (mono_marshal_method_from_wrapper (method
), jinfo
));
4162 MONO_PROFILER_RAISE (jit_done
, (method
, jinfo
));
4163 if (prof_method
!= method
)
4164 MONO_PROFILER_RAISE (jit_done
, (prof_method
, jinfo
));
/* Trigger the class .cctor unless this is a remoting/xdomain invoke wrapper. */
4166 if (!(method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE
||
4167 method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
||
4168 method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
)) {
4169 if (!mono_runtime_class_init_full (vtable
, error
))
4176 * mini_get_underlying_type:
4178 * Return the type the JIT will use during compilation.
4179 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4180 * For gsharedvt types, it will return the original VAR/MVAR.
4183 mini_get_underlying_type (MonoType
*type
)
4185 return mini_type_get_underlying_type (type
);
4189 mini_jit_init (void)
4191 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &discarded_code
);
4192 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT
| MONO_COUNTER_LONG
| MONO_COUNTER_TIME
, &discarded_jit_time
);
4193 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &jinfo_try_holes_size
);
4195 mono_os_mutex_init_recursive (&jit_mutex
);
4197 current_backend
= g_new0 (MonoBackend
, 1);
4198 init_backend (current_backend
);
/*
 * mini_jit_cleanup:
 *
 *   Frees the opcode-emulation tables (emul_opcode_map / emul_opcode_opcodes)
 * allocated while the JIT was running. g_free (NULL) is a no-op, so this is
 * safe even if the tables were never created.
 */
4203 mini_jit_cleanup (void)
/* NOTE(review): mangled extraction — the opening brace (and possibly an
 * #ifndef DISABLE_JIT guard) between the signature and the g_free calls
 * was dropped; restore from the upstream file before compiling. */
4206 g_free (emul_opcode_map
);
4207 g_free (emul_opcode_opcodes
);
4213 mono_llvm_emit_aot_file_info (MonoAotFileInfo
*info
, gboolean has_jitted_code
)
4215 g_assert_not_reached ();
4218 void mono_llvm_emit_aot_data (const char *symbol
, guint8
*data
, int data_len
)
4220 g_assert_not_reached ();
4225 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/* Stub used when neither ENABLE_LLVM_RUNTIME nor ENABLE_LLVM is defined
 * (see the #if above). */
void
mono_llvm_cpp_throw_exception (void)
{
	g_assert_not_reached ();
}
4234 mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb
, gpointer arg
, gboolean
*out_thrown
)
4236 g_assert_not_reached ();
4244 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
4246 g_assert_not_reached ();
4251 mono_destroy_compile (MonoCompile
*cfg
)
4253 g_assert_not_reached ();
4257 mono_add_patch_info (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
4259 g_assert_not_reached ();
4262 #else // DISABLE_JIT
/*
 * mini_realloc_code_slow:
 *
 *   Slow path of native-code buffer growth: when the buffer cannot hold SIZE
 * more bytes (keeping a 16-byte safety margin), repeatedly double
 * cfg->code_size until it fits, reallocate cfg->native_code and count the
 * reallocation in the per-compile stats. Returns the current write position
 * (cfg->native_code + cfg->code_len).
 *   NOTE(review): mangled extraction — the return-type line and braces were
 * dropped; original source line numbers are fused into the text.
 */
4265 mini_realloc_code_slow (MonoCompile
*cfg
, int size
)
4267 const int EXTRA_CODE_SPACE
= 16;
/* Geometric growth (x2 + margin) keeps repeated small requests amortized O(1). */
4269 if (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
)) {
4270 while (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
))
4271 cfg
->code_size
= cfg
->code_size
* 2 + EXTRA_CODE_SPACE
;
4272 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4273 cfg
->stat_code_reallocs
++;
4275 return cfg
->native_code
+ cfg
->code_len
;
4278 #endif /* DISABLE_JIT */
4281 mini_class_is_system_array (MonoClass
*klass
)
4283 return m_class_get_parent (klass
) == mono_defaults
.array_class
;
4287 * mono_target_pagesize:
4289 * query pagesize used to determine if an implicit NRE can be used
4292 mono_target_pagesize (void)
4294 /* We could query the system's pagesize via mono_pagesize (), however there
4295 * are pitfalls: sysconf (3) is called on some posix like systems, and per
4296 * POSIX.1-2008 this function doesn't have to be async-safe. Since this
4297 * function can be called from a signal handler, we simplify things by
4298 * using 4k on all targets. Implicit null-checks with an offset larger than
4299 * 4k are _very_ uncommon, so we don't mind emitting an explicit null-check