3 * The new Mono code generator.
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
23 #ifdef HAVE_SYS_TIME_H
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
64 #include <mono/utils/mono-time.h>
67 #include "seq-points.h"
75 #include "jit-icalls.h"
78 #include "debugger-agent.h"
79 #include "llvm-runtime.h"
80 #include "mini-llvm.h"
82 #include "aot-runtime.h"
83 #include "mini-runtime.h"
85 MonoCallSpec
*mono_jit_trace_calls
;
86 MonoMethodDesc
*mono_inject_async_exc_method
;
87 int mono_inject_async_exc_pos
;
88 MonoMethodDesc
*mono_break_at_bb_method
;
89 int mono_break_at_bb_bb_num
;
90 gboolean mono_do_x86_stack_align
= TRUE
;
91 gboolean mono_using_xdebug
;
94 static guint32 discarded_code
;
95 static gint64 discarded_jit_time
;
96 static guint32 jinfo_try_holes_size
;
98 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
99 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
100 static mono_mutex_t jit_mutex
;
102 static MonoBackend
*current_backend
;
107 mono_realloc_native_code (MonoCompile
*cfg
)
109 return g_realloc (cfg
->native_code
, cfg
->code_size
);
113 MonoExceptionClause
*clause
;
114 MonoBasicBlock
*basic_block
;
119 * mono_emit_unwind_op:
121 * Add an unwind op with the given parameters for the list of unwind ops stored in
125 mono_emit_unwind_op (MonoCompile
*cfg
, int when
, int tag
, int reg
, int val
)
127 MonoUnwindOp
*op
= (MonoUnwindOp
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoUnwindOp
));
134 cfg
->unwind_ops
= g_slist_append_mempool (cfg
->mempool
, cfg
->unwind_ops
, op
);
135 if (cfg
->verbose_level
> 1) {
138 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when
, mono_arch_regname (reg
), val
);
140 case DW_CFA_def_cfa_register
:
141 printf ("CFA: [%x] def_cfa_reg: %s\n", when
, mono_arch_regname (reg
));
143 case DW_CFA_def_cfa_offset
:
144 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when
, val
);
147 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when
, mono_arch_regname (reg
), -val
);
154 * mono_unlink_bblock:
156 * Unlink two basic blocks.
159 mono_unlink_bblock (MonoCompile
*cfg
, MonoBasicBlock
*from
, MonoBasicBlock
* to
)
165 for (i
= 0; i
< from
->out_count
; ++i
) {
166 if (to
== from
->out_bb
[i
]) {
173 for (i
= 0; i
< from
->out_count
; ++i
) {
174 if (from
->out_bb
[i
] != to
)
175 from
->out_bb
[pos
++] = from
->out_bb
[i
];
177 g_assert (pos
== from
->out_count
- 1);
182 for (i
= 0; i
< to
->in_count
; ++i
) {
183 if (from
== to
->in_bb
[i
]) {
190 for (i
= 0; i
< to
->in_count
; ++i
) {
191 if (to
->in_bb
[i
] != from
)
192 to
->in_bb
[pos
++] = to
->in_bb
[i
];
194 g_assert (pos
== to
->in_count
- 1);
200 * mono_bblocks_linked:
202 * Return whenever BB1 and BB2 are linked in the CFG.
205 mono_bblocks_linked (MonoBasicBlock
*bb1
, MonoBasicBlock
*bb2
)
209 for (i
= 0; i
< bb1
->out_count
; ++i
) {
210 if (bb1
->out_bb
[i
] == bb2
)
218 mono_find_block_region_notry (MonoCompile
*cfg
, int offset
)
220 MonoMethodHeader
*header
= cfg
->header
;
221 MonoExceptionClause
*clause
;
224 for (i
= 0; i
< header
->num_clauses
; ++i
) {
225 clause
= &header
->clauses
[i
];
226 if ((clause
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) && (offset
>= clause
->data
.filter_offset
) &&
227 (offset
< (clause
->handler_offset
)))
228 return ((i
+ 1) << 8) | MONO_REGION_FILTER
| clause
->flags
;
230 if (MONO_OFFSET_IN_HANDLER (clause
, offset
)) {
231 if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
)
232 return ((i
+ 1) << 8) | MONO_REGION_FINALLY
| clause
->flags
;
233 else if (clause
->flags
== MONO_EXCEPTION_CLAUSE_FAULT
)
234 return ((i
+ 1) << 8) | MONO_REGION_FAULT
| clause
->flags
;
236 return ((i
+ 1) << 8) | MONO_REGION_CATCH
| clause
->flags
;
244 * mono_get_block_region_notry:
246 * Return the region corresponding to REGION, ignoring try clauses nested inside
250 mono_get_block_region_notry (MonoCompile
*cfg
, int region
)
252 if ((region
& (0xf << 4)) == MONO_REGION_TRY
) {
253 MonoMethodHeader
*header
= cfg
->header
;
256 * This can happen if a try clause is nested inside a finally clause.
258 int clause_index
= (region
>> 8) - 1;
259 g_assert (clause_index
>= 0 && clause_index
< header
->num_clauses
);
261 region
= mono_find_block_region_notry (cfg
, header
->clauses
[clause_index
].try_offset
);
268 mono_find_spvar_for_region (MonoCompile
*cfg
, int region
)
270 region
= mono_get_block_region_notry (cfg
, region
);
272 return (MonoInst
*)g_hash_table_lookup (cfg
->spvars
, GINT_TO_POINTER (region
));
276 df_visit (MonoBasicBlock
*start
, int *dfn
, MonoBasicBlock
**array
)
280 array
[*dfn
] = start
;
281 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
282 for (i
= 0; i
< start
->out_count
; ++i
) {
283 if (start
->out_bb
[i
]->dfn
)
286 start
->out_bb
[i
]->dfn
= *dfn
;
287 start
->out_bb
[i
]->df_parent
= start
;
288 array
[*dfn
] = start
->out_bb
[i
];
289 df_visit (start
->out_bb
[i
], dfn
, array
);
294 mono_reverse_branch_op (guint32 opcode
)
296 static const int reverse_map
[] = {
297 CEE_BNE_UN
, CEE_BLT
, CEE_BLE
, CEE_BGT
, CEE_BGE
,
298 CEE_BEQ
, CEE_BLT_UN
, CEE_BLE_UN
, CEE_BGT_UN
, CEE_BGE_UN
300 static const int reverse_fmap
[] = {
301 OP_FBNE_UN
, OP_FBLT
, OP_FBLE
, OP_FBGT
, OP_FBGE
,
302 OP_FBEQ
, OP_FBLT_UN
, OP_FBLE_UN
, OP_FBGT_UN
, OP_FBGE_UN
304 static const int reverse_lmap
[] = {
305 OP_LBNE_UN
, OP_LBLT
, OP_LBLE
, OP_LBGT
, OP_LBGE
,
306 OP_LBEQ
, OP_LBLT_UN
, OP_LBLE_UN
, OP_LBGT_UN
, OP_LBGE_UN
308 static const int reverse_imap
[] = {
309 OP_IBNE_UN
, OP_IBLT
, OP_IBLE
, OP_IBGT
, OP_IBGE
,
310 OP_IBEQ
, OP_IBLT_UN
, OP_IBLE_UN
, OP_IBGT_UN
, OP_IBGE_UN
313 if (opcode
>= CEE_BEQ
&& opcode
<= CEE_BLT_UN
) {
314 opcode
= reverse_map
[opcode
- CEE_BEQ
];
315 } else if (opcode
>= OP_FBEQ
&& opcode
<= OP_FBLT_UN
) {
316 opcode
= reverse_fmap
[opcode
- OP_FBEQ
];
317 } else if (opcode
>= OP_LBEQ
&& opcode
<= OP_LBLT_UN
) {
318 opcode
= reverse_lmap
[opcode
- OP_LBEQ
];
319 } else if (opcode
>= OP_IBEQ
&& opcode
<= OP_IBLT_UN
) {
320 opcode
= reverse_imap
[opcode
- OP_IBEQ
];
322 g_assert_not_reached ();
328 mono_type_to_store_membase (MonoCompile
*cfg
, MonoType
*type
)
330 type
= mini_get_underlying_type (type
);
333 switch (type
->type
) {
336 return OP_STOREI1_MEMBASE_REG
;
339 return OP_STOREI2_MEMBASE_REG
;
342 return OP_STOREI4_MEMBASE_REG
;
346 case MONO_TYPE_FNPTR
:
347 return OP_STORE_MEMBASE_REG
;
348 case MONO_TYPE_CLASS
:
349 case MONO_TYPE_STRING
:
350 case MONO_TYPE_OBJECT
:
351 case MONO_TYPE_SZARRAY
:
352 case MONO_TYPE_ARRAY
:
353 return OP_STORE_MEMBASE_REG
;
356 return OP_STOREI8_MEMBASE_REG
;
358 return OP_STORER4_MEMBASE_REG
;
360 return OP_STORER8_MEMBASE_REG
;
361 case MONO_TYPE_VALUETYPE
:
362 if (m_class_is_enumtype (type
->data
.klass
)) {
363 type
= mono_class_enum_basetype_internal (type
->data
.klass
);
366 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
367 return OP_STOREX_MEMBASE
;
368 return OP_STOREV_MEMBASE
;
369 case MONO_TYPE_TYPEDBYREF
:
370 return OP_STOREV_MEMBASE
;
371 case MONO_TYPE_GENERICINST
:
372 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
373 return OP_STOREX_MEMBASE
;
374 type
= m_class_get_byval_arg (type
->data
.generic_class
->container_class
);
378 g_assert (mini_type_var_is_vt (type
));
379 return OP_STOREV_MEMBASE
;
381 g_error ("unknown type 0x%02x in type_to_store_membase", type
->type
);
387 mono_type_to_load_membase (MonoCompile
*cfg
, MonoType
*type
)
389 type
= mini_get_underlying_type (type
);
391 switch (type
->type
) {
393 return OP_LOADI1_MEMBASE
;
395 return OP_LOADU1_MEMBASE
;
397 return OP_LOADI2_MEMBASE
;
399 return OP_LOADU2_MEMBASE
;
401 return OP_LOADI4_MEMBASE
;
403 return OP_LOADU4_MEMBASE
;
407 case MONO_TYPE_FNPTR
:
408 return OP_LOAD_MEMBASE
;
409 case MONO_TYPE_CLASS
:
410 case MONO_TYPE_STRING
:
411 case MONO_TYPE_OBJECT
:
412 case MONO_TYPE_SZARRAY
:
413 case MONO_TYPE_ARRAY
:
414 return OP_LOAD_MEMBASE
;
417 return OP_LOADI8_MEMBASE
;
419 return OP_LOADR4_MEMBASE
;
421 return OP_LOADR8_MEMBASE
;
422 case MONO_TYPE_VALUETYPE
:
423 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
424 return OP_LOADX_MEMBASE
;
425 case MONO_TYPE_TYPEDBYREF
:
426 return OP_LOADV_MEMBASE
;
427 case MONO_TYPE_GENERICINST
:
428 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (type
)))
429 return OP_LOADX_MEMBASE
;
430 if (mono_type_generic_inst_is_valuetype (type
))
431 return OP_LOADV_MEMBASE
;
433 return OP_LOAD_MEMBASE
;
437 g_assert (cfg
->gshared
);
438 g_assert (mini_type_var_is_vt (type
));
439 return OP_LOADV_MEMBASE
;
441 g_error ("unknown type 0x%02x in type_to_load_membase", type
->type
);
447 mini_type_to_stind (MonoCompile
* cfg
, MonoType
*type
)
449 type
= mini_get_underlying_type (type
);
450 if (cfg
->gshared
&& !type
->byref
&& (type
->type
== MONO_TYPE_VAR
|| type
->type
== MONO_TYPE_MVAR
)) {
451 g_assert (mini_type_var_is_vt (type
));
454 return mono_type_to_stind (type
);
/*
 * mono_op_imm_to_op:
 *
 * NOTE(review): extraction-garbled fragment. This appears to be the remains
 * of mono_op_imm_to_op, which maps an OP_.._IMM opcode to its non-immediate
 * variant (presumably returning -1 when there is none — confirm against
 * upstream). Almost the whole switch body is missing from this view, so
 * only comments are added here.
 */
458 mono_op_imm_to_op (int opcode
)
/* The repeated SIZEOF_REGISTER checks pick 32-bit vs 64-bit opcode variants. */
462 #if SIZEOF_REGISTER == 4
480 #if SIZEOF_REGISTER == 4
486 #if SIZEOF_REGISTER == 4
492 #if SIZEOF_REGISTER == 4
538 #if SIZEOF_REGISTER == 4
544 #if SIZEOF_REGISTER == 4
/* Surviving case labels of the immediate-to-register mapping: */
563 case OP_ICOMPARE_IMM
:
565 case OP_LOCALLOC_IMM
:
573 * mono_decompose_op_imm:
575 * Replace the OP_.._IMM INS with its non IMM variant.
578 mono_decompose_op_imm (MonoCompile
*cfg
, MonoBasicBlock
*bb
, MonoInst
*ins
)
580 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
583 const char *spec
= INS_INFO (ins
->opcode
);
585 if (spec
[MONO_INST_SRC2
] == 'l') {
586 dreg
= mono_alloc_lreg (cfg
);
588 /* Load the 64bit constant using decomposed ops */
589 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
590 temp
->inst_c0
= ins_get_l_low (ins
);
591 temp
->dreg
= MONO_LVREG_LS (dreg
);
592 mono_bblock_insert_before_ins (bb
, ins
, temp
);
594 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
595 temp
->inst_c0
= ins_get_l_high (ins
);
596 temp
->dreg
= MONO_LVREG_MS (dreg
);
598 dreg
= mono_alloc_ireg (cfg
);
600 MONO_INST_NEW (cfg
, temp
, OP_ICONST
);
601 temp
->inst_c0
= ins
->inst_imm
;
605 mono_bblock_insert_before_ins (bb
, ins
, temp
);
608 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
609 ins
->opcode
= opcode2
;
611 if (ins
->opcode
== OP_LOCALLOC
)
616 bb
->max_vreg
= MAX (bb
->max_vreg
, cfg
->next_vreg
);
620 set_vreg_to_inst (MonoCompile
*cfg
, int vreg
, MonoInst
*inst
)
622 if (vreg
>= cfg
->vreg_to_inst_len
) {
623 MonoInst
**tmp
= cfg
->vreg_to_inst
;
624 int size
= cfg
->vreg_to_inst_len
;
626 while (vreg
>= cfg
->vreg_to_inst_len
)
627 cfg
->vreg_to_inst_len
= cfg
->vreg_to_inst_len
? cfg
->vreg_to_inst_len
* 2 : 32;
628 cfg
->vreg_to_inst
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
*) * cfg
->vreg_to_inst_len
);
630 memcpy (cfg
->vreg_to_inst
, tmp
, size
* sizeof (MonoInst
*));
632 cfg
->vreg_to_inst
[vreg
] = inst
;
635 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
636 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
639 mono_compile_create_var_for_vreg (MonoCompile
*cfg
, MonoType
*type
, int opcode
, int vreg
)
642 int num
= cfg
->num_varinfo
;
645 type
= mini_get_underlying_type (type
);
647 if ((num
+ 1) >= cfg
->varinfo_count
) {
648 int orig_count
= cfg
->varinfo_count
;
649 cfg
->varinfo_count
= cfg
->varinfo_count
? (cfg
->varinfo_count
* 2) : 32;
650 cfg
->varinfo
= (MonoInst
**)g_realloc (cfg
->varinfo
, sizeof (MonoInst
*) * cfg
->varinfo_count
);
651 cfg
->vars
= (MonoMethodVar
*)g_realloc (cfg
->vars
, sizeof (MonoMethodVar
) * cfg
->varinfo_count
);
652 memset (&cfg
->vars
[orig_count
], 0, (cfg
->varinfo_count
- orig_count
) * sizeof (MonoMethodVar
));
655 cfg
->stat_allocate_var
++;
657 MONO_INST_NEW (cfg
, inst
, opcode
);
659 inst
->inst_vtype
= type
;
660 inst
->klass
= mono_class_from_mono_type_internal (type
);
661 mini_type_to_eval_stack_type (cfg
, type
, inst
);
662 /* if set to 1 the variable is native */
663 inst
->backend
.is_pinvoke
= 0;
666 if (mono_class_has_failure (inst
->klass
))
667 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
669 if (cfg
->compute_gc_maps
) {
671 mono_mark_vreg_as_mp (cfg
, vreg
);
673 if ((MONO_TYPE_ISSTRUCT (type
) && m_class_has_references (inst
->klass
)) || mini_type_is_reference (type
)) {
674 inst
->flags
|= MONO_INST_GC_TRACK
;
675 mono_mark_vreg_as_ref (cfg
, vreg
);
680 cfg
->varinfo
[num
] = inst
;
682 cfg
->vars
[num
].idx
= num
;
683 cfg
->vars
[num
].vreg
= vreg
;
684 cfg
->vars
[num
].range
.first_use
.pos
.bid
= 0xffff;
685 cfg
->vars
[num
].reg
= -1;
688 set_vreg_to_inst (cfg
, vreg
, inst
);
690 #if SIZEOF_REGISTER == 4
691 if (mono_arch_is_soft_float ()) {
692 regpair
= mono_type_is_long (type
) || mono_type_is_float (type
);
694 regpair
= mono_type_is_long (type
);
704 * These two cannot be allocated using create_var_for_vreg since that would
705 * put it into the cfg->varinfo array, confusing many parts of the JIT.
709 * Set flags to VOLATILE so SSA skips it.
712 if (cfg
->verbose_level
>= 4) {
713 printf (" Create LVAR R%d (R%d, R%d)\n", inst
->dreg
, MONO_LVREG_LS (inst
->dreg
), MONO_LVREG_MS (inst
->dreg
));
716 if (mono_arch_is_soft_float () && cfg
->opt
& MONO_OPT_SSA
) {
717 if (mono_type_is_float (type
))
718 inst
->flags
= MONO_INST_VOLATILE
;
721 /* Allocate a dummy MonoInst for the first vreg */
722 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
723 tree
->dreg
= MONO_LVREG_LS (inst
->dreg
);
724 if (cfg
->opt
& MONO_OPT_SSA
)
725 tree
->flags
= MONO_INST_VOLATILE
;
727 tree
->type
= STACK_I4
;
728 tree
->inst_vtype
= mono_get_int32_type ();
729 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
731 set_vreg_to_inst (cfg
, MONO_LVREG_LS (inst
->dreg
), tree
);
733 /* Allocate a dummy MonoInst for the second vreg */
734 MONO_INST_NEW (cfg
, tree
, OP_LOCAL
);
735 tree
->dreg
= MONO_LVREG_MS (inst
->dreg
);
736 if (cfg
->opt
& MONO_OPT_SSA
)
737 tree
->flags
= MONO_INST_VOLATILE
;
739 tree
->type
= STACK_I4
;
740 tree
->inst_vtype
= mono_get_int32_type ();
741 tree
->klass
= mono_class_from_mono_type_internal (tree
->inst_vtype
);
743 set_vreg_to_inst (cfg
, MONO_LVREG_MS (inst
->dreg
), tree
);
747 if (cfg
->verbose_level
> 2)
748 g_print ("created temp %d (R%d) of type %s\n", num
, vreg
, mono_type_get_name (type
));
754 mono_compile_create_var (MonoCompile
*cfg
, MonoType
*type
, int opcode
)
758 #ifdef ENABLE_NETCORE
759 if (type
->type
== MONO_TYPE_VALUETYPE
&& !type
->byref
) {
760 MonoClass
*klass
= mono_class_from_mono_type_internal (type
);
761 if (m_class_is_enumtype (klass
) && m_class_get_image (klass
) == mono_get_corlib () && !strcmp (m_class_get_name (klass
), "StackCrawlMark")) {
762 if (!(cfg
->method
->flags
& METHOD_ATTRIBUTE_REQSECOBJ
))
763 g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg
->method
));
768 type
= mini_get_underlying_type (type
);
770 if (mono_type_is_long (type
))
771 dreg
= mono_alloc_dreg (cfg
, STACK_I8
);
772 else if (mono_arch_is_soft_float () && mono_type_is_float (type
))
773 dreg
= mono_alloc_dreg (cfg
, STACK_R8
);
775 /* All the others are unified */
776 dreg
= mono_alloc_preg (cfg
);
778 return mono_compile_create_var_for_vreg (cfg
, type
, opcode
, dreg
);
782 mini_get_int_to_float_spill_area (MonoCompile
*cfg
)
785 if (!cfg
->iconv_raw_var
) {
786 cfg
->iconv_raw_var
= mono_compile_create_var (cfg
, mono_get_int32_type (), OP_LOCAL
);
787 cfg
->iconv_raw_var
->flags
|= MONO_INST_VOLATILE
; /*FIXME, use the don't regalloc flag*/
789 return cfg
->iconv_raw_var
;
796 mono_mark_vreg_as_ref (MonoCompile
*cfg
, int vreg
)
798 if (vreg
>= cfg
->vreg_is_ref_len
) {
799 gboolean
*tmp
= cfg
->vreg_is_ref
;
800 int size
= cfg
->vreg_is_ref_len
;
802 while (vreg
>= cfg
->vreg_is_ref_len
)
803 cfg
->vreg_is_ref_len
= cfg
->vreg_is_ref_len
? cfg
->vreg_is_ref_len
* 2 : 32;
804 cfg
->vreg_is_ref
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_ref_len
);
806 memcpy (cfg
->vreg_is_ref
, tmp
, size
* sizeof (gboolean
));
808 cfg
->vreg_is_ref
[vreg
] = TRUE
;
812 mono_mark_vreg_as_mp (MonoCompile
*cfg
, int vreg
)
814 if (vreg
>= cfg
->vreg_is_mp_len
) {
815 gboolean
*tmp
= cfg
->vreg_is_mp
;
816 int size
= cfg
->vreg_is_mp_len
;
818 while (vreg
>= cfg
->vreg_is_mp_len
)
819 cfg
->vreg_is_mp_len
= cfg
->vreg_is_mp_len
? cfg
->vreg_is_mp_len
* 2 : 32;
820 cfg
->vreg_is_mp
= (gboolean
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (gboolean
) * cfg
->vreg_is_mp_len
);
822 memcpy (cfg
->vreg_is_mp
, tmp
, size
* sizeof (gboolean
));
824 cfg
->vreg_is_mp
[vreg
] = TRUE
;
828 type_from_stack_type (MonoInst
*ins
)
831 case STACK_I4
: return mono_get_int32_type ();
832 case STACK_I8
: return m_class_get_byval_arg (mono_defaults
.int64_class
);
833 case STACK_PTR
: return mono_get_int_type ();
834 case STACK_R8
: return m_class_get_byval_arg (mono_defaults
.double_class
);
837 * this if used to be commented without any specific reason, but
838 * it breaks #80235 when commented
841 return m_class_get_this_arg (ins
->klass
);
843 return m_class_get_this_arg (mono_defaults
.object_class
);
845 /* ins->klass may not be set for ldnull.
846 * Also, if we have a boxed valuetype, we want an object lass,
847 * not the valuetype class
849 if (ins
->klass
&& !m_class_is_valuetype (ins
->klass
))
850 return m_class_get_byval_arg (ins
->klass
);
851 return mono_get_object_type ();
852 case STACK_VTYPE
: return m_class_get_byval_arg (ins
->klass
);
854 g_error ("stack type %d to montype not handled\n", ins
->type
);
860 mono_type_from_stack_type (MonoInst
*ins
)
862 return type_from_stack_type (ins
);
866 * mono_add_ins_to_end:
868 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
871 mono_add_ins_to_end (MonoBasicBlock
*bb
, MonoInst
*inst
)
876 MONO_ADD_INS (bb
, inst
);
880 switch (bb
->last_ins
->opcode
) {
894 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
897 if (MONO_IS_COND_BRANCH_OP (bb
->last_ins
)) {
898 /* Need to insert the ins before the compare */
899 if (bb
->code
== bb
->last_ins
) {
900 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
904 if (bb
->code
->next
== bb
->last_ins
) {
905 /* Only two instructions */
906 opcode
= bb
->code
->opcode
;
908 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
910 mono_bblock_insert_before_ins (bb
, bb
->code
, inst
);
912 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
915 opcode
= bb
->last_ins
->prev
->opcode
;
917 if ((opcode
== OP_COMPARE
) || (opcode
== OP_COMPARE_IMM
) || (opcode
== OP_ICOMPARE
) || (opcode
== OP_ICOMPARE_IMM
) || (opcode
== OP_FCOMPARE
) || (opcode
== OP_LCOMPARE
) || (opcode
== OP_LCOMPARE_IMM
) || (opcode
== OP_RCOMPARE
)) {
919 mono_bblock_insert_before_ins (bb
, bb
->last_ins
->prev
, inst
);
921 mono_bblock_insert_before_ins (bb
, bb
->last_ins
, inst
);
926 MONO_ADD_INS (bb
, inst
);
932 mono_create_jump_table (MonoCompile
*cfg
, MonoInst
*label
, MonoBasicBlock
**bbs
, int num_blocks
)
934 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfo
));
935 MonoJumpInfoBBTable
*table
;
937 table
= (MonoJumpInfoBBTable
*)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoJumpInfoBBTable
));
939 table
->table_size
= num_blocks
;
941 ji
->ip
.label
= label
;
942 ji
->type
= MONO_PATCH_INFO_SWITCH
;
943 ji
->data
.table
= table
;
944 ji
->next
= cfg
->patch_info
;
945 cfg
->patch_info
= ji
;
949 mini_assembly_can_skip_verification (MonoDomain
*domain
, MonoMethod
*method
)
951 MonoAssembly
*assembly
= m_class_get_image (method
->klass
)->assembly
;
952 if (method
->wrapper_type
!= MONO_WRAPPER_NONE
&& method
->wrapper_type
!= MONO_WRAPPER_DYNAMIC_METHOD
)
954 if (assembly
->in_gac
|| assembly
->image
== mono_defaults
.corlib
)
956 return mono_assembly_has_skip_verification (assembly
);
960 * mini_method_verify:
962 * Verify the method using the verfier.
964 * Returns true if the method is invalid.
967 mini_method_verify (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
970 gboolean is_fulltrust
;
972 if (method
->verification_success
)
975 if (!mono_verifier_is_enabled_for_method (method
))
978 /*skip verification implies the assembly must be */
979 is_fulltrust
= mono_verifier_is_method_full_trust (method
) || mini_assembly_can_skip_verification (cfg
->domain
, method
);
981 res
= mono_method_verify_with_current_settings (method
, cfg
->skip_visibility
, is_fulltrust
);
984 for (tmp
= res
; tmp
; tmp
= tmp
->next
) {
985 MonoVerifyInfoExtended
*info
= (MonoVerifyInfoExtended
*)tmp
->data
;
986 if (info
->info
.status
== MONO_VERIFY_ERROR
) {
988 char *method_name
= mono_method_full_name (method
, TRUE
);
989 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
990 cfg
->exception_message
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
991 g_free (method_name
);
993 mono_free_verify_list (res
);
996 if (info
->info
.status
== MONO_VERIFY_NOT_VERIFIABLE
&& (!is_fulltrust
|| info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
|| info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)) {
998 char *method_name
= mono_method_full_name (method
, TRUE
);
999 char *msg
= g_strdup_printf ("Error verifying %s: %s", method_name
, info
->info
.message
);
1001 if (info
->exception_type
== MONO_EXCEPTION_METHOD_ACCESS
)
1002 mono_error_set_generic_error (&cfg
->error
, "System", "MethodAccessException", "%s", msg
);
1003 else if (info
->exception_type
== MONO_EXCEPTION_FIELD_ACCESS
)
1004 mono_error_set_generic_error (&cfg
->error
, "System", "FieldAccessException", "%s", msg
);
1005 else if (info
->exception_type
== MONO_EXCEPTION_UNVERIFIABLE_IL
)
1006 mono_error_set_generic_error (&cfg
->error
, "System.Security", "VerificationException", "%s", msg
);
1007 if (!mono_error_ok (&cfg
->error
)) {
1008 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
1011 cfg
->exception_type
= (MonoExceptionType
)info
->exception_type
;
1012 cfg
->exception_message
= msg
;
1014 g_free (method_name
);
1016 mono_free_verify_list (res
);
1020 mono_free_verify_list (res
);
1022 method
->verification_success
= 1;
1026 /*Returns true if something went wrong*/
1028 mono_compile_is_broken (MonoCompile
*cfg
, MonoMethod
*method
, gboolean fail_compile
)
1030 MonoMethod
*method_definition
= method
;
1031 gboolean dont_verify
= m_class_get_image (method
->klass
)->assembly
->corlib_internal
;
1033 while (method_definition
->is_inflated
) {
1034 MonoMethodInflated
*imethod
= (MonoMethodInflated
*) method_definition
;
1035 method_definition
= imethod
->declaring
;
1038 return !dont_verify
&& mini_method_verify (cfg
, method_definition
, fail_compile
);
1042 mono_dynamic_code_hash_insert (MonoDomain
*domain
, MonoMethod
*method
, MonoJitDynamicMethodInfo
*ji
)
1044 if (!domain_jit_info (domain
)->dynamic_code_hash
)
1045 domain_jit_info (domain
)->dynamic_code_hash
= g_hash_table_new (NULL
, NULL
);
1046 g_hash_table_insert (domain_jit_info (domain
)->dynamic_code_hash
, method
, ji
);
1049 static MonoJitDynamicMethodInfo
*
1050 mono_dynamic_code_hash_lookup (MonoDomain
*domain
, MonoMethod
*method
)
1052 MonoJitDynamicMethodInfo
*res
;
1054 if (domain_jit_info (domain
)->dynamic_code_hash
)
1055 res
= (MonoJitDynamicMethodInfo
*)g_hash_table_lookup (domain_jit_info (domain
)->dynamic_code_hash
, method
);
1063 GList
*active
, *inactive
;
1068 compare_by_interval_start_pos_func (gconstpointer a
, gconstpointer b
)
1070 MonoMethodVar
*v1
= (MonoMethodVar
*)a
;
1071 MonoMethodVar
*v2
= (MonoMethodVar
*)b
;
1075 else if (v1
->interval
->range
&& v2
->interval
->range
)
1076 return v1
->interval
->range
->from
- v2
->interval
->range
->from
;
1077 else if (v1
->interval
->range
)
1084 #define LSCAN_DEBUG(a) do { a; } while (0)
1086 #define LSCAN_DEBUG(a)
1090 mono_allocate_stack_slots2 (MonoCompile
*cfg
, gboolean backward
, guint32
*stack_size
, guint32
*stack_align
)
1092 int i
, slot
, offset
, size
;
1097 GList
*vars
= NULL
, *l
, *unhandled
;
1098 StackSlotInfo
*scalar_stack_slots
, *vtype_stack_slots
, *slot_info
;
1101 int vtype_stack_slots_size
= 256;
1102 gboolean reuse_slot
;
1104 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg
->method
, TRUE
)));
1106 scalar_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * MONO_TYPE_PINNED
);
1107 vtype_stack_slots
= NULL
;
1110 offsets
= (gint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (gint32
) * cfg
->num_varinfo
);
1111 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
1114 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1115 inst
= cfg
->varinfo
[i
];
1116 vmv
= MONO_VARINFO (cfg
, i
);
1118 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
|| inst
->opcode
== OP_REGOFFSET
)
1121 vars
= g_list_prepend (vars
, vmv
);
1124 vars
= g_list_sort (vars
, compare_by_interval_start_pos_func
);
1129 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1130 MonoMethodVar *current = unhandled->data;
1132 if (current->interval->range) {
1133 g_assert (current->interval->range->from >= i);
1134 i = current->interval->range->from;
1141 for (unhandled
= vars
; unhandled
; unhandled
= unhandled
->next
) {
1142 MonoMethodVar
*current
= (MonoMethodVar
*)unhandled
->data
;
1145 inst
= cfg
->varinfo
[vmv
->idx
];
1147 t
= mono_type_get_underlying_type (inst
->inst_vtype
);
1148 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1151 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1152 * pinvoke wrappers when they call functions returning structures */
1153 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1154 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1159 size
= mini_type_stack_size (t
, &ialign
);
1162 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (t
)))
1167 if (cfg
->disable_reuse_stack_slots
)
1170 t
= mini_get_underlying_type (t
);
1172 case MONO_TYPE_GENERICINST
:
1173 if (!mono_type_generic_inst_is_valuetype (t
)) {
1174 slot_info
= &scalar_stack_slots
[t
->type
];
1178 case MONO_TYPE_VALUETYPE
:
1179 if (!vtype_stack_slots
)
1180 vtype_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1181 for (i
= 0; i
< nvtypes
; ++i
)
1182 if (t
->data
.klass
== vtype_stack_slots
[i
].vtype
)
1185 slot_info
= &vtype_stack_slots
[i
];
1187 if (nvtypes
== vtype_stack_slots_size
) {
1188 int new_slots_size
= vtype_stack_slots_size
* 2;
1189 StackSlotInfo
* new_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * new_slots_size
);
1191 memcpy (new_slots
, vtype_stack_slots
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1193 vtype_stack_slots
= new_slots
;
1194 vtype_stack_slots_size
= new_slots_size
;
1196 vtype_stack_slots
[nvtypes
].vtype
= t
->data
.klass
;
1197 slot_info
= &vtype_stack_slots
[nvtypes
];
1200 if (cfg
->disable_reuse_ref_stack_slots
)
1207 #if TARGET_SIZEOF_VOID_P == 4
1212 if (cfg
->disable_ref_noref_stack_slot_share
) {
1213 slot_info
= &scalar_stack_slots
[MONO_TYPE_I
];
1218 case MONO_TYPE_CLASS
:
1219 case MONO_TYPE_OBJECT
:
1220 case MONO_TYPE_ARRAY
:
1221 case MONO_TYPE_SZARRAY
:
1222 case MONO_TYPE_STRING
:
1223 /* Share non-float stack slots of the same size */
1224 slot_info
= &scalar_stack_slots
[MONO_TYPE_CLASS
];
1225 if (cfg
->disable_reuse_ref_stack_slots
)
1230 slot_info
= &scalar_stack_slots
[t
->type
];
1234 if (cfg
->comp_done
& MONO_COMP_LIVENESS
) {
1238 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1240 if (!current
->interval
->range
) {
1241 if (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))
1245 inst
->flags
|= MONO_INST_IS_DEAD
;
1250 pos
= current
->interval
->range
->from
;
1252 LSCAN_DEBUG (printf ("process R%d ", inst
->dreg
));
1253 if (current
->interval
->range
)
1254 LSCAN_DEBUG (mono_linterval_print (current
->interval
));
1255 LSCAN_DEBUG (printf ("\n"));
1257 /* Check for intervals in active which expired or inactive */
1259 /* FIXME: Optimize this */
1262 for (l
= slot_info
->active
; l
!= NULL
; l
= l
->next
) {
1263 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1265 if (v
->interval
->last_range
->to
< pos
) {
1266 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1267 slot_info
->slots
= g_slist_prepend_mempool (cfg
->mempool
, slot_info
->slots
, GINT_TO_POINTER (offsets
[v
->idx
]));
1268 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1272 else if (!mono_linterval_covers (v
->interval
, pos
)) {
1273 slot_info
->inactive
= g_list_append (slot_info
->inactive
, v
);
1274 slot_info
->active
= g_list_delete_link (slot_info
->active
, l
);
1275 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg
->varinfo
[v
->idx
]->dreg
));
1282 /* Check for intervals in inactive which expired or active */
1284 /* FIXME: Optimize this */
1287 for (l
= slot_info
->inactive
; l
!= NULL
; l
= l
->next
) {
1288 MonoMethodVar
*v
= (MonoMethodVar
*)l
->data
;
1290 if (v
->interval
->last_range
->to
< pos
) {
1291 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1292 // FIXME: Enabling this seems to cause impossible to debug crashes
1293 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1294 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg
->varinfo
[v
->idx
]->dreg
, offsets
[v
->idx
]));
1298 else if (mono_linterval_covers (v
->interval
, pos
)) {
1299 slot_info
->active
= g_list_append (slot_info
->active
, v
);
1300 slot_info
->inactive
= g_list_delete_link (slot_info
->inactive
, l
);
1301 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg
->varinfo
[v
->idx
]->dreg
));
1309 * This also handles the case when the variable is used in an
1310 * exception region, as liveness info is not computed there.
1313 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1316 if (! (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))) {
1317 if (slot_info
->slots
) {
1318 slot
= GPOINTER_TO_INT (slot_info
->slots
->data
);
1320 slot_info
->slots
= slot_info
->slots
->next
;
1323 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1325 slot_info
->active
= mono_varlist_insert_sorted (cfg
, slot_info
->active
, vmv
, TRUE
);
1331 static int count
= 0;
1334 if (count
== atoi (g_getenv ("COUNT3")))
1335 printf ("LAST: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
1336 if (count
> atoi (g_getenv ("COUNT3")))
1339 mono_print_ins (inst
);
1343 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst
->dreg
, mono_type_full_name (t
), slot
));
1345 if (inst
->flags
& MONO_INST_LMF
) {
1346 size
= MONO_ABI_SIZEOF (MonoLMF
);
1347 align
= sizeof (target_mgreg_t
);
1354 if (slot
== 0xffffff) {
1356 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
1357 * efficient copying (and to work around the fact that OP_MEMCPY
1358 * and OP_MEMSET ignores alignment).
1360 if (MONO_TYPE_ISSTRUCT (t
)) {
1361 align
= MAX (align
, sizeof (target_mgreg_t
));
1362 align
= MAX (align
, mono_class_min_align (mono_class_from_mono_type_internal (t
)));
1367 offset
+= align
- 1;
1368 offset
&= ~(align
- 1);
1372 offset
+= align
- 1;
1373 offset
&= ~(align
- 1);
1378 if (*stack_align
== 0)
1379 *stack_align
= align
;
1382 offsets
[vmv
->idx
] = slot
;
1385 for (i
= 0; i
< MONO_TYPE_PINNED
; ++i
) {
1386 if (scalar_stack_slots
[i
].active
)
1387 g_list_free (scalar_stack_slots
[i
].active
);
1389 for (i
= 0; i
< nvtypes
; ++i
) {
1390 if (vtype_stack_slots
[i
].active
)
1391 g_list_free (vtype_stack_slots
[i
].active
);
1394 cfg
->stat_locals_stack_size
+= offset
;
1396 *stack_size
= offset
;
1401 * mono_allocate_stack_slots:
1403 * Allocate stack slots for all non register allocated variables using a
1404 * linear scan algorithm.
1405 * Returns: an array of stack offsets.
1406 * STACK_SIZE is set to the amount of stack space needed.
1407 * STACK_ALIGN is set to the alignment needed by the locals area.
1410 mono_allocate_stack_slots (MonoCompile
*cfg
, gboolean backward
, guint32
*stack_size
, guint32
*stack_align
)
1412 int i
, slot
, offset
, size
;
1417 GList
*vars
= NULL
, *l
;
1418 StackSlotInfo
*scalar_stack_slots
, *vtype_stack_slots
, *slot_info
;
1421 int vtype_stack_slots_size
= 256;
1422 gboolean reuse_slot
;
1424 if ((cfg
->num_varinfo
> 0) && MONO_VARINFO (cfg
, 0)->interval
)
1425 return mono_allocate_stack_slots2 (cfg
, backward
, stack_size
, stack_align
);
1427 scalar_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * MONO_TYPE_PINNED
);
1428 vtype_stack_slots
= NULL
;
1431 offsets
= (gint32
*)mono_mempool_alloc (cfg
->mempool
, sizeof (gint32
) * cfg
->num_varinfo
);
1432 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
1435 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1436 inst
= cfg
->varinfo
[i
];
1437 vmv
= MONO_VARINFO (cfg
, i
);
1439 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
|| inst
->opcode
== OP_REGOFFSET
)
1442 vars
= g_list_prepend (vars
, vmv
);
1445 vars
= mono_varlist_sort (cfg
, vars
, 0);
1447 *stack_align
= sizeof (target_mgreg_t
);
1448 for (l
= vars
; l
; l
= l
->next
) {
1449 vmv
= (MonoMethodVar
*)l
->data
;
1450 inst
= cfg
->varinfo
[vmv
->idx
];
1452 t
= mono_type_get_underlying_type (inst
->inst_vtype
);
1453 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1456 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1457 * pinvoke wrappers when they call functions returning structures */
1458 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1459 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1463 size
= mini_type_stack_size (t
, &ialign
);
1466 if (mono_class_has_failure (mono_class_from_mono_type_internal (t
)))
1467 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_TYPE_LOAD
);
1469 if (MONO_CLASS_IS_SIMD (cfg
, mono_class_from_mono_type_internal (t
)))
1474 if (cfg
->disable_reuse_stack_slots
)
1477 t
= mini_get_underlying_type (t
);
1479 case MONO_TYPE_GENERICINST
:
1480 if (!mono_type_generic_inst_is_valuetype (t
)) {
1481 slot_info
= &scalar_stack_slots
[t
->type
];
1485 case MONO_TYPE_VALUETYPE
:
1486 if (!vtype_stack_slots
)
1487 vtype_stack_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1488 for (i
= 0; i
< nvtypes
; ++i
)
1489 if (t
->data
.klass
== vtype_stack_slots
[i
].vtype
)
1492 slot_info
= &vtype_stack_slots
[i
];
1494 if (nvtypes
== vtype_stack_slots_size
) {
1495 int new_slots_size
= vtype_stack_slots_size
* 2;
1496 StackSlotInfo
* new_slots
= (StackSlotInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (StackSlotInfo
) * new_slots_size
);
1498 memcpy (new_slots
, vtype_stack_slots
, sizeof (StackSlotInfo
) * vtype_stack_slots_size
);
1500 vtype_stack_slots
= new_slots
;
1501 vtype_stack_slots_size
= new_slots_size
;
1503 vtype_stack_slots
[nvtypes
].vtype
= t
->data
.klass
;
1504 slot_info
= &vtype_stack_slots
[nvtypes
];
1507 if (cfg
->disable_reuse_ref_stack_slots
)
1514 #if TARGET_SIZEOF_VOID_P == 4
1519 if (cfg
->disable_ref_noref_stack_slot_share
) {
1520 slot_info
= &scalar_stack_slots
[MONO_TYPE_I
];
1525 case MONO_TYPE_CLASS
:
1526 case MONO_TYPE_OBJECT
:
1527 case MONO_TYPE_ARRAY
:
1528 case MONO_TYPE_SZARRAY
:
1529 case MONO_TYPE_STRING
:
1530 /* Share non-float stack slots of the same size */
1531 slot_info
= &scalar_stack_slots
[MONO_TYPE_CLASS
];
1532 if (cfg
->disable_reuse_ref_stack_slots
)
1536 case MONO_TYPE_MVAR
:
1537 slot_info
= &scalar_stack_slots
[t
->type
];
1540 slot_info
= &scalar_stack_slots
[t
->type
];
1545 if (cfg
->comp_done
& MONO_COMP_LIVENESS
) {
1546 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1548 /* expire old intervals in active */
1549 while (slot_info
->active
) {
1550 MonoMethodVar
*amv
= (MonoMethodVar
*)slot_info
->active
->data
;
1552 if (amv
->range
.last_use
.abs_pos
> vmv
->range
.first_use
.abs_pos
)
1555 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
1557 slot_info
->active
= g_list_delete_link (slot_info
->active
, slot_info
->active
);
1558 slot_info
->slots
= g_slist_prepend_mempool (cfg
->mempool
, slot_info
->slots
, GINT_TO_POINTER (offsets
[amv
->idx
]));
1562 * This also handles the case when the variable is used in an
1563 * exception region, as liveness info is not computed there.
1566 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1569 if (! (inst
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
))) {
1570 if (slot_info
->slots
) {
1571 slot
= GPOINTER_TO_INT (slot_info
->slots
->data
);
1573 slot_info
->slots
= slot_info
->slots
->next
;
1576 slot_info
->active
= mono_varlist_insert_sorted (cfg
, slot_info
->active
, vmv
, TRUE
);
1582 static int count
= 0;
1585 if (count
== atoi (g_getenv ("COUNT")))
1586 printf ("LAST: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
1587 if (count
> atoi (g_getenv ("COUNT")))
1590 mono_print_ins (inst
);
1594 if (inst
->flags
& MONO_INST_LMF
) {
1596 * This variable represents a MonoLMF structure, which has no corresponding
1597 * CLR type, so hard-code its size/alignment.
1599 size
= MONO_ABI_SIZEOF (MonoLMF
);
1600 align
= sizeof (target_mgreg_t
);
1607 if (slot
== 0xffffff) {
1609 * Allways allocate valuetypes to sizeof (target_mgreg_t) to allow more
1610 * efficient copying (and to work around the fact that OP_MEMCPY
1611 * and OP_MEMSET ignores alignment).
1613 if (MONO_TYPE_ISSTRUCT (t
)) {
1614 align
= MAX (align
, sizeof (target_mgreg_t
));
1615 align
= MAX (align
, mono_class_min_align (mono_class_from_mono_type_internal (t
)));
1617 * Align the size too so the code generated for passing vtypes in
1618 * registers doesn't overwrite random locals.
1620 size
= (size
+ (align
- 1)) & ~(align
-1);
1625 offset
+= align
- 1;
1626 offset
&= ~(align
- 1);
1630 offset
+= align
- 1;
1631 offset
&= ~(align
- 1);
1636 *stack_align
= MAX (*stack_align
, align
);
1639 offsets
[vmv
->idx
] = slot
;
1642 for (i
= 0; i
< MONO_TYPE_PINNED
; ++i
) {
1643 if (scalar_stack_slots
[i
].active
)
1644 g_list_free (scalar_stack_slots
[i
].active
);
1646 for (i
= 0; i
< nvtypes
; ++i
) {
1647 if (vtype_stack_slots
[i
].active
)
1648 g_list_free (vtype_stack_slots
[i
].active
);
1651 cfg
->stat_locals_stack_size
+= offset
;
1653 *stack_size
= offset
;
1657 #define EMUL_HIT_SHIFT 3
1658 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1659 /* small hit bitmap cache */
1660 static mono_byte emul_opcode_hit_cache
[(OP_LAST
>>EMUL_HIT_SHIFT
) + 1] = {0};
1661 static short emul_opcode_num
= 0;
1662 static short emul_opcode_alloced
= 0;
1663 static short *emul_opcode_opcodes
;
1664 static MonoJitICallInfo
**emul_opcode_map
;
1667 mono_find_jit_opcode_emulation (int opcode
)
1669 g_assert (opcode
>= 0 && opcode
<= OP_LAST
);
1670 if (emul_opcode_hit_cache
[opcode
>> (EMUL_HIT_SHIFT
+ 3)] & (1 << (opcode
& EMUL_HIT_MASK
))) {
1672 for (i
= 0; i
< emul_opcode_num
; ++i
) {
1673 if (emul_opcode_opcodes
[i
] == opcode
)
1674 return emul_opcode_map
[i
];
1681 mini_register_opcode_emulation (int opcode
, MonoJitICallInfo
*info
, const char *name
, MonoMethodSignature
*sig
, gpointer func
, const char *symbol
, gboolean no_wrapper
)
1684 g_assert (!sig
->hasthis
);
1685 g_assert (sig
->param_count
< 3);
1687 mono_register_jit_icall_info (info
, func
, name
, sig
, no_wrapper
, symbol
);
1689 if (emul_opcode_num
>= emul_opcode_alloced
) {
1690 int incr
= emul_opcode_alloced
? emul_opcode_alloced
/2: 16;
1691 emul_opcode_alloced
+= incr
;
1692 emul_opcode_map
= (MonoJitICallInfo
**)g_realloc (emul_opcode_map
, sizeof (emul_opcode_map
[0]) * emul_opcode_alloced
);
1693 emul_opcode_opcodes
= (short *)g_realloc (emul_opcode_opcodes
, sizeof (emul_opcode_opcodes
[0]) * emul_opcode_alloced
);
1695 emul_opcode_map
[emul_opcode_num
] = info
;
1696 emul_opcode_opcodes
[emul_opcode_num
] = opcode
;
1698 emul_opcode_hit_cache
[opcode
>> (EMUL_HIT_SHIFT
+ 3)] |= (1 << (opcode
& EMUL_HIT_MASK
));
1702 print_dfn (MonoCompile
*cfg
)
1710 char *method_name
= mono_method_full_name (cfg
->method
, TRUE
);
1711 g_print ("IR code for method %s\n", method_name
);
1712 g_free (method_name
);
1715 for (i
= 0; i
< cfg
->num_bblocks
; ++i
) {
1716 bb
= cfg
->bblocks
[i
];
1717 /*if (bb->cil_code) {
1718 char* code1, *code2;
1719 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1720 if (bb->last_ins->cil_code)
1721 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1723 code2 = g_strdup ("");
1725 code1 [strlen (code1) - 1] = 0;
1726 code = g_strdup_printf ("%s -> %s", code1, code2);
1730 code
= g_strdup ("\n");
1731 g_print ("\nBB%d (%d) (len: %d): %s", bb
->block_num
, i
, bb
->cil_length
, code
);
1732 MONO_BB_FOR_EACH_INS (bb
, c
) {
1733 mono_print_ins_index (-1, c
);
1736 g_print ("\tprev:");
1737 for (j
= 0; j
< bb
->in_count
; ++j
) {
1738 g_print (" BB%d", bb
->in_bb
[j
]->block_num
);
1740 g_print ("\t\tsucc:");
1741 for (j
= 0; j
< bb
->out_count
; ++j
) {
1742 g_print (" BB%d", bb
->out_bb
[j
]->block_num
);
1744 g_print ("\n\tidom: BB%d\n", bb
->idom
? bb
->idom
->block_num
: -1);
1747 g_assert (mono_bitset_test_fast (bb
->dominators
, bb
->idom
->dfn
));
1750 mono_blockset_print (cfg
, bb
->dominators
, "\tdominators", bb
->idom
? bb
->idom
->dfn
: -1);
1752 mono_blockset_print (cfg
, bb
->dfrontier
, "\tdfrontier", -1);
1760 mono_bblock_add_inst (MonoBasicBlock
*bb
, MonoInst
*inst
)
1762 MONO_ADD_INS (bb
, inst
);
1766 mono_bblock_insert_after_ins (MonoBasicBlock
*bb
, MonoInst
*ins
, MonoInst
*ins_to_insert
)
1770 bb
->code
= ins_to_insert
;
1772 /* Link with next */
1773 ins_to_insert
->next
= ins
;
1775 ins
->prev
= ins_to_insert
;
1777 if (bb
->last_ins
== NULL
)
1778 bb
->last_ins
= ins_to_insert
;
1780 /* Link with next */
1781 ins_to_insert
->next
= ins
->next
;
1783 ins
->next
->prev
= ins_to_insert
;
1785 /* Link with previous */
1786 ins
->next
= ins_to_insert
;
1787 ins_to_insert
->prev
= ins
;
1789 if (bb
->last_ins
== ins
)
1790 bb
->last_ins
= ins_to_insert
;
1795 mono_bblock_insert_before_ins (MonoBasicBlock
*bb
, MonoInst
*ins
, MonoInst
*ins_to_insert
)
1800 ins
->prev
= ins_to_insert
;
1801 bb
->code
= ins_to_insert
;
1802 ins_to_insert
->next
= ins
;
1803 if (bb
->last_ins
== NULL
)
1804 bb
->last_ins
= ins_to_insert
;
1806 /* Link with previous */
1808 ins
->prev
->next
= ins_to_insert
;
1809 ins_to_insert
->prev
= ins
->prev
;
1811 /* Link with next */
1812 ins
->prev
= ins_to_insert
;
1813 ins_to_insert
->next
= ins
;
1815 if (bb
->code
== ins
)
1816 bb
->code
= ins_to_insert
;
1821 * mono_verify_bblock:
1823 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1826 mono_verify_bblock (MonoBasicBlock
*bb
)
1828 MonoInst
*ins
, *prev
;
1831 for (ins
= bb
->code
; ins
; ins
= ins
->next
) {
1832 g_assert (ins
->prev
== prev
);
1836 g_assert (!bb
->last_ins
->next
);
1842 * Perform consistency checks on the JIT data structures and the IR
1845 mono_verify_cfg (MonoCompile
*cfg
)
1849 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
1850 mono_verify_bblock (bb
);
1853 // This will free many fields in cfg to save
1854 // memory. Note that this must be safe to call
1855 // multiple times. It must be idempotent.
1857 mono_empty_compile (MonoCompile
*cfg
)
1859 mono_free_loop_info (cfg
);
1861 // These live in the mempool, and so must be freed
1863 for (GSList
*l
= cfg
->headers_to_free
; l
; l
= l
->next
) {
1864 mono_metadata_free_mh ((MonoMethodHeader
*)l
->data
);
1866 cfg
->headers_to_free
= NULL
;
1869 //mono_mempool_stats (cfg->mempool);
1870 mono_mempool_destroy (cfg
->mempool
);
1871 cfg
->mempool
= NULL
;
1874 g_free (cfg
->varinfo
);
1875 cfg
->varinfo
= NULL
;
1881 mono_regstate_free (cfg
->rs
);
1887 mono_destroy_compile (MonoCompile
*cfg
)
1889 mono_empty_compile (cfg
);
1891 mono_metadata_free_mh (cfg
->header
);
1893 g_hash_table_destroy (cfg
->spvars
);
1894 g_hash_table_destroy (cfg
->exvars
);
1895 g_list_free (cfg
->ldstr_list
);
1896 g_hash_table_destroy (cfg
->token_info_hash
);
1897 g_hash_table_destroy (cfg
->abs_patches
);
1899 mono_debug_free_method (cfg
);
1901 g_free (cfg
->varinfo
);
1903 g_free (cfg
->exception_message
);
1908 mono_add_patch_info (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
1910 if (type
== MONO_PATCH_INFO_NONE
)
1913 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfo
));
1917 ji
->data
.target
= target
;
1918 ji
->next
= cfg
->patch_info
;
1920 cfg
->patch_info
= ji
;
1924 mono_add_patch_info_rel (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
, int relocation
)
1926 if (type
== MONO_PATCH_INFO_NONE
)
1929 MonoJumpInfo
*ji
= (MonoJumpInfo
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoJumpInfo
));
1933 ji
->relocation
= relocation
;
1934 ji
->data
.target
= target
;
1935 ji
->next
= cfg
->patch_info
;
1937 cfg
->patch_info
= ji
;
1941 mono_remove_patch_info (MonoCompile
*cfg
, int ip
)
1943 MonoJumpInfo
**ji
= &cfg
->patch_info
;
1946 if ((*ji
)->ip
.i
== ip
)
1949 ji
= &((*ji
)->next
);
1954 mono_add_seq_point (MonoCompile
*cfg
, MonoBasicBlock
*bb
, MonoInst
*ins
, int native_offset
)
1956 ins
->inst_offset
= native_offset
;
1957 g_ptr_array_add (cfg
->seq_points
, ins
);
1959 bb
->seq_points
= g_slist_prepend_mempool (cfg
->mempool
, bb
->seq_points
, ins
);
1960 bb
->last_seq_point
= ins
;
1965 mono_add_var_location (MonoCompile
*cfg
, MonoInst
*var
, gboolean is_reg
, int reg
, int offset
, int from
, int to
)
1967 MonoDwarfLocListEntry
*entry
= (MonoDwarfLocListEntry
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoDwarfLocListEntry
));
1970 g_assert (offset
== 0);
1972 entry
->is_reg
= is_reg
;
1974 entry
->offset
= offset
;
1978 if (var
== cfg
->args
[0])
1979 cfg
->this_loclist
= g_slist_append_mempool (cfg
->mempool
, cfg
->this_loclist
, entry
);
1980 else if (var
== cfg
->rgctx_var
)
1981 cfg
->rgctx_loclist
= g_slist_append_mempool (cfg
->mempool
, cfg
->rgctx_loclist
, entry
);
1985 mono_apply_volatile (MonoInst
*inst
, MonoBitSet
*set
, gsize index
)
1987 inst
->flags
|= mono_bitset_test_safe (set
, index
) ? MONO_INST_VOLATILE
: 0;
1991 mono_compile_create_vars (MonoCompile
*cfg
)
1993 MonoMethodSignature
*sig
;
1994 MonoMethodHeader
*header
;
1997 header
= cfg
->header
;
1999 sig
= mono_method_signature_internal (cfg
->method
);
2001 if (!MONO_TYPE_IS_VOID (sig
->ret
)) {
2002 cfg
->ret
= mono_compile_create_var (cfg
, sig
->ret
, OP_ARG
);
2003 /* Inhibit optimizations */
2004 cfg
->ret
->flags
|= MONO_INST_VOLATILE
;
2006 if (cfg
->verbose_level
> 2)
2007 g_print ("creating vars\n");
2009 cfg
->args
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, (sig
->param_count
+ sig
->hasthis
) * sizeof (MonoInst
*));
2012 MonoInst
* arg
= mono_compile_create_var (cfg
, m_class_get_this_arg (cfg
->method
->klass
), OP_ARG
);
2013 mono_apply_volatile (arg
, header
->volatile_args
, 0);
2014 cfg
->args
[0] = arg
;
2015 cfg
->this_arg
= arg
;
2018 for (i
= 0; i
< sig
->param_count
; ++i
) {
2019 MonoInst
* arg
= mono_compile_create_var (cfg
, sig
->params
[i
], OP_ARG
);
2020 mono_apply_volatile (arg
, header
->volatile_args
, i
+ sig
->hasthis
);
2021 cfg
->args
[i
+ sig
->hasthis
] = arg
;
2024 if (cfg
->verbose_level
> 2) {
2026 printf ("\treturn : ");
2027 mono_print_ins (cfg
->ret
);
2031 printf ("\tthis: ");
2032 mono_print_ins (cfg
->args
[0]);
2035 for (i
= 0; i
< sig
->param_count
; ++i
) {
2036 printf ("\targ [%d]: ", i
);
2037 mono_print_ins (cfg
->args
[i
+ sig
->hasthis
]);
2041 cfg
->locals_start
= cfg
->num_varinfo
;
2042 cfg
->locals
= (MonoInst
**)mono_mempool_alloc0 (cfg
->mempool
, header
->num_locals
* sizeof (MonoInst
*));
2044 if (cfg
->verbose_level
> 2)
2045 g_print ("creating locals\n");
2047 for (i
= 0; i
< header
->num_locals
; ++i
) {
2048 if (cfg
->verbose_level
> 2)
2049 g_print ("\tlocal [%d]: ", i
);
2050 cfg
->locals
[i
] = mono_compile_create_var (cfg
, header
->locals
[i
], OP_LOCAL
);
2051 mono_apply_volatile (cfg
->locals
[i
], header
->volatile_locals
, i
);
2054 if (cfg
->verbose_level
> 2)
2055 g_print ("locals done\n");
2058 if (COMPILE_LLVM (cfg
))
2059 mono_llvm_create_vars (cfg
);
2061 mono_arch_create_vars (cfg
);
2063 mono_arch_create_vars (cfg
);
2066 if (cfg
->method
->save_lmf
&& cfg
->create_lmf_var
) {
2067 MonoInst
*lmf_var
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2068 lmf_var
->flags
|= MONO_INST_VOLATILE
;
2069 lmf_var
->flags
|= MONO_INST_LMF
;
2070 cfg
->lmf_var
= lmf_var
;
2075 mono_print_code (MonoCompile
*cfg
, const char* msg
)
2079 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2080 mono_print_bb (bb
, msg
);
2084 mono_postprocess_patches (MonoCompile
*cfg
)
2086 MonoJumpInfo
*patch_info
;
2089 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
2090 switch (patch_info
->type
) {
2091 case MONO_PATCH_INFO_ABS
: {
2092 MonoJitICallInfo
*info
= mono_find_jit_icall_by_addr (patch_info
->data
.target
);
2095 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2099 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL
;
2100 patch_info
->data
.name
= info
->name
;
2103 if (patch_info
->type
== MONO_PATCH_INFO_ABS
) {
2104 if (cfg
->abs_patches
) {
2105 MonoJumpInfo
*abs_ji
= (MonoJumpInfo
*)g_hash_table_lookup (cfg
->abs_patches
, patch_info
->data
.target
);
2107 patch_info
->type
= abs_ji
->type
;
2108 patch_info
->data
.target
= abs_ji
->data
.target
;
2115 case MONO_PATCH_INFO_SWITCH
: {
2117 if (cfg
->method
->dynamic
) {
2118 table
= (void **)mono_code_manager_reserve (cfg
->dynamic_info
->code_mp
, sizeof (gpointer
) * patch_info
->data
.table
->table_size
);
2120 table
= (void **)mono_domain_code_reserve (cfg
->domain
, sizeof (gpointer
) * patch_info
->data
.table
->table_size
);
2123 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++) {
2124 /* Might be NULL if the switch is eliminated */
2125 if (patch_info
->data
.table
->table
[i
]) {
2126 g_assert (patch_info
->data
.table
->table
[i
]->native_offset
);
2127 table
[i
] = GINT_TO_POINTER (patch_info
->data
.table
->table
[i
]->native_offset
);
2132 patch_info
->data
.table
->table
= (MonoBasicBlock
**)table
;
2135 case MONO_PATCH_INFO_METHOD_JUMP
: {
2136 unsigned char *ip
= cfg
->native_code
+ patch_info
->ip
.i
;
2138 mini_register_jump_site (cfg
->domain
, patch_info
->data
.method
, ip
);
2149 mono_codegen (MonoCompile
*cfg
)
2152 int max_epilog_size
;
2154 MonoDomain
*code_domain
;
2155 guint unwindlen
= 0;
2157 if (mono_using_xdebug
)
2159 * Recent gdb versions have trouble processing symbol files containing
2160 * overlapping address ranges, so allocate all code from the code manager
2161 * of the root domain. (#666152).
2163 code_domain
= mono_get_root_domain ();
2165 code_domain
= cfg
->domain
;
2167 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2168 cfg
->spill_count
= 0;
2169 /* we reuse dfn here */
2170 /* bb->dfn = bb_count++; */
2172 mono_arch_lowering_pass (cfg
, bb
);
2174 if (cfg
->opt
& MONO_OPT_PEEPHOLE
)
2175 mono_arch_peephole_pass_1 (cfg
, bb
);
2177 mono_local_regalloc (cfg
, bb
);
2179 if (cfg
->opt
& MONO_OPT_PEEPHOLE
)
2180 mono_arch_peephole_pass_2 (cfg
, bb
);
2182 if (cfg
->gen_seq_points
&& !cfg
->gen_sdb_seq_points
)
2183 mono_bb_deduplicate_op_il_seq_points (cfg
, bb
);
2186 code
= mono_arch_emit_prolog (cfg
);
2188 set_code_cursor (cfg
, code
);
2189 cfg
->prolog_end
= cfg
->code_len
;
2190 cfg
->cfa_reg
= cfg
->cur_cfa_reg
;
2191 cfg
->cfa_offset
= cfg
->cur_cfa_offset
;
2193 mono_debug_open_method (cfg
);
2195 /* emit code all basic blocks */
2196 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2197 bb
->native_offset
= cfg
->code_len
;
2198 bb
->real_native_offset
= cfg
->code_len
;
2199 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2200 mono_arch_output_basic_block (cfg
, bb
);
2201 bb
->native_length
= cfg
->code_len
- bb
->native_offset
;
2203 if (bb
== cfg
->bb_exit
) {
2204 cfg
->epilog_begin
= cfg
->code_len
;
2205 mono_arch_emit_epilog (cfg
);
2206 cfg
->epilog_end
= cfg
->code_len
;
2209 if (bb
->clause_holes
) {
2211 for (tmp
= bb
->clause_holes
; tmp
; tmp
= tmp
->prev
)
2212 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, cfg
->native_code
+ bb
->native_offset
, bb
);
2216 mono_arch_emit_exceptions (cfg
);
2218 max_epilog_size
= 0;
2220 /* we always allocate code in cfg->domain->code_mp to increase locality */
2221 cfg
->code_size
= cfg
->code_len
+ max_epilog_size
;
2223 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2225 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2226 if (!cfg
->compile_aot
)
2227 unwindlen
= mono_arch_unwindinfo_init_method_unwind_info (cfg
);
2230 if (cfg
->method
->dynamic
) {
2231 /* Allocate the code into a separate memory pool so it can be freed */
2232 cfg
->dynamic_info
= g_new0 (MonoJitDynamicMethodInfo
, 1);
2233 cfg
->dynamic_info
->code_mp
= mono_code_manager_new_dynamic ();
2234 mono_domain_lock (cfg
->domain
);
2235 mono_dynamic_code_hash_insert (cfg
->domain
, cfg
->method
, cfg
->dynamic_info
);
2236 mono_domain_unlock (cfg
->domain
);
2238 if (mono_using_xdebug
)
2239 /* See the comment for cfg->code_domain */
2240 code
= (guint8
*)mono_domain_code_reserve (code_domain
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2242 code
= (guint8
*)mono_code_manager_reserve (cfg
->dynamic_info
->code_mp
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2244 code
= (guint8
*)mono_domain_code_reserve (code_domain
, cfg
->code_size
+ cfg
->thunk_area
+ unwindlen
);
2247 if (cfg
->thunk_area
) {
2248 cfg
->thunks_offset
= cfg
->code_size
+ unwindlen
;
2249 cfg
->thunks
= code
+ cfg
->thunks_offset
;
2250 memset (cfg
->thunks
, 0, cfg
->thunk_area
);
2254 memcpy (code
, cfg
->native_code
, cfg
->code_len
);
2255 g_free (cfg
->native_code
);
2256 cfg
->native_code
= code
;
2257 code
= cfg
->native_code
+ cfg
->code_len
;
2259 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2260 mono_postprocess_patches (cfg
);
2262 #ifdef VALGRIND_JIT_REGISTER_MAP
2263 if (valgrind_register
){
2264 char* nm
= mono_method_full_name (cfg
->method
, TRUE
);
2265 VALGRIND_JIT_REGISTER_MAP (nm
, cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
);
2270 if (cfg
->verbose_level
> 0) {
2271 char* nm
= mono_method_get_full_name (cfg
->method
);
2272 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2274 cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
, cfg
->code_len
, cfg
->domain
->friendly_name
);
2279 gboolean is_generic
= FALSE
;
2281 if (cfg
->method
->is_inflated
|| mono_method_get_generic_container (cfg
->method
) ||
2282 mono_class_is_gtd (cfg
->method
->klass
) || mono_class_is_ginst (cfg
->method
->klass
)) {
2287 g_assert (is_generic
);
2290 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2291 mono_arch_save_unwind_info (cfg
);
2294 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2299 for (ji
= cfg
->patch_info
; ji
; ji
= ji
->next
) {
2300 if (cfg
->compile_aot
) {
2302 case MONO_PATCH_INFO_BB
:
2303 case MONO_PATCH_INFO_LABEL
:
2306 /* No need to patch these */
2311 if (ji
->type
== MONO_PATCH_INFO_NONE
)
2314 target
= mono_resolve_patch_target (cfg
->method
, cfg
->domain
, cfg
->native_code
, ji
, cfg
->run_cctors
, &cfg
->error
);
2315 if (!mono_error_ok (&cfg
->error
)) {
2316 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2319 mono_arch_patch_code_new (cfg
, cfg
->domain
, cfg
->native_code
, ji
, target
);
2323 mono_arch_patch_code (cfg
, cfg
->method
, cfg
->domain
, cfg
->native_code
, cfg
->patch_info
, cfg
->run_cctors
, &cfg
->error
);
2324 if (!is_ok (&cfg
->error
)) {
2325 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
2330 if (cfg
->method
->dynamic
) {
2331 if (mono_using_xdebug
)
2332 mono_domain_code_commit (code_domain
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2334 mono_code_manager_commit (cfg
->dynamic_info
->code_mp
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2336 mono_domain_code_commit (code_domain
, cfg
->native_code
, cfg
->code_size
, cfg
->code_len
);
2338 MONO_PROFILER_RAISE (jit_code_buffer
, (cfg
->native_code
, cfg
->code_len
, MONO_PROFILER_CODE_BUFFER_METHOD
, cfg
->method
));
2340 mono_arch_flush_icache (cfg
->native_code
, cfg
->code_len
);
2342 mono_debug_close_method (cfg
);
2344 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2345 if (!cfg
->compile_aot
)
2346 mono_arch_unwindinfo_install_method_unwind_info (&cfg
->arch
.unwindinfo
, cfg
->native_code
, cfg
->code_len
);
2351 compute_reachable (MonoBasicBlock
*bb
)
2355 if (!(bb
->flags
& BB_VISITED
)) {
2356 bb
->flags
|= BB_VISITED
;
2357 for (i
= 0; i
< bb
->out_count
; ++i
)
2358 compute_reachable (bb
->out_bb
[i
]);
2362 static void mono_bb_ordering (MonoCompile
*cfg
)
2365 /* Depth-first ordering on basic blocks */
2366 cfg
->bblocks
= (MonoBasicBlock
**)mono_mempool_alloc (cfg
->mempool
, sizeof (MonoBasicBlock
*) * (cfg
->num_bblocks
+ 1));
2368 cfg
->max_block_num
= cfg
->num_bblocks
;
2370 df_visit (cfg
->bb_entry
, &dfn
, cfg
->bblocks
);
2372 #if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
2373 /* workaround for an AMD specific issue that only happens on GCC 7 so far,
2374 * for more information see https://github.com/mono/mono/issues/9298 */
2375 mono_memory_barrier ();
2377 g_assertf (cfg
->num_bblocks
>= dfn
, "cfg->num_bblocks=%d, dfn=%d\n", cfg
->num_bblocks
, dfn
);
2379 if (cfg
->num_bblocks
!= dfn
+ 1) {
2382 cfg
->num_bblocks
= dfn
+ 1;
2384 /* remove unreachable code, because the code in them may be
2385 * inconsistent (access to dead variables for example) */
2386 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2387 bb
->flags
&= ~BB_VISITED
;
2388 compute_reachable (cfg
->bb_entry
);
2389 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2390 if (bb
->flags
& BB_EXCEPTION_HANDLER
)
2391 compute_reachable (bb
);
2392 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2393 if (!(bb
->flags
& BB_VISITED
)) {
2394 if (cfg
->verbose_level
> 1)
2395 g_print ("found unreachable code in BB%d\n", bb
->block_num
);
2396 bb
->code
= bb
->last_ins
= NULL
;
2397 while (bb
->out_count
)
2398 mono_unlink_bblock (cfg
, bb
, bb
->out_bb
[0]);
2401 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
)
2402 bb
->flags
&= ~BB_VISITED
;
2407 mono_handle_out_of_line_bblock (MonoCompile
*cfg
)
2410 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2411 if (bb
->next_bb
&& bb
->next_bb
->out_of_line
&& bb
->last_ins
&& !MONO_IS_BRANCH_OP (bb
->last_ins
)) {
2413 MONO_INST_NEW (cfg
, ins
, OP_BR
);
2414 MONO_ADD_INS (bb
, ins
);
2415 ins
->inst_target_bb
= bb
->next_bb
;
2421 create_jit_info (MonoCompile
*cfg
, MonoMethod
*method_to_compile
)
2424 MonoMethodHeader
*header
;
2426 MonoJitInfoFlags flags
= JIT_INFO_NONE
;
2427 int num_clauses
, num_holes
= 0;
2428 guint32 stack_size
= 0;
2430 g_assert (method_to_compile
== cfg
->method
);
2431 header
= cfg
->header
;
2434 flags
|= JIT_INFO_HAS_GENERIC_JIT_INFO
;
2436 if (cfg
->arch_eh_jit_info
) {
2437 MonoJitArgumentInfo
*arg_info
;
2438 MonoMethodSignature
*sig
= mono_method_signature_internal (cfg
->method_to_register
);
2441 * This cannot be computed during stack walking, as
2442 * mono_arch_get_argument_info () is not signal safe.
2444 arg_info
= g_newa (MonoJitArgumentInfo
, sig
->param_count
+ 1);
2445 stack_size
= mono_arch_get_argument_info (sig
, sig
->param_count
, arg_info
);
2448 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2451 if (cfg
->has_unwind_info_for_epilog
&& !(flags
& JIT_INFO_HAS_ARCH_EH_INFO
))
2452 flags
|= JIT_INFO_HAS_ARCH_EH_INFO
;
2454 if (cfg
->thunk_area
)
2455 flags
|= JIT_INFO_HAS_THUNK_INFO
;
2457 if (cfg
->try_block_holes
) {
2458 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2459 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2460 MonoExceptionClause
*ec
= hole
->clause
;
2461 int hole_end
= hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
;
2462 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2463 g_assert (clause_last_bb
);
2465 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2466 if (clause_last_bb
->native_offset
!= hole_end
)
2470 flags
|= JIT_INFO_HAS_TRY_BLOCK_HOLES
;
2471 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2472 printf ("Number of try block holes %d\n", num_holes
);
2475 if (COMPILE_LLVM (cfg
))
2476 num_clauses
= cfg
->llvm_ex_info_len
;
2478 num_clauses
= header
->num_clauses
;
2480 if (cfg
->method
->dynamic
)
2481 jinfo
= (MonoJitInfo
*)g_malloc0 (mono_jit_info_size (flags
, num_clauses
, num_holes
));
2483 jinfo
= (MonoJitInfo
*)mono_domain_alloc0 (cfg
->domain
, mono_jit_info_size (flags
, num_clauses
, num_holes
));
2484 jinfo_try_holes_size
+= num_holes
* sizeof (MonoTryBlockHoleJitInfo
);
2486 mono_jit_info_init (jinfo
, cfg
->method_to_register
, cfg
->native_code
, cfg
->code_len
, flags
, num_clauses
, num_holes
);
2487 jinfo
->domain_neutral
= (cfg
->opt
& MONO_OPT_SHARED
) != 0;
2489 if (COMPILE_LLVM (cfg
))
2490 jinfo
->from_llvm
= TRUE
;
2494 MonoGenericJitInfo
*gi
;
2495 GSList
*loclist
= NULL
;
2497 gi
= mono_jit_info_get_generic_jit_info (jinfo
);
2500 if (cfg
->method
->dynamic
)
2501 gi
->generic_sharing_context
= g_new0 (MonoGenericSharingContext
, 1);
2503 gi
->generic_sharing_context
= (MonoGenericSharingContext
*)mono_domain_alloc0 (cfg
->domain
, sizeof (MonoGenericSharingContext
));
2504 mini_init_gsctx (cfg
->method
->dynamic
? NULL
: cfg
->domain
, NULL
, cfg
->gsctx_context
, gi
->generic_sharing_context
);
2506 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2507 mini_method_get_context (method_to_compile
)->method_inst
||
2508 m_class_is_valuetype (method_to_compile
->klass
)) {
2509 g_assert (cfg
->rgctx_var
);
2514 if ((method_to_compile
->flags
& METHOD_ATTRIBUTE_STATIC
) ||
2515 mini_method_get_context (method_to_compile
)->method_inst
||
2516 m_class_is_valuetype (method_to_compile
->klass
)) {
2517 inst
= cfg
->rgctx_var
;
2518 if (!COMPILE_LLVM (cfg
))
2519 g_assert (inst
->opcode
== OP_REGOFFSET
);
2520 loclist
= cfg
->rgctx_loclist
;
2522 inst
= cfg
->args
[0];
2523 loclist
= cfg
->this_loclist
;
2527 /* Needed to handle async exceptions */
2531 gi
->nlocs
= g_slist_length (loclist
);
2532 if (cfg
->method
->dynamic
)
2533 gi
->locations
= (MonoDwarfLocListEntry
*)g_malloc0 (gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2535 gi
->locations
= (MonoDwarfLocListEntry
*)mono_domain_alloc0 (cfg
->domain
, gi
->nlocs
* sizeof (MonoDwarfLocListEntry
));
2537 for (l
= loclist
; l
; l
= l
->next
) {
2538 memcpy (&(gi
->locations
[i
]), l
->data
, sizeof (MonoDwarfLocListEntry
));
2543 if (COMPILE_LLVM (cfg
)) {
2544 g_assert (cfg
->llvm_this_reg
!= -1);
2545 gi
->this_in_reg
= 0;
2546 gi
->this_reg
= cfg
->llvm_this_reg
;
2547 gi
->this_offset
= cfg
->llvm_this_offset
;
2548 } else if (inst
->opcode
== OP_REGVAR
) {
2549 gi
->this_in_reg
= 1;
2550 gi
->this_reg
= inst
->dreg
;
2552 g_assert (inst
->opcode
== OP_REGOFFSET
);
2554 g_assert (inst
->inst_basereg
== X86_EBP
);
2555 #elif defined(TARGET_AMD64)
2556 g_assert (inst
->inst_basereg
== X86_EBP
|| inst
->inst_basereg
== X86_ESP
);
2558 g_assert (inst
->inst_offset
>= G_MININT32
&& inst
->inst_offset
<= G_MAXINT32
);
2560 gi
->this_in_reg
= 0;
2561 gi
->this_reg
= inst
->inst_basereg
;
2562 gi
->this_offset
= inst
->inst_offset
;
2567 MonoTryBlockHoleTableJitInfo
*table
;
2570 table
= mono_jit_info_get_try_block_hole_table_info (jinfo
);
2571 table
->num_holes
= (guint16
)num_holes
;
2573 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2574 guint32 start_bb_offset
;
2575 MonoTryBlockHoleJitInfo
*hole
;
2576 TryBlockHole
*hole_data
= (TryBlockHole
*)tmp
->data
;
2577 MonoExceptionClause
*ec
= hole_data
->clause
;
2578 int hole_end
= hole_data
->basic_block
->native_offset
+ hole_data
->basic_block
->native_length
;
2579 MonoBasicBlock
*clause_last_bb
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2580 g_assert (clause_last_bb
);
2582 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2583 if (clause_last_bb
->native_offset
== hole_end
)
2586 start_bb_offset
= hole_data
->start_offset
- hole_data
->basic_block
->native_offset
;
2587 hole
= &table
->holes
[i
++];
2588 hole
->clause
= hole_data
->clause
- &header
->clauses
[0];
2589 hole
->offset
= (guint32
)hole_data
->start_offset
;
2590 hole
->length
= (guint16
)(hole_data
->basic_block
->native_length
- start_bb_offset
);
2592 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2593 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole
->clause
, hole
->offset
, hole
->length
);
2595 g_assert (i
== num_holes
);
2598 if (jinfo
->has_arch_eh_info
) {
2599 MonoArchEHJitInfo
*info
;
2601 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2603 info
->stack_size
= stack_size
;
2606 if (cfg
->thunk_area
) {
2607 MonoThunkJitInfo
*info
;
2609 info
= mono_jit_info_get_thunk_info (jinfo
);
2610 info
->thunks_offset
= cfg
->thunks_offset
;
2611 info
->thunks_size
= cfg
->thunk_area
;
2614 if (COMPILE_LLVM (cfg
)) {
2616 memcpy (&jinfo
->clauses
[0], &cfg
->llvm_ex_info
[0], num_clauses
* sizeof (MonoJitExceptionInfo
));
2617 } else if (header
->num_clauses
) {
2620 for (i
= 0; i
< header
->num_clauses
; i
++) {
2621 MonoExceptionClause
*ec
= &header
->clauses
[i
];
2622 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2623 MonoBasicBlock
*tblock
;
2626 ei
->flags
= ec
->flags
;
2628 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2629 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec
->try_offset
, ec
->try_offset
+ ec
->try_len
, ec
->handler_offset
, ec
->handler_offset
+ ec
->handler_len
, ec
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
? ec
->data
.filter_offset
: 0);
2631 exvar
= mono_find_exvar_for_offset (cfg
, ec
->handler_offset
);
2632 ei
->exvar_offset
= exvar
? exvar
->inst_offset
: 0;
2634 if (ei
->flags
== MONO_EXCEPTION_CLAUSE_FILTER
) {
2635 tblock
= cfg
->cil_offset_to_bb
[ec
->data
.filter_offset
];
2637 ei
->data
.filter
= cfg
->native_code
+ tblock
->native_offset
;
2639 ei
->data
.catch_class
= ec
->data
.catch_class
;
2642 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
];
2644 g_assert (tblock
->native_offset
);
2645 ei
->try_start
= cfg
->native_code
+ tblock
->native_offset
;
2646 if (tblock
->extend_try_block
) {
2648 * Extend the try block backwards to include parts of the previous call
2651 ei
->try_start
= (guint8
*)ei
->try_start
- cfg
->backend
->monitor_enter_adjustment
;
2653 if (ec
->try_offset
+ ec
->try_len
< header
->code_size
)
2654 tblock
= cfg
->cil_offset_to_bb
[ec
->try_offset
+ ec
->try_len
];
2656 tblock
= cfg
->bb_exit
;
2657 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2658 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec
->try_offset
, ec
->try_len
, tblock
, header
->code_size
);
2660 if (!tblock
->native_offset
) {
2662 for (j
= ec
->try_offset
+ ec
->try_len
, end
= ec
->try_offset
; j
>= end
; --j
) {
2663 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2664 if (bb
&& bb
->native_offset
) {
2670 ei
->try_end
= cfg
->native_code
+ tblock
->native_offset
;
2671 g_assert (tblock
->native_offset
);
2672 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
];
2674 ei
->handler_start
= cfg
->native_code
+ tblock
->native_offset
;
2676 for (tmp
= cfg
->try_block_holes
; tmp
; tmp
= tmp
->next
) {
2677 TryBlockHole
*hole
= (TryBlockHole
*)tmp
->data
;
2678 gpointer hole_end
= cfg
->native_code
+ (hole
->basic_block
->native_offset
+ hole
->basic_block
->native_length
);
2679 if (hole
->clause
== ec
&& hole_end
== ei
->try_end
) {
2680 if (G_UNLIKELY (cfg
->verbose_level
>= 4))
2681 printf ("\tShortening try block %d from %x to %x\n", i
, (int)((guint8
*)ei
->try_end
- cfg
->native_code
), hole
->start_offset
);
2683 ei
->try_end
= cfg
->native_code
+ hole
->start_offset
;
2688 if (ec
->flags
== MONO_EXCEPTION_CLAUSE_FINALLY
) {
2690 if (ec
->handler_offset
+ ec
->handler_len
< header
->code_size
) {
2691 tblock
= cfg
->cil_offset_to_bb
[ec
->handler_offset
+ ec
->handler_len
];
2692 if (tblock
->native_offset
) {
2693 end_offset
= tblock
->native_offset
;
2697 for (j
= ec
->handler_offset
+ ec
->handler_len
, end
= ec
->handler_offset
; j
>= end
; --j
) {
2698 MonoBasicBlock
*bb
= cfg
->cil_offset_to_bb
[j
];
2699 if (bb
&& bb
->native_offset
) {
2704 end_offset
= tblock
->native_offset
+ tblock
->native_length
;
2707 end_offset
= cfg
->epilog_begin
;
2709 ei
->data
.handler_end
= cfg
->native_code
+ end_offset
;
2714 if (G_UNLIKELY (cfg
->verbose_level
>= 4)) {
2716 for (i
= 0; i
< jinfo
->num_clauses
; i
++) {
2717 MonoJitExceptionInfo
*ei
= &jinfo
->clauses
[i
];
2718 int start
= (guint8
*)ei
->try_start
- cfg
->native_code
;
2719 int end
= (guint8
*)ei
->try_end
- cfg
->native_code
;
2720 int handler
= (guint8
*)ei
->handler_start
- cfg
->native_code
;
2721 int handler_end
= (guint8
*)ei
->data
.handler_end
- cfg
->native_code
;
2723 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i
, ei
->flags
, start
, end
, handler
, handler_end
);
2727 if (cfg
->encoded_unwind_ops
) {
2728 /* Generated by LLVM */
2729 jinfo
->unwind_info
= mono_cache_unwind_info (cfg
->encoded_unwind_ops
, cfg
->encoded_unwind_ops_len
);
2730 g_free (cfg
->encoded_unwind_ops
);
2731 } else if (cfg
->unwind_ops
) {
2733 guint8
*unwind_info
= mono_unwind_ops_encode (cfg
->unwind_ops
, &info_len
);
2734 guint32 unwind_desc
;
2736 unwind_desc
= mono_cache_unwind_info (unwind_info
, info_len
);
2738 if (cfg
->has_unwind_info_for_epilog
) {
2739 MonoArchEHJitInfo
*info
;
2741 info
= mono_jit_info_get_arch_eh_info (jinfo
);
2743 info
->epilog_size
= cfg
->code_len
- cfg
->epilog_begin
;
2745 jinfo
->unwind_info
= unwind_desc
;
2746 g_free (unwind_info
);
2748 jinfo
->unwind_info
= cfg
->used_int_regs
;
2754 /* Return whenever METHOD is a gsharedvt method */
2756 is_gsharedvt_method (MonoMethod
*method
)
2758 MonoGenericContext
*context
;
2759 MonoGenericInst
*inst
;
2762 if (!method
->is_inflated
)
2764 context
= mono_method_get_context (method
);
2765 inst
= context
->class_inst
;
2767 for (i
= 0; i
< inst
->type_argc
; ++i
)
2768 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2771 inst
= context
->method_inst
;
2773 for (i
= 0; i
< inst
->type_argc
; ++i
)
2774 if (mini_is_gsharedvt_gparam (inst
->type_argv
[i
]))
2781 is_open_method (MonoMethod
*method
)
2783 MonoGenericContext
*context
;
2785 if (!method
->is_inflated
)
2787 context
= mono_method_get_context (method
);
2788 if (context
->class_inst
&& context
->class_inst
->is_open
)
2790 if (context
->method_inst
&& context
->method_inst
->is_open
)
2796 mono_insert_nop_in_empty_bb (MonoCompile
*cfg
)
2799 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2803 MONO_INST_NEW (cfg
, nop
, OP_NOP
);
2804 MONO_ADD_INS (bb
, nop
);
2808 insert_safepoint (MonoCompile
*cfg
, MonoBasicBlock
*bblock
)
2810 MonoInst
*poll_addr
, *ins
;
2812 if (cfg
->disable_gc_safe_points
)
2815 if (cfg
->verbose_level
> 1)
2816 printf ("ADDING SAFE POINT TO BB %d\n", bblock
->block_num
);
2818 g_assert (mini_safepoints_enabled ());
2819 NEW_AOTCONST (cfg
, poll_addr
, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG
, (gpointer
)&mono_polling_required
);
2821 MONO_INST_NEW (cfg
, ins
, OP_GC_SAFE_POINT
);
2822 ins
->sreg1
= poll_addr
->dreg
;
2824 if (bblock
->flags
& BB_EXCEPTION_HANDLER
) {
2825 MonoInst
*eh_op
= bblock
->code
;
2827 if (eh_op
&& eh_op
->opcode
!= OP_START_HANDLER
&& eh_op
->opcode
!= OP_GET_EX_OBJ
) {
2830 MonoInst
*next_eh_op
= eh_op
? eh_op
->next
: NULL
;
2831 // skip all EH relateds ops
2832 while (next_eh_op
&& (next_eh_op
->opcode
== OP_START_HANDLER
|| next_eh_op
->opcode
== OP_GET_EX_OBJ
)) {
2834 next_eh_op
= eh_op
->next
;
2838 mono_bblock_insert_after_ins (bblock
, eh_op
, poll_addr
);
2839 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2840 } else if (bblock
== cfg
->bb_entry
) {
2841 mono_bblock_insert_after_ins (bblock
, bblock
->last_ins
, poll_addr
);
2842 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2845 mono_bblock_insert_before_ins (bblock
, NULL
, poll_addr
);
2846 mono_bblock_insert_after_ins (bblock
, poll_addr
, ins
);
2851 This code inserts safepoints into managed code at important code paths.
2854 -the first basic block
2855 -landing BB for exception handlers
2860 insert_safepoints (MonoCompile
*cfg
)
2864 g_assert (mini_safepoints_enabled ());
2866 if (COMPILE_LLVM (cfg
)) {
2867 if (!cfg
->llvm_only
&& cfg
->compile_aot
) {
2868 /* We rely on LLVM's safepoints insertion capabilities. */
2869 if (cfg
->verbose_level
> 1)
2870 printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
2875 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
2876 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2877 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2878 if (info
&& info
->subtype
== WRAPPER_SUBTYPE_ICALL_WRAPPER
&&
2879 (info
->d
.icall
.func
== mono_threads_state_poll
||
2880 info
->d
.icall
.func
== mono_thread_interruption_checkpoint
||
2881 info
->d
.icall
.func
== mono_threads_exit_gc_safe_region_unbalanced
)) {
2882 if (cfg
->verbose_level
> 1)
2883 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2888 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
2889 if (cfg
->verbose_level
> 1)
2890 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2894 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
2895 WrapperInfo
*info
= mono_marshal_get_wrapper_info (cfg
->method
);
2897 if (info
&& (info
->subtype
== WRAPPER_SUBTYPE_INTERP_IN
|| info
->subtype
== WRAPPER_SUBTYPE_INTERP_LMF
)) {
2898 /* These wrappers shouldn't do any icalls */
2899 if (cfg
->verbose_level
> 1)
2900 printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
2905 if (cfg
->verbose_level
> 1)
2906 printf ("INSERTING SAFEPOINTS\n");
2907 if (cfg
->verbose_level
> 2)
2908 mono_print_code (cfg
, "BEFORE SAFEPOINTS");
2910 /* if the method doesn't contain
2911 * (1) a call (so it's a leaf method)
2913 * we can skip the GC safepoint on method entry. */
2914 gboolean requires_safepoint
= cfg
->has_calls
;
2916 for (bb
= cfg
->bb_entry
->next_bb
; bb
; bb
= bb
->next_bb
) {
2917 if (bb
->loop_body_start
|| (bb
->flags
& BB_EXCEPTION_HANDLER
)) {
2918 requires_safepoint
= TRUE
;
2919 insert_safepoint (cfg
, bb
);
2923 if (requires_safepoint
)
2924 insert_safepoint (cfg
, cfg
->bb_entry
);
2926 if (cfg
->verbose_level
> 2)
2927 mono_print_code (cfg
, "AFTER SAFEPOINTS");
2933 mono_insert_branches_between_bblocks (MonoCompile
*cfg
)
2937 /* Add branches between non-consecutive bblocks */
2938 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2939 if (bb
->last_ins
&& MONO_IS_COND_BRANCH_OP (bb
->last_ins
) &&
2940 bb
->last_ins
->inst_false_bb
&& bb
->next_bb
!= bb
->last_ins
->inst_false_bb
) {
2941 /* we are careful when inverting, since bugs like #59580
2942 * could show up when dealing with NaNs.
2944 if (MONO_IS_COND_BRANCH_NOFP(bb
->last_ins
) && bb
->next_bb
== bb
->last_ins
->inst_true_bb
) {
2945 MonoBasicBlock
*tmp
= bb
->last_ins
->inst_true_bb
;
2946 bb
->last_ins
->inst_true_bb
= bb
->last_ins
->inst_false_bb
;
2947 bb
->last_ins
->inst_false_bb
= tmp
;
2949 bb
->last_ins
->opcode
= mono_reverse_branch_op (bb
->last_ins
->opcode
);
2951 MonoInst
*inst
= (MonoInst
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (MonoInst
));
2952 inst
->opcode
= OP_BR
;
2953 inst
->inst_target_bb
= bb
->last_ins
->inst_false_bb
;
2954 mono_bblock_add_inst (bb
, inst
);
2959 if (cfg
->verbose_level
>= 4) {
2960 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2961 MonoInst
*tree
= bb
->code
;
2962 g_print ("DUMP BLOCK %d:\n", bb
->block_num
);
2965 for (; tree
; tree
= tree
->next
) {
2966 mono_print_ins_index (-1, tree
);
2972 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
2973 bb
->max_vreg
= cfg
->next_vreg
;
2978 init_backend (MonoBackend
*backend
)
2980 #ifdef MONO_ARCH_NEED_GOT_VAR
2981 backend
->need_got_var
= 1;
2983 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2984 backend
->have_card_table_wb
= 1;
2986 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
2987 backend
->have_op_generic_class_init
= 1;
2989 #ifdef MONO_ARCH_EMULATE_MUL_DIV
2990 backend
->emulate_mul_div
= 1;
2992 #ifdef MONO_ARCH_EMULATE_DIV
2993 backend
->emulate_div
= 1;
2995 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
2996 backend
->emulate_long_shift_opts
= 1;
2998 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
2999 backend
->have_objc_get_selector
= 1;
3001 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
3002 backend
->have_generalized_imt_trampoline
= 1;
3004 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3005 backend
->gshared_supported
= 1;
3007 if (MONO_ARCH_USE_FPSTACK
)
3008 backend
->use_fpstack
= 1;
3009 // Does the ABI have a volatile non-parameter register, so tailcall
3010 // can pass context to generics or interfaces?
3011 backend
->have_volatile_non_param_register
= MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER
;
3012 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
3013 backend
->have_op_tailcall_membase
= 1;
3015 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
3016 backend
->have_op_tailcall_reg
= 1;
3018 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3019 backend
->monitor_enter_adjustment
= 1;
3021 backend
->monitor_enter_adjustment
= MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
;
3023 #if defined(MONO_ARCH_ILP32)
3026 #ifdef MONO_ARCH_NEED_DIV_CHECK
3027 backend
->need_div_check
= 1;
3029 #ifdef NO_UNALIGNED_ACCESS
3030 backend
->no_unaligned_access
= 1;
3032 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3033 backend
->dyn_call_param_area
= MONO_ARCH_DYN_CALL_PARAM_AREA
;
3035 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3036 backend
->disable_div_with_mul
= 1;
3038 #ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
3039 backend
->explicit_null_checks
= 1;
3041 #ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
3042 backend
->optimized_div
= 1;
3047 * mini_method_compile:
3048 * @method: the method to compile
3049 * @opts: the optimization flags to use
3050 * @domain: the domain where the method will be compiled in
3051 * @flags: compilation flags
3052 * @parts: debug flag
3054 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3055 * field in the returned struct to see if compilation succeded.
3058 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
3060 MonoMethodHeader
*header
;
3061 MonoMethodSignature
*sig
;
3064 gboolean try_generic_shared
, try_llvm
= FALSE
;
3065 MonoMethod
*method_to_compile
, *method_to_register
;
3066 gboolean method_is_gshared
= FALSE
;
3067 gboolean run_cctors
= (flags
& JIT_FLAG_RUN_CCTORS
) ? 1 : 0;
3068 gboolean compile_aot
= (flags
& JIT_FLAG_AOT
) ? 1 : 0;
3069 gboolean full_aot
= (flags
& JIT_FLAG_FULL_AOT
) ? 1 : 0;
3070 gboolean disable_direct_icalls
= (flags
& JIT_FLAG_NO_DIRECT_ICALLS
) ? 1 : 0;
3071 gboolean gsharedvt_method
= FALSE
;
3073 gboolean llvm
= (flags
& JIT_FLAG_LLVM
) ? 1 : 0;
3075 static gboolean verbose_method_inited
;
3076 static char **verbose_method_names
;
3078 mono_atomic_inc_i32 (&mono_jit_stats
.methods_compiled
);
3079 MONO_PROFILER_RAISE (jit_begin
, (method
));
3080 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3081 MONO_PROBE_METHOD_COMPILE_BEGIN (method
);
3083 gsharedvt_method
= is_gsharedvt_method (method
);
3086 * In AOT mode, method can be the following:
3087 * - a gsharedvt method.
3088 * - a method inflated with type parameters. This is for ref/partial sharing.
3089 * - a method inflated with concrete types.
3092 if (is_open_method (method
)) {
3093 try_generic_shared
= TRUE
;
3094 method_is_gshared
= TRUE
;
3096 try_generic_shared
= FALSE
;
3098 g_assert (opts
& MONO_OPT_GSHARED
);
3100 try_generic_shared
= mono_class_generic_sharing_enabled (method
->klass
) &&
3101 (opts
& MONO_OPT_GSHARED
) && mono_method_is_generic_sharable_full (method
, FALSE
, FALSE
, FALSE
);
3102 if (mini_is_gsharedvt_sharable_method (method
)) {
3104 if (!mono_debug_count ())
3105 try_generic_shared = FALSE;
3111 if (try_generic_shared && !mono_debug_count ())
3112 try_generic_shared = FALSE;
3115 if (opts
& MONO_OPT_GSHARED
) {
3116 if (try_generic_shared
)
3117 mono_atomic_inc_i32 (&mono_stats
.generics_sharable_methods
);
3118 else if (mono_method_is_generic_impl (method
))
3119 mono_atomic_inc_i32 (&mono_stats
.generics_unsharable_methods
);
3123 try_llvm
= mono_use_llvm
|| llvm
;
3126 #ifndef MONO_ARCH_FLOAT32_SUPPORTED
3127 opts
&= ~MONO_OPT_FLOAT32
;
3131 if (method_is_gshared
) {
3132 method_to_compile
= method
;
3134 if (try_generic_shared
) {
3136 method_to_compile
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
3137 mono_error_assert_ok (error
);
3139 method_to_compile
= method
;
3143 cfg
= g_new0 (MonoCompile
, 1);
3144 cfg
->method
= method_to_compile
;
3145 cfg
->mempool
= mono_mempool_new ();
3147 cfg
->run_cctors
= run_cctors
;
3148 cfg
->domain
= domain
;
3149 cfg
->verbose_level
= mini_verbose
;
3150 cfg
->compile_aot
= compile_aot
;
3151 cfg
->full_aot
= full_aot
;
3152 cfg
->disable_omit_fp
= mini_debug_options
.disable_omit_fp
;
3153 cfg
->skip_visibility
= method
->skip_visibility
;
3154 cfg
->orig_method
= method
;
3155 cfg
->gen_seq_points
= !mini_debug_options
.no_seq_points_compact_data
|| mini_debug_options
.gen_sdb_seq_points
;
3156 cfg
->gen_sdb_seq_points
= mini_debug_options
.gen_sdb_seq_points
;
3157 cfg
->llvm_only
= (flags
& JIT_FLAG_LLVM_ONLY
) != 0;
3158 cfg
->interp
= (flags
& JIT_FLAG_INTERP
) != 0;
3159 cfg
->backend
= current_backend
;
3162 if (cfg
->method
->wrapper_type
!= MONO_WRAPPER_NONE
) {
3163 /* FIXME: Why is this needed */
3164 cfg
->gen_seq_points
= FALSE
;
3165 cfg
->gen_sdb_seq_points
= FALSE
;
3168 if (cfg
->method
->wrapper_type
== MONO_WRAPPER_ALLOC
) {
3169 /* We can't have seq points inside gc critical regions */
3170 cfg
->gen_seq_points
= FALSE
;
3171 cfg
->gen_sdb_seq_points
= FALSE
;
3173 /* coop requires loop detection to happen */
3174 if (mini_safepoints_enabled ())
3175 cfg
->opt
|= MONO_OPT_LOOP
;
3176 if (cfg
->backend
->explicit_null_checks
) {
3177 /* some platforms have null pages, so we can't SIGSEGV */
3178 cfg
->explicit_null_checks
= TRUE
;
3180 cfg
->explicit_null_checks
= mini_debug_options
.explicit_null_checks
|| (flags
& JIT_FLAG_EXPLICIT_NULL_CHECKS
);
3182 cfg
->soft_breakpoints
= mini_debug_options
.soft_breakpoints
;
3183 cfg
->check_pinvoke_callconv
= mini_debug_options
.check_pinvoke_callconv
;
3184 cfg
->disable_direct_icalls
= disable_direct_icalls
;
3185 cfg
->direct_pinvoke
= (flags
& JIT_FLAG_DIRECT_PINVOKE
) != 0;
3186 if (try_generic_shared
)
3187 cfg
->gshared
= TRUE
;
3188 cfg
->compile_llvm
= try_llvm
;
3189 cfg
->token_info_hash
= g_hash_table_new (NULL
, NULL
);
3190 if (cfg
->compile_aot
)
3191 cfg
->method_index
= aot_method_index
;
3194 if (!mono_debug_count ())
3195 cfg->opt &= ~MONO_OPT_FLOAT32;
3198 cfg
->opt
&= ~MONO_OPT_SIMD
;
3199 cfg
->r4fp
= (cfg
->opt
& MONO_OPT_FLOAT32
) ? 1 : 0;
3200 cfg
->r4_stack_type
= cfg
->r4fp
? STACK_R4
: STACK_R8
;
3202 if (cfg
->gen_seq_points
)
3203 cfg
->seq_points
= g_ptr_array_new ();
3204 error_init (&cfg
->error
);
3206 if (cfg
->compile_aot
&& !try_generic_shared
&& (method
->is_generic
|| mono_class_is_gtd (method
->klass
) || method_is_gshared
)) {
3207 cfg
->exception_type
= MONO_EXCEPTION_GENERIC_SHARING_FAILED
;
3211 if (cfg
->gshared
&& (gsharedvt_method
|| mini_is_gsharedvt_sharable_method (method
))) {
3212 MonoMethodInflated
*inflated
;
3213 MonoGenericContext
*context
;
3215 if (gsharedvt_method
) {
3216 g_assert (method
->is_inflated
);
3217 inflated
= (MonoMethodInflated
*)method
;
3218 context
= &inflated
->context
;
3220 /* We are compiling a gsharedvt method directly */
3221 g_assert (compile_aot
);
3223 g_assert (method_to_compile
->is_inflated
);
3224 inflated
= (MonoMethodInflated
*)method_to_compile
;
3225 context
= &inflated
->context
;
3228 mini_init_gsctx (NULL
, cfg
->mempool
, context
, &cfg
->gsctx
);
3229 cfg
->gsctx_context
= context
;
3231 cfg
->gsharedvt
= TRUE
;
3232 if (!cfg
->llvm_only
) {
3233 cfg
->disable_llvm
= TRUE
;
3234 cfg
->exception_message
= g_strdup ("gsharedvt");
3239 method_to_register
= method_to_compile
;
3241 g_assert (method
== method_to_compile
);
3242 method_to_register
= method
;
3244 cfg
->method_to_register
= method_to_register
;
3247 sig
= mono_method_signature_checked (cfg
->method
, err
);
3249 cfg
->exception_type
= MONO_EXCEPTION_TYPE_LOAD
;
3250 cfg
->exception_message
= g_strdup (mono_error_get_message (err
));
3251 mono_error_cleanup (err
);
3252 if (MONO_METHOD_COMPILE_END_ENABLED ())
3253 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3257 header
= cfg
->header
= mono_method_get_header_checked (cfg
->method
, &cfg
->error
);
3259 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3260 if (MONO_METHOD_COMPILE_END_ENABLED ())
3261 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3267 static gboolean inited
;
3273 * Check for methods which cannot be compiled by LLVM early, to avoid
3274 * the extra compilation pass.
3276 if (COMPILE_LLVM (cfg
)) {
3277 mono_llvm_check_method_supported (cfg
);
3278 if (cfg
->disable_llvm
) {
3279 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3280 //nm = mono_method_full_name (cfg->method, TRUE);
3281 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3284 if (cfg
->llvm_only
) {
3285 g_free (cfg
->exception_message
);
3286 cfg
->disable_aot
= TRUE
;
3289 mono_destroy_compile (cfg
);
3291 goto restart_compile
;
3297 cfg
->prof_flags
= mono_profiler_get_call_instrumentation_flags (cfg
->method
);
3298 cfg
->prof_coverage
= mono_profiler_coverage_instrumentation_enabled (cfg
->method
);
3300 gboolean trace
= mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
);
3302 cfg
->prof_flags
= (MonoProfilerCallInstrumentationFlags
)(
3303 MONO_PROFILER_CALL_INSTRUMENTATION_ENTER
| MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT
|
3304 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE
| MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT
);
3306 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3307 if (mini_debug_options
.mdb_optimizations
|| MONO_CFG_PROFILE_CALL_CONTEXT (cfg
)) {
3308 cfg
->disable_reuse_registers
= TRUE
;
3309 cfg
->disable_reuse_stack_slots
= TRUE
;
3311 * This decreases the change the debugger will read registers/stack slots which are
3312 * not yet initialized.
3314 cfg
->disable_initlocals_opt
= TRUE
;
3316 cfg
->extend_live_ranges
= TRUE
;
3318 /* The debugger needs all locals to be on the stack or in a global register */
3319 cfg
->disable_vreg_to_lvreg
= TRUE
;
3321 /* Don't remove unused variables when running inside the debugger since the user
3322 * may still want to view them. */
3323 cfg
->disable_deadce_vars
= TRUE
;
3325 cfg
->opt
&= ~MONO_OPT_DEADCE
;
3326 cfg
->opt
&= ~MONO_OPT_INLINE
;
3327 cfg
->opt
&= ~MONO_OPT_COPYPROP
;
3328 cfg
->opt
&= ~MONO_OPT_CONSPROP
;
3330 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3331 cfg
->disable_out_of_line_bblocks
= TRUE
;
3334 if (mono_using_xdebug
) {
3336 * Make each variable use its own register/stack slot and extend
3337 * their liveness to cover the whole method, making them displayable
3338 * in gdb even after they are dead.
3340 cfg
->disable_reuse_registers
= TRUE
;
3341 cfg
->disable_reuse_stack_slots
= TRUE
;
3342 cfg
->extend_live_ranges
= TRUE
;
3343 cfg
->compute_precise_live_ranges
= TRUE
;
3346 mini_gc_init_cfg (cfg
);
3348 if (method
->wrapper_type
== MONO_WRAPPER_OTHER
) {
3349 WrapperInfo
*info
= mono_marshal_get_wrapper_info (method
);
3351 if ((info
&& (info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG
|| info
->subtype
== WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG
))) {
3352 cfg
->disable_gc_safe_points
= TRUE
;
3353 /* This is safe, these wrappers only store to the stack */
3354 cfg
->gen_write_barriers
= FALSE
;
3358 if (COMPILE_LLVM (cfg
)) {
3359 cfg
->opt
|= MONO_OPT_ABCREM
;
3362 if (!verbose_method_inited
) {
3363 char *env
= g_getenv ("MONO_VERBOSE_METHOD");
3365 verbose_method_names
= g_strsplit (env
, ";", -1);
3367 verbose_method_inited
= TRUE
;
3369 if (verbose_method_names
) {
3372 for (i
= 0; verbose_method_names
[i
] != NULL
; i
++){
3373 const char *name
= verbose_method_names
[i
];
3375 if ((strchr (name
, '.') > name
) || strchr (name
, ':')) {
3376 MonoMethodDesc
*desc
;
3378 desc
= mono_method_desc_new (name
, TRUE
);
3380 if (mono_method_desc_full_match (desc
, cfg
->method
)) {
3381 cfg
->verbose_level
= 4;
3383 mono_method_desc_free (desc
);
3386 if (strcmp (cfg
->method
->name
, name
) == 0)
3387 cfg
->verbose_level
= 4;
3392 cfg
->intvars
= (guint16
*)mono_mempool_alloc0 (cfg
->mempool
, sizeof (guint16
) * STACK_MAX
* header
->max_stack
);
3394 if (cfg
->verbose_level
> 0) {
3397 method_name
= mono_method_get_full_name (method
);
3398 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg
) ? "llvm " : "", cfg
->gsharedvt
? "gsharedvt " : "", (cfg
->gshared
&& !cfg
->gsharedvt
) ? "gshared " : "", method_name
);
3400 if (COMPILE_LLVM (cfg))
3401 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3402 else if (cfg->gsharedvt)
3403 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3404 else if (cfg->gshared)
3405 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3407 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3409 g_free (method_name
);
3412 if (cfg
->opt
& MONO_OPT_ABCREM
)
3413 cfg
->opt
|= MONO_OPT_SSA
;
3415 cfg
->rs
= mono_regstate_new ();
3416 cfg
->next_vreg
= cfg
->rs
->next_vreg
;
3418 /* FIXME: Fix SSA to handle branches inside bblocks */
3419 if (cfg
->opt
& MONO_OPT_SSA
)
3420 cfg
->enable_extended_bblocks
= FALSE
;
3423 * FIXME: This confuses liveness analysis because variables which are assigned after
3424 * a branch inside a bblock become part of the kill set, even though the assignment
3425 * might not get executed. This causes the optimize_initlocals pass to delete some
3426 * assignments which are needed.
3427 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3430 //cfg->enable_extended_bblocks = TRUE;
3432 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3433 if (mono_compile_is_broken (cfg
, cfg
->method
, TRUE
)) {
3434 if (mini_get_debug_options ()->break_on_unverified
)
3440 * create MonoInst* which represents arguments and local variables
3442 mono_compile_create_vars (cfg
);
3444 mono_cfg_dump_create_context (cfg
);
3445 mono_cfg_dump_begin_group (cfg
);
3447 MONO_TIME_TRACK (mono_jit_stats
.jit_method_to_ir
, i
= mono_method_to_ir (cfg
, method_to_compile
, NULL
, NULL
, NULL
, NULL
, 0, FALSE
));
3448 mono_cfg_dump_ir (cfg
, "method-to-ir");
3450 if (cfg
->gdump_ctx
!= NULL
) {
3451 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3452 mono_insert_nop_in_empty_bb (cfg
);
3453 mono_cfg_dump_ir (cfg
, "mono_insert_nop_in_empty_bb");
3457 if (try_generic_shared
&& cfg
->exception_type
== MONO_EXCEPTION_GENERIC_SHARING_FAILED
) {
3459 if (MONO_METHOD_COMPILE_END_ENABLED ())
3460 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3463 mono_destroy_compile (cfg
);
3464 try_generic_shared
= FALSE
;
3465 goto restart_compile
;
3467 g_assert (cfg
->exception_type
!= MONO_EXCEPTION_GENERIC_SHARING_FAILED
);
3469 if (MONO_METHOD_COMPILE_END_ENABLED ())
3470 MONO_PROBE_METHOD_COMPILE_END (method
, FALSE
);
3471 /* cfg contains the details of the failure, so let the caller cleanup */
3475 cfg
->stat_basic_blocks
+= cfg
->num_bblocks
;
3477 if (COMPILE_LLVM (cfg
)) {
3480 /* The IR has to be in SSA form for LLVM */
3481 cfg
->opt
|= MONO_OPT_SSA
;
3485 // Allow SSA on the result value
3486 cfg
->ret
->flags
&= ~MONO_INST_VOLATILE
;
3488 // Add an explicit return instruction referencing the return value
3489 MONO_INST_NEW (cfg
, ins
, OP_SETRET
);
3490 ins
->sreg1
= cfg
->ret
->dreg
;
3492 MONO_ADD_INS (cfg
->bb_exit
, ins
);
3495 cfg
->opt
&= ~MONO_OPT_LINEARS
;
3498 cfg
->opt
&= ~MONO_OPT_BRANCH
;
3501 /* todo: remove code when we have verified that the liveness for try/catch blocks
3505 * Currently, this can't be commented out since exception blocks are not
3506 * processed during liveness analysis.
3507 * It is also needed, because otherwise the local optimization passes would
3508 * delete assignments in cases like this:
3510 * <something which throws>
3512 * This also allows SSA to be run on methods containing exception clauses, since
3513 * SSA will ignore variables marked VOLATILE.
3515 MONO_TIME_TRACK (mono_jit_stats
.jit_liveness_handle_exception_clauses
, mono_liveness_handle_exception_clauses (cfg
));
3516 mono_cfg_dump_ir (cfg
, "liveness_handle_exception_clauses");
3518 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_out_of_line_bblock
, mono_handle_out_of_line_bblock (cfg
));
3519 mono_cfg_dump_ir (cfg
, "handle_out_of_line_bblock");
3521 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3523 if (!COMPILE_LLVM (cfg
)) {
3524 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_long_opts
, mono_decompose_long_opts (cfg
));
3525 mono_cfg_dump_ir (cfg
, "decompose_long_opts");
3528 /* Should be done before branch opts */
3529 if (cfg
->opt
& (MONO_OPT_CONSPROP
| MONO_OPT_COPYPROP
)) {
3530 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop
, mono_local_cprop (cfg
));
3531 mono_cfg_dump_ir (cfg
, "local_cprop");
3534 if (cfg
->flags
& MONO_CFG_HAS_TYPE_CHECK
) {
3535 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_typechecks
, mono_decompose_typechecks (cfg
));
3536 if (cfg
->gdump_ctx
!= NULL
) {
3537 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3538 mono_insert_nop_in_empty_bb (cfg
);
3540 mono_cfg_dump_ir (cfg
, "decompose_typechecks");
3544 * Should be done after cprop which can do strength reduction on
3545 * some of these ops, after propagating immediates.
3547 if (cfg
->has_emulated_ops
) {
3548 MONO_TIME_TRACK (mono_jit_stats
.jit_local_emulate_ops
, mono_local_emulate_ops (cfg
));
3549 mono_cfg_dump_ir (cfg
, "local_emulate_ops");
3552 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3553 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches
, mono_optimize_branches (cfg
));
3554 mono_cfg_dump_ir (cfg
, "optimize_branches");
3557 /* This must be done _before_ global reg alloc and _after_ decompose */
3558 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs
, mono_handle_global_vregs (cfg
));
3559 mono_cfg_dump_ir (cfg
, "handle_global_vregs");
3560 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3561 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce
, mono_local_deadce (cfg
));
3562 mono_cfg_dump_ir (cfg
, "local_deadce");
3564 if (cfg
->opt
& MONO_OPT_ALIAS_ANALYSIS
) {
3565 MONO_TIME_TRACK (mono_jit_stats
.jit_local_alias_analysis
, mono_local_alias_analysis (cfg
));
3566 mono_cfg_dump_ir (cfg
, "local_alias_analysis");
3568 /* Disable this for LLVM to make the IR easier to handle */
3569 if (!COMPILE_LLVM (cfg
)) {
3570 MONO_TIME_TRACK (mono_jit_stats
.jit_if_conversion
, mono_if_conversion (cfg
));
3571 mono_cfg_dump_ir (cfg
, "if_conversion");
3574 mono_threads_safepoint ();
3576 MONO_TIME_TRACK (mono_jit_stats
.jit_bb_ordering
, mono_bb_ordering (cfg
));
3577 mono_cfg_dump_ir (cfg
, "bb_ordering");
3579 if (((cfg
->num_varinfo
> 2000) || (cfg
->num_bblocks
> 1000)) && !cfg
->compile_aot
) {
3581 * we disable some optimizations if there are too many variables
3582 * because JIT time may become too expensive. The actual number needs
3583 * to be tweaked and eventually the non-linear algorithms should be fixed.
3585 cfg
->opt
&= ~ (MONO_OPT_LINEARS
| MONO_OPT_COPYPROP
| MONO_OPT_CONSPROP
);
3586 cfg
->disable_ssa
= TRUE
;
3589 if (cfg
->num_varinfo
> 10000 && !cfg
->llvm_only
)
3590 /* Disable llvm for overly complex methods */
3591 cfg
->disable_ssa
= TRUE
;
3593 if (cfg
->opt
& MONO_OPT_LOOP
) {
3594 MONO_TIME_TRACK (mono_jit_stats
.jit_compile_dominator_info
, mono_compile_dominator_info (cfg
, MONO_COMP_DOM
| MONO_COMP_IDOM
));
3595 MONO_TIME_TRACK (mono_jit_stats
.jit_compute_natural_loops
, mono_compute_natural_loops (cfg
));
3598 if (mono_threads_are_safepoints_enabled ()) {
3599 MONO_TIME_TRACK (mono_jit_stats
.jit_insert_safepoints
, insert_safepoints (cfg
));
3600 mono_cfg_dump_ir (cfg
, "insert_safepoints");
3603 /* after method_to_ir */
3605 if (MONO_METHOD_COMPILE_END_ENABLED ())
3606 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3611 if (header->num_clauses)
3612 cfg->disable_ssa = TRUE;
3615 //#define DEBUGSSA "logic_run"
3616 //#define DEBUGSSA_CLASS "Tests"
3619 if (!cfg
->disable_ssa
) {
3620 mono_local_cprop (cfg
);
3623 mono_ssa_compute (cfg
);
3627 if (cfg
->opt
& MONO_OPT_SSA
) {
3628 if (!(cfg
->comp_done
& MONO_COMP_SSA
) && !cfg
->disable_ssa
) {
3630 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_compute
, mono_ssa_compute (cfg
));
3631 mono_cfg_dump_ir (cfg
, "ssa_compute");
3634 if (cfg
->verbose_level
>= 2) {
3641 /* after SSA translation */
3643 if (MONO_METHOD_COMPILE_END_ENABLED ())
3644 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3648 if ((cfg
->opt
& MONO_OPT_CONSPROP
) || (cfg
->opt
& MONO_OPT_COPYPROP
)) {
3649 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3651 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_cprop
, mono_ssa_cprop (cfg
));
3652 mono_cfg_dump_ir (cfg
, "ssa_cprop");
3658 if (cfg
->comp_done
& MONO_COMP_SSA
&& !COMPILE_LLVM (cfg
)) {
3659 //mono_ssa_strength_reduction (cfg);
3661 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3662 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_deadce
, mono_ssa_deadce (cfg
));
3663 mono_cfg_dump_ir (cfg
, "ssa_deadce");
3666 if ((cfg
->flags
& (MONO_CFG_HAS_LDELEMA
|MONO_CFG_HAS_CHECK_THIS
)) && (cfg
->opt
& MONO_OPT_ABCREM
)) {
3667 MONO_TIME_TRACK (mono_jit_stats
.jit_perform_abc_removal
, mono_perform_abc_removal (cfg
));
3668 mono_cfg_dump_ir (cfg
, "perform_abc_removal");
3671 MONO_TIME_TRACK (mono_jit_stats
.jit_ssa_remove
, mono_ssa_remove (cfg
));
3672 mono_cfg_dump_ir (cfg
, "ssa_remove");
3673 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop2
, mono_local_cprop (cfg
));
3674 mono_cfg_dump_ir (cfg
, "local_cprop2");
3675 MONO_TIME_TRACK (mono_jit_stats
.jit_handle_global_vregs2
, mono_handle_global_vregs (cfg
));
3676 mono_cfg_dump_ir (cfg
, "handle_global_vregs2");
3677 if (cfg
->opt
& MONO_OPT_DEADCE
) {
3678 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce2
, mono_local_deadce (cfg
));
3679 mono_cfg_dump_ir (cfg
, "local_deadce2");
3682 if (cfg
->opt
& MONO_OPT_BRANCH
) {
3683 MONO_TIME_TRACK (mono_jit_stats
.jit_optimize_branches2
, mono_optimize_branches (cfg
));
3684 mono_cfg_dump_ir (cfg
, "optimize_branches2");
3689 if (cfg
->comp_done
& MONO_COMP_SSA
&& COMPILE_LLVM (cfg
)) {
3690 mono_ssa_loop_invariant_code_motion (cfg
);
3691 mono_cfg_dump_ir (cfg
, "loop_invariant_code_motion");
3692 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3693 if (cfg
->opt
& MONO_OPT_ABCREM
) {
3694 mono_perform_abc_removal (cfg
);
3695 mono_cfg_dump_ir (cfg
, "abc_removal");
3699 /* after SSA removal */
3701 if (MONO_METHOD_COMPILE_END_ENABLED ())
3702 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3706 if (cfg
->llvm_only
&& cfg
->gsharedvt
)
3707 mono_ssa_remove_gsharedvt (cfg
);
3709 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3710 if (COMPILE_SOFT_FLOAT (cfg
))
3711 mono_decompose_soft_float (cfg
);
3713 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_vtype_opts
, mono_decompose_vtype_opts (cfg
));
3714 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
) {
3715 MONO_TIME_TRACK (mono_jit_stats
.jit_decompose_array_access_opts
, mono_decompose_array_access_opts (cfg
));
3716 mono_cfg_dump_ir (cfg
, "decompose_array_access_opts");
3720 #ifndef MONO_ARCH_GOT_REG
3725 g_assert (cfg
->got_var_allocated
);
3728 * Allways allocate the GOT var to a register, because keeping it
3729 * in memory will increase the number of live temporaries in some
3730 * code created by inssel.brg, leading to the well known spills+
3731 * branches problem. Testcase: mcs crash in
3732 * System.MonoCustomAttrs:GetCustomAttributes.
3734 #ifdef MONO_ARCH_GOT_REG
3735 got_reg
= MONO_ARCH_GOT_REG
;
3737 regs
= mono_arch_get_global_int_regs (cfg
);
3739 got_reg
= GPOINTER_TO_INT (regs
->data
);
3742 cfg
->got_var
->opcode
= OP_REGVAR
;
3743 cfg
->got_var
->dreg
= got_reg
;
3744 cfg
->used_int_regs
|= 1LL << cfg
->got_var
->dreg
;
3748 * Have to call this again to process variables added since the first call.
3750 MONO_TIME_TRACK(mono_jit_stats
.jit_liveness_handle_exception_clauses2
, mono_liveness_handle_exception_clauses (cfg
));
3752 if (cfg
->opt
& MONO_OPT_LINEARS
) {
3753 GList
*vars
, *regs
, *l
;
3755 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3756 cfg
->comp_done
&= ~MONO_COMP_LIVENESS
;
3757 if (!(cfg
->comp_done
& MONO_COMP_LIVENESS
))
3758 MONO_TIME_TRACK (mono_jit_stats
.jit_analyze_liveness
, mono_analyze_liveness (cfg
));
3760 if ((vars
= mono_arch_get_allocatable_int_vars (cfg
))) {
3761 regs
= mono_arch_get_global_int_regs (cfg
);
3762 /* Remove the reg reserved for holding the GOT address */
3764 for (l
= regs
; l
; l
= l
->next
) {
3765 if (GPOINTER_TO_UINT (l
->data
) == cfg
->got_var
->dreg
) {
3766 regs
= g_list_delete_link (regs
, l
);
3771 MONO_TIME_TRACK (mono_jit_stats
.jit_linear_scan
, mono_linear_scan (cfg
, vars
, regs
, &cfg
->used_int_regs
));
3772 mono_cfg_dump_ir (cfg
, "linear_scan");
3776 //mono_print_code (cfg, "");
3780 /* variables are allocated after decompose, since decompose could create temps */
3781 if (!COMPILE_LLVM (cfg
)) {
3782 MONO_TIME_TRACK (mono_jit_stats
.jit_arch_allocate_vars
, mono_arch_allocate_vars (cfg
));
3783 mono_cfg_dump_ir (cfg
, "arch_allocate_vars");
3784 if (cfg
->exception_type
)
3789 mono_allocate_gsharedvt_vars (cfg
);
3791 if (!COMPILE_LLVM (cfg
)) {
3792 gboolean need_local_opts
;
3793 MONO_TIME_TRACK (mono_jit_stats
.jit_spill_global_vars
, mono_spill_global_vars (cfg
, &need_local_opts
));
3794 mono_cfg_dump_ir (cfg
, "spill_global_vars");
3796 if (need_local_opts
|| cfg
->compile_aot
) {
3797 /* To optimize code created by spill_global_vars */
3798 MONO_TIME_TRACK (mono_jit_stats
.jit_local_cprop3
, mono_local_cprop (cfg
));
3799 if (cfg
->opt
& MONO_OPT_DEADCE
)
3800 MONO_TIME_TRACK (mono_jit_stats
.jit_local_deadce3
, mono_local_deadce (cfg
));
3801 mono_cfg_dump_ir (cfg
, "needs_local_opts");
3805 mono_insert_branches_between_bblocks (cfg
);
3807 if (COMPILE_LLVM (cfg
)) {
3811 /* The IR has to be in SSA form for LLVM */
3812 if (!(cfg
->comp_done
& MONO_COMP_SSA
)) {
3813 cfg
->exception_message
= g_strdup ("SSA disabled.");
3814 cfg
->disable_llvm
= TRUE
;
3817 if (cfg
->flags
& MONO_CFG_NEEDS_DECOMPOSE
)
3818 mono_decompose_array_access_opts (cfg
);
3820 if (!cfg
->disable_llvm
)
3821 mono_llvm_emit_method (cfg
);
3822 if (cfg
->disable_llvm
) {
3823 if (cfg
->verbose_level
>= (cfg
->llvm_only
? 0 : 1)) {
3824 //nm = mono_method_full_name (cfg->method, TRUE);
3825 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method
->klass
), method
->name
, cfg
->exception_message
);
3828 if (cfg
->llvm_only
) {
3829 cfg
->disable_aot
= TRUE
;
3832 mono_destroy_compile (cfg
);
3834 goto restart_compile
;
3837 if (cfg
->verbose_level
> 0 && !cfg
->compile_aot
) {
3838 nm
= mono_method_get_full_name (cfg
->method
);
3839 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3841 cfg
->native_code
, cfg
->native_code
+ cfg
->code_len
, cfg
->code_len
, cfg
->domain
->friendly_name
);
3846 MONO_TIME_TRACK (mono_jit_stats
.jit_codegen
, mono_codegen (cfg
));
3847 mono_cfg_dump_ir (cfg
, "codegen");
3848 if (cfg
->exception_type
)
3852 if (COMPILE_LLVM (cfg
))
3853 mono_atomic_inc_i32 (&mono_jit_stats
.methods_with_llvm
);
3855 mono_atomic_inc_i32 (&mono_jit_stats
.methods_without_llvm
);
3857 MONO_TIME_TRACK (mono_jit_stats
.jit_create_jit_info
, cfg
->jit_info
= create_jit_info (cfg
, method_to_compile
));
3859 if (cfg
->extend_live_ranges
) {
3860 /* Extend live ranges to cover the whole method */
3861 for (i
= 0; i
< cfg
->num_varinfo
; ++i
)
3862 MONO_VARINFO (cfg
, i
)->live_range_end
= cfg
->code_len
;
3865 MONO_TIME_TRACK (mono_jit_stats
.jit_gc_create_gc_map
, mini_gc_create_gc_map (cfg
));
3866 MONO_TIME_TRACK (mono_jit_stats
.jit_save_seq_point_info
, mono_save_seq_point_info (cfg
, cfg
->jit_info
));
3868 if (!cfg
->compile_aot
) {
3869 mono_save_xdebug_info (cfg
);
3870 mono_lldb_save_method_info (cfg
);
3873 if (cfg
->verbose_level
>= 2) {
3874 char *id
= mono_method_full_name (cfg
->method
, FALSE
);
3875 mono_disassemble_code (cfg
, cfg
->native_code
, cfg
->code_len
, id
+ 3);
3879 if (!cfg
->compile_aot
&& !(flags
& JIT_FLAG_DISCARD_RESULTS
)) {
3880 mono_domain_lock (cfg
->domain
);
3881 mono_jit_info_table_add (cfg
->domain
, cfg
->jit_info
);
3883 if (cfg
->method
->dynamic
)
3884 mono_dynamic_code_hash_lookup (cfg
->domain
, cfg
->method
)->ji
= cfg
->jit_info
;
3885 mono_domain_unlock (cfg
->domain
);
3890 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg
->method
, TRUE
));
3893 /* collect statistics */
3894 #ifndef DISABLE_PERFCOUNTERS
3895 mono_atomic_inc_i32 (&mono_perfcounters
->jit_methods
);
3896 mono_atomic_fetch_add_i32 (&mono_perfcounters
->jit_bytes
, header
->code_size
);
3898 gint32 code_size_ratio
= cfg
->code_len
;
3899 mono_atomic_fetch_add_i32 (&mono_jit_stats
.allocated_code_size
, code_size_ratio
);
3900 mono_atomic_fetch_add_i32 (&mono_jit_stats
.native_code_size
, code_size_ratio
);
3901 /* FIXME: use an explicit function to read booleans */
3902 if ((gboolean
)mono_atomic_load_i32 ((gint32
*)&mono_jit_stats
.enabled
)) {
3903 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.biggest_method_size
)) {
3904 mono_atomic_store_i32 (&mono_jit_stats
.biggest_method_size
, code_size_ratio
);
3905 char *biggest_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3906 biggest_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.biggest_method
, biggest_method
);
3907 g_free (biggest_method
);
3909 code_size_ratio
= (code_size_ratio
* 100) / header
->code_size
;
3910 if (code_size_ratio
> mono_atomic_load_i32 (&mono_jit_stats
.max_code_size_ratio
)) {
3911 mono_atomic_store_i32 (&mono_jit_stats
.max_code_size_ratio
, code_size_ratio
);
3912 char *max_ratio_method
= g_strdup_printf ("%s::%s)", m_class_get_name (method
->klass
), method
->name
);
3913 max_ratio_method
= (char*)mono_atomic_xchg_ptr ((gpointer
*)&mono_jit_stats
.max_ratio_method
, max_ratio_method
);
3914 g_free (max_ratio_method
);
3918 if (MONO_METHOD_COMPILE_END_ENABLED ())
3919 MONO_PROBE_METHOD_COMPILE_END (method
, TRUE
);
3921 mono_cfg_dump_close_group (cfg
);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   NOTE(review): the extraction is line-mangled here and some original
 * lines (else branch, loop variable declarations, returns) are elided,
 * so the code is left byte-identical and only comments are added.
 *   Checks whether KLASS — a generic instance, or a generic type
 * definition when CONTEXT_USED is set — has a variant (covariant or
 * contravariant) generic parameter whose type argument is a reference
 * type. Non-variant parameters are skipped in the loop below.
 */
3927 mini_class_has_reference_variant_generic_argument (MonoCompile
*cfg
, MonoClass
*klass
, int context_used
)
3930 MonoGenericContainer
*container
;
3931 MonoGenericInst
*ginst
;
/* Generic instance: the variance flags live on the container of the
 * generic type definition, the arguments on the instance context. */
3933 if (mono_class_is_ginst (klass
)) {
3934 container
= mono_class_get_generic_container (mono_class_get_generic_class (klass
)->container_class
);
3935 ginst
= mono_class_get_generic_class (klass
)->context
.class_inst
;
3936 } else if (mono_class_is_gtd (klass
) && context_used
) {
3937 container
= mono_class_get_generic_container (klass
);
3938 ginst
= container
->context
.class_inst
;
/* Walk all type arguments; only (co)variant parameters are of interest. */
3943 for (i
= 0; i
< container
->type_argc
; ++i
) {
3945 if (!(mono_generic_container_get_param_info (container
, i
)->flags
& (MONO_GEN_PARAM_VARIANT
|MONO_GEN_PARAM_COVARIANT
)))
3947 type
= ginst
->type_argv
[i
];
/* A reference-typed argument on a variant parameter is what we look for. */
3948 if (mini_type_is_reference (type
))
3955 mono_cfg_add_try_hole (MonoCompile
*cfg
, MonoExceptionClause
*clause
, guint8
*start
, MonoBasicBlock
*bb
)
3957 TryBlockHole
*hole
= (TryBlockHole
*)mono_mempool_alloc (cfg
->mempool
, sizeof (TryBlockHole
));
3958 hole
->clause
= clause
;
3959 hole
->start_offset
= start
- cfg
->native_code
;
3960 hole
->basic_block
= bb
;
3962 cfg
->try_block_holes
= g_slist_append_mempool (cfg
->mempool
, cfg
->try_block_holes
, hole
);
3966 mono_cfg_set_exception (MonoCompile
*cfg
, MonoExceptionType type
)
3968 cfg
->exception_type
= type
;
3971 /* Assumes ownership of the MSG argument */
3973 mono_cfg_set_exception_invalid_program (MonoCompile
*cfg
, char *msg
)
3975 mono_cfg_set_exception (cfg
, MONO_EXCEPTION_MONO_ERROR
);
3976 mono_error_set_generic_error (&cfg
->error
, "System", "InvalidProgramException", "%s", msg
);
3979 #endif /* DISABLE_JIT */
3981 gint64
mono_time_track_start ()
3983 return mono_100ns_ticks ();
3987 * mono_time_track_end:
3989 * Uses UnlockedAddDouble () to update \param time.
3991 void mono_time_track_end (gint64
*time
, gint64 start
)
3993 UnlockedAdd64 (time
, mono_100ns_ticks () - start
);
3997 * mono_update_jit_stats:
3999 * Only call this function in locked environments to avoid data races.
4001 MONO_NO_SANITIZE_THREAD
4003 mono_update_jit_stats (MonoCompile
*cfg
)
4005 mono_jit_stats
.allocate_var
+= cfg
->stat_allocate_var
;
4006 mono_jit_stats
.locals_stack_size
+= cfg
->stat_locals_stack_size
;
4007 mono_jit_stats
.basic_blocks
+= cfg
->stat_basic_blocks
;
4008 mono_jit_stats
.max_basic_blocks
= MAX (cfg
->stat_basic_blocks
, mono_jit_stats
.max_basic_blocks
);
4009 mono_jit_stats
.cil_code_size
+= cfg
->stat_cil_code_size
;
4010 mono_jit_stats
.regvars
+= cfg
->stat_n_regvars
;
4011 mono_jit_stats
.inlineable_methods
+= cfg
->stat_inlineable_methods
;
4012 mono_jit_stats
.inlined_methods
+= cfg
->stat_inlined_methods
;
4013 mono_jit_stats
.code_reallocs
+= cfg
->stat_code_reallocs
;
4017 * mono_jit_compile_method_inner:
4019 * Main entry point for the JIT.
 *
 * NOTE(review): the extraction is line-mangled and many original lines
 * (returns, braces, declarations) are elided here, so the code is left
 * byte-identical and only comments are added.
 *
 * Compiles METHOD for TARGET_DOMAIN at optimization level OPT via
 * mini_method_compile (), registers the result in the domain's JIT code
 * hash, updates JIT statistics and raises profiler events. Failures are
 * converted to a managed exception stored into ERROR.
4022 mono_jit_compile_method_inner (MonoMethod
*method
, MonoDomain
*target_domain
, int opt
, MonoError
*error
)
4025 gpointer code
= NULL
;
4026 MonoJitInfo
*jinfo
, *info
;
4028 MonoException
*ex
= NULL
;
4030 MonoMethod
*prof_method
, *shared
;
/* Time the whole compile and account it under mono_jit_stats.jit_time. */
4034 start
= mono_time_track_start ();
4035 cfg
= mini_method_compile (method
, opt
, target_domain
, JIT_FLAG_RUN_CCTORS
, 0, -1);
4036 gint64 jit_time
= 0.0;
4037 mono_time_track_end (&jit_time
, start
);
4038 UnlockedAdd64 (&mono_jit_stats
.jit_time
, jit_time
);
4040 prof_method
= cfg
->method
;
/* Dispatch on the compile outcome; every non-NONE type below is turned
 * into a managed exception instance in EX. */
4042 switch (cfg
->exception_type
) {
4043 case MONO_EXCEPTION_NONE
:
4045 case MONO_EXCEPTION_TYPE_LOAD
:
4046 case MONO_EXCEPTION_MISSING_FIELD
:
4047 case MONO_EXCEPTION_MISSING_METHOD
:
4048 case MONO_EXCEPTION_FILE_NOT_FOUND
:
4049 case MONO_EXCEPTION_BAD_IMAGE
:
4050 case MONO_EXCEPTION_INVALID_PROGRAM
: {
4051 /* Throw a type load exception if needed */
/* When a failed class is recorded, prefer its own failure exception;
 * otherwise map the exception_type to the matching corlib exception. */
4052 if (cfg
->exception_ptr
) {
4053 ex
= mono_class_get_exception_for_failure ((MonoClass
*)cfg
->exception_ptr
);
4055 if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_FIELD
)
4056 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingFieldException", cfg
->exception_message
);
4057 else if (cfg
->exception_type
== MONO_EXCEPTION_MISSING_METHOD
)
4058 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "MissingMethodException", cfg
->exception_message
);
4059 else if (cfg
->exception_type
== MONO_EXCEPTION_TYPE_LOAD
)
4060 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "TypeLoadException", cfg
->exception_message
);
4061 else if (cfg
->exception_type
== MONO_EXCEPTION_FILE_NOT_FOUND
)
4062 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System.IO", "FileNotFoundException", cfg
->exception_message
);
4063 else if (cfg
->exception_type
== MONO_EXCEPTION_BAD_IMAGE
)
4064 ex
= mono_get_exception_bad_image_format (cfg
->exception_message
);
4065 else if (cfg
->exception_type
== MONO_EXCEPTION_INVALID_PROGRAM
)
4066 ex
= mono_exception_from_name_msg (mono_defaults
.corlib
, "System", "InvalidProgramException", cfg
->exception_message
);
4068 g_assert_not_reached ();
4072 case MONO_EXCEPTION_MONO_ERROR
:
4073 // FIXME: MonoError has no copy ctor
4074 g_assert (!mono_error_ok (&cfg
->error
));
4075 ex
= mono_error_convert_to_exception (&cfg
->error
);
4078 g_assert_not_reached ();
/* Compilation failed: notify the profiler, free the compile and hand
 * the exception instance back through ERROR. */
4082 MONO_PROFILER_RAISE (jit_failed
, (method
));
4084 mono_destroy_compile (cfg
);
4085 mono_error_set_exception_instance (error
, ex
);
/* Sharable methods are registered under their shared representative. */
4090 if (mono_method_is_generic_sharable (method
, FALSE
)) {
4091 shared
= mini_get_shared_method_full (method
, SHARE_MODE_NONE
, error
);
4092 if (!is_ok (error
)) {
4093 MONO_PROFILER_RAISE (jit_failed
, (method
));
4094 mono_destroy_compile (cfg
);
4101 mono_domain_lock (target_domain
);
4103 /* Check if some other thread already did the job. In this case, we can
4104 discard the code this thread generated. */
4106 info
= mini_lookup_method (target_domain
, method
, shared
);
4108 /* We can't use a domain specific method in another domain */
4109 if ((target_domain
== mono_domain_get ()) || info
->domain_neutral
) {
/* Lost the race: reuse the winner's code, account the wasted JIT time. */
4110 code
= info
->code_start
;
4112 discarded_jit_time
+= jit_time
;
4116 /* The lookup + insert is atomic since this is done inside the domain lock */
4117 mono_domain_jit_code_hash_lock (target_domain
);
4118 mono_internal_hash_table_insert (&target_domain
->jit_code_hash
, cfg
->jit_info
->d
.method
, cfg
->jit_info
);
4119 mono_domain_jit_code_hash_unlock (target_domain
);
4121 code
= cfg
->native_code
;
4123 if (cfg
->gshared
&& mono_method_is_generic_sharable (method
, FALSE
))
4124 mono_atomic_inc_i32 (&mono_stats
.generics_shared_methods
);
4126 mono_atomic_inc_i32 (&mono_stats
.gsharedvt_methods
);
4129 jinfo
= cfg
->jit_info
;
4132 * Update global stats while holding a lock, instead of doing many
4133 * mono_atomic_inc_i32 operations during JITting.
4135 mono_update_jit_stats (cfg
);
/* The MonoCompile is no longer needed once the jit_info is registered. */
4137 mono_destroy_compile (cfg
);
4140 /* Update llvm callees */
4141 if (domain_jit_info (target_domain
)->llvm_jit_callees
) {
4142 GSList
*callees
= (GSList
*)g_hash_table_lookup (domain_jit_info (target_domain
)->llvm_jit_callees
, method
);
4145 for (l
= callees
; l
; l
= l
->next
) {
4146 gpointer
*addr
= (gpointer
*)l
->data
;
4152 mono_emit_jit_map (jinfo
);
4154 mono_domain_unlock (target_domain
);
4156 if (!mono_error_ok (error
))
/* Ensure the declaring class has a vtable so its cctor can be run. */
4159 vtable
= mono_class_vtable_checked (target_domain
, method
->klass
, error
);
4160 return_val_if_nok (error
, NULL
);
4162 if (method
->wrapper_type
== MONO_WRAPPER_MANAGED_TO_NATIVE
) {
4163 if (mono_marshal_method_from_wrapper (method
)) {
4164 /* Native func wrappers have no method */
4165 /* The profiler doesn't know about wrappers, so pass the original icall method */
4166 MONO_PROFILER_RAISE (jit_done
, (mono_marshal_method_from_wrapper (method
), jinfo
));
4169 MONO_PROFILER_RAISE (jit_done
, (method
, jinfo
));
4170 if (prof_method
!= method
)
4171 MONO_PROFILER_RAISE (jit_done
, (prof_method
, jinfo
));
/* Remoting wrappers are excluded from eager class initialization here. */
4173 if (!(method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE
||
4174 method
->wrapper_type
== MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK
||
4175 method
->wrapper_type
== MONO_WRAPPER_XDOMAIN_INVOKE
)) {
4176 if (!mono_runtime_class_init_full (vtable
, error
))
4183 * mini_get_underlying_type:
4185 * Return the type the JIT will use during compilation.
4186 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4187 * For gsharedvt types, it will return the original VAR/MVAR.
4190 mini_get_underlying_type (MonoType
*type
)
4192 return mini_type_get_underlying_type (type
);
4196 mini_jit_init (void)
4198 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &discarded_code
);
4199 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT
| MONO_COUNTER_LONG
| MONO_COUNTER_TIME
, &discarded_jit_time
);
4200 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT
| MONO_COUNTER_INT
, &jinfo_try_holes_size
);
4202 mono_os_mutex_init_recursive (&jit_mutex
);
4204 current_backend
= g_new0 (MonoBackend
, 1);
4205 init_backend (current_backend
);
/*
 * mini_jit_cleanup:
 *
 *   Tear-down counterpart of mini_jit_init (): releases the emulated
 * opcode tables. g_free (NULL) is a no-op, so this is safe even when the
 * tables were never populated.
 */
4210 mini_jit_cleanup (void)
4213 g_free (emul_opcode_map
);
4214 g_free (emul_opcode_opcodes
);
4220 mono_llvm_emit_aot_file_info (MonoAotFileInfo
*info
, gboolean has_jitted_code
)
4222 g_assert_not_reached ();
4225 void mono_llvm_emit_aot_data (const char *symbol
, guint8
*data
, int data_len
)
4227 g_assert_not_reached ();
4232 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/* Stub for builds without LLVM runtime support; must never be called. */
void
mono_llvm_cpp_throw_exception (void)
{
	g_assert_not_reached ();
}
4241 mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb
, gpointer arg
, gboolean
*out_thrown
)
4243 g_assert_not_reached ();
4251 mini_method_compile (MonoMethod
*method
, guint32 opts
, MonoDomain
*domain
, JitFlags flags
, int parts
, int aot_method_index
)
4253 g_assert_not_reached ();
4258 mono_destroy_compile (MonoCompile
*cfg
)
4260 g_assert_not_reached ();
4264 mono_add_patch_info (MonoCompile
*cfg
, int ip
, MonoJumpInfoType type
, gconstpointer target
)
4266 g_assert_not_reached ();
4269 #else // DISABLE_JIT
4272 mini_realloc_code_slow (MonoCompile
*cfg
, int size
)
4274 const int EXTRA_CODE_SPACE
= 16;
4276 if (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
)) {
4277 while (cfg
->code_len
+ size
> (cfg
->code_size
- EXTRA_CODE_SPACE
))
4278 cfg
->code_size
= cfg
->code_size
* 2 + EXTRA_CODE_SPACE
;
4279 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4280 cfg
->stat_code_reallocs
++;
4282 return cfg
->native_code
+ cfg
->code_len
;
4285 #endif /* DISABLE_JIT */
4288 mini_class_is_system_array (MonoClass
*klass
)
4290 return m_class_get_parent (klass
) == mono_defaults
.array_class
;
4294 * mono_target_pagesize:
4296 * query pagesize used to determine if an implicit NRE can be used
4299 mono_target_pagesize (void)
4301 /* We could query the system's pagesize via mono_pagesize (), however there
4302 * are pitfalls: sysconf (3) is called on some posix like systems, and per
4303 * POSIX.1-2008 this function doesn't have to be async-safe. Since this
4304 * function can be called from a signal handler, we simplify things by
4305 * using 4k on all targets. Implicit null-checks with an offset larger than
4306 * 4k are _very_ uncommon, so we don't mind emitting an explicit null-check