Revert "[mono][debugger] First PR to implement iCorDebug on mono (#20757)"
[mono-project.git] / mono / mini / mini.c
blobd12f3b07ed7e6aef238e8336fd51cb51dda381f0
1 /**
2 * \file
3 * The new Mono code generator.
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <config.h>
16 #ifdef HAVE_ALLOCA_H
17 #include <alloca.h>
18 #endif
19 #ifdef HAVE_UNISTD_H
20 #include <unistd.h>
21 #endif
22 #include <math.h>
23 #ifdef HAVE_SYS_TIME_H
24 #include <sys/time.h>
25 #endif
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
64 #include <mono/utils/mono-time.h>
66 #include "mini.h"
67 #include "seq-points.h"
68 #include "tasklets.h"
69 #include <string.h>
70 #include <ctype.h>
71 #include "trace.h"
72 #include "ir-emit.h"
74 #include "jit-icalls.h"
76 #include "mini-gc.h"
77 #include "debugger-agent.h"
78 #include "llvm-runtime.h"
79 #include "mini-llvm.h"
80 #include "lldb.h"
81 #include "aot-runtime.h"
82 #include "mini-runtime.h"
84 MonoCallSpec *mono_jit_trace_calls;
85 MonoMethodDesc *mono_inject_async_exc_method;
86 int mono_inject_async_exc_pos;
87 MonoMethodDesc *mono_break_at_bb_method;
88 int mono_break_at_bb_bb_num;
89 gboolean mono_do_x86_stack_align = TRUE;
90 gboolean mono_using_xdebug;
92 /* Counters */
93 static guint32 discarded_code;
94 static gint64 discarded_jit_time;
95 static guint32 jinfo_try_holes_size;
97 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
98 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
99 static mono_mutex_t jit_mutex;
101 #ifndef DISABLE_JIT
102 static MonoBackend *current_backend;
104 gpointer
105 mono_realloc_native_code (MonoCompile *cfg)
107 return g_realloc (cfg->native_code, cfg->code_size);
110 typedef struct {
111 MonoExceptionClause *clause;
112 MonoBasicBlock *basic_block;
113 int start_offset;
114 } TryBlockHole;
117 * mono_emit_unwind_op:
119 * Add an unwind op with the given parameters to the list of unwind ops stored in
120 * cfg->unwind_ops.
122 void
123 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
125 MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
127 op->op = tag;
128 op->reg = reg;
129 op->val = val;
130 op->when = when;
132 cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
133 if (cfg->verbose_level > 1) {
134 switch (tag) {
135 case DW_CFA_def_cfa:
136 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
137 break;
138 case DW_CFA_def_cfa_register:
139 printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
140 break;
141 case DW_CFA_def_cfa_offset:
142 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
143 break;
144 case DW_CFA_offset:
145 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
146 break;
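#if 0
/*
 * Sketch (not compiled): how a backend prologue might record unwind info
 * through mono_emit_unwind_op after pushing the frame pointer. The DWARF
 * register number and the slot counts are made up for the example.
 */
static void
example_prologue_unwind (MonoCompile *cfg, guint8 *code)
{
	int when = code - cfg->native_code; /* native offset the ops take effect from */
	int fp_dwarf_reg = 6;               /* hypothetical DWARF number of the frame pointer */

	/* return address + saved frame pointer: the CFA is now sp + 2 slots ... */
	mono_emit_unwind_op (cfg, when, DW_CFA_def_cfa_offset, 0, 2 * sizeof (target_mgreg_t));
	/* ... and the caller's frame pointer is saved at cfa - 2 slots */
	mono_emit_unwind_op (cfg, when, DW_CFA_offset, fp_dwarf_reg, -2 * (int)sizeof (target_mgreg_t));
}
#endif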
152 * mono_unlink_bblock:
154 * Unlink two basic blocks.
156 void
157 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
159 int i, pos;
160 gboolean found;
162 found = FALSE;
163 for (i = 0; i < from->out_count; ++i) {
164 if (to == from->out_bb [i]) {
165 found = TRUE;
166 break;
169 if (found) {
170 pos = 0;
171 for (i = 0; i < from->out_count; ++i) {
172 if (from->out_bb [i] != to)
173 from->out_bb [pos ++] = from->out_bb [i];
175 g_assert (pos == from->out_count - 1);
176 from->out_count--;
179 found = FALSE;
180 for (i = 0; i < to->in_count; ++i) {
181 if (from == to->in_bb [i]) {
182 found = TRUE;
183 break;
186 if (found) {
187 pos = 0;
188 for (i = 0; i < to->in_count; ++i) {
189 if (to->in_bb [i] != from)
190 to->in_bb [pos ++] = to->in_bb [i];
192 g_assert (pos == to->in_count - 1);
193 to->in_count--;
198 * mono_bblocks_linked:
200 * Return whether BB1 and BB2 are linked in the CFG.
202 gboolean
203 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
205 int i;
207 for (i = 0; i < bb1->out_count; ++i) {
208 if (bb1->out_bb [i] == bb2)
209 return TRUE;
212 return FALSE;
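#if 0
/*
 * Sketch (not compiled): removing the edge bb1 -> bb2 with mono_unlink_bblock
 * makes mono_bblocks_linked report the blocks as no longer connected.
 */
static void
example_unlink_edge (MonoCompile *cfg, MonoBasicBlock *bb1, MonoBasicBlock *bb2)
{
	g_assert (mono_bblocks_linked (bb1, bb2));
	mono_unlink_bblock (cfg, bb1, bb2);
	g_assert (!mono_bblocks_linked (bb1, bb2));
}
#endif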
215 static int
216 mono_find_block_region_notry (MonoCompile *cfg, int offset)
218 MonoMethodHeader *header = cfg->header;
219 MonoExceptionClause *clause;
220 int i;
222 for (i = 0; i < header->num_clauses; ++i) {
223 clause = &header->clauses [i];
224 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
225 (offset < (clause->handler_offset)))
226 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
228 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
229 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
230 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
231 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
232 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
233 else
234 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
238 return -1;
242 * mono_get_block_region_notry:
244 * Return the region corresponding to REGION, ignoring try clauses nested inside
245 * finally clauses.
248 mono_get_block_region_notry (MonoCompile *cfg, int region)
250 if ((region & (0xf << 4)) == MONO_REGION_TRY) {
251 MonoMethodHeader *header = cfg->header;
254 * This can happen if a try clause is nested inside a finally clause.
256 int clause_index = (region >> 8) - 1;
257 g_assert (clause_index >= 0 && clause_index < header->num_clauses);
259 region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
262 return region;
265 MonoInst *
266 mono_find_spvar_for_region (MonoCompile *cfg, int region)
268 region = mono_get_block_region_notry (cfg, region);
270 return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
273 static void
274 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
276 int i;
278 array [*dfn] = start;
279 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
280 for (i = 0; i < start->out_count; ++i) {
281 if (start->out_bb [i]->dfn)
282 continue;
283 (*dfn)++;
284 start->out_bb [i]->dfn = *dfn;
285 start->out_bb [i]->df_parent = start;
286 array [*dfn] = start->out_bb [i];
287 df_visit (start->out_bb [i], dfn, array);
291 guint32
292 mono_reverse_branch_op (guint32 opcode)
294 static const int reverse_map [] = {
295 CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
296 CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
298 static const int reverse_fmap [] = {
299 OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
300 OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
302 static const int reverse_lmap [] = {
303 OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
304 OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
306 static const int reverse_imap [] = {
307 OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
308 OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
311 if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
312 opcode = reverse_map [opcode - CEE_BEQ];
313 } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
314 opcode = reverse_fmap [opcode - OP_FBEQ];
315 } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
316 opcode = reverse_lmap [opcode - OP_LBEQ];
317 } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
318 opcode = reverse_imap [opcode - OP_IBEQ];
319 } else
320 g_assert_not_reached ();
322 return opcode;
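#if 0
/*
 * Sketch (not compiled): a typical use of mono_reverse_branch_op - when an
 * optimization swaps the true/false targets of a conditional branch it must
 * also reverse the branch condition.
 */
static void
example_invert_branch (MonoInst *branch)
{
	MonoBasicBlock *tmp_bb = branch->inst_true_bb;

	branch->inst_true_bb = branch->inst_false_bb;
	branch->inst_false_bb = tmp_bb;
	branch->opcode = mono_reverse_branch_op (branch->opcode);
}
#endif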
325 guint
326 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
328 type = mini_get_underlying_type (type);
330 handle_enum:
331 switch (type->type) {
332 case MONO_TYPE_I1:
333 case MONO_TYPE_U1:
334 return OP_STOREI1_MEMBASE_REG;
335 case MONO_TYPE_I2:
336 case MONO_TYPE_U2:
337 return OP_STOREI2_MEMBASE_REG;
338 case MONO_TYPE_I4:
339 case MONO_TYPE_U4:
340 return OP_STOREI4_MEMBASE_REG;
341 case MONO_TYPE_I:
342 case MONO_TYPE_U:
343 case MONO_TYPE_PTR:
344 case MONO_TYPE_FNPTR:
345 return OP_STORE_MEMBASE_REG;
346 case MONO_TYPE_CLASS:
347 case MONO_TYPE_STRING:
348 case MONO_TYPE_OBJECT:
349 case MONO_TYPE_SZARRAY:
350 case MONO_TYPE_ARRAY:
351 return OP_STORE_MEMBASE_REG;
352 case MONO_TYPE_I8:
353 case MONO_TYPE_U8:
354 return OP_STOREI8_MEMBASE_REG;
355 case MONO_TYPE_R4:
356 return OP_STORER4_MEMBASE_REG;
357 case MONO_TYPE_R8:
358 return OP_STORER8_MEMBASE_REG;
359 case MONO_TYPE_VALUETYPE:
360 if (m_class_is_enumtype (type->data.klass)) {
361 type = mono_class_enum_basetype_internal (type->data.klass);
362 goto handle_enum;
364 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
365 return OP_STOREX_MEMBASE;
366 return OP_STOREV_MEMBASE;
367 case MONO_TYPE_TYPEDBYREF:
368 return OP_STOREV_MEMBASE;
369 case MONO_TYPE_GENERICINST:
370 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
371 return OP_STOREX_MEMBASE;
372 type = m_class_get_byval_arg (type->data.generic_class->container_class);
373 goto handle_enum;
374 case MONO_TYPE_VAR:
375 case MONO_TYPE_MVAR:
376 g_assert (mini_type_var_is_vt (type));
377 return OP_STOREV_MEMBASE;
378 default:
379 g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
381 return -1;
384 guint
385 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
387 type = mini_get_underlying_type (type);
389 switch (type->type) {
390 case MONO_TYPE_I1:
391 return OP_LOADI1_MEMBASE;
392 case MONO_TYPE_U1:
393 return OP_LOADU1_MEMBASE;
394 case MONO_TYPE_I2:
395 return OP_LOADI2_MEMBASE;
396 case MONO_TYPE_U2:
397 return OP_LOADU2_MEMBASE;
398 case MONO_TYPE_I4:
399 return OP_LOADI4_MEMBASE;
400 case MONO_TYPE_U4:
401 return OP_LOADU4_MEMBASE;
402 case MONO_TYPE_I:
403 case MONO_TYPE_U:
404 case MONO_TYPE_PTR:
405 case MONO_TYPE_FNPTR:
406 return OP_LOAD_MEMBASE;
407 case MONO_TYPE_CLASS:
408 case MONO_TYPE_STRING:
409 case MONO_TYPE_OBJECT:
410 case MONO_TYPE_SZARRAY:
411 case MONO_TYPE_ARRAY:
412 return OP_LOAD_MEMBASE;
413 case MONO_TYPE_I8:
414 case MONO_TYPE_U8:
415 return OP_LOADI8_MEMBASE;
416 case MONO_TYPE_R4:
417 return OP_LOADR4_MEMBASE;
418 case MONO_TYPE_R8:
419 return OP_LOADR8_MEMBASE;
420 case MONO_TYPE_VALUETYPE:
421 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
422 return OP_LOADX_MEMBASE;
423 case MONO_TYPE_TYPEDBYREF:
424 return OP_LOADV_MEMBASE;
425 case MONO_TYPE_GENERICINST:
426 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (type)))
427 return OP_LOADX_MEMBASE;
428 if (mono_type_generic_inst_is_valuetype (type))
429 return OP_LOADV_MEMBASE;
430 else
431 return OP_LOAD_MEMBASE;
432 break;
433 case MONO_TYPE_VAR:
434 case MONO_TYPE_MVAR:
435 g_assert (cfg->gshared);
436 g_assert (mini_type_var_is_vt (type));
437 return OP_LOADV_MEMBASE;
438 default:
439 g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
441 return -1;
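#if 0
/*
 * Sketch (not compiled): selecting the memory access opcodes for a value of a
 * given type, e.g. when emitting field loads/stores. For an int32 this yields
 * OP_LOADI4_MEMBASE / OP_STOREI4_MEMBASE_REG, for a reference type
 * OP_LOAD_MEMBASE / OP_STORE_MEMBASE_REG.
 */
static void
example_pick_membase_ops (MonoCompile *cfg, MonoType *field_type, guint *load_op, guint *store_op)
{
	*load_op = mono_type_to_load_membase (cfg, field_type);
	*store_op = mono_type_to_store_membase (cfg, field_type);
}
#endif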
444 guint
445 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
447 type = mini_get_underlying_type (type);
448 if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
449 g_assert (mini_type_var_is_vt (type));
450 return CEE_STOBJ;
452 return mono_type_to_stind (type);
456 mono_op_imm_to_op (int opcode)
458 switch (opcode) {
459 case OP_ADD_IMM:
460 #if SIZEOF_REGISTER == 4
461 return OP_IADD;
462 #else
463 return OP_LADD;
464 #endif
465 case OP_IADD_IMM:
466 return OP_IADD;
467 case OP_LADD_IMM:
468 return OP_LADD;
469 case OP_ISUB_IMM:
470 return OP_ISUB;
471 case OP_LSUB_IMM:
472 return OP_LSUB;
473 case OP_IMUL_IMM:
474 return OP_IMUL;
475 case OP_LMUL_IMM:
476 return OP_LMUL;
477 case OP_AND_IMM:
478 #if SIZEOF_REGISTER == 4
479 return OP_IAND;
480 #else
481 return OP_LAND;
482 #endif
483 case OP_OR_IMM:
484 #if SIZEOF_REGISTER == 4
485 return OP_IOR;
486 #else
487 return OP_LOR;
488 #endif
489 case OP_XOR_IMM:
490 #if SIZEOF_REGISTER == 4
491 return OP_IXOR;
492 #else
493 return OP_LXOR;
494 #endif
495 case OP_IAND_IMM:
496 return OP_IAND;
497 case OP_LAND_IMM:
498 return OP_LAND;
499 case OP_IOR_IMM:
500 return OP_IOR;
501 case OP_LOR_IMM:
502 return OP_LOR;
503 case OP_IXOR_IMM:
504 return OP_IXOR;
505 case OP_LXOR_IMM:
506 return OP_LXOR;
507 case OP_ISHL_IMM:
508 return OP_ISHL;
509 case OP_LSHL_IMM:
510 return OP_LSHL;
511 case OP_ISHR_IMM:
512 return OP_ISHR;
513 case OP_LSHR_IMM:
514 return OP_LSHR;
515 case OP_ISHR_UN_IMM:
516 return OP_ISHR_UN;
517 case OP_LSHR_UN_IMM:
518 return OP_LSHR_UN;
519 case OP_IDIV_IMM:
520 return OP_IDIV;
521 case OP_LDIV_IMM:
522 return OP_LDIV;
523 case OP_IDIV_UN_IMM:
524 return OP_IDIV_UN;
525 case OP_LDIV_UN_IMM:
526 return OP_LDIV_UN;
527 case OP_IREM_UN_IMM:
528 return OP_IREM_UN;
529 case OP_LREM_UN_IMM:
530 return OP_LREM_UN;
531 case OP_IREM_IMM:
532 return OP_IREM;
533 case OP_LREM_IMM:
534 return OP_LREM;
535 case OP_DIV_IMM:
536 #if SIZEOF_REGISTER == 4
537 return OP_IDIV;
538 #else
539 return OP_LDIV;
540 #endif
541 case OP_REM_IMM:
542 #if SIZEOF_REGISTER == 4
543 return OP_IREM;
544 #else
545 return OP_LREM;
546 #endif
547 case OP_ADDCC_IMM:
548 return OP_ADDCC;
549 case OP_ADC_IMM:
550 return OP_ADC;
551 case OP_SUBCC_IMM:
552 return OP_SUBCC;
553 case OP_SBB_IMM:
554 return OP_SBB;
555 case OP_IADC_IMM:
556 return OP_IADC;
557 case OP_ISBB_IMM:
558 return OP_ISBB;
559 case OP_COMPARE_IMM:
560 return OP_COMPARE;
561 case OP_ICOMPARE_IMM:
562 return OP_ICOMPARE;
563 case OP_LOCALLOC_IMM:
564 return OP_LOCALLOC;
567 return -1;
571 * mono_decompose_op_imm:
573 * Replace the OP_.._IMM INS with its non-IMM variant.
575 void
576 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
578 int opcode2 = mono_op_imm_to_op (ins->opcode);
579 MonoInst *temp;
580 guint32 dreg;
581 const char *spec = INS_INFO (ins->opcode);
583 if (spec [MONO_INST_SRC2] == 'l') {
584 dreg = mono_alloc_lreg (cfg);
586 /* Load the 64bit constant using decomposed ops */
587 MONO_INST_NEW (cfg, temp, OP_ICONST);
588 temp->inst_c0 = ins_get_l_low (ins);
589 temp->dreg = MONO_LVREG_LS (dreg);
590 mono_bblock_insert_before_ins (bb, ins, temp);
592 MONO_INST_NEW (cfg, temp, OP_ICONST);
593 temp->inst_c0 = ins_get_l_high (ins);
594 temp->dreg = MONO_LVREG_MS (dreg);
595 } else {
596 dreg = mono_alloc_ireg (cfg);
598 MONO_INST_NEW (cfg, temp, OP_ICONST);
599 temp->inst_c0 = ins->inst_imm;
600 temp->dreg = dreg;
603 mono_bblock_insert_before_ins (bb, ins, temp);
605 if (opcode2 == -1)
606 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
607 ins->opcode = opcode2;
609 if (ins->opcode == OP_LOCALLOC)
610 ins->sreg1 = dreg;
611 else
612 ins->sreg2 = dreg;
614 bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
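/*
 * For example, on a target without an immediate form of the add instruction,
 *     OP_IADD_IMM  dreg <- sreg1 [imm]
 * is rewritten by mono_decompose_op_imm into
 *     OP_ICONST    tmp  <- [imm]
 *     OP_IADD      dreg <- sreg1 tmp
 * with the constant load inserted right before the original instruction.
 */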
617 static void
618 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
620 if (vreg >= cfg->vreg_to_inst_len) {
621 MonoInst **tmp = cfg->vreg_to_inst;
622 int size = cfg->vreg_to_inst_len;
624 while (vreg >= cfg->vreg_to_inst_len)
625 cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
626 cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
627 if (size)
628 memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
630 cfg->vreg_to_inst [vreg] = inst;
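/*
 * The vreg -> MonoInst table is grown geometrically (doubling, starting at 32
 * entries), so registering N vregs costs amortized O(1) each; the old contents
 * are copied over because mempool allocations cannot be resized in place.
 * mono_mark_vreg_as_ref () and mono_mark_vreg_as_mp () below use the same pattern.
 */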
633 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
634 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
636 MonoInst*
637 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
639 MonoInst *inst;
640 int num = cfg->num_varinfo;
641 gboolean regpair;
643 type = mini_get_underlying_type (type);
645 if ((num + 1) >= cfg->varinfo_count) {
646 int orig_count = cfg->varinfo_count;
647 cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
648 cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
649 cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
650 memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
653 cfg->stat_allocate_var++;
655 MONO_INST_NEW (cfg, inst, opcode);
656 inst->inst_c0 = num;
657 inst->inst_vtype = type;
658 inst->klass = mono_class_from_mono_type_internal (type);
659 mini_type_to_eval_stack_type (cfg, type, inst);
660 /* if set to 1 the variable is native */
661 inst->backend.is_pinvoke = 0;
662 inst->dreg = vreg;
664 if (mono_class_has_failure (inst->klass))
665 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
667 if (cfg->compute_gc_maps) {
668 if (type->byref) {
669 mono_mark_vreg_as_mp (cfg, vreg);
670 } else {
671 if ((MONO_TYPE_ISSTRUCT (type) && m_class_has_references (inst->klass)) || mini_type_is_reference (type)) {
672 inst->flags |= MONO_INST_GC_TRACK;
673 mono_mark_vreg_as_ref (cfg, vreg);
678 cfg->varinfo [num] = inst;
680 cfg->vars [num].idx = num;
681 cfg->vars [num].vreg = vreg;
682 cfg->vars [num].range.first_use.pos.bid = 0xffff;
683 cfg->vars [num].reg = -1;
685 if (vreg != -1)
686 set_vreg_to_inst (cfg, vreg, inst);
688 #if SIZEOF_REGISTER == 4
689 if (mono_arch_is_soft_float ()) {
690 regpair = mono_type_is_long (type) || mono_type_is_float (type);
691 } else {
692 regpair = mono_type_is_long (type);
694 #else
695 regpair = FALSE;
696 #endif
698 if (regpair) {
699 MonoInst *tree;
702 * These two cannot be allocated using create_var_for_vreg since that would
703 * put them into the cfg->varinfo array, confusing many parts of the JIT.
707 * Set flags to VOLATILE so SSA skips it.
710 if (cfg->verbose_level >= 4) {
711 printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
714 if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
715 if (mono_type_is_float (type))
716 inst->flags = MONO_INST_VOLATILE;
719 /* Allocate a dummy MonoInst for the first vreg */
720 MONO_INST_NEW (cfg, tree, OP_LOCAL);
721 tree->dreg = MONO_LVREG_LS (inst->dreg);
722 if (cfg->opt & MONO_OPT_SSA)
723 tree->flags = MONO_INST_VOLATILE;
724 tree->inst_c0 = num;
725 tree->type = STACK_I4;
726 tree->inst_vtype = mono_get_int32_type ();
727 tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
729 set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
731 /* Allocate a dummy MonoInst for the second vreg */
732 MONO_INST_NEW (cfg, tree, OP_LOCAL);
733 tree->dreg = MONO_LVREG_MS (inst->dreg);
734 if (cfg->opt & MONO_OPT_SSA)
735 tree->flags = MONO_INST_VOLATILE;
736 tree->inst_c0 = num;
737 tree->type = STACK_I4;
738 tree->inst_vtype = mono_get_int32_type ();
739 tree->klass = mono_class_from_mono_type_internal (tree->inst_vtype);
741 set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
744 cfg->num_varinfo++;
745 if (cfg->verbose_level > 2)
746 g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
748 return inst;
751 MonoInst*
752 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
754 int dreg;
756 #ifdef ENABLE_NETCORE
757 if (type->type == MONO_TYPE_VALUETYPE && !type->byref) {
758 MonoClass *klass = mono_class_from_mono_type_internal (type);
759 if (m_class_is_enumtype (klass) && m_class_get_image (klass) == mono_get_corlib () && !strcmp (m_class_get_name (klass), "StackCrawlMark")) {
760 if (!(cfg->method->flags & METHOD_ATTRIBUTE_REQSECOBJ))
761 g_error ("Method '%s' which contains a StackCrawlMark local variable must be decorated with [System.Security.DynamicSecurityMethod].", mono_method_get_full_name (cfg->method));
764 #endif
766 type = mini_get_underlying_type (type);
768 if (mono_type_is_long (type))
769 dreg = mono_alloc_dreg (cfg, STACK_I8);
770 else if (mono_arch_is_soft_float () && mono_type_is_float (type))
771 dreg = mono_alloc_dreg (cfg, STACK_R8);
772 else
773 /* All the others are unified */
774 dreg = mono_alloc_preg (cfg);
776 return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
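#if 0
/*
 * Sketch (not compiled): creating a fresh int32 compiler temporary the way the
 * IR builders typically do; OP_LOCAL marks it as a local/temporary as opposed
 * to an incoming argument (OP_ARG).
 */
static MonoInst*
example_new_int_temp (MonoCompile *cfg)
{
	return mono_compile_create_var (cfg, mono_get_int32_type (), OP_LOCAL);
}
#endif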
779 MonoInst*
780 mini_get_int_to_float_spill_area (MonoCompile *cfg)
782 #ifdef TARGET_X86
783 if (!cfg->iconv_raw_var) {
784 cfg->iconv_raw_var = mono_compile_create_var (cfg, mono_get_int32_type (), OP_LOCAL);
785 cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
787 return cfg->iconv_raw_var;
788 #else
789 return NULL;
790 #endif
793 void
794 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
796 if (vreg >= cfg->vreg_is_ref_len) {
797 gboolean *tmp = cfg->vreg_is_ref;
798 int size = cfg->vreg_is_ref_len;
800 while (vreg >= cfg->vreg_is_ref_len)
801 cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
802 cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
803 if (size)
804 memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
806 cfg->vreg_is_ref [vreg] = TRUE;
809 void
810 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
812 if (vreg >= cfg->vreg_is_mp_len) {
813 gboolean *tmp = cfg->vreg_is_mp;
814 int size = cfg->vreg_is_mp_len;
816 while (vreg >= cfg->vreg_is_mp_len)
817 cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
818 cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
819 if (size)
820 memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
822 cfg->vreg_is_mp [vreg] = TRUE;
825 static MonoType*
826 type_from_stack_type (MonoInst *ins)
828 switch (ins->type) {
829 case STACK_I4: return mono_get_int32_type ();
830 case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
831 case STACK_PTR: return mono_get_int_type ();
832 case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
833 case STACK_MP:
835 * this if statement used to be commented out without any specific reason, but
836 * it breaks #80235 when commented out
838 if (ins->klass)
839 return m_class_get_this_arg (ins->klass);
840 else
841 return m_class_get_this_arg (mono_defaults.object_class);
842 case STACK_OBJ:
843 /* ins->klass may not be set for ldnull.
844 * Also, if we have a boxed valuetype, we want an object class,
845 * not the valuetype class
847 if (ins->klass && !m_class_is_valuetype (ins->klass))
848 return m_class_get_byval_arg (ins->klass);
849 return mono_get_object_type ();
850 case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
851 default:
852 g_error ("stack type %d to montype not handled\n", ins->type);
854 return NULL;
857 MonoType*
858 mono_type_from_stack_type (MonoInst *ins)
860 return type_from_stack_type (ins);
864 * mono_add_ins_to_end:
866 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
868 void
869 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
871 int opcode;
873 if (!bb->code) {
874 MONO_ADD_INS (bb, inst);
875 return;
878 switch (bb->last_ins->opcode) {
879 case OP_BR:
880 case OP_BR_REG:
881 case CEE_BEQ:
882 case CEE_BGE:
883 case CEE_BGT:
884 case CEE_BLE:
885 case CEE_BLT:
886 case CEE_BNE_UN:
887 case CEE_BGE_UN:
888 case CEE_BGT_UN:
889 case CEE_BLE_UN:
890 case CEE_BLT_UN:
891 case OP_SWITCH:
892 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
893 break;
894 default:
895 if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
896 /* Need to insert the ins before the compare */
897 if (bb->code == bb->last_ins) {
898 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
899 return;
902 if (bb->code->next == bb->last_ins) {
903 /* Only two instructions */
904 opcode = bb->code->opcode;
906 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
907 /* NEW IR */
908 mono_bblock_insert_before_ins (bb, bb->code, inst);
909 } else {
910 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
912 } else {
913 opcode = bb->last_ins->prev->opcode;
915 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
916 /* NEW IR */
917 mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
918 } else {
919 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
923 else
924 MONO_ADD_INS (bb, inst);
925 break;
929 void
930 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
932 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
933 MonoJumpInfoBBTable *table;
935 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
936 table->table = bbs;
937 table->table_size = num_blocks;
939 ji->ip.label = label;
940 ji->type = MONO_PATCH_INFO_SWITCH;
941 ji->data.table = table;
942 ji->next = cfg->patch_info;
943 cfg->patch_info = ji;
946 gboolean
947 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
949 MonoAssembly *assembly = m_class_get_image (method->klass)->assembly;
950 if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
951 return FALSE;
952 if (assembly->in_gac || assembly->image == mono_defaults.corlib)
953 return FALSE;
954 return mono_assembly_has_skip_verification (assembly);
958 * mini_method_verify:
960 * Verify the method using the verifier.
962 * Returns true if the method is invalid.
964 static gboolean
965 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
967 GSList *tmp, *res;
968 gboolean is_fulltrust;
970 if (mono_method_get_verification_success (method))
971 return FALSE;
973 if (!mono_verifier_is_enabled_for_method (method))
974 return FALSE;
976 /* skip verification implies the assembly must be fully trusted */
977 is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);
979 res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
981 if (res) {
982 for (tmp = res; tmp; tmp = tmp->next) {
983 MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
984 if (info->info.status == MONO_VERIFY_ERROR) {
985 if (fail_compile) {
986 char *method_name = mono_method_full_name (method, TRUE);
987 cfg->exception_type = (MonoExceptionType)info->exception_type;
988 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
989 g_free (method_name);
991 mono_free_verify_list (res);
992 return TRUE;
994 if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
995 if (fail_compile) {
996 char *method_name = mono_method_full_name (method, TRUE);
997 char *msg = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
999 if (info->exception_type == MONO_EXCEPTION_METHOD_ACCESS)
1000 mono_error_set_generic_error (cfg->error, "System", "MethodAccessException", "%s", msg);
1001 else if (info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)
1002 mono_error_set_generic_error (cfg->error, "System", "FieldAccessException", "%s", msg);
1003 else if (info->exception_type == MONO_EXCEPTION_UNVERIFIABLE_IL)
1004 mono_error_set_generic_error (cfg->error, "System.Security", "VerificationException", "%s", msg);
1005 if (!is_ok (cfg->error)) {
1006 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1007 g_free (msg);
1008 } else {
1009 cfg->exception_type = (MonoExceptionType)info->exception_type;
1010 cfg->exception_message = msg;
1012 g_free (method_name);
1014 mono_free_verify_list (res);
1015 return TRUE;
1018 mono_free_verify_list (res);
1020 mono_method_set_verification_success (method);
1021 return FALSE;
1024 /*Returns true if something went wrong*/
1025 gboolean
1026 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1028 MonoMethod *method_definition = method;
1029 gboolean dont_verify = m_class_get_image (method->klass)->assembly->corlib_internal;
1031 while (method_definition->is_inflated) {
1032 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1033 method_definition = imethod->declaring;
1036 return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
1039 static void
1040 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1042 if (!domain_jit_info (domain)->dynamic_code_hash)
1043 domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
1044 g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
1047 static MonoJitDynamicMethodInfo*
1048 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1050 MonoJitDynamicMethodInfo *res;
1052 if (domain_jit_info (domain)->dynamic_code_hash)
1053 res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
1054 else
1055 res = NULL;
1056 return res;
1059 typedef struct {
1060 MonoClass *vtype;
1061 GList *active, *inactive;
1062 GSList *slots;
1063 } StackSlotInfo;
1065 static gint
1066 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1068 MonoMethodVar *v1 = (MonoMethodVar*)a;
1069 MonoMethodVar *v2 = (MonoMethodVar*)b;
1071 if (v1 == v2)
1072 return 0;
1073 else if (v1->interval->range && v2->interval->range)
1074 return v1->interval->range->from - v2->interval->range->from;
1075 else if (v1->interval->range)
1076 return -1;
1077 else
1078 return 1;
1081 #if 0
1082 #define LSCAN_DEBUG(a) do { a; } while (0)
1083 #else
1084 #define LSCAN_DEBUG(a) do { } while (0) /* non-empty to avoid warning */
1085 #endif
1087 static gint32*
1088 mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1090 int i, slot, offset, size;
1091 guint32 align;
1092 MonoMethodVar *vmv;
1093 MonoInst *inst;
1094 gint32 *offsets;
1095 GList *vars = NULL, *l, *unhandled;
1096 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1097 MonoType *t;
1098 int nvtypes;
1099 int vtype_stack_slots_size = 256;
1100 gboolean reuse_slot;
1102 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
1104 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1105 vtype_stack_slots = NULL;
1106 nvtypes = 0;
1108 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1109 for (i = 0; i < cfg->num_varinfo; ++i)
1110 offsets [i] = -1;
1112 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1113 inst = cfg->varinfo [i];
1114 vmv = MONO_VARINFO (cfg, i);
1116 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1117 continue;
1119 vars = g_list_prepend (vars, vmv);
1122 vars = g_list_sort (vars, compare_by_interval_start_pos_func);
1124 /* Sanity check */
1126 i = 0;
1127 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1128 MonoMethodVar *current = unhandled->data;
1130 if (current->interval->range) {
1131 g_assert (current->interval->range->from >= i);
1132 i = current->interval->range->from;
1137 offset = 0;
1138 *stack_align = 0;
1139 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1140 MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
1142 vmv = current;
1143 inst = cfg->varinfo [vmv->idx];
1145 t = mono_type_get_underlying_type (inst->inst_vtype);
1146 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1147 continue;
1149 /* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
1150 * pinvoke wrappers when they call functions returning structures */
1151 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1152 size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
1154 else {
1155 int ialign;
1157 size = mini_type_stack_size (t, &ialign);
1158 align = ialign;
1160 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
1161 align = 16;
1164 reuse_slot = TRUE;
1165 if (cfg->disable_reuse_stack_slots)
1166 reuse_slot = FALSE;
1168 t = mini_get_underlying_type (t);
1169 switch (t->type) {
1170 case MONO_TYPE_GENERICINST:
1171 if (!mono_type_generic_inst_is_valuetype (t)) {
1172 slot_info = &scalar_stack_slots [t->type];
1173 break;
1175 /* Fall through */
1176 case MONO_TYPE_VALUETYPE:
1177 if (!vtype_stack_slots)
1178 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
1179 for (i = 0; i < nvtypes; ++i)
1180 if (t->data.klass == vtype_stack_slots [i].vtype)
1181 break;
1182 if (i < nvtypes)
1183 slot_info = &vtype_stack_slots [i];
1184 else {
1185 if (nvtypes == vtype_stack_slots_size) {
1186 int new_slots_size = vtype_stack_slots_size * 2;
1187 StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
1189 memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
1191 vtype_stack_slots = new_slots;
1192 vtype_stack_slots_size = new_slots_size;
1194 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1195 slot_info = &vtype_stack_slots [nvtypes];
1196 nvtypes ++;
1198 if (cfg->disable_reuse_ref_stack_slots)
1199 reuse_slot = FALSE;
1200 break;
1202 case MONO_TYPE_PTR:
1203 case MONO_TYPE_I:
1204 case MONO_TYPE_U:
1205 #if TARGET_SIZEOF_VOID_P == 4
1206 case MONO_TYPE_I4:
1207 #else
1208 case MONO_TYPE_I8:
1209 #endif
1210 if (cfg->disable_ref_noref_stack_slot_share) {
1211 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1212 break;
1214 /* Fall through */
1216 case MONO_TYPE_CLASS:
1217 case MONO_TYPE_OBJECT:
1218 case MONO_TYPE_ARRAY:
1219 case MONO_TYPE_SZARRAY:
1220 case MONO_TYPE_STRING:
1221 /* Share non-float stack slots of the same size */
1222 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1223 if (cfg->disable_reuse_ref_stack_slots)
1224 reuse_slot = FALSE;
1225 break;
1227 default:
1228 slot_info = &scalar_stack_slots [t->type];
1231 slot = 0xffffff;
1232 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1233 int pos;
1234 gboolean changed;
1236 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1238 if (!current->interval->range) {
1239 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
1240 pos = ~0;
1241 else {
1242 /* Dead */
1243 inst->flags |= MONO_INST_IS_DEAD;
1244 continue;
1247 else
1248 pos = current->interval->range->from;
1250 LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
1251 if (current->interval->range)
1252 LSCAN_DEBUG (mono_linterval_print (current->interval));
1253 LSCAN_DEBUG (printf ("\n"));
1255 /* Check for intervals in active which have expired or become inactive */
1256 changed = TRUE;
1257 /* FIXME: Optimize this */
1258 while (changed) {
1259 changed = FALSE;
1260 for (l = slot_info->active; l != NULL; l = l->next) {
1261 MonoMethodVar *v = (MonoMethodVar*)l->data;
1263 if (v->interval->last_range->to < pos) {
1264 slot_info->active = g_list_delete_link (slot_info->active, l);
1265 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1266 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1267 changed = TRUE;
1268 break;
1270 else if (!mono_linterval_covers (v->interval, pos)) {
1271 slot_info->inactive = g_list_append (slot_info->inactive, v);
1272 slot_info->active = g_list_delete_link (slot_info->active, l);
1273 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
1274 changed = TRUE;
1275 break;
1280 /* Check for intervals in inactive which have expired or become active */
1281 changed = TRUE;
1282 /* FIXME: Optimize this */
1283 while (changed) {
1284 changed = FALSE;
1285 for (l = slot_info->inactive; l != NULL; l = l->next) {
1286 MonoMethodVar *v = (MonoMethodVar*)l->data;
1288 if (v->interval->last_range->to < pos) {
1289 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1290 // FIXME: Enabling this seems to cause impossible to debug crashes
1291 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1292 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1293 changed = TRUE;
1294 break;
1296 else if (mono_linterval_covers (v->interval, pos)) {
1297 slot_info->active = g_list_append (slot_info->active, v);
1298 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1299 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
1300 changed = TRUE;
1301 break;
1307 * This also handles the case when the variable is used in an
1308 * exception region, as liveness info is not computed there.
1311 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1312 * opcodes.
1314 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1315 if (slot_info->slots) {
1316 slot = GPOINTER_TO_INT (slot_info->slots->data);
1318 slot_info->slots = slot_info->slots->next;
1321 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1323 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1327 #if 0
1329 static int count = 0;
1330 count ++;
1332 if (count == atoi (g_getenv ("COUNT3")))
1333 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1334 if (count > atoi (g_getenv ("COUNT3")))
1335 slot = 0xffffff;
1336 else
1337 mono_print_ins (inst);
1339 #endif
1341 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
1343 if (inst->flags & MONO_INST_LMF) {
1344 size = MONO_ABI_SIZEOF (MonoLMF);
1345 align = sizeof (target_mgreg_t);
1346 reuse_slot = FALSE;
1349 if (!reuse_slot)
1350 slot = 0xffffff;
1352 if (slot == 0xffffff) {
1354 * Always align valuetypes to sizeof (target_mgreg_t) to allow more
1355 * efficient copying (and to work around the fact that OP_MEMCPY
1356 * and OP_MEMSET ignore alignment).
1358 if (MONO_TYPE_ISSTRUCT (t)) {
1359 align = MAX (align, sizeof (target_mgreg_t));
1360 align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
1363 if (backward) {
1364 offset += size;
1365 offset += align - 1;
1366 offset &= ~(align - 1);
1367 slot = offset;
1369 else {
1370 offset += align - 1;
1371 offset &= ~(align - 1);
1372 slot = offset;
1373 offset += size;
1376 if (*stack_align == 0)
1377 *stack_align = align;
1380 offsets [vmv->idx] = slot;
1382 g_list_free (vars);
1383 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1384 if (scalar_stack_slots [i].active)
1385 g_list_free (scalar_stack_slots [i].active);
1387 for (i = 0; i < nvtypes; ++i) {
1388 if (vtype_stack_slots [i].active)
1389 g_list_free (vtype_stack_slots [i].active);
1392 cfg->stat_locals_stack_size += offset;
1394 *stack_size = offset;
1395 return offsets;
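/*
 * Slot assignment rounds the running offset up to the slot's (power of two)
 * alignment with
 *     offset = (offset + align - 1) & ~(align - 1);
 * e.g. offset 20 with align 16 becomes 32. The same arithmetic is used by
 * mono_allocate_stack_slots () below.
 */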
1399 * mono_allocate_stack_slots:
1401 * Allocate stack slots for all non register allocated variables using a
1402 * linear scan algorithm.
1403 * Returns: an array of stack offsets.
1404 * STACK_SIZE is set to the amount of stack space needed.
1405 * STACK_ALIGN is set to the alignment needed by the locals area.
1407 gint32*
1408 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1410 int i, slot, offset, size;
1411 guint32 align;
1412 MonoMethodVar *vmv;
1413 MonoInst *inst;
1414 gint32 *offsets;
1415 GList *vars = NULL, *l;
1416 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1417 MonoType *t;
1418 int nvtypes;
1419 int vtype_stack_slots_size = 256;
1420 gboolean reuse_slot;
1422 if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
1423 return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
1425 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1426 vtype_stack_slots = NULL;
1427 nvtypes = 0;
1429 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1430 for (i = 0; i < cfg->num_varinfo; ++i)
1431 offsets [i] = -1;
1433 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1434 inst = cfg->varinfo [i];
1435 vmv = MONO_VARINFO (cfg, i);
1437 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1438 continue;
1440 vars = g_list_prepend (vars, vmv);
1443 vars = mono_varlist_sort (cfg, vars, 0);
1444 offset = 0;
1445 *stack_align = sizeof (target_mgreg_t);
1446 for (l = vars; l; l = l->next) {
1447 vmv = (MonoMethodVar *)l->data;
1448 inst = cfg->varinfo [vmv->idx];
1450 t = mono_type_get_underlying_type (inst->inst_vtype);
1451 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1452 continue;
1454 /* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
1455 * pinvoke wrappers when they call functions returning structures */
1456 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1457 size = mono_class_native_size (mono_class_from_mono_type_internal (t), &align);
1458 } else {
1459 int ialign;
1461 size = mini_type_stack_size (t, &ialign);
1462 align = ialign;
1464 if (mono_class_has_failure (mono_class_from_mono_type_internal (t)))
1465 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
1467 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type_internal (t)))
1468 align = 16;
1471 reuse_slot = TRUE;
1472 if (cfg->disable_reuse_stack_slots)
1473 reuse_slot = FALSE;
1475 t = mini_get_underlying_type (t);
1476 switch (t->type) {
1477 case MONO_TYPE_GENERICINST:
1478 if (!mono_type_generic_inst_is_valuetype (t)) {
1479 slot_info = &scalar_stack_slots [t->type];
1480 break;
1482 /* Fall through */
1483 case MONO_TYPE_VALUETYPE:
1484 if (!vtype_stack_slots)
1485 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * vtype_stack_slots_size);
1486 for (i = 0; i < nvtypes; ++i)
1487 if (t->data.klass == vtype_stack_slots [i].vtype)
1488 break;
1489 if (i < nvtypes)
1490 slot_info = &vtype_stack_slots [i];
1491 else {
1492 if (nvtypes == vtype_stack_slots_size) {
1493 int new_slots_size = vtype_stack_slots_size * 2;
1494 StackSlotInfo* new_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * new_slots_size);
1496 memcpy (new_slots, vtype_stack_slots, sizeof (StackSlotInfo) * vtype_stack_slots_size);
1498 vtype_stack_slots = new_slots;
1499 vtype_stack_slots_size = new_slots_size;
1501 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1502 slot_info = &vtype_stack_slots [nvtypes];
1503 nvtypes ++;
1505 if (cfg->disable_reuse_ref_stack_slots)
1506 reuse_slot = FALSE;
1507 break;
1509 case MONO_TYPE_PTR:
1510 case MONO_TYPE_I:
1511 case MONO_TYPE_U:
1512 #if TARGET_SIZEOF_VOID_P == 4
1513 case MONO_TYPE_I4:
1514 #else
1515 case MONO_TYPE_I8:
1516 #endif
1517 if (cfg->disable_ref_noref_stack_slot_share) {
1518 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1519 break;
1521 /* Fall through */
1523 case MONO_TYPE_CLASS:
1524 case MONO_TYPE_OBJECT:
1525 case MONO_TYPE_ARRAY:
1526 case MONO_TYPE_SZARRAY:
1527 case MONO_TYPE_STRING:
1528 /* Share non-float stack slots of the same size */
1529 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1530 if (cfg->disable_reuse_ref_stack_slots)
1531 reuse_slot = FALSE;
1532 break;
1533 case MONO_TYPE_VAR:
1534 case MONO_TYPE_MVAR:
1535 slot_info = &scalar_stack_slots [t->type];
1536 break;
1537 default:
1538 slot_info = &scalar_stack_slots [t->type];
1539 break;
1542 slot = 0xffffff;
1543 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1544 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1546 /* expire old intervals in active */
1547 while (slot_info->active) {
1548 MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
1550 if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
1551 break;
1553 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
1555 slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
1556 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
1560 * This also handles the case when the variable is used in an
1561 * exception region, as liveness info is not computed there.
1564 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1565 * opcodes.
1567 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1568 if (slot_info->slots) {
1569 slot = GPOINTER_TO_INT (slot_info->slots->data);
1571 slot_info->slots = slot_info->slots->next;
1574 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1578 #if 0
1580 static int count = 0;
1581 count ++;
1583 if (count == atoi (g_getenv ("COUNT")))
1584 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1585 if (count > atoi (g_getenv ("COUNT")))
1586 slot = 0xffffff;
1587 else
1588 mono_print_ins (inst);
1590 #endif
1592 if (inst->flags & MONO_INST_LMF) {
1594 * This variable represents a MonoLMF structure, which has no corresponding
1595 * CLR type, so hard-code its size/alignment.
1597 size = MONO_ABI_SIZEOF (MonoLMF);
1598 align = sizeof (target_mgreg_t);
1599 reuse_slot = FALSE;
1602 if (!reuse_slot)
1603 slot = 0xffffff;
1605 if (slot == 0xffffff) {
1607 * Always align valuetypes to sizeof (target_mgreg_t) to allow more
1608 * efficient copying (and to work around the fact that OP_MEMCPY
1609 * and OP_MEMSET ignore alignment).
1611 if (MONO_TYPE_ISSTRUCT (t)) {
1612 align = MAX (align, sizeof (target_mgreg_t));
1613 align = MAX (align, mono_class_min_align (mono_class_from_mono_type_internal (t)));
1615 * Align the size too so the code generated for passing vtypes in
1616 * registers doesn't overwrite random locals.
1618 size = (size + (align - 1)) & ~(align -1);
1621 if (backward) {
1622 offset += size;
1623 offset += align - 1;
1624 offset &= ~(align - 1);
1625 slot = offset;
1627 else {
1628 offset += align - 1;
1629 offset &= ~(align - 1);
1630 slot = offset;
1631 offset += size;
1634 *stack_align = MAX (*stack_align, align);
1637 offsets [vmv->idx] = slot;
1639 g_list_free (vars);
1640 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1641 if (scalar_stack_slots [i].active)
1642 g_list_free (scalar_stack_slots [i].active);
1644 for (i = 0; i < nvtypes; ++i) {
1645 if (vtype_stack_slots [i].active)
1646 g_list_free (vtype_stack_slots [i].active);
1649 cfg->stat_locals_stack_size += offset;
1651 *stack_size = offset;
1652 return offsets;
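#if 0
/*
 * Sketch (not compiled): how an architecture backend could consume the
 * returned offsets - every variable that was assigned a slot becomes an
 * OP_REGOFFSET off the frame register. The choice of base register and the
 * sign/bias of the final offset are arch specific; this is only illustrative.
 */
static void
example_apply_stack_slots (MonoCompile *cfg)
{
	guint32 stack_size, stack_align;
	gint32 *offsets = mono_allocate_stack_slots (cfg, TRUE, &stack_size, &stack_align);
	int i;

	for (i = cfg->locals_start; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];

		if (offsets [i] != -1) {
			var->opcode = OP_REGOFFSET;
			var->inst_basereg = cfg->frame_reg;
			var->inst_offset = offsets [i];
		}
	}
}
#endif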
1655 #define EMUL_HIT_SHIFT 3
1656 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1657 /* small hit bitmap cache */
1658 static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
1659 static short emul_opcode_num = 0;
1660 static short emul_opcode_alloced = 0;
1661 static short *emul_opcode_opcodes;
1662 static MonoJitICallInfo **emul_opcode_map;
1664 MonoJitICallInfo *
1665 mono_find_jit_opcode_emulation (int opcode)
1667 g_assert (opcode >= 0 && opcode <= OP_LAST);
1668 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
1669 int i;
1670 for (i = 0; i < emul_opcode_num; ++i) {
1671 if (emul_opcode_opcodes [i] == opcode)
1672 return emul_opcode_map [i];
1675 return NULL;
1678 void
1679 mini_register_opcode_emulation (int opcode, MonoJitICallInfo *info, const char *name, MonoMethodSignature *sig, gpointer func, const char *symbol, gboolean no_wrapper)
1681 g_assert (info);
1682 g_assert (!sig->hasthis);
1683 g_assert (sig->param_count < 3);
1685 mono_register_jit_icall_info (info, func, name, sig, no_wrapper, symbol);
1687 if (emul_opcode_num >= emul_opcode_alloced) {
1688 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1689 emul_opcode_alloced += incr;
1690 emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1691 emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1693 emul_opcode_map [emul_opcode_num] = info;
1694 emul_opcode_opcodes [emul_opcode_num] = opcode;
1695 emul_opcode_num++;
1696 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
1699 static void
1700 print_dfn (MonoCompile *cfg)
1702 int i, j;
1703 char *code;
1704 MonoBasicBlock *bb;
1705 MonoInst *c;
1708 char *method_name = mono_method_full_name (cfg->method, TRUE);
1709 g_print ("IR code for method %s\n", method_name);
1710 g_free (method_name);
1713 for (i = 0; i < cfg->num_bblocks; ++i) {
1714 bb = cfg->bblocks [i];
1715 /*if (bb->cil_code) {
1716 char* code1, *code2;
1717 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1718 if (bb->last_ins->cil_code)
1719 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1720 else
1721 code2 = g_strdup ("");
1723 code1 [strlen (code1) - 1] = 0;
1724 code = g_strdup_printf ("%s -> %s", code1, code2);
1725 g_free (code1);
1726 g_free (code2);
1727 } else*/
1728 code = g_strdup ("\n");
1729 g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
1730 MONO_BB_FOR_EACH_INS (bb, c) {
1731 mono_print_ins_index (-1, c);
1734 g_print ("\tprev:");
1735 for (j = 0; j < bb->in_count; ++j) {
1736 g_print (" BB%d", bb->in_bb [j]->block_num);
1738 g_print ("\t\tsucc:");
1739 for (j = 0; j < bb->out_count; ++j) {
1740 g_print (" BB%d", bb->out_bb [j]->block_num);
1742 g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
1744 if (bb->idom)
1745 g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
1747 if (bb->dominators)
1748 mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
1749 if (bb->dfrontier)
1750 mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
1751 g_free (code);
1754 g_print ("\n");
1757 void
1758 mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
1760 MONO_ADD_INS (bb, inst);
1763 void
1764 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1766 if (ins == NULL) {
1767 ins = bb->code;
1768 bb->code = ins_to_insert;
1770 /* Link with next */
1771 ins_to_insert->next = ins;
1772 if (ins)
1773 ins->prev = ins_to_insert;
1775 if (bb->last_ins == NULL)
1776 bb->last_ins = ins_to_insert;
1777 } else {
1778 /* Link with next */
1779 ins_to_insert->next = ins->next;
1780 if (ins->next)
1781 ins->next->prev = ins_to_insert;
1783 /* Link with previous */
1784 ins->next = ins_to_insert;
1785 ins_to_insert->prev = ins;
1787 if (bb->last_ins == ins)
1788 bb->last_ins = ins_to_insert;
1792 void
1793 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1795 if (ins == NULL) {
1796 ins = bb->code;
1797 if (ins)
1798 ins->prev = ins_to_insert;
1799 bb->code = ins_to_insert;
1800 ins_to_insert->next = ins;
1801 if (bb->last_ins == NULL)
1802 bb->last_ins = ins_to_insert;
1803 } else {
1804 /* Link with previous */
1805 if (ins->prev)
1806 ins->prev->next = ins_to_insert;
1807 ins_to_insert->prev = ins->prev;
1809 /* Link with next */
1810 ins->prev = ins_to_insert;
1811 ins_to_insert->next = ins;
1813 if (bb->code == ins)
1814 bb->code = ins_to_insert;
1819 * mono_verify_bblock:
1821 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1823 void
1824 mono_verify_bblock (MonoBasicBlock *bb)
1826 MonoInst *ins, *prev;
1828 prev = NULL;
1829 for (ins = bb->code; ins; ins = ins->next) {
1830 g_assert (ins->prev == prev);
1831 prev = ins;
1833 if (bb->last_ins)
1834 g_assert (!bb->last_ins->next);
1838 * mono_verify_cfg:
1840 * Perform consistency checks on the JIT data structures and the IR
1842 void
1843 mono_verify_cfg (MonoCompile *cfg)
1845 MonoBasicBlock *bb;
1847 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
1848 mono_verify_bblock (bb);
1851 // This will free many fields in cfg to save
1852 // memory. Note that this must be safe to call
1853 // multiple times. It must be idempotent.
1854 void
1855 mono_empty_compile (MonoCompile *cfg)
1857 mono_free_loop_info (cfg);
1859 // These live in the mempool, and so must be freed
1860 // first
1861 for (GSList *l = cfg->headers_to_free; l; l = l->next) {
1862 mono_metadata_free_mh ((MonoMethodHeader *)l->data);
1864 cfg->headers_to_free = NULL;
1866 if (cfg->mempool) {
1867 //mono_mempool_stats (cfg->mempool);
1868 mono_mempool_destroy (cfg->mempool);
1869 cfg->mempool = NULL;
1872 g_free (cfg->varinfo);
1873 cfg->varinfo = NULL;
1875 g_free (cfg->vars);
1876 cfg->vars = NULL;
1878 if (cfg->rs) {
1879 mono_regstate_free (cfg->rs);
1880 cfg->rs = NULL;
1884 void
1885 mono_destroy_compile (MonoCompile *cfg)
1887 mono_empty_compile (cfg);
1889 mono_metadata_free_mh (cfg->header);
1891 g_hash_table_destroy (cfg->spvars);
1892 g_hash_table_destroy (cfg->exvars);
1893 g_list_free (cfg->ldstr_list);
1894 g_hash_table_destroy (cfg->token_info_hash);
1895 g_hash_table_destroy (cfg->abs_patches);
1897 mono_debug_free_method (cfg);
1899 g_free (cfg->varinfo);
1900 g_free (cfg->vars);
1901 g_free (cfg->exception_message);
1902 g_free (cfg);
1905 void
1906 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
1908 if (type == MONO_PATCH_INFO_NONE)
1909 return;
1911 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1913 ji->ip.i = ip;
1914 ji->type = type;
1915 ji->data.target = target;
1916 ji->next = cfg->patch_info;
1918 cfg->patch_info = ji;
1921 void
1922 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
1924 if (type == MONO_PATCH_INFO_NONE)
1925 return;
1927 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1929 ji->ip.i = ip;
1930 ji->type = type;
1931 ji->relocation = relocation;
1932 ji->data.target = target;
1933 ji->next = cfg->patch_info;
1935 cfg->patch_info = ji;
1938 void
1939 mono_remove_patch_info (MonoCompile *cfg, int ip)
1941 MonoJumpInfo **ji = &cfg->patch_info;
1943 while (*ji) {
1944 if ((*ji)->ip.i == ip)
1945 *ji = (*ji)->next;
1946 else
1947 ji = &((*ji)->next);
1951 void
1952 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
1954 ins->inst_offset = native_offset;
1955 g_ptr_array_add (cfg->seq_points, ins);
1956 if (bb) {
1957 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
1958 bb->last_seq_point = ins;
1962 void
1963 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
1965 MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
1967 if (is_reg)
1968 g_assert (offset == 0);
1970 entry->is_reg = is_reg;
1971 entry->reg = reg;
1972 entry->offset = offset;
1973 entry->from = from;
1974 entry->to = to;
1976 if (var == cfg->args [0])
1977 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
1978 else if (var == cfg->rgctx_var)
1979 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
1982 static void
1983 mono_apply_volatile (MonoInst *inst, MonoBitSet *set, gsize index)
1985 inst->flags |= mono_bitset_test_safe (set, index) ? MONO_INST_VOLATILE : 0;
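/*
 * mono_compile_create_vars:
 *
 *   Create the MonoInst variables for the return value, the 'this' argument, the
 * parameters and the IL locals of cfg->method, then let the backend (or LLVM)
 * create its own variables, including the LMF var if needed.
 */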
1988 static void
1989 mono_compile_create_vars (MonoCompile *cfg)
1991 MonoMethodSignature *sig;
1992 MonoMethodHeader *header;
1993 int i;
1995 header = cfg->header;
1997 sig = mono_method_signature_internal (cfg->method);
1999 if (!MONO_TYPE_IS_VOID (sig->ret)) {
2000 cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
2001 /* Inhibit optimizations */
2002 cfg->ret->flags |= MONO_INST_VOLATILE;
2004 if (cfg->verbose_level > 2)
2005 g_print ("creating vars\n");
2007 cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
2009 if (sig->hasthis) {
2010 MonoInst* arg = mono_compile_create_var (cfg, m_class_get_this_arg (cfg->method->klass), OP_ARG);
2011 mono_apply_volatile (arg, header->volatile_args, 0);
2012 cfg->args [0] = arg;
2013 cfg->this_arg = arg;
2016 for (i = 0; i < sig->param_count; ++i) {
2017 MonoInst* arg = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
2018 mono_apply_volatile (arg, header->volatile_args, i + sig->hasthis);
2019 cfg->args [i + sig->hasthis] = arg;
2022 if (cfg->verbose_level > 2) {
2023 if (cfg->ret) {
2024 printf ("\treturn : ");
2025 mono_print_ins (cfg->ret);
2028 if (sig->hasthis) {
2029 printf ("\tthis: ");
2030 mono_print_ins (cfg->args [0]);
2033 for (i = 0; i < sig->param_count; ++i) {
2034 printf ("\targ [%d]: ", i);
2035 mono_print_ins (cfg->args [i + sig->hasthis]);
2039 cfg->locals_start = cfg->num_varinfo;
2040 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
2042 if (cfg->verbose_level > 2)
2043 g_print ("creating locals\n");
2045 for (i = 0; i < header->num_locals; ++i) {
2046 if (cfg->verbose_level > 2)
2047 g_print ("\tlocal [%d]: ", i);
2048 cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
2049 mono_apply_volatile (cfg->locals [i], header->volatile_locals, i);
2052 if (cfg->verbose_level > 2)
2053 g_print ("locals done\n");
2055 #ifdef ENABLE_LLVM
2056 if (COMPILE_LLVM (cfg))
2057 mono_llvm_create_vars (cfg);
2058 else
2059 mono_arch_create_vars (cfg);
2060 #else
2061 mono_arch_create_vars (cfg);
2062 #endif
2064 if (cfg->method->save_lmf && cfg->create_lmf_var) {
2065 MonoInst *lmf_var = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
2066 lmf_var->flags |= MONO_INST_VOLATILE;
2067 lmf_var->flags |= MONO_INST_LMF;
2068 cfg->lmf_var = lmf_var;
2072 void
2073 mono_print_code (MonoCompile *cfg, const char* msg)
2075 MonoBasicBlock *bb;
2077 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2078 mono_print_bb (bb, msg);
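/*
 * mono_postprocess_patches:
 *
 *   Fix up patches which need extra work after code emission: ABS patches are
 * replaced by the patch stored in cfg->abs_patches describing the absolute
 * address, and SWITCH patches get their jump tables allocated and filled with
 * the native offsets of the target bblocks.
 */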
2081 static void
2082 mono_postprocess_patches (MonoCompile *cfg)
2084 MonoJumpInfo *patch_info;
2085 int i;
2087 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2088 switch (patch_info->type) {
2089 case MONO_PATCH_INFO_ABS: {
2091 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2092 * absolute address.
2094 if (cfg->abs_patches) {
2095 MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
2096 if (abs_ji) {
2097 patch_info->type = abs_ji->type;
2098 patch_info->data.target = abs_ji->data.target;
2101 break;
2103 case MONO_PATCH_INFO_SWITCH: {
2104 gpointer *table;
2105 if (cfg->method->dynamic) {
2106 table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
2107 } else {
2108 table = (void **)mono_mem_manager_code_reserve (cfg->mem_manager, sizeof (gpointer) * patch_info->data.table->table_size);
2111 for (i = 0; i < patch_info->data.table->table_size; i++) {
2112 /* Might be NULL if the switch is eliminated */
2113 if (patch_info->data.table->table [i]) {
2114 g_assert (patch_info->data.table->table [i]->native_offset);
2115 table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
2116 } else {
2117 table [i] = NULL;
2120 patch_info->data.table->table = (MonoBasicBlock**)table;
2121 break;
2123 default:
2124 /* do nothing */
2125 break;
2130 /* These patches require the JitInfo of the compiled method to already be in place when they are used */
2131 static void
2132 mono_postprocess_patches_after_ji_publish (MonoCompile *cfg)
2134 MonoJumpInfo *patch_info;
2136 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2137 switch (patch_info->type) {
2138 case MONO_PATCH_INFO_METHOD_JUMP: {
2139 unsigned char *ip = cfg->native_code + patch_info->ip.i;
2141 mini_register_jump_site (cfg->domain, patch_info->data.method, ip);
2142 break;
2144 default:
2145 /* do nothing */
2146 break;
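/*
 * mono_codegen:
 *
 *   Emit native code for CFG: run the arch lowering/peephole passes and local
 * register allocation on each bblock, emit the prolog, the bblocks, the epilog
 * and the exception code, copy the result into the code manager, apply the
 * recorded patches, then flush the icache and install the unwind info.
 */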
2151 void
2152 mono_codegen (MonoCompile *cfg)
2154 MonoBasicBlock *bb;
2155 int max_epilog_size;
2156 guint8 *code;
2157 MonoMemoryManager *code_mem_manager;
2158 guint unwindlen = 0;
2160 if (mono_using_xdebug)
2162 * Recent gdb versions have trouble processing symbol files containing
2163 * overlapping address ranges, so allocate all code from the code manager
2164 * of the root domain. (#666152).
2166 code_mem_manager = mono_domain_memory_manager (mono_get_root_domain ());
2167 else
2168 code_mem_manager = cfg->mem_manager;
2170 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2171 cfg->spill_count = 0;
2172 /* we reuse dfn here */
2173 /* bb->dfn = bb_count++; */
2175 mono_arch_lowering_pass (cfg, bb);
2177 if (cfg->opt & MONO_OPT_PEEPHOLE)
2178 mono_arch_peephole_pass_1 (cfg, bb);
2180 mono_local_regalloc (cfg, bb);
2182 if (cfg->opt & MONO_OPT_PEEPHOLE)
2183 mono_arch_peephole_pass_2 (cfg, bb);
2185 if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
2186 mono_bb_deduplicate_op_il_seq_points (cfg, bb);
2189 code = mono_arch_emit_prolog (cfg);
2191 set_code_cursor (cfg, code);
2192 cfg->prolog_end = cfg->code_len;
2193 cfg->cfa_reg = cfg->cur_cfa_reg;
2194 cfg->cfa_offset = cfg->cur_cfa_offset;
2196 mono_debug_open_method (cfg);
2198 /* emit code for all basic blocks */
2199 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2200 bb->native_offset = cfg->code_len;
2201 bb->real_native_offset = cfg->code_len;
2202 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2203 mono_arch_output_basic_block (cfg, bb);
2204 bb->native_length = cfg->code_len - bb->native_offset;
2206 if (bb == cfg->bb_exit) {
2207 cfg->epilog_begin = cfg->code_len;
2208 mono_arch_emit_epilog (cfg);
2209 cfg->epilog_end = cfg->code_len;
2212 if (bb->clause_holes) {
2213 GList *tmp;
2214 for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
2215 mono_cfg_add_try_hole (cfg, ((MonoLeaveClause *) tmp->data)->clause, cfg->native_code + bb->native_offset, bb);
2219 mono_arch_emit_exceptions (cfg);
2221 max_epilog_size = 0;
2223 /* we always allocate code from the code memory manager to increase locality */
2224 cfg->code_size = cfg->code_len + max_epilog_size;
2226 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2228 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2229 if (!cfg->compile_aot)
2230 unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
2231 #endif
2233 if (cfg->method->dynamic) {
2234 /* Allocate the code into a separate memory pool so it can be freed */
2235 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2236 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2237 mono_domain_lock (cfg->domain);
2238 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2239 mono_domain_unlock (cfg->domain);
2241 if (mono_using_xdebug)
2242 /* See the comment for cfg->code_domain */
2243 code = (guint8 *)mono_mem_manager_code_reserve (code_mem_manager, cfg->code_size + cfg->thunk_area + unwindlen);
2244 else
2245 code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
2246 } else {
2247 code = (guint8 *)mono_mem_manager_code_reserve (code_mem_manager, cfg->code_size + cfg->thunk_area + unwindlen);
2250 mono_codeman_enable_write ();
2252 if (cfg->thunk_area) {
2253 cfg->thunks_offset = cfg->code_size + unwindlen;
2254 cfg->thunks = code + cfg->thunks_offset;
2255 memset (cfg->thunks, 0, cfg->thunk_area);
2258 g_assert (code);
2259 memcpy (code, cfg->native_code, cfg->code_len);
2260 g_free (cfg->native_code);
2261 cfg->native_code = code;
2262 code = cfg->native_code + cfg->code_len;
2264 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2265 mono_postprocess_patches (cfg);
2267 #ifdef VALGRIND_JIT_REGISTER_MAP
2268 if (valgrind_register){
2269 char* nm = mono_method_full_name (cfg->method, TRUE);
2270 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2271 g_free (nm);
2273 #endif
2275 if (cfg->verbose_level > 0) {
2276 char* nm = mono_method_get_full_name (cfg->method);
2277 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2278 nm,
2279 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
2280 g_free (nm);
2284 gboolean is_generic = FALSE;
2286 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2287 mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
2288 is_generic = TRUE;
2291 if (cfg->gshared)
2292 g_assert (is_generic);
2295 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2296 mono_arch_save_unwind_info (cfg);
2297 #endif
2299 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2301 MonoJumpInfo *ji;
2302 gpointer target;
2304 for (ji = cfg->patch_info; ji; ji = ji->next) {
2305 if (cfg->compile_aot) {
2306 switch (ji->type) {
2307 case MONO_PATCH_INFO_BB:
2308 case MONO_PATCH_INFO_LABEL:
2309 break;
2310 default:
2311 /* No need to patch these */
2312 continue;
2316 if (ji->type == MONO_PATCH_INFO_NONE)
2317 continue;
2319 target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, cfg->error);
2320 if (!is_ok (cfg->error)) {
2321 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2322 return;
2324 mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
2327 #else
2328 mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors, cfg->error);
2329 if (!is_ok (cfg->error)) {
2330 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2331 return;
2333 #endif
2335 if (cfg->method->dynamic) {
2336 if (mono_using_xdebug)
2337 mono_mem_manager_code_commit (code_mem_manager, cfg->native_code, cfg->code_size, cfg->code_len);
2338 else
2339 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2340 } else {
2341 mono_mem_manager_code_commit (code_mem_manager, cfg->native_code, cfg->code_size, cfg->code_len);
2344 mono_codeman_disable_write ();
2346 MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
2348 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2350 mono_debug_close_method (cfg);
2352 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2353 if (!cfg->compile_aot)
2354 mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
2355 #endif
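/*
 * compute_reachable:
 *
 *   Recursively mark BB and every bblock reachable from it with BB_VISITED.
 * Used by mono_bb_ordering () to detect and remove unreachable code.
 */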
2358 static void
2359 compute_reachable (MonoBasicBlock *bb)
2361 int i;
2363 if (!(bb->flags & BB_VISITED)) {
2364 bb->flags |= BB_VISITED;
2365 for (i = 0; i < bb->out_count; ++i)
2366 compute_reachable (bb->out_bb [i]);
2370 static void mono_bb_ordering (MonoCompile *cfg)
2372 int dfn = 0;
2373 /* Depth-first ordering on basic blocks */
2374 cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
2376 cfg->max_block_num = cfg->num_bblocks;
2378 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
2380 #if defined(__GNUC__) && __GNUC__ == 7 && defined(__x86_64__)
2381 /* workaround for an AMD specific issue that only happens on GCC 7 so far,
2382 * for more information see https://github.com/mono/mono/issues/9298 */
2383 mono_memory_barrier ();
2384 #endif
2385 g_assertf (cfg->num_bblocks >= dfn, "cfg->num_bblocks=%d, dfn=%d\n", cfg->num_bblocks, dfn);
2387 if (cfg->num_bblocks != dfn + 1) {
2388 MonoBasicBlock *bb;
2390 cfg->num_bblocks = dfn + 1;
2392 /* remove unreachable bblocks, because the code in them may be
2393 * inconsistent (access to dead variables, for example) */
2394 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2395 bb->flags &= ~BB_VISITED;
2396 compute_reachable (cfg->bb_entry);
2397 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2398 if (bb->flags & BB_EXCEPTION_HANDLER)
2399 compute_reachable (bb);
2400 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2401 if (!(bb->flags & BB_VISITED)) {
2402 if (cfg->verbose_level > 1)
2403 g_print ("found unreachable code in BB%d\n", bb->block_num);
2404 bb->code = bb->last_ins = NULL;
2405 while (bb->out_count)
2406 mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
2409 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2410 bb->flags &= ~BB_VISITED;
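/*
 * mono_handle_out_of_line_bblock:
 *
 *   If a bblock would fall through into an out-of-line successor, append an
 * explicit OP_BR to it, so correctness does not depend on the two bblocks being
 * emitted next to each other.
 */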
2414 static void
2415 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2417 MonoBasicBlock *bb;
2418 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2419 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2420 MonoInst *ins;
2421 MONO_INST_NEW (cfg, ins, OP_BR);
2422 MONO_ADD_INS (bb, ins);
2423 ins->inst_target_bb = bb->next_bb;
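/*
 * create_jit_info:
 *
 *   Build the MonoJitInfo for the newly compiled method: the exception clauses
 * translated to native offsets, the try block holes, the generic sharing info,
 * the arch specific EH info, the thunk info and the encoded unwind info.
 */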
2428 static MonoJitInfo*
2429 create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
2431 GSList *tmp;
2432 MonoMethodHeader *header;
2433 MonoJitInfo *jinfo;
2434 MonoJitInfoFlags flags = JIT_INFO_NONE;
2435 int num_clauses, num_holes = 0;
2436 guint32 stack_size = 0;
2438 g_assert (method_to_compile == cfg->method);
2439 header = cfg->header;
2441 if (cfg->gshared)
2442 flags |= JIT_INFO_HAS_GENERIC_JIT_INFO;
2444 if (cfg->arch_eh_jit_info) {
2445 MonoJitArgumentInfo *arg_info;
2446 MonoMethodSignature *sig = mono_method_signature_internal (cfg->method_to_register);
2449 * This cannot be computed during stack walking, as
2450 * mono_arch_get_argument_info () is not signal safe.
2452 arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
2453 stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
2455 if (stack_size)
2456 flags |= JIT_INFO_HAS_ARCH_EH_INFO;
2459 if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
2460 flags |= JIT_INFO_HAS_ARCH_EH_INFO;
2462 if (cfg->thunk_area)
2463 flags |= JIT_INFO_HAS_THUNK_INFO;
2465 if (cfg->try_block_holes) {
2466 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2467 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2468 MonoExceptionClause *ec = hole->clause;
2469 int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
2470 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2471 g_assert (clause_last_bb);
2473 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2474 if (clause_last_bb->native_offset != hole_end)
2475 ++num_holes;
2477 if (num_holes)
2478 flags |= JIT_INFO_HAS_TRY_BLOCK_HOLES;
2479 if (G_UNLIKELY (cfg->verbose_level >= 4))
2480 printf ("Number of try block holes %d\n", num_holes);
2483 if (COMPILE_LLVM (cfg))
2484 num_clauses = cfg->llvm_ex_info_len;
2485 else
2486 num_clauses = header->num_clauses;
2488 if (cfg->method->dynamic)
2489 jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
2490 else
2491 jinfo = (MonoJitInfo *)mono_mem_manager_alloc0 (cfg->mem_manager, mono_jit_info_size (flags, num_clauses, num_holes));
2492 jinfo_try_holes_size += num_holes * sizeof (MonoTryBlockHoleJitInfo);
2494 mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
2495 jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
2497 if (COMPILE_LLVM (cfg))
2498 jinfo->from_llvm = TRUE;
2500 if (cfg->gshared) {
2501 MonoInst *inst;
2502 MonoGenericJitInfo *gi;
2503 GSList *loclist = NULL;
2505 gi = mono_jit_info_get_generic_jit_info (jinfo);
2506 g_assert (gi);
2508 if (cfg->method->dynamic)
2509 gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
2510 else
2511 gi->generic_sharing_context = (MonoGenericSharingContext *)mono_mem_manager_alloc0 (cfg->mem_manager, sizeof (MonoGenericSharingContext));
2512 mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);
2514 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2515 mini_method_get_context (method_to_compile)->method_inst ||
2516 m_class_is_valuetype (method_to_compile->klass)) {
2517 g_assert (cfg->rgctx_var);
2520 gi->has_this = 1;
2522 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2523 mini_method_get_context (method_to_compile)->method_inst ||
2524 m_class_is_valuetype (method_to_compile->klass)) {
2525 inst = cfg->rgctx_var;
2526 if (!COMPILE_LLVM (cfg))
2527 g_assert (inst->opcode == OP_REGOFFSET);
2528 loclist = cfg->rgctx_loclist;
2529 } else {
2530 inst = cfg->args [0];
2531 loclist = cfg->this_loclist;
2534 if (loclist) {
2535 /* Needed to handle async exceptions */
2536 GSList *l;
2537 int i;
2539 gi->nlocs = g_slist_length (loclist);
2540 if (cfg->method->dynamic)
2541 gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
2542 else
2543 gi->locations = (MonoDwarfLocListEntry *)mono_mem_manager_alloc0 (cfg->mem_manager, gi->nlocs * sizeof (MonoDwarfLocListEntry));
2544 i = 0;
2545 for (l = loclist; l; l = l->next) {
2546 memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
2547 i ++;
2551 if (COMPILE_LLVM (cfg)) {
2552 g_assert (cfg->llvm_this_reg != -1);
2553 gi->this_in_reg = 0;
2554 gi->this_reg = cfg->llvm_this_reg;
2555 gi->this_offset = cfg->llvm_this_offset;
2556 } else if (inst->opcode == OP_REGVAR) {
2557 gi->this_in_reg = 1;
2558 gi->this_reg = inst->dreg;
2559 } else {
2560 g_assert (inst->opcode == OP_REGOFFSET);
2561 #ifdef TARGET_X86
2562 g_assert (inst->inst_basereg == X86_EBP);
2563 #elif defined(TARGET_AMD64)
2564 g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
2565 #endif
2566 g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
2568 gi->this_in_reg = 0;
2569 gi->this_reg = inst->inst_basereg;
2570 gi->this_offset = inst->inst_offset;
2574 if (num_holes) {
2575 MonoTryBlockHoleTableJitInfo *table;
2576 int i;
2578 table = mono_jit_info_get_try_block_hole_table_info (jinfo);
2579 table->num_holes = (guint16)num_holes;
2580 i = 0;
2581 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2582 guint32 start_bb_offset;
2583 MonoTryBlockHoleJitInfo *hole;
2584 TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
2585 MonoExceptionClause *ec = hole_data->clause;
2586 int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
2587 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2588 g_assert (clause_last_bb);
2590 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2591 if (clause_last_bb->native_offset == hole_end)
2592 continue;
2594 start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
2595 hole = &table->holes [i++];
2596 hole->clause = hole_data->clause - &header->clauses [0];
2597 hole->offset = (guint32)hole_data->start_offset;
2598 hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
2600 if (G_UNLIKELY (cfg->verbose_level >= 4))
2601 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
2603 g_assert (i == num_holes);
2606 if (jinfo->has_arch_eh_info) {
2607 MonoArchEHJitInfo *info;
2609 info = mono_jit_info_get_arch_eh_info (jinfo);
2611 info->stack_size = stack_size;
2614 if (cfg->thunk_area) {
2615 MonoThunkJitInfo *info;
2617 info = mono_jit_info_get_thunk_info (jinfo);
2618 info->thunks_offset = cfg->thunks_offset;
2619 info->thunks_size = cfg->thunk_area;
2622 if (COMPILE_LLVM (cfg)) {
2623 if (num_clauses)
2624 memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
2625 } else if (header->num_clauses) {
2626 int i;
2628 for (i = 0; i < header->num_clauses; i++) {
2629 MonoExceptionClause *ec = &header->clauses [i];
2630 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2631 MonoBasicBlock *tblock;
2632 MonoInst *exvar;
2634 ei->flags = ec->flags;
2636 if (G_UNLIKELY (cfg->verbose_level >= 4))
2637 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
2639 exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
2640 ei->exvar_offset = exvar ? exvar->inst_offset : 0;
2642 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
2643 tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
2644 g_assert (tblock);
2645 ei->data.filter = cfg->native_code + tblock->native_offset;
2646 } else {
2647 ei->data.catch_class = ec->data.catch_class;
2650 tblock = cfg->cil_offset_to_bb [ec->try_offset];
2651 g_assert (tblock);
2652 g_assert (tblock->native_offset);
2653 ei->try_start = cfg->native_code + tblock->native_offset;
2654 if (tblock->extend_try_block) {
2656 * Extend the try block backwards to include parts of the previous call
2657 * instruction.
2659 ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
2661 if (ec->try_offset + ec->try_len < header->code_size)
2662 tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2663 else
2664 tblock = cfg->bb_exit;
2665 if (G_UNLIKELY (cfg->verbose_level >= 4))
2666 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
2667 g_assert (tblock);
2668 if (!tblock->native_offset) {
2669 int j, end;
2670 for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
2671 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2672 if (bb && bb->native_offset) {
2673 tblock = bb;
2674 break;
2678 ei->try_end = cfg->native_code + tblock->native_offset;
2679 g_assert (tblock->native_offset);
2680 tblock = cfg->cil_offset_to_bb [ec->handler_offset];
2681 g_assert (tblock);
2682 ei->handler_start = cfg->native_code + tblock->native_offset;
2684 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2685 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2686 gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
2687 if (hole->clause == ec && hole_end == ei->try_end) {
2688 if (G_UNLIKELY (cfg->verbose_level >= 4))
2689 printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
2691 ei->try_end = cfg->native_code + hole->start_offset;
2692 break;
2696 if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2697 int end_offset;
2698 if (ec->handler_offset + ec->handler_len < header->code_size) {
2699 tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
2700 if (tblock->native_offset) {
2701 end_offset = tblock->native_offset;
2702 } else {
2703 int j, end;
2705 for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
2706 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2707 if (bb && bb->native_offset) {
2708 tblock = bb;
2709 break;
2712 end_offset = tblock->native_offset + tblock->native_length;
2714 } else {
2715 end_offset = cfg->epilog_begin;
2717 ei->data.handler_end = cfg->native_code + end_offset;
2720 /* Keep try_start/end non-authenticated, they are never branched to */
2721 //ei->try_start = MINI_ADDR_TO_FTNPTR (ei->try_start);
2722 //ei->try_end = MINI_ADDR_TO_FTNPTR (ei->try_end);
2723 ei->handler_start = MINI_ADDR_TO_FTNPTR (ei->handler_start);
2724 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER)
2725 ei->data.filter = MINI_ADDR_TO_FTNPTR (ei->data.filter);
2726 else if (ei->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
2727 ei->data.handler_end = MINI_ADDR_TO_FTNPTR (ei->data.handler_end);
2731 if (G_UNLIKELY (cfg->verbose_level >= 4)) {
2732 int i;
2733 for (i = 0; i < jinfo->num_clauses; i++) {
2734 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2735 int start = (guint8*)ei->try_start - cfg->native_code;
2736 int end = (guint8*)ei->try_end - cfg->native_code;
2737 int handler = (guint8*)ei->handler_start - cfg->native_code;
2738 int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
2740 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
2744 if (cfg->encoded_unwind_ops) {
2745 /* Generated by LLVM */
2746 jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
2747 g_free (cfg->encoded_unwind_ops);
2748 } else if (cfg->unwind_ops) {
2749 guint32 info_len;
2750 guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
2751 guint32 unwind_desc;
2753 unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
2755 if (cfg->has_unwind_info_for_epilog) {
2756 MonoArchEHJitInfo *info;
2758 info = mono_jit_info_get_arch_eh_info (jinfo);
2759 g_assert (info);
2760 info->epilog_size = cfg->code_len - cfg->epilog_begin;
2762 jinfo->unwind_info = unwind_desc;
2763 g_free (unwind_info);
2764 } else {
2765 jinfo->unwind_info = cfg->used_int_regs;
2768 return jinfo;
2771 /* Return whether METHOD is a gsharedvt method */
2772 static gboolean
2773 is_gsharedvt_method (MonoMethod *method)
2775 MonoGenericContext *context;
2776 MonoGenericInst *inst;
2777 int i;
2779 if (!method->is_inflated)
2780 return FALSE;
2781 context = mono_method_get_context (method);
2782 inst = context->class_inst;
2783 if (inst) {
2784 for (i = 0; i < inst->type_argc; ++i)
2785 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2786 return TRUE;
2788 inst = context->method_inst;
2789 if (inst) {
2790 for (i = 0; i < inst->type_argc; ++i)
2791 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2792 return TRUE;
2794 return FALSE;
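/* Return whether METHOD is inflated with an open generic context, i.e. one still containing unresolved type parameters */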
2797 static gboolean
2798 is_open_method (MonoMethod *method)
2800 MonoGenericContext *context;
2802 if (!method->is_inflated)
2803 return FALSE;
2804 context = mono_method_get_context (method);
2805 if (context->class_inst && context->class_inst->is_open)
2806 return TRUE;
2807 if (context->method_inst && context->method_inst->is_open)
2808 return TRUE;
2809 return FALSE;
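/*
 * mono_insert_nop_in_empty_bb:
 *
 *   Add an OP_NOP to every bblock which has no code; used as a workaround for the
 * cfg graph dumper, which doesn't handle empty bblocks.
 */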
2812 static void
2813 mono_insert_nop_in_empty_bb (MonoCompile *cfg)
2815 MonoBasicBlock *bb;
2816 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2817 if (bb->code)
2818 continue;
2819 MonoInst *nop;
2820 MONO_INST_NEW (cfg, nop, OP_NOP);
2821 MONO_ADD_INS (bb, nop);
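/*
 * insert_safepoint:
 *
 *   Insert an OP_GC_SAFE_POINT polling mono_polling_required into BBLOCK: after
 * the OP_START_HANDLER/OP_GET_EX_OBJ ops in handler bblocks, after the existing
 * code in the entry bblock, and at the start of every other bblock.
 */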
2824 static void
2825 insert_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
2827 MonoInst *poll_addr, *ins;
2829 if (cfg->disable_gc_safe_points)
2830 return;
2832 if (cfg->verbose_level > 1)
2833 printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
2835 g_assert (mini_safepoints_enabled ());
2836 NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
2838 MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
2839 ins->sreg1 = poll_addr->dreg;
2841 if (bblock->flags & BB_EXCEPTION_HANDLER) {
2842 MonoInst *eh_op = bblock->code;
2844 if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
2845 eh_op = NULL;
2846 } else {
2847 MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
2848 // skip all EH-related ops
2849 while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
2850 eh_op = next_eh_op;
2851 next_eh_op = eh_op->next;
2855 mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
2856 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2857 } else if (bblock == cfg->bb_entry) {
2858 mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
2859 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2860 } else {
2861 mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
2862 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2867 This code inserts safepoints into managed code at important code paths.
2868 Those are:
2870 - the first basic block
2871 - landing BB for exception handlers
2872 - loop body starts.
2875 static void
2876 insert_safepoints (MonoCompile *cfg)
2878 MonoBasicBlock *bb;
2880 g_assert (mini_safepoints_enabled ());
2882 if (COMPILE_LLVM (cfg)) {
2883 if (!cfg->llvm_only) {
2884 /* We rely on LLVM's safepoint insertion capabilities. */
2885 if (cfg->verbose_level > 1)
2886 printf ("SKIPPING SAFEPOINTS for code compiled with LLVM\n");
2887 return;
2891 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2892 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2893 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2894 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
2895 (info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_state_poll ||
2896 info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_thread_interruption_checkpoint ||
2897 info->d.icall.jit_icall_id == MONO_JIT_ICALL_mono_threads_exit_gc_safe_region_unbalanced)) {
2898 if (cfg->verbose_level > 1)
2899 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2900 return;
2904 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
2905 if (cfg->verbose_level > 1)
2906 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2907 return;
2910 if (cfg->method->wrapper_type == MONO_WRAPPER_OTHER) {
2911 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2913 if (info && (info->subtype == WRAPPER_SUBTYPE_INTERP_IN || info->subtype == WRAPPER_SUBTYPE_INTERP_LMF)) {
2914 /* These wrappers shouldn't do any icalls */
2915 if (cfg->verbose_level > 1)
2916 printf ("SKIPPING SAFEPOINTS for interp-in wrappers.\n");
2917 return;
2921 if (cfg->verbose_level > 1)
2922 printf ("INSERTING SAFEPOINTS\n");
2923 if (cfg->verbose_level > 2)
2924 mono_print_code (cfg, "BEFORE SAFEPOINTS");
2926 /* if the method has
2927 * (1) no calls (so it's a leaf method)
2928 * (2) and no loops,
2929 * we can skip the GC safepoint on method entry. */
2930 gboolean requires_safepoint = cfg->has_calls;
2932 for (bb = cfg->bb_entry->next_bb; bb; bb = bb->next_bb) {
2933 if (bb->loop_body_start || (bb->flags & BB_EXCEPTION_HANDLER)) {
2934 requires_safepoint = TRUE;
2935 insert_safepoint (cfg, bb);
2939 if (requires_safepoint)
2940 insert_safepoint (cfg, cfg->bb_entry);
2942 if (cfg->verbose_level > 2)
2943 mono_print_code (cfg, "AFTER SAFEPOINTS");
2948 static void
2949 mono_insert_branches_between_bblocks (MonoCompile *cfg)
2951 MonoBasicBlock *bb;
2953 /* Add branches between non-consecutive bblocks */
2954 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2955 if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
2956 bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
2957 /* we are careful when inverting, since bugs like #59580
2958 * could show up when dealing with NaNs.
2960 if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
2961 MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
2962 bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
2963 bb->last_ins->inst_false_bb = tmp;
2965 bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
2966 } else {
2967 MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
2968 inst->opcode = OP_BR;
2969 inst->inst_target_bb = bb->last_ins->inst_false_bb;
2970 mono_bblock_add_inst (bb, inst);
2975 if (cfg->verbose_level >= 4) {
2976 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2977 MonoInst *tree = bb->code;
2978 g_print ("DUMP BLOCK %d:\n", bb->block_num);
2979 if (!tree)
2980 continue;
2981 for (; tree; tree = tree->next) {
2982 mono_print_ins_index (-1, tree);
2987 /* FIXME: */
2988 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2989 bb->max_vreg = cfg->next_vreg;
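/*
 * init_backend:
 *
 *   Fill in the MonoBackend capability flags from the MONO_ARCH_* defines of the
 * current architecture, so the rest of the JIT can query them at runtime.
 */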
2993 static void
2994 init_backend (MonoBackend *backend)
2996 #ifdef MONO_ARCH_NEED_GOT_VAR
2997 backend->need_got_var = 1;
2998 #endif
2999 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3000 backend->have_card_table_wb = 1;
3001 #endif
3002 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3003 backend->have_op_generic_class_init = 1;
3004 #endif
3005 #ifdef MONO_ARCH_EMULATE_MUL_DIV
3006 backend->emulate_mul_div = 1;
3007 #endif
3008 #ifdef MONO_ARCH_EMULATE_DIV
3009 backend->emulate_div = 1;
3010 #endif
3011 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
3012 backend->emulate_long_shift_opts = 1;
3013 #endif
3014 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
3015 backend->have_objc_get_selector = 1;
3016 #endif
3017 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
3018 backend->have_generalized_imt_trampoline = 1;
3019 #endif
3020 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3021 backend->gshared_supported = 1;
3022 #endif
3023 if (MONO_ARCH_USE_FPSTACK)
3024 backend->use_fpstack = 1;
3025 // Does the ABI have a volatile non-parameter register, so tailcall
3026 // can pass context to generics or interfaces?
3027 backend->have_volatile_non_param_register = MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER;
3028 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_MEMBASE
3029 backend->have_op_tailcall_membase = 1;
3030 #endif
3031 #ifdef MONO_ARCH_HAVE_OP_TAILCALL_REG
3032 backend->have_op_tailcall_reg = 1;
3033 #endif
3034 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3035 backend->monitor_enter_adjustment = 1;
3036 #else
3037 backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
3038 #endif
3039 #if defined(MONO_ARCH_ILP32)
3040 backend->ilp32 = 1;
3041 #endif
3042 #ifdef MONO_ARCH_NEED_DIV_CHECK
3043 backend->need_div_check = 1;
3044 #endif
3045 #ifdef NO_UNALIGNED_ACCESS
3046 backend->no_unaligned_access = 1;
3047 #endif
3048 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3049 backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
3050 #endif
3051 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3052 backend->disable_div_with_mul = 1;
3053 #endif
3054 #ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
3055 backend->explicit_null_checks = 1;
3056 #endif
3057 #ifdef MONO_ARCH_HAVE_OPTIMIZED_DIV
3058 backend->optimized_div = 1;
3059 #endif
3060 #ifdef MONO_ARCH_FORCE_FLOAT32
3061 backend->force_float32 = 1;
3062 #endif
3065 static gboolean
3066 is_simd_supported (MonoCompile *cfg)
3068 #ifdef DISABLE_SIMD
3069 return FALSE;
3070 #endif
3071 // FIXME: Clean this up
3072 #ifdef TARGET_WASM
3073 if ((mini_get_cpu_features (cfg) & MONO_CPU_WASM_SIMD) == 0)
3074 return FALSE;
3075 #else
3076 if (cfg->llvm_only)
3077 return FALSE;
3078 #endif
3079 return TRUE;
3083 * mini_method_compile:
3084 * @method: the method to compile
3085 * @opts: the optimization flags to use
3086 * @domain: the domain in which the method will be compiled
3087 * @flags: compilation flags
3088 * @parts: debug flag
3090 * Returns: a MonoCompile* pointer. Caller must check the exception_type
3091 * field in the returned struct to see if compilation succeeded.
3093 MonoCompile*
3094 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
3096 MonoMethodHeader *header;
3097 MonoMethodSignature *sig;
3098 MonoCompile *cfg;
3099 int i;
3100 gboolean try_generic_shared, try_llvm = FALSE;
3101 MonoMethod *method_to_compile, *method_to_register;
3102 gboolean method_is_gshared = FALSE;
3103 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3104 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3105 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3106 gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
3107 gboolean gsharedvt_method = FALSE;
3108 #ifdef ENABLE_LLVM
3109 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3110 #endif
3111 static gboolean verbose_method_inited;
3112 static char **verbose_method_names;
3114 mono_atomic_inc_i32 (&mono_jit_stats.methods_compiled);
3115 MONO_PROFILER_RAISE (jit_begin, (method));
3116 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3117 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3119 gsharedvt_method = is_gsharedvt_method (method);
3122 * In AOT mode, the method can be one of the following:
3123 * - a gsharedvt method.
3124 * - a method inflated with type parameters. This is for ref/partial sharing.
3125 * - a method inflated with concrete types.
3127 if (compile_aot) {
3128 if (is_open_method (method)) {
3129 try_generic_shared = TRUE;
3130 method_is_gshared = TRUE;
3131 } else {
3132 try_generic_shared = FALSE;
3134 g_assert (opts & MONO_OPT_GSHARED);
3135 } else {
3136 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3137 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_full (method, FALSE, FALSE, FALSE);
3138 if (mini_is_gsharedvt_sharable_method (method)) {
3140 if (!mono_debug_count ())
3141 try_generic_shared = FALSE;
3147 if (try_generic_shared && !mono_debug_count ())
3148 try_generic_shared = FALSE;
3151 if (opts & MONO_OPT_GSHARED) {
3152 if (try_generic_shared)
3153 mono_atomic_inc_i32 (&mono_stats.generics_sharable_methods);
3154 else if (mono_method_is_generic_impl (method))
3155 mono_atomic_inc_i32 (&mono_stats.generics_unsharable_methods);
3158 #ifdef ENABLE_LLVM
3159 try_llvm = mono_use_llvm || llvm;
3160 #endif
3162 #ifndef MONO_ARCH_FLOAT32_SUPPORTED
3163 opts &= ~MONO_OPT_FLOAT32;
3164 #endif
3165 if (current_backend->force_float32)
3166 /* Force float32 mode on newer platforms */
3167 opts |= MONO_OPT_FLOAT32;
3169 restart_compile:
3170 if (method_is_gshared) {
3171 method_to_compile = method;
3172 } else {
3173 if (try_generic_shared) {
3174 ERROR_DECL (error);
3175 method_to_compile = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
3176 mono_error_assert_ok (error);
3177 } else {
3178 method_to_compile = method;
3182 cfg = g_new0 (MonoCompile, 1);
3183 cfg->method = method_to_compile;
3184 cfg->mempool = mono_mempool_new ();
3185 cfg->opt = opts;
3186 cfg->run_cctors = run_cctors;
3187 cfg->domain = domain;
3188 cfg->verbose_level = mini_verbose;
3189 cfg->compile_aot = compile_aot;
3190 cfg->full_aot = full_aot;
3191 cfg->disable_omit_fp = mini_debug_options.disable_omit_fp;
3192 cfg->skip_visibility = method->skip_visibility;
3193 cfg->orig_method = method;
3194 cfg->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points;
3195 cfg->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points;
3196 cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
3197 cfg->interp = (flags & JIT_FLAG_INTERP) != 0;
3198 cfg->use_current_cpu = (flags & JIT_FLAG_USE_CURRENT_CPU) != 0;
3199 cfg->self_init = (flags & JIT_FLAG_SELF_INIT) != 0;
3200 cfg->code_exec_only = (flags & JIT_FLAG_CODE_EXEC_ONLY) != 0;
3201 cfg->backend = current_backend;
3202 cfg->mem_manager = m_method_get_mem_manager (domain, cfg->method);
3204 if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
3205 /* We can't have seq points inside gc critical regions */
3206 cfg->gen_seq_points = FALSE;
3207 cfg->gen_sdb_seq_points = FALSE;
3209 /* coop requires loop detection to happen */
3210 if (mini_safepoints_enabled ())
3211 cfg->opt |= MONO_OPT_LOOP;
3212 cfg->disable_llvm_implicit_null_checks = mini_debug_options.llvm_disable_implicit_null_checks;
3213 if (cfg->backend->explicit_null_checks || mini_debug_options.explicit_null_checks) {
3214 /* some platforms have null pages, so we can't SIGSEGV */
3215 cfg->explicit_null_checks = TRUE;
3216 cfg->disable_llvm_implicit_null_checks = TRUE;
3217 } else {
3218 cfg->explicit_null_checks = flags & JIT_FLAG_EXPLICIT_NULL_CHECKS;
3220 cfg->soft_breakpoints = mini_debug_options.soft_breakpoints;
3221 cfg->check_pinvoke_callconv = mini_debug_options.check_pinvoke_callconv;
3222 cfg->disable_direct_icalls = disable_direct_icalls;
3223 cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
3224 if (try_generic_shared)
3225 cfg->gshared = TRUE;
3226 cfg->compile_llvm = try_llvm;
3227 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3228 if (cfg->compile_aot)
3229 cfg->method_index = aot_method_index;
3231 if (cfg->compile_llvm)
3232 cfg->explicit_null_checks = TRUE;
3235 if (!mono_debug_count ())
3236 cfg->opt &= ~MONO_OPT_FLOAT32;
3238 if (!is_simd_supported (cfg))
3239 cfg->opt &= ~MONO_OPT_SIMD;
3240 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3241 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3243 if (cfg->gen_seq_points)
3244 cfg->seq_points = g_ptr_array_new ();
3245 cfg->error = (MonoError*)&cfg->error_value;
3246 error_init (cfg->error);
3248 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
3249 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3250 return cfg;
3253 if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3254 MonoMethodInflated *inflated;
3255 MonoGenericContext *context;
3257 if (gsharedvt_method) {
3258 g_assert (method->is_inflated);
3259 inflated = (MonoMethodInflated*)method;
3260 context = &inflated->context;
3262 /* We are compiling a gsharedvt method directly */
3263 g_assert (compile_aot);
3264 } else {
3265 g_assert (method_to_compile->is_inflated);
3266 inflated = (MonoMethodInflated*)method_to_compile;
3267 context = &inflated->context;
3270 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3271 cfg->gsctx_context = context;
3273 cfg->gsharedvt = TRUE;
3274 if (!cfg->llvm_only) {
3275 cfg->disable_llvm = TRUE;
3276 cfg->exception_message = g_strdup ("gsharedvt");
3280 if (cfg->gshared) {
3281 method_to_register = method_to_compile;
3282 } else {
3283 g_assert (method == method_to_compile);
3284 method_to_register = method;
3286 cfg->method_to_register = method_to_register;
3288 ERROR_DECL (err);
3289 sig = mono_method_signature_checked (cfg->method, err);
3290 if (!sig) {
3291 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3292 cfg->exception_message = g_strdup (mono_error_get_message (err));
3293 mono_error_cleanup (err);
3294 if (MONO_METHOD_COMPILE_END_ENABLED ())
3295 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3296 return cfg;
3299 header = cfg->header = mono_method_get_header_checked (cfg->method, cfg->error);
3300 if (!header) {
3301 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3302 if (MONO_METHOD_COMPILE_END_ENABLED ())
3303 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3304 return cfg;
3307 #ifdef ENABLE_LLVM
3309 static gboolean inited;
3311 if (!inited)
3312 inited = TRUE;
3315 * Check early for methods which cannot be compiled by LLVM, to avoid
3316 * the extra compilation pass.
3318 if (COMPILE_LLVM (cfg)) {
3319 mono_llvm_check_method_supported (cfg);
3320 if (cfg->disable_llvm) {
3321 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3322 //nm = mono_method_full_name (cfg->method, TRUE);
3323 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
3324 //g_free (nm);
3326 if (cfg->llvm_only) {
3327 g_free (cfg->exception_message);
3328 cfg->disable_aot = TRUE;
3329 return cfg;
3331 mono_destroy_compile (cfg);
3332 try_llvm = FALSE;
3333 goto restart_compile;
3337 #endif
3339 cfg->prof_flags = mono_profiler_get_call_instrumentation_flags (cfg->method);
3340 cfg->prof_coverage = mono_profiler_coverage_instrumentation_enabled (cfg->method);
3342 gboolean trace = mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method);
3343 if (trace)
3344 cfg->prof_flags = (MonoProfilerCallInstrumentationFlags)(
3345 MONO_PROFILER_CALL_INSTRUMENTATION_ENTER | MONO_PROFILER_CALL_INSTRUMENTATION_ENTER_CONTEXT |
3346 MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE | MONO_PROFILER_CALL_INSTRUMENTATION_LEAVE_CONTEXT);
3348 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3349 if (mini_debug_options.mdb_optimizations || MONO_CFG_PROFILE_CALL_CONTEXT (cfg)) {
3350 cfg->disable_reuse_registers = TRUE;
3351 cfg->disable_reuse_stack_slots = TRUE;
3353 * This decreases the chance that the debugger will read registers/stack slots which are
3354 * not yet initialized.
3356 cfg->disable_initlocals_opt = TRUE;
3358 cfg->extend_live_ranges = TRUE;
3360 /* The debugger needs all locals to be on the stack or in a global register */
3361 cfg->disable_vreg_to_lvreg = TRUE;
3363 /* Don't remove unused variables when running inside the debugger since the user
3364 * may still want to view them. */
3365 cfg->disable_deadce_vars = TRUE;
3367 cfg->opt &= ~MONO_OPT_DEADCE;
3368 cfg->opt &= ~MONO_OPT_INLINE;
3369 cfg->opt &= ~MONO_OPT_COPYPROP;
3370 cfg->opt &= ~MONO_OPT_CONSPROP;
3372 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3373 cfg->disable_out_of_line_bblocks = TRUE;
3376 if (mono_using_xdebug) {
3378 * Make each variable use its own register/stack slot and extend
3379 * its liveness to cover the whole method, making it displayable
3380 * in gdb even after it is dead.
3382 cfg->disable_reuse_registers = TRUE;
3383 cfg->disable_reuse_stack_slots = TRUE;
3384 cfg->extend_live_ranges = TRUE;
3385 cfg->compute_precise_live_ranges = TRUE;
3388 mini_gc_init_cfg (cfg);
3390 if (method->wrapper_type == MONO_WRAPPER_OTHER) {
3391 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
3393 if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
3394 cfg->disable_gc_safe_points = TRUE;
3395 /* This is safe, these wrappers only store to the stack */
3396 cfg->gen_write_barriers = FALSE;
3400 if (COMPILE_LLVM (cfg)) {
3401 cfg->opt |= MONO_OPT_ABCREM;
3404 if (!verbose_method_inited) {
3405 char *env = g_getenv ("MONO_VERBOSE_METHOD");
3406 if (env != NULL)
3407 verbose_method_names = g_strsplit (env, ";", -1);
3409 verbose_method_inited = TRUE;
3411 if (verbose_method_names) {
3412 int i;
3414 for (i = 0; verbose_method_names [i] != NULL; i++){
3415 const char *name = verbose_method_names [i];
3417 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3418 MonoMethodDesc *desc;
3420 desc = mono_method_desc_new (name, TRUE);
3421 if (desc) {
3422 if (mono_method_desc_full_match (desc, cfg->method)) {
3423 cfg->verbose_level = 4;
3425 mono_method_desc_free (desc);
3427 } else {
3428 if (strcmp (cfg->method->name, name) == 0)
3429 cfg->verbose_level = 4;
3434 cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3436 if (cfg->verbose_level > 0) {
3437 char *method_name;
3439 method_name = mono_method_get_full_name (method);
3440 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
3442 if (COMPILE_LLVM (cfg))
3443 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3444 else if (cfg->gsharedvt)
3445 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3446 else if (cfg->gshared)
3447 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3448 else
3449 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3451 g_free (method_name);
3454 if (cfg->opt & MONO_OPT_ABCREM)
3455 cfg->opt |= MONO_OPT_SSA;
3457 cfg->rs = mono_regstate_new ();
3458 cfg->next_vreg = cfg->rs->next_vreg;
3460 /* FIXME: Fix SSA to handle branches inside bblocks */
3461 if (cfg->opt & MONO_OPT_SSA)
3462 cfg->enable_extended_bblocks = FALSE;
3465 * FIXME: This confuses liveness analysis because variables which are assigned after
3466 * a branch inside a bblock become part of the kill set, even though the assignment
3467 * might not get executed. This causes the optimize_initlocals pass to delete some
3468 * assignments which are needed.
3469 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3470 * created by this.
3472 //cfg->enable_extended_bblocks = TRUE;
3474 /* We must verify the method before doing any IR generation, as mono_compile_create_vars can assert. */
3475 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3476 if (mini_debug_options.break_on_unverified)
3477 G_BREAKPOINT ();
3478 return cfg;
3482 * create MonoInst*'s which represent the arguments and local variables
3484 mono_compile_create_vars (cfg);
3486 mono_cfg_dump_create_context (cfg);
3487 mono_cfg_dump_begin_group (cfg);
3489 MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
3490 mono_cfg_dump_ir (cfg, "method-to-ir");
3492 if (cfg->gdump_ctx != NULL) {
3493 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3494 mono_insert_nop_in_empty_bb (cfg);
3495 mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
3498 if (i < 0) {
3499 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3500 if (compile_aot) {
3501 if (MONO_METHOD_COMPILE_END_ENABLED ())
3502 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3503 return cfg;
3505 mono_destroy_compile (cfg);
3506 try_generic_shared = FALSE;
3507 goto restart_compile;
3509 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3511 if (MONO_METHOD_COMPILE_END_ENABLED ())
3512 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3513 /* cfg contains the details of the failure, so let the caller clean up */
3514 return cfg;
3517 cfg->stat_basic_blocks += cfg->num_bblocks;
3519 if (COMPILE_LLVM (cfg)) {
3520 MonoInst *ins;
3522 /* The IR has to be in SSA form for LLVM */
3523 cfg->opt |= MONO_OPT_SSA;
3525 // FIXME:
3526 if (cfg->ret) {
3527 // Allow SSA on the result value
3528 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3530 // Add an explicit return instruction referencing the return value
3531 MONO_INST_NEW (cfg, ins, OP_SETRET);
3532 ins->sreg1 = cfg->ret->dreg;
3534 MONO_ADD_INS (cfg->bb_exit, ins);
3537 cfg->opt &= ~MONO_OPT_LINEARS;
3539 /* FIXME: */
3540 cfg->opt &= ~MONO_OPT_BRANCH;
3543 /* todo: remove code when we have verified that the liveness for try/catch blocks
3544 * works perfectly
3547 * Currently, this can't be commented out since exception blocks are not
3548 * processed during liveness analysis.
3549 * It is also needed, because otherwise the local optimization passes would
3550 * delete assignments in cases like this:
3551 * r1 <- 1
3552 * <something which throws>
3553 * r1 <- 2
3554 * This also allows SSA to be run on methods containing exception clauses, since
3555 * SSA will ignore variables marked VOLATILE.
3557 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
3558 mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
3560 MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
3561 mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
3563 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3565 if (!COMPILE_LLVM (cfg)) {
3566 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
3567 mono_cfg_dump_ir (cfg, "decompose_long_opts");
3570 /* Should be done before branch opts */
3571 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
3572 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
3573 mono_cfg_dump_ir (cfg, "local_cprop");
3576 if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
3577 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
3578 if (cfg->gdump_ctx != NULL) {
3579 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3580 mono_insert_nop_in_empty_bb (cfg);
3582 mono_cfg_dump_ir (cfg, "decompose_typechecks");
3586 * Should be done after cprop which can do strength reduction on
3587 * some of these ops, after propagating immediates.
3589 if (cfg->has_emulated_ops) {
3590 MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
3591 mono_cfg_dump_ir (cfg, "local_emulate_ops");
3594 if (cfg->opt & MONO_OPT_BRANCH) {
3595 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
3596 mono_cfg_dump_ir (cfg, "optimize_branches");
3599 /* This must be done _before_ global reg alloc and _after_ decompose */
3600 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
3601 mono_cfg_dump_ir (cfg, "handle_global_vregs");
3602 if (cfg->opt & MONO_OPT_DEADCE) {
3603 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
3604 mono_cfg_dump_ir (cfg, "local_deadce");
3606 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
3607 MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
3608 mono_cfg_dump_ir (cfg, "local_alias_analysis");
3610 /* Disable this for LLVM to make the IR easier to handle */
3611 if (!COMPILE_LLVM (cfg)) {
3612 MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
3613 mono_cfg_dump_ir (cfg, "if_conversion");
3616 mono_threads_safepoint ();
3618 MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
3619 mono_cfg_dump_ir (cfg, "bb_ordering");
3621 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3623 * we disable some optimizations if there are too many variables
3624 * because JIT time may become too expensive. The actual number needs
3625 * to be tweaked and eventually the non-linear algorithms should be fixed.
3627 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3628 cfg->disable_ssa = TRUE;
3631 if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
3632 /* Disable llvm for overly complex methods */
3633 cfg->disable_ssa = TRUE;
3635 if (cfg->opt & MONO_OPT_LOOP) {
3636 MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
3637 MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
3640 if (mono_threads_are_safepoints_enabled ()) {
3641 MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, insert_safepoints (cfg));
3642 mono_cfg_dump_ir (cfg, "insert_safepoints");
3645 /* after method_to_ir */
3646 if (parts == 1) {
3647 if (MONO_METHOD_COMPILE_END_ENABLED ())
3648 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3649 return cfg;
3653 if (header->num_clauses)
3654 cfg->disable_ssa = TRUE;
3657 //#define DEBUGSSA "logic_run"
3658 //#define DEBUGSSA_CLASS "Tests"
3659 #ifdef DEBUGSSA
3661 if (!cfg->disable_ssa) {
3662 mono_local_cprop (cfg);
3664 #ifndef DISABLE_SSA
3665 mono_ssa_compute (cfg);
3666 #endif
3668 #else
3669 if (cfg->opt & MONO_OPT_SSA) {
3670 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3671 #ifndef DISABLE_SSA
3672 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
3673 mono_cfg_dump_ir (cfg, "ssa_compute");
3674 #endif
3676 if (cfg->verbose_level >= 2) {
3677 print_dfn (cfg);
3681 #endif
3683 /* after SSA translation */
3684 if (parts == 2) {
3685 if (MONO_METHOD_COMPILE_END_ENABLED ())
3686 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3687 return cfg;
3690 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3691 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3692 #ifndef DISABLE_SSA
3693 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
3694 mono_cfg_dump_ir (cfg, "ssa_cprop");
3695 #endif
3699 #ifndef DISABLE_SSA
3700 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3701 //mono_ssa_strength_reduction (cfg);
3703 if (cfg->opt & MONO_OPT_DEADCE) {
3704 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
3705 mono_cfg_dump_ir (cfg, "ssa_deadce");
3708 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
3709 MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
3710 mono_cfg_dump_ir (cfg, "perform_abc_removal");
3713 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
3714 mono_cfg_dump_ir (cfg, "ssa_remove");
3715 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
3716 mono_cfg_dump_ir (cfg, "local_cprop2");
3717 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
3718 mono_cfg_dump_ir (cfg, "handle_global_vregs2");
3719 if (cfg->opt & MONO_OPT_DEADCE) {
3720 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
3721 mono_cfg_dump_ir (cfg, "local_deadce2");
3724 if (cfg->opt & MONO_OPT_BRANCH) {
3725 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
3726 mono_cfg_dump_ir (cfg, "optimize_branches2");
3729 #endif
3731 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3732 mono_ssa_loop_invariant_code_motion (cfg);
3733 mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
3734 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3735 if (cfg->opt & MONO_OPT_ABCREM) {
3736 mono_perform_abc_removal (cfg);
3737 mono_cfg_dump_ir (cfg, "abc_removal");
3741 /* after SSA removal */
3742 if (parts == 3) {
3743 if (MONO_METHOD_COMPILE_END_ENABLED ())
3744 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3745 return cfg;
3748 if (cfg->llvm_only && cfg->gsharedvt)
3749 mono_ssa_remove_gsharedvt (cfg);
3751 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3752 if (COMPILE_SOFT_FLOAT (cfg))
3753 mono_decompose_soft_float (cfg);
3754 #endif
3755 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
3756 if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE) {
3757 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
3758 mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
3761 if (cfg->got_var) {
3762 #ifndef MONO_ARCH_GOT_REG
3763 GList *regs;
3764 #endif
3765 int got_reg;
3767 g_assert (cfg->got_var_allocated);
3769 /*
3770 * Always allocate the GOT var to a register, because keeping it
3771 * in memory will increase the number of live temporaries in some
3772 * code created by inssel.brg, leading to the well-known spills+
3773 * branches problem. Testcase: mcs crash in
3774 * System.MonoCustomAttrs:GetCustomAttributes.
3775 */
3776 #ifdef MONO_ARCH_GOT_REG
3777 got_reg = MONO_ARCH_GOT_REG;
3778 #else
3779 regs = mono_arch_get_global_int_regs (cfg);
3780 g_assert (regs);
3781 got_reg = GPOINTER_TO_INT (regs->data);
3782 g_list_free (regs);
3783 #endif
3784 cfg->got_var->opcode = OP_REGVAR;
3785 cfg->got_var->dreg = got_reg;
3786 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3787 }
3789 /*
3790 * Have to call this again to process variables added since the first call.
3791 */
3792 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
3794 if (cfg->opt & MONO_OPT_LINEARS) {
3795 GList *vars, *regs, *l;
3797 /* FIXME: maybe we can avoid computing liveness here if it is already computed? */
3798 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3799 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3800 MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
3802 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3803 regs = mono_arch_get_global_int_regs (cfg);
3804 /* Remove the reg reserved for holding the GOT address */
3805 if (cfg->got_var) {
3806 for (l = regs; l; l = l->next) {
3807 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3808 regs = g_list_delete_link (regs, l);
3809 break;
3810 }
3811 }
3812 }
3813 MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
3814 mono_cfg_dump_ir (cfg, "linear_scan");
3818 //mono_print_code (cfg, "");
3820 //print_dfn (cfg);
3822 /* variables are allocated after decompose, since decompose could create temps */
3823 if (!COMPILE_LLVM (cfg)) {
3824 MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
3825 mono_cfg_dump_ir (cfg, "arch_allocate_vars");
3826 if (cfg->exception_type)
3827 return cfg;
3830 if (cfg->gsharedvt)
3831 mono_allocate_gsharedvt_vars (cfg);
3833 if (!COMPILE_LLVM (cfg)) {
3834 gboolean need_local_opts;
3835 MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
3836 mono_cfg_dump_ir (cfg, "spill_global_vars");
3838 if (need_local_opts || cfg->compile_aot) {
3839 /* To optimize code created by spill_global_vars */
3840 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
3841 if (cfg->opt & MONO_OPT_DEADCE)
3842 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
3843 mono_cfg_dump_ir (cfg, "needs_local_opts");
3847 mono_insert_branches_between_bblocks (cfg);
3849 if (COMPILE_LLVM (cfg)) {
3850 #ifdef ENABLE_LLVM
3851 char *nm;
3853 /* The IR has to be in SSA form for LLVM */
3854 if (!(cfg->comp_done & MONO_COMP_SSA)) {
3855 cfg->exception_message = g_strdup ("SSA disabled.");
3856 cfg->disable_llvm = TRUE;
3859 if (cfg->flags & MONO_CFG_NEEDS_DECOMPOSE)
3860 mono_decompose_array_access_opts (cfg);
3862 if (!cfg->disable_llvm)
3863 mono_llvm_emit_method (cfg);
3864 if (cfg->disable_llvm) {
3865 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3866 //nm = mono_method_full_name (cfg->method, TRUE);
3867 printf ("LLVM failed for '%s.%s': %s\n", m_class_get_name (method->klass), method->name, cfg->exception_message);
3868 //g_free (nm);
3870 if (cfg->llvm_only) {
3871 cfg->disable_aot = TRUE;
3872 return cfg;
3874 mono_destroy_compile (cfg);
3875 try_llvm = FALSE;
3876 goto restart_compile;
3879 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
3880 nm = mono_method_get_full_name (cfg->method);
3881 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3882 nm,
3883 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
3884 g_free (nm);
3886 #endif
3887 } else {
3888 MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
3889 mono_cfg_dump_ir (cfg, "codegen");
3890 if (cfg->exception_type)
3891 return cfg;
3894 if (COMPILE_LLVM (cfg))
3895 mono_atomic_inc_i32 (&mono_jit_stats.methods_with_llvm);
3896 else
3897 mono_atomic_inc_i32 (&mono_jit_stats.methods_without_llvm);
3899 MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
3901 if (cfg->extend_live_ranges) {
3902 /* Extend live ranges to cover the whole method */
3903 for (i = 0; i < cfg->num_varinfo; ++i)
3904 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
3907 MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
3908 MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg, cfg->jit_info));
3910 if (!cfg->compile_aot) {
3911 mono_save_xdebug_info (cfg);
3912 mono_lldb_save_method_info (cfg);
3915 if (cfg->verbose_level >= 2) {
3916 char *id = mono_method_full_name (cfg->method, TRUE);
3917 g_print ("\n*** ASM for %s ***\n", id);
3918 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
3919 g_print ("***\n\n");
3920 g_free (id);
3923 if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
3924 mono_domain_lock (cfg->domain);
3925 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
3927 if (cfg->method->dynamic)
3928 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
3930 mono_postprocess_patches_after_ji_publish (cfg);
3932 mono_domain_unlock (cfg->domain);
3935 #if 0
3936 if (cfg->gsharedvt)
3937 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
3938 #endif
3940 /* collect statistics */
3941 #ifndef DISABLE_PERFCOUNTERS
3942 mono_atomic_inc_i32 (&mono_perfcounters->jit_methods);
3943 mono_atomic_fetch_add_i32 (&mono_perfcounters->jit_bytes, header->code_size);
3944 #endif
3945 gint32 code_size_ratio = cfg->code_len;
3946 mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_code_size, code_size_ratio);
3947 mono_atomic_fetch_add_i32 (&mono_jit_stats.native_code_size, code_size_ratio);
3948 /* FIXME: use an explicit function to read booleans */
3949 if ((gboolean)mono_atomic_load_i32 ((gint32*)&mono_jit_stats.enabled)) {
3950 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.biggest_method_size)) {
3951 mono_atomic_store_i32 (&mono_jit_stats.biggest_method_size, code_size_ratio);
3952 char *biggest_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
3953 biggest_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.biggest_method, biggest_method);
3954 g_free (biggest_method);
3956 code_size_ratio = (code_size_ratio * 100) / header->code_size;
3957 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.max_code_size_ratio)) {
3958 mono_atomic_store_i32 (&mono_jit_stats.max_code_size_ratio, code_size_ratio);
3959 char *max_ratio_method = g_strdup_printf ("%s::%s", m_class_get_name (method->klass), method->name);
3960 max_ratio_method = (char*)mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.max_ratio_method, max_ratio_method);
3961 g_free (max_ratio_method);
3965 if (MONO_METHOD_COMPILE_END_ENABLED ())
3966 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3968 mono_cfg_dump_close_group (cfg);
3970 return cfg;
3973 gboolean
3974 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3976 int i;
3977 MonoGenericContainer *container;
3978 MonoGenericInst *ginst;
3980 if (mono_class_is_ginst (klass)) {
3981 container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
3982 ginst = mono_class_get_generic_class (klass)->context.class_inst;
3983 } else if (mono_class_is_gtd (klass) && context_used) {
3984 container = mono_class_get_generic_container (klass);
3985 ginst = container->context.class_inst;
3986 } else {
3987 return FALSE;
3990 for (i = 0; i < container->type_argc; ++i) {
3991 MonoType *type;
3992 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3993 continue;
3994 type = ginst->type_argv [i];
3995 if (mini_type_is_reference (type))
3996 return TRUE;
3998 return FALSE;
4001 void
4002 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
4004 TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
4005 hole->clause = clause;
4006 hole->start_offset = start - cfg->native_code;
4007 hole->basic_block = bb;
4009 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
4010 }
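/*
 * Illustrative sketch, not part of the build: a hypothetical back-end call
 * site that records a range of generated code to be excluded from a clause's
 * try range. The clause index, the `code` pointer and the `bb` variable are
 * placeholders, not the actual call sites used by the back ends.
 */
#if 0
{
	MonoExceptionClause *clause = &cfg->header->clauses [0]; /* placeholder clause */
	guint8 *hole_start = code; /* first byte of the code to exclude */

	/* ... emit the code that must not be covered by the clause's try range ... */

	mono_cfg_add_try_hole (cfg, clause, hole_start, bb);
}
#endif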
4012 void
4013 mono_cfg_set_exception (MonoCompile *cfg, MonoExceptionType type)
4015 cfg->exception_type = type;
4018 /* Assumes ownership of the MSG argument */
4019 void
4020 mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
4022 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
4023 mono_error_set_generic_error (cfg->error, "System", "InvalidProgramException", "%s", msg);
4024 }
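/*
 * Illustrative sketch, not part of the build: per the comment above, the
 * callee assumes ownership of the message, so the caller passes a freshly
 * allocated string and must not free or reuse it afterwards. The `method`
 * variable and the message text are placeholders.
 */
#if 0
{
	char *msg = g_strdup_printf ("Invalid IL in method %s", method->name);
	mono_cfg_set_exception_invalid_program (cfg, msg);
	/* msg must not be freed or touched here; ownership was transferred. */
}
#endif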
4026 #endif /* DISABLE_JIT */
4028 gint64 mono_time_track_start ()
4029 {
4030 return mono_100ns_ticks ();
4031 }
4033 /**
4034 * mono_time_track_end:
4035 *
4036 * Uses UnlockedAdd64 () to update \param time.
4037 */
4038 void mono_time_track_end (gint64 *time, gint64 start)
4039 {
4040 UnlockedAdd64 (time, mono_100ns_ticks () - start);
4041 }
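/*
 * Illustrative sketch, not part of the build: the usual pattern pairs
 * mono_time_track_start () with mono_time_track_end () around the work being
 * measured, which is essentially what the MONO_TIME_TRACK macro used
 * throughout this file does. The counter and the call to mono_local_cprop ()
 * are only examples.
 */
#if 0
static void
example_timed_local_cprop (MonoCompile *cfg)
{
	gint64 start = mono_time_track_start ();

	mono_local_cprop (cfg); /* the work being timed */

	/* Accumulate the elapsed 100ns ticks into the chosen JIT counter. */
	mono_time_track_end (&mono_jit_stats.jit_local_cprop, start);
}
#endif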
4044 * mono_update_jit_stats:
4046 * Only call this function in locked environments to avoid data races.
4048 MONO_NO_SANITIZE_THREAD
4049 void
4050 mono_update_jit_stats (MonoCompile *cfg)
4052 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4053 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4054 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4055 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4056 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4057 mono_jit_stats.regvars += cfg->stat_n_regvars;
4058 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4059 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4060 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
4061 }
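/*
 * Illustrative sketch, not part of the build: as the comment above says, the
 * caller must hold a lock. In this file the update is performed while the
 * target domain lock is held (see mono_jit_compile_method_inner below);
 * `target_domain` and `cfg` stand in for the caller's locals.
 */
#if 0
mono_domain_lock (target_domain);
mono_update_jit_stats (cfg);
mono_domain_unlock (target_domain);
#endif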
4064 * mono_jit_compile_method_inner:
4066 * Main entry point for the JIT.
4068 gpointer
4069 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoError *error)
4071 MonoCompile *cfg;
4072 gpointer code = NULL;
4073 MonoJitInfo *jinfo, *info;
4074 MonoVTable *vtable;
4075 MonoException *ex = NULL;
4076 gint64 start;
4077 MonoMethod *prof_method, *shared;
4079 error_init (error);
4081 start = mono_time_track_start ();
4082 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
4083 gint64 jit_time = 0;
4084 mono_time_track_end (&jit_time, start);
4085 UnlockedAdd64 (&mono_jit_stats.jit_time, jit_time);
4087 prof_method = cfg->method;
4089 switch (cfg->exception_type) {
4090 case MONO_EXCEPTION_NONE:
4091 break;
4092 case MONO_EXCEPTION_TYPE_LOAD:
4093 case MONO_EXCEPTION_MISSING_FIELD:
4094 case MONO_EXCEPTION_MISSING_METHOD:
4095 case MONO_EXCEPTION_FILE_NOT_FOUND:
4096 case MONO_EXCEPTION_BAD_IMAGE:
4097 case MONO_EXCEPTION_INVALID_PROGRAM: {
4098 /* Throw a type load exception if needed */
4099 if (cfg->exception_ptr) {
4100 ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
4101 } else {
4102 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4103 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4104 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4105 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4106 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4107 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4108 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4109 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
4110 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4111 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4112 else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
4113 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
4114 else
4115 g_assert_not_reached ();
4117 break;
4119 case MONO_EXCEPTION_MONO_ERROR:
4120 // FIXME: MonoError has no copy ctor
4121 g_assert (!is_ok (cfg->error));
4122 ex = mono_error_convert_to_exception (cfg->error);
4123 break;
4124 default:
4125 g_assert_not_reached ();
4128 if (ex) {
4129 MONO_PROFILER_RAISE (jit_failed, (method));
4131 mono_destroy_compile (cfg);
4132 mono_error_set_exception_instance (error, ex);
4134 return NULL;
4137 if (mono_method_is_generic_sharable (method, FALSE)) {
4138 shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
4139 if (!is_ok (error)) {
4140 MONO_PROFILER_RAISE (jit_failed, (method));
4141 mono_destroy_compile (cfg);
4142 return NULL;
4144 } else {
4145 shared = NULL;
4148 mono_domain_lock (target_domain);
4150 if (mono_stats_method_desc && mono_method_desc_full_match (mono_stats_method_desc, method)) {
4151 g_printf ("Printing runtime stats at method: %s\n", mono_method_get_full_name (method));
4152 mono_runtime_print_stats ();
4155 /* Check if some other thread already did the job. In this case, we can
4156 discard the code this thread generated. */
4158 info = mini_lookup_method (target_domain, method, shared);
4159 if (info) {
4160 /* We can't use a domain specific method in another domain */
4161 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4162 code = info->code_start;
4163 discarded_code ++;
4164 discarded_jit_time += jit_time;
4167 if (code == NULL) {
4168 /* The lookup + insert is atomic since this is done inside the domain lock */
4169 mono_domain_jit_code_hash_lock (target_domain);
4170 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4171 mono_domain_jit_code_hash_unlock (target_domain);
4173 code = cfg->native_code;
4175 if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
4176 mono_atomic_inc_i32 (&mono_stats.generics_shared_methods);
4177 if (cfg->gsharedvt)
4178 mono_atomic_inc_i32 (&mono_stats.gsharedvt_methods);
4181 jinfo = cfg->jit_info;
4184 * Update global stats while holding a lock, instead of doing many
4185 * mono_atomic_inc_i32 operations during JITting.
4187 mono_update_jit_stats (cfg);
4189 mono_destroy_compile (cfg);
4191 mini_patch_llvm_jit_callees (target_domain, method, code);
4192 #ifndef DISABLE_JIT
4193 mono_emit_jit_map (jinfo);
4194 mono_emit_jit_dump (jinfo, code);
4195 #endif
4196 mono_domain_unlock (target_domain);
4198 if (!is_ok (error))
4199 return NULL;
4201 vtable = mono_class_vtable_checked (target_domain, method->klass, error);
4202 return_val_if_nok (error, NULL);
4204 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4205 if (mono_marshal_method_from_wrapper (method)) {
4206 /* Native func wrappers have no method */
4207 /* The profiler doesn't know about wrappers, so pass the original icall method */
4208 MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
4211 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4212 if (prof_method != method)
4213 MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
4215 if (!(method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE ||
4216 method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK ||
4217 method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE)) {
4218 if (!mono_runtime_class_init_full (vtable, error))
4219 return NULL;
4221 return MINI_ADDR_TO_FTNPTR (code);
4225 * mini_get_underlying_type:
4227 * Return the type the JIT will use during compilation.
4228 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4229 * For gsharedvt types, it will return the original VAR/MVAR.
4231 MonoType*
4232 mini_get_underlying_type (MonoType *type)
4234 return mini_type_get_underlying_type (type);
4237 void
4238 mini_jit_init (void)
4240 mono_os_mutex_init_recursive (&jit_mutex);
4242 #ifndef DISABLE_JIT
4243 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
4244 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &discarded_jit_time);
4245 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &jinfo_try_holes_size);
4247 mono_counters_register ("JIT/method_to_ir", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_method_to_ir);
4248 mono_counters_register ("JIT/liveness_handle_exception_clauses", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses);
4249 mono_counters_register ("JIT/handle_out_of_line_bblock", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_out_of_line_bblock);
4250 mono_counters_register ("JIT/decompose_long_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_long_opts);
4251 mono_counters_register ("JIT/decompose_typechecks", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_typechecks);
4252 mono_counters_register ("JIT/local_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop);
4253 mono_counters_register ("JIT/local_emulate_ops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_emulate_ops);
4254 mono_counters_register ("JIT/optimize_branches", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches);
4255 mono_counters_register ("JIT/handle_global_vregs", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs);
4256 mono_counters_register ("JIT/local_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce);
4257 mono_counters_register ("JIT/local_alias_analysis", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_alias_analysis);
4258 mono_counters_register ("JIT/if_conversion", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_if_conversion);
4259 mono_counters_register ("JIT/bb_ordering", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_bb_ordering);
4260 mono_counters_register ("JIT/compile_dominator_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compile_dominator_info);
4261 mono_counters_register ("JIT/compute_natural_loops", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_compute_natural_loops);
4262 mono_counters_register ("JIT/insert_safepoints", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_insert_safepoints);
4263 mono_counters_register ("JIT/ssa_compute", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_compute);
4264 mono_counters_register ("JIT/ssa_cprop", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_cprop);
4265 mono_counters_register ("JIT/ssa_deadce", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_deadce);
4266 mono_counters_register ("JIT/perform_abc_removal", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_perform_abc_removal);
4267 mono_counters_register ("JIT/ssa_remove", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_ssa_remove);
4268 mono_counters_register ("JIT/local_cprop2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop2);
4269 mono_counters_register ("JIT/handle_global_vregs2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_handle_global_vregs2);
4270 mono_counters_register ("JIT/local_deadce2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce2);
4271 mono_counters_register ("JIT/optimize_branches2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_optimize_branches2);
4272 mono_counters_register ("JIT/decompose_vtype_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_vtype_opts);
4273 mono_counters_register ("JIT/decompose_array_access_opts", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_decompose_array_access_opts);
4274 mono_counters_register ("JIT/liveness_handle_exception_clauses2", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_liveness_handle_exception_clauses2);
4275 mono_counters_register ("JIT/analyze_liveness", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_analyze_liveness);
4276 mono_counters_register ("JIT/linear_scan", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_linear_scan);
4277 mono_counters_register ("JIT/arch_allocate_vars", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_arch_allocate_vars);
4278 mono_counters_register ("JIT/spill_global_var", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_spill_global_vars);
4279 mono_counters_register ("JIT/local_cprop3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_cprop3);
4280 mono_counters_register ("JIT/local_deadce3", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_local_deadce3);
4281 mono_counters_register ("JIT/codegen", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_codegen);
4282 mono_counters_register ("JIT/create_jit_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_create_jit_info);
4283 mono_counters_register ("JIT/gc_create_gc_map", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_gc_create_gc_map);
4284 mono_counters_register ("JIT/save_seq_point_info", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_save_seq_point_info);
4285 mono_counters_register ("Total time spent JITting", MONO_COUNTER_JIT | MONO_COUNTER_LONG | MONO_COUNTER_TIME, &mono_jit_stats.jit_time);
4286 mono_counters_register ("Basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.basic_blocks);
4287 mono_counters_register ("Max basic blocks", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.max_basic_blocks);
4288 mono_counters_register ("Allocated vars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocate_var);
4289 mono_counters_register ("Code reallocs", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.code_reallocs);
4290 mono_counters_register ("Allocated code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_code_size);
4291 mono_counters_register ("Allocated seq points size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.allocated_seq_points_size);
4292 mono_counters_register ("Inlineable methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlineable_methods);
4293 mono_counters_register ("Inlined methods", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.inlined_methods);
4294 mono_counters_register ("Regvars", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.regvars);
4295 mono_counters_register ("Locals stack size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.locals_stack_size);
4296 mono_counters_register ("Method cache lookups", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.methods_lookups);
4297 mono_counters_register ("Compiled CIL code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.cil_code_size);
4298 mono_counters_register ("Native code size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.native_code_size);
4299 mono_counters_register ("Aliases found", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_found);
4300 mono_counters_register ("Aliases eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.alias_removed);
4301 mono_counters_register ("Aliased loads eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.loads_eliminated);
4302 mono_counters_register ("Aliased stores eliminated", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.stores_eliminated);
4303 mono_counters_register ("Optimized immediate divisions", MONO_COUNTER_JIT | MONO_COUNTER_INT, &mono_jit_stats.optimized_divisions);
4304 current_backend = g_new0 (MonoBackend, 1);
4305 init_backend (current_backend);
4306 #endif
4309 void
4310 mini_jit_cleanup (void)
4312 #ifndef DISABLE_JIT
4313 g_free (emul_opcode_map);
4314 g_free (emul_opcode_opcodes);
4315 #endif
4318 #ifndef ENABLE_LLVM
4319 void
4320 mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
4322 g_assert_not_reached ();
4325 gpointer
4326 mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
4328 g_assert_not_reached ();
4331 gpointer
4332 mono_llvm_emit_aot_data_aligned (const char *symbol, guint8 *data, int data_len, int align)
4334 g_assert_not_reached ();
4337 #endif
4339 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
4341 void
4342 mono_llvm_cpp_throw_exception (void)
4344 g_assert_not_reached ();
4347 void
4348 mono_llvm_cpp_catch_exception (MonoLLVMInvokeCallback cb, gpointer arg, gboolean *out_thrown)
4350 g_assert_not_reached ();
4353 #endif
4355 #ifdef DISABLE_JIT
4357 MonoCompile*
4358 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
4360 g_assert_not_reached ();
4361 return NULL;
4364 void
4365 mono_destroy_compile (MonoCompile *cfg)
4367 g_assert_not_reached ();
4370 void
4371 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
4373 g_assert_not_reached ();
4376 #else // DISABLE_JIT
4378 guint8*
4379 mini_realloc_code_slow (MonoCompile *cfg, int size)
4381 const int EXTRA_CODE_SPACE = 16;
4383 if (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE)) {
4384 while (cfg->code_len + size > (cfg->code_size - EXTRA_CODE_SPACE))
4385 cfg->code_size = cfg->code_size * 2 + EXTRA_CODE_SPACE;
4386 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4387 cfg->stat_code_reallocs++;
4389 return cfg->native_code + cfg->code_len;
4390 }
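/*
 * Illustrative sketch, not part of the build: a hypothetical emit helper that
 * asks for at least `size` bytes of code space before writing. The 0x90
 * placeholder byte and the helper itself are examples, not the inline fast
 * path the back ends actually use.
 */
#if 0
static void
example_emit_byte (MonoCompile *cfg)
{
	const int size = 1;
	/* Returns a write pointer with at least `size` bytes available,
	 * growing cfg->native_code if necessary. */
	guint8 *code = mini_realloc_code_slow (cfg, size);

	*code++ = 0x90; /* placeholder byte */
	cfg->code_len = code - cfg->native_code;
}
#endif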
4392 #endif /* DISABLE_JIT */
4394 gboolean
4395 mini_class_is_system_array (MonoClass *klass)
4397 return m_class_get_parent (klass) == mono_defaults.array_class;
4398 }
4400 /*
4401 * mono_target_pagesize:
4402 *
4403 * Query the page size used to decide whether an implicit null-reference check (NRE) can be used.
4404 */
4405 int
4406 mono_target_pagesize (void)
4407 {
4408 /* We could query the system's pagesize via mono_pagesize (), however there
4409 * are pitfalls: sysconf (3) is called on some posix like systems, and per
4410 * POSIX.1-2008 this function doesn't have to be async-safe. Since this
4411 * function can be called from a signal handler, we simplify things by
4412 * using 4k on all targets. Implicit null-checks with an offset larger than
4413 * 4k are _very_ uncommon, so we don't mind emitting an explicit null-check
4414 * for those cases.
4416 return 4 * 1024;
4417 }
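/*
 * Illustrative sketch, not part of the build: how a back end could use
 * mono_target_pagesize () to decide between an implicit (fault-based) null
 * check and an explicit one. The helper name is hypothetical.
 */
#if 0
static gboolean
example_can_use_implicit_null_check (int offset)
{
	/* A load at [reg + offset] is only guaranteed to fault for a NULL reg
	 * if the access stays inside the first unmapped page. */
	return offset >= 0 && offset < mono_target_pagesize ();
}
#endif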
4419 MonoCPUFeatures
4420 mini_get_cpu_features (MonoCompile* cfg)
4422 MonoCPUFeatures features = (MonoCPUFeatures)0;
4423 #if !defined(MONO_CROSS_COMPILE)
4424 if (!cfg->compile_aot || cfg->use_current_cpu) {
4425 // detect current CPU features if we are in JIT mode, or in AOT mode with the use_current_cpu flag.
4426 #if defined(ENABLE_LLVM)
4427 features = mono_llvm_get_cpu_features (); // llvm has a nice built-in API to detect features
4428 #elif defined(TARGET_AMD64) || defined(TARGET_X86)
4429 features = mono_arch_get_cpu_features ();
4430 #endif
4432 #endif
4434 #if defined(TARGET_ARM64)
4435 // All Arm64 devices have this set
4436 features |= MONO_CPU_ARM64_BASE;
4437 #endif
4439 // apply parameters passed via -mattr
4440 return (features | mono_cpu_features_enabled) & ~mono_cpu_features_disabled;