[mini] Use MonoClass getters in a few files (#7771)
[mono-project.git] / mono / mini / mini.c
blobd539021d7c8f9902506c3d8e74db4e6ba620d722
/**
 * \file
 * The new Mono code generator.
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * Copyright 2002-2003 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
15 #include <config.h>
16 #ifdef HAVE_ALLOCA_H
17 #include <alloca.h>
18 #endif
19 #ifdef HAVE_UNISTD_H
20 #include <unistd.h>
21 #endif
22 #include <math.h>
23 #ifdef HAVE_SYS_TIME_H
24 #include <sys/time.h>
25 #endif
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
65 #include "mini.h"
66 #include "seq-points.h"
67 #include "tasklets.h"
68 #include <string.h>
69 #include <ctype.h>
70 #include "trace.h"
71 #include "version.h"
72 #include "ir-emit.h"
74 #include "jit-icalls.h"
76 #include "mini-gc.h"
77 #include "debugger-agent.h"
78 #include "llvm-runtime.h"
79 #include "mini-llvm.h"
80 #include "lldb.h"
81 #include "aot-runtime.h"
82 #include "mini-runtime.h"
84 MonoCallSpec *mono_jit_trace_calls;
85 MonoMethodDesc *mono_inject_async_exc_method;
86 int mono_inject_async_exc_pos;
87 MonoMethodDesc *mono_break_at_bb_method;
88 int mono_break_at_bb_bb_num;
89 gboolean mono_do_x86_stack_align = TRUE;
90 gboolean mono_using_xdebug;
92 /* Counters */
93 static guint32 discarded_code;
94 static double discarded_jit_time;
95 static guint32 jinfo_try_holes_size;
97 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
98 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
99 static mono_mutex_t jit_mutex;
101 static MonoBackend *current_backend;
103 #ifndef DISABLE_JIT
105 gpointer
106 mono_realloc_native_code (MonoCompile *cfg)
108 return g_realloc (cfg->native_code, cfg->code_size);
111 typedef struct {
112 MonoExceptionClause *clause;
113 MonoBasicBlock *basic_block;
114 int start_offset;
115 } TryBlockHole;
118 * mono_emit_unwind_op:
120 * Add an unwind op with the given parameters for the list of unwind ops stored in
121 * cfg->unwind_ops.
123 void
124 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
126 MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
128 op->op = tag;
129 op->reg = reg;
130 op->val = val;
131 op->when = when;
133 cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
134 if (cfg->verbose_level > 1) {
135 switch (tag) {
136 case DW_CFA_def_cfa:
137 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
138 break;
139 case DW_CFA_def_cfa_register:
140 printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
141 break;
142 case DW_CFA_def_cfa_offset:
143 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
144 break;
145 case DW_CFA_offset:
146 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
147 break;
153 * mono_unlink_bblock:
155 * Unlink two basic blocks.
157 void
158 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
160 int i, pos;
161 gboolean found;
163 found = FALSE;
164 for (i = 0; i < from->out_count; ++i) {
165 if (to == from->out_bb [i]) {
166 found = TRUE;
167 break;
170 if (found) {
171 pos = 0;
172 for (i = 0; i < from->out_count; ++i) {
173 if (from->out_bb [i] != to)
174 from->out_bb [pos ++] = from->out_bb [i];
176 g_assert (pos == from->out_count - 1);
177 from->out_count--;
180 found = FALSE;
181 for (i = 0; i < to->in_count; ++i) {
182 if (from == to->in_bb [i]) {
183 found = TRUE;
184 break;
187 if (found) {
188 pos = 0;
189 for (i = 0; i < to->in_count; ++i) {
190 if (to->in_bb [i] != from)
191 to->in_bb [pos ++] = to->in_bb [i];
193 g_assert (pos == to->in_count - 1);
194 to->in_count--;
199 * mono_bblocks_linked:
201 * Return whenever BB1 and BB2 are linked in the CFG.
203 gboolean
204 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
206 int i;
208 for (i = 0; i < bb1->out_count; ++i) {
209 if (bb1->out_bb [i] == bb2)
210 return TRUE;
213 return FALSE;
216 static int
217 mono_find_block_region_notry (MonoCompile *cfg, int offset)
219 MonoMethodHeader *header = cfg->header;
220 MonoExceptionClause *clause;
221 int i;
223 for (i = 0; i < header->num_clauses; ++i) {
224 clause = &header->clauses [i];
225 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
226 (offset < (clause->handler_offset)))
227 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
229 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
230 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
231 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
232 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
233 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
234 else
235 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
239 return -1;
243 * mono_get_block_region_notry:
245 * Return the region corresponding to REGION, ignoring try clauses nested inside
246 * finally clauses.
249 mono_get_block_region_notry (MonoCompile *cfg, int region)
251 if ((region & (0xf << 4)) == MONO_REGION_TRY) {
252 MonoMethodHeader *header = cfg->header;
255 * This can happen if a try clause is nested inside a finally clause.
257 int clause_index = (region >> 8) - 1;
258 g_assert (clause_index >= 0 && clause_index < header->num_clauses);
260 region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
263 return region;
266 MonoInst *
267 mono_find_spvar_for_region (MonoCompile *cfg, int region)
269 region = mono_get_block_region_notry (cfg, region);
271 return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
274 static void
275 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
277 int i;
279 array [*dfn] = start;
280 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
281 for (i = 0; i < start->out_count; ++i) {
282 if (start->out_bb [i]->dfn)
283 continue;
284 (*dfn)++;
285 start->out_bb [i]->dfn = *dfn;
286 start->out_bb [i]->df_parent = start;
287 array [*dfn] = start->out_bb [i];
288 df_visit (start->out_bb [i], dfn, array);
292 guint32
293 mono_reverse_branch_op (guint32 opcode)
295 static const int reverse_map [] = {
296 CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
297 CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
299 static const int reverse_fmap [] = {
300 OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
301 OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
303 static const int reverse_lmap [] = {
304 OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
305 OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
307 static const int reverse_imap [] = {
308 OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
309 OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
312 if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
313 opcode = reverse_map [opcode - CEE_BEQ];
314 } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
315 opcode = reverse_fmap [opcode - OP_FBEQ];
316 } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
317 opcode = reverse_lmap [opcode - OP_LBEQ];
318 } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
319 opcode = reverse_imap [opcode - OP_IBEQ];
320 } else
321 g_assert_not_reached ();
323 return opcode;
326 guint
327 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
329 type = mini_get_underlying_type (type);
331 handle_enum:
332 switch (type->type) {
333 case MONO_TYPE_I1:
334 case MONO_TYPE_U1:
335 return OP_STOREI1_MEMBASE_REG;
336 case MONO_TYPE_I2:
337 case MONO_TYPE_U2:
338 return OP_STOREI2_MEMBASE_REG;
339 case MONO_TYPE_I4:
340 case MONO_TYPE_U4:
341 return OP_STOREI4_MEMBASE_REG;
342 case MONO_TYPE_I:
343 case MONO_TYPE_U:
344 case MONO_TYPE_PTR:
345 case MONO_TYPE_FNPTR:
346 return OP_STORE_MEMBASE_REG;
347 case MONO_TYPE_CLASS:
348 case MONO_TYPE_STRING:
349 case MONO_TYPE_OBJECT:
350 case MONO_TYPE_SZARRAY:
351 case MONO_TYPE_ARRAY:
352 return OP_STORE_MEMBASE_REG;
353 case MONO_TYPE_I8:
354 case MONO_TYPE_U8:
355 return OP_STOREI8_MEMBASE_REG;
356 case MONO_TYPE_R4:
357 return OP_STORER4_MEMBASE_REG;
358 case MONO_TYPE_R8:
359 return OP_STORER8_MEMBASE_REG;
360 case MONO_TYPE_VALUETYPE:
361 if (m_class_is_enumtype (type->data.klass)) {
362 type = mono_class_enum_basetype (type->data.klass);
363 goto handle_enum;
365 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
366 return OP_STOREX_MEMBASE;
367 return OP_STOREV_MEMBASE;
368 case MONO_TYPE_TYPEDBYREF:
369 return OP_STOREV_MEMBASE;
370 case MONO_TYPE_GENERICINST:
371 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
372 return OP_STOREX_MEMBASE;
373 type = m_class_get_byval_arg (type->data.generic_class->container_class);
374 goto handle_enum;
375 case MONO_TYPE_VAR:
376 case MONO_TYPE_MVAR:
377 g_assert (mini_type_var_is_vt (type));
378 return OP_STOREV_MEMBASE;
379 default:
380 g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
382 return -1;
385 guint
386 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
388 type = mini_get_underlying_type (type);
390 switch (type->type) {
391 case MONO_TYPE_I1:
392 return OP_LOADI1_MEMBASE;
393 case MONO_TYPE_U1:
394 return OP_LOADU1_MEMBASE;
395 case MONO_TYPE_I2:
396 return OP_LOADI2_MEMBASE;
397 case MONO_TYPE_U2:
398 return OP_LOADU2_MEMBASE;
399 case MONO_TYPE_I4:
400 return OP_LOADI4_MEMBASE;
401 case MONO_TYPE_U4:
402 return OP_LOADU4_MEMBASE;
403 case MONO_TYPE_I:
404 case MONO_TYPE_U:
405 case MONO_TYPE_PTR:
406 case MONO_TYPE_FNPTR:
407 return OP_LOAD_MEMBASE;
408 case MONO_TYPE_CLASS:
409 case MONO_TYPE_STRING:
410 case MONO_TYPE_OBJECT:
411 case MONO_TYPE_SZARRAY:
412 case MONO_TYPE_ARRAY:
413 return OP_LOAD_MEMBASE;
414 case MONO_TYPE_I8:
415 case MONO_TYPE_U8:
416 return OP_LOADI8_MEMBASE;
417 case MONO_TYPE_R4:
418 return OP_LOADR4_MEMBASE;
419 case MONO_TYPE_R8:
420 return OP_LOADR8_MEMBASE;
421 case MONO_TYPE_VALUETYPE:
422 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
423 return OP_LOADX_MEMBASE;
424 case MONO_TYPE_TYPEDBYREF:
425 return OP_LOADV_MEMBASE;
426 case MONO_TYPE_GENERICINST:
427 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
428 return OP_LOADX_MEMBASE;
429 if (mono_type_generic_inst_is_valuetype (type))
430 return OP_LOADV_MEMBASE;
431 else
432 return OP_LOAD_MEMBASE;
433 break;
434 case MONO_TYPE_VAR:
435 case MONO_TYPE_MVAR:
436 g_assert (cfg->gshared);
437 g_assert (mini_type_var_is_vt (type));
438 return OP_LOADV_MEMBASE;
439 default:
440 g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
442 return -1;
445 guint
446 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
448 type = mini_get_underlying_type (type);
449 if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
450 g_assert (mini_type_var_is_vt (type));
451 return CEE_STOBJ;
453 return mono_type_to_stind (type);
457 mono_op_imm_to_op (int opcode)
459 switch (opcode) {
460 case OP_ADD_IMM:
461 #if SIZEOF_REGISTER == 4
462 return OP_IADD;
463 #else
464 return OP_LADD;
465 #endif
466 case OP_IADD_IMM:
467 return OP_IADD;
468 case OP_LADD_IMM:
469 return OP_LADD;
470 case OP_ISUB_IMM:
471 return OP_ISUB;
472 case OP_LSUB_IMM:
473 return OP_LSUB;
474 case OP_IMUL_IMM:
475 return OP_IMUL;
476 case OP_LMUL_IMM:
477 return OP_LMUL;
478 case OP_AND_IMM:
479 #if SIZEOF_REGISTER == 4
480 return OP_IAND;
481 #else
482 return OP_LAND;
483 #endif
484 case OP_OR_IMM:
485 #if SIZEOF_REGISTER == 4
486 return OP_IOR;
487 #else
488 return OP_LOR;
489 #endif
490 case OP_XOR_IMM:
491 #if SIZEOF_REGISTER == 4
492 return OP_IXOR;
493 #else
494 return OP_LXOR;
495 #endif
496 case OP_IAND_IMM:
497 return OP_IAND;
498 case OP_LAND_IMM:
499 return OP_LAND;
500 case OP_IOR_IMM:
501 return OP_IOR;
502 case OP_LOR_IMM:
503 return OP_LOR;
504 case OP_IXOR_IMM:
505 return OP_IXOR;
506 case OP_LXOR_IMM:
507 return OP_LXOR;
508 case OP_ISHL_IMM:
509 return OP_ISHL;
510 case OP_LSHL_IMM:
511 return OP_LSHL;
512 case OP_ISHR_IMM:
513 return OP_ISHR;
514 case OP_LSHR_IMM:
515 return OP_LSHR;
516 case OP_ISHR_UN_IMM:
517 return OP_ISHR_UN;
518 case OP_LSHR_UN_IMM:
519 return OP_LSHR_UN;
520 case OP_IDIV_IMM:
521 return OP_IDIV;
522 case OP_LDIV_IMM:
523 return OP_LDIV;
524 case OP_IDIV_UN_IMM:
525 return OP_IDIV_UN;
526 case OP_LDIV_UN_IMM:
527 return OP_LDIV_UN;
528 case OP_IREM_UN_IMM:
529 return OP_IREM_UN;
530 case OP_LREM_UN_IMM:
531 return OP_LREM_UN;
532 case OP_IREM_IMM:
533 return OP_IREM;
534 case OP_LREM_IMM:
535 return OP_LREM;
536 case OP_DIV_IMM:
537 #if SIZEOF_REGISTER == 4
538 return OP_IDIV;
539 #else
540 return OP_LDIV;
541 #endif
542 case OP_REM_IMM:
543 #if SIZEOF_REGISTER == 4
544 return OP_IREM;
545 #else
546 return OP_LREM;
547 #endif
548 case OP_ADDCC_IMM:
549 return OP_ADDCC;
550 case OP_ADC_IMM:
551 return OP_ADC;
552 case OP_SUBCC_IMM:
553 return OP_SUBCC;
554 case OP_SBB_IMM:
555 return OP_SBB;
556 case OP_IADC_IMM:
557 return OP_IADC;
558 case OP_ISBB_IMM:
559 return OP_ISBB;
560 case OP_COMPARE_IMM:
561 return OP_COMPARE;
562 case OP_ICOMPARE_IMM:
563 return OP_ICOMPARE;
564 case OP_LOCALLOC_IMM:
565 return OP_LOCALLOC;
568 return -1;
572 * mono_decompose_op_imm:
574 * Replace the OP_.._IMM INS with its non IMM variant.
576 void
577 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
579 int opcode2 = mono_op_imm_to_op (ins->opcode);
580 MonoInst *temp;
581 guint32 dreg;
582 const char *spec = INS_INFO (ins->opcode);
584 if (spec [MONO_INST_SRC2] == 'l') {
585 dreg = mono_alloc_lreg (cfg);
587 /* Load the 64bit constant using decomposed ops */
588 MONO_INST_NEW (cfg, temp, OP_ICONST);
589 temp->inst_c0 = ins->inst_ls_word;
590 temp->dreg = MONO_LVREG_LS (dreg);
591 mono_bblock_insert_before_ins (bb, ins, temp);
593 MONO_INST_NEW (cfg, temp, OP_ICONST);
594 temp->inst_c0 = ins->inst_ms_word;
595 temp->dreg = MONO_LVREG_MS (dreg);
596 } else {
597 dreg = mono_alloc_ireg (cfg);
599 MONO_INST_NEW (cfg, temp, OP_ICONST);
600 temp->inst_c0 = ins->inst_imm;
601 temp->dreg = dreg;
604 mono_bblock_insert_before_ins (bb, ins, temp);
606 if (opcode2 == -1)
607 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
608 ins->opcode = opcode2;
610 if (ins->opcode == OP_LOCALLOC)
611 ins->sreg1 = dreg;
612 else
613 ins->sreg2 = dreg;
615 bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
618 static void
619 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
621 if (vreg >= cfg->vreg_to_inst_len) {
622 MonoInst **tmp = cfg->vreg_to_inst;
623 int size = cfg->vreg_to_inst_len;
625 while (vreg >= cfg->vreg_to_inst_len)
626 cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
627 cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
628 if (size)
629 memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
631 cfg->vreg_to_inst [vreg] = inst;
/* Non-byref 64-bit integer / floating point type predicates used by var creation. */
#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
637 MonoInst*
638 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
640 MonoInst *inst;
641 int num = cfg->num_varinfo;
642 gboolean regpair;
644 type = mini_get_underlying_type (type);
646 if ((num + 1) >= cfg->varinfo_count) {
647 int orig_count = cfg->varinfo_count;
648 cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
649 cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
650 cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
651 memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
654 cfg->stat_allocate_var++;
656 MONO_INST_NEW (cfg, inst, opcode);
657 inst->inst_c0 = num;
658 inst->inst_vtype = type;
659 inst->klass = mono_class_from_mono_type (type);
660 mini_type_to_eval_stack_type (cfg, type, inst);
661 /* if set to 1 the variable is native */
662 inst->backend.is_pinvoke = 0;
663 inst->dreg = vreg;
665 if (mono_class_has_failure (inst->klass))
666 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
668 if (cfg->compute_gc_maps) {
669 if (type->byref) {
670 mono_mark_vreg_as_mp (cfg, vreg);
671 } else {
672 if ((MONO_TYPE_ISSTRUCT (type) && m_class_has_references (inst->klass)) || mini_type_is_reference (type)) {
673 inst->flags |= MONO_INST_GC_TRACK;
674 mono_mark_vreg_as_ref (cfg, vreg);
679 cfg->varinfo [num] = inst;
681 cfg->vars [num].idx = num;
682 cfg->vars [num].vreg = vreg;
683 cfg->vars [num].range.first_use.pos.bid = 0xffff;
684 cfg->vars [num].reg = -1;
686 if (vreg != -1)
687 set_vreg_to_inst (cfg, vreg, inst);
689 #if SIZEOF_REGISTER == 4
690 if (mono_arch_is_soft_float ()) {
691 regpair = mono_type_is_long (type) || mono_type_is_float (type);
692 } else {
693 regpair = mono_type_is_long (type);
695 #else
696 regpair = FALSE;
697 #endif
699 if (regpair) {
700 MonoInst *tree;
703 * These two cannot be allocated using create_var_for_vreg since that would
704 * put it into the cfg->varinfo array, confusing many parts of the JIT.
708 * Set flags to VOLATILE so SSA skips it.
711 if (cfg->verbose_level >= 4) {
712 printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
715 if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
716 if (mono_type_is_float (type))
717 inst->flags = MONO_INST_VOLATILE;
720 /* Allocate a dummy MonoInst for the first vreg */
721 MONO_INST_NEW (cfg, tree, OP_LOCAL);
722 tree->dreg = MONO_LVREG_LS (inst->dreg);
723 if (cfg->opt & MONO_OPT_SSA)
724 tree->flags = MONO_INST_VOLATILE;
725 tree->inst_c0 = num;
726 tree->type = STACK_I4;
727 tree->inst_vtype = m_class_get_byval_arg (mono_defaults.int32_class);
728 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
730 set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
732 /* Allocate a dummy MonoInst for the second vreg */
733 MONO_INST_NEW (cfg, tree, OP_LOCAL);
734 tree->dreg = MONO_LVREG_MS (inst->dreg);
735 if (cfg->opt & MONO_OPT_SSA)
736 tree->flags = MONO_INST_VOLATILE;
737 tree->inst_c0 = num;
738 tree->type = STACK_I4;
739 tree->inst_vtype = m_class_get_byval_arg (mono_defaults.int32_class);
740 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
742 set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
745 cfg->num_varinfo++;
746 if (cfg->verbose_level > 2)
747 g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
748 return inst;
751 MonoInst*
752 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
754 int dreg;
755 type = mini_get_underlying_type (type);
757 if (mono_type_is_long (type))
758 dreg = mono_alloc_dreg (cfg, STACK_I8);
759 else if (mono_arch_is_soft_float () && mono_type_is_float (type))
760 dreg = mono_alloc_dreg (cfg, STACK_R8);
761 else
762 /* All the others are unified */
763 dreg = mono_alloc_preg (cfg);
765 return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
768 MonoInst*
769 mini_get_int_to_float_spill_area (MonoCompile *cfg)
771 #ifdef TARGET_X86
772 if (!cfg->iconv_raw_var) {
773 cfg->iconv_raw_var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);
774 cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
776 return cfg->iconv_raw_var;
777 #else
778 return NULL;
779 #endif
782 void
783 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
785 if (vreg >= cfg->vreg_is_ref_len) {
786 gboolean *tmp = cfg->vreg_is_ref;
787 int size = cfg->vreg_is_ref_len;
789 while (vreg >= cfg->vreg_is_ref_len)
790 cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
791 cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
792 if (size)
793 memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
795 cfg->vreg_is_ref [vreg] = TRUE;
798 void
799 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
801 if (vreg >= cfg->vreg_is_mp_len) {
802 gboolean *tmp = cfg->vreg_is_mp;
803 int size = cfg->vreg_is_mp_len;
805 while (vreg >= cfg->vreg_is_mp_len)
806 cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
807 cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
808 if (size)
809 memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
811 cfg->vreg_is_mp [vreg] = TRUE;
814 static MonoType*
815 type_from_stack_type (MonoInst *ins)
817 switch (ins->type) {
818 case STACK_I4: return m_class_get_byval_arg (mono_defaults.int32_class);
819 case STACK_I8: return m_class_get_byval_arg (mono_defaults.int64_class);
820 case STACK_PTR: return m_class_get_byval_arg (mono_defaults.int_class);
821 case STACK_R8: return m_class_get_byval_arg (mono_defaults.double_class);
822 case STACK_MP:
824 * this if used to be commented without any specific reason, but
825 * it breaks #80235 when commented
827 if (ins->klass)
828 return m_class_get_this_arg (ins->klass);
829 else
830 return m_class_get_this_arg (mono_defaults.object_class);
831 case STACK_OBJ:
832 /* ins->klass may not be set for ldnull.
833 * Also, if we have a boxed valuetype, we want an object lass,
834 * not the valuetype class
836 if (ins->klass && !m_class_is_valuetype (ins->klass))
837 return m_class_get_byval_arg (ins->klass);
838 return m_class_get_byval_arg (mono_defaults.object_class);
839 case STACK_VTYPE: return m_class_get_byval_arg (ins->klass);
840 default:
841 g_error ("stack type %d to montype not handled\n", ins->type);
843 return NULL;
846 MonoType*
847 mono_type_from_stack_type (MonoInst *ins)
849 return type_from_stack_type (ins);
853 * mono_add_ins_to_end:
855 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
857 void
858 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
860 int opcode;
862 if (!bb->code) {
863 MONO_ADD_INS (bb, inst);
864 return;
867 switch (bb->last_ins->opcode) {
868 case OP_BR:
869 case OP_BR_REG:
870 case CEE_BEQ:
871 case CEE_BGE:
872 case CEE_BGT:
873 case CEE_BLE:
874 case CEE_BLT:
875 case CEE_BNE_UN:
876 case CEE_BGE_UN:
877 case CEE_BGT_UN:
878 case CEE_BLE_UN:
879 case CEE_BLT_UN:
880 case OP_SWITCH:
881 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
882 break;
883 default:
884 if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
885 /* Need to insert the ins before the compare */
886 if (bb->code == bb->last_ins) {
887 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
888 return;
891 if (bb->code->next == bb->last_ins) {
892 /* Only two instructions */
893 opcode = bb->code->opcode;
895 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
896 /* NEW IR */
897 mono_bblock_insert_before_ins (bb, bb->code, inst);
898 } else {
899 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
901 } else {
902 opcode = bb->last_ins->prev->opcode;
904 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
905 /* NEW IR */
906 mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
907 } else {
908 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
912 else
913 MONO_ADD_INS (bb, inst);
914 break;
918 void
919 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
921 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
922 MonoJumpInfoBBTable *table;
924 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
925 table->table = bbs;
926 table->table_size = num_blocks;
928 ji->ip.label = label;
929 ji->type = MONO_PATCH_INFO_SWITCH;
930 ji->data.table = table;
931 ji->next = cfg->patch_info;
932 cfg->patch_info = ji;
935 static MonoMethodSignature *
936 mono_get_array_new_va_signature (int arity)
938 static GHashTable *sighash;
939 MonoMethodSignature *res;
940 int i;
942 mono_jit_lock ();
943 if (!sighash) {
944 sighash = g_hash_table_new (NULL, NULL);
946 else if ((res = (MonoMethodSignature *)g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) {
947 mono_jit_unlock ();
948 return res;
951 res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
953 res->pinvoke = 1;
954 if (ARCH_VARARG_ICALLS)
955 /* Only set this only some archs since not all backends can handle varargs+pinvoke */
956 res->call_convention = MONO_CALL_VARARG;
958 #ifdef TARGET_WIN32
959 res->call_convention = MONO_CALL_C;
960 #endif
962 MonoType *int_type = m_class_get_byval_arg (mono_defaults.int_class);
963 res->params [0] = int_type;
964 for (i = 0; i < arity; i++)
965 res->params [i + 1] = int_type;
967 res->ret = m_class_get_byval_arg (mono_defaults.object_class);
969 g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
970 mono_jit_unlock ();
972 return res;
975 MonoJitICallInfo *
976 mono_get_array_new_va_icall (int rank)
978 MonoMethodSignature *esig;
979 char icall_name [256];
980 char *name;
981 MonoJitICallInfo *info;
983 /* Need to register the icall so it gets an icall wrapper */
984 sprintf (icall_name, "ves_array_new_va_%d", rank);
986 mono_jit_lock ();
987 info = mono_find_jit_icall_by_name (icall_name);
988 if (info == NULL) {
989 esig = mono_get_array_new_va_signature (rank);
990 name = g_strdup (icall_name);
991 info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
993 mono_jit_unlock ();
995 return info;
998 gboolean
999 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
1001 MonoAssembly *assembly = m_class_get_image (method->klass)->assembly;
1002 if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
1003 return FALSE;
1004 if (assembly->in_gac || assembly->image == mono_defaults.corlib)
1005 return FALSE;
1006 return mono_assembly_has_skip_verification (assembly);
1010 * mini_method_verify:
1012 * Verify the method using the verfier.
1014 * Returns true if the method is invalid.
1016 static gboolean
1017 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1019 GSList *tmp, *res;
1020 gboolean is_fulltrust;
1022 if (method->verification_success)
1023 return FALSE;
1025 if (!mono_verifier_is_enabled_for_method (method))
1026 return FALSE;
1028 /*skip verification implies the assembly must be */
1029 is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);
1031 res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
1033 if (res) {
1034 for (tmp = res; tmp; tmp = tmp->next) {
1035 MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
1036 if (info->info.status == MONO_VERIFY_ERROR) {
1037 if (fail_compile) {
1038 char *method_name = mono_method_full_name (method, TRUE);
1039 cfg->exception_type = info->exception_type;
1040 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1041 g_free (method_name);
1043 mono_free_verify_list (res);
1044 return TRUE;
1046 if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
1047 if (fail_compile) {
1048 char *method_name = mono_method_full_name (method, TRUE);
1049 char *msg = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1051 if (info->exception_type == MONO_EXCEPTION_METHOD_ACCESS)
1052 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "%s", msg);
1053 else if (info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)
1054 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "%s", msg);
1055 else if (info->exception_type == MONO_EXCEPTION_UNVERIFIABLE_IL)
1056 mono_error_set_generic_error (&cfg->error, "System.Security", "VerificationException", "%s", msg);
1057 if (!mono_error_ok (&cfg->error)) {
1058 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1059 g_free (msg);
1060 } else {
1061 cfg->exception_type = info->exception_type;
1062 cfg->exception_message = msg;
1064 g_free (method_name);
1066 mono_free_verify_list (res);
1067 return TRUE;
1070 mono_free_verify_list (res);
1072 method->verification_success = 1;
1073 return FALSE;
1076 /*Returns true if something went wrong*/
1077 gboolean
1078 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1080 MonoMethod *method_definition = method;
1081 gboolean dont_verify = m_class_get_image (method->klass)->assembly->corlib_internal;
1083 while (method_definition->is_inflated) {
1084 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1085 method_definition = imethod->declaring;
1088 return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
1091 static void
1092 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1094 if (!domain_jit_info (domain)->dynamic_code_hash)
1095 domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
1096 g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
1099 static MonoJitDynamicMethodInfo*
1100 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1102 MonoJitDynamicMethodInfo *res;
1104 if (domain_jit_info (domain)->dynamic_code_hash)
1105 res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
1106 else
1107 res = NULL;
1108 return res;
1111 typedef struct {
1112 MonoClass *vtype;
1113 GList *active, *inactive;
1114 GSList *slots;
1115 } StackSlotInfo;
1117 static gint
1118 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1120 MonoMethodVar *v1 = (MonoMethodVar*)a;
1121 MonoMethodVar *v2 = (MonoMethodVar*)b;
1123 if (v1 == v2)
1124 return 0;
1125 else if (v1->interval->range && v2->interval->range)
1126 return v1->interval->range->from - v2->interval->range->from;
1127 else if (v1->interval->range)
1128 return -1;
1129 else
1130 return 1;
/* Debug tracing for the linear-scan stack slot allocators below; change
 * "#if 0" to "#if 1" to enable the printfs. */
#if 0
#define LSCAN_DEBUG(a) do { a; } while (0)
#else
#define LSCAN_DEBUG(a)
#endif
/*
 * mono_allocate_stack_slots2:
 *
 *   Allocate stack slots for the non register allocated variables of CFG
 * using a linear scan over the precise live intervals attached to each
 * MonoMethodVar (mono_allocate_stack_slots () dispatches here only when
 * those intervals exist).  Returns a mempool-allocated array mapping
 * variable index -> stack offset (-1 for variables which got no slot);
 * *STACK_SIZE is set to the total frame space used, *STACK_ALIGN to the
 * required alignment of the locals area.
 */
static gint32*
mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	int i, slot, offset, size;
	guint32 align;
	MonoMethodVar *vmv;
	MonoInst *inst;
	gint32 *offsets;
	GList *vars = NULL, *l, *unhandled;
	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
	MonoType *t;
	int nvtypes;
	gboolean reuse_slot;

	LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));

	scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
	vtype_stack_slots = NULL;
	nvtypes = 0;

	/* -1 means "no slot assigned" */
	offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
	for (i = 0; i < cfg->num_varinfo; ++i)
		offsets [i] = -1;

	/* Collect the locals which still need a stack slot */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		inst = cfg->varinfo [i];
		vmv = MONO_VARINFO (cfg, i);

		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
			continue;

		vars = g_list_prepend (vars, vmv);
	}

	vars = g_list_sort (vars, compare_by_interval_start_pos_func);

	/* Sanity check */
	i = 0;
	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
		MonoMethodVar *current = unhandled->data;

		if (current->interval->range) {
			g_assert (current->interval->range->from >= i);
			i = current->interval->range->from;
		}
	}

	offset = 0;
	*stack_align = 0;
	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
		MonoMethodVar *current = (MonoMethodVar *)unhandled->data;

		vmv = current;
		inst = cfg->varinfo [vmv->idx];

		t = mono_type_get_underlying_type (inst->inst_vtype);
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (t), &align);
		}
		else {
			int ialign;

			size = mini_type_stack_size (t, &ialign);
			align = ialign;

			if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
				align = 16;
		}

		reuse_slot = TRUE;
		if (cfg->disable_reuse_stack_slots)
			reuse_slot = FALSE;

		/* Pick the StackSlotInfo bucket for this variable's type */
		t = mini_get_underlying_type (t);
		switch (t->type) {
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				slot_info = &scalar_stack_slots [t->type];
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
			if (!vtype_stack_slots)
				vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
			for (i = 0; i < nvtypes; ++i)
				if (t->data.klass == vtype_stack_slots [i].vtype)
					break;
			if (i < nvtypes)
				slot_info = &vtype_stack_slots [i];
			else {
				g_assert (nvtypes < 256);
				vtype_stack_slots [nvtypes].vtype = t->data.klass;
				slot_info = &vtype_stack_slots [nvtypes];
				nvtypes ++;
			}
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;

		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
		case MONO_TYPE_I4:
#else
		case MONO_TYPE_I8:
#endif
			if (cfg->disable_ref_noref_stack_slot_share) {
				slot_info = &scalar_stack_slots [MONO_TYPE_I];
				break;
			}
			/* Fall through */

		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_STRING:
			/* Share non-float stack slots of the same size */
			slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;

		default:
			slot_info = &scalar_stack_slots [t->type];
		}

		slot = 0xffffff;
		if (cfg->comp_done & MONO_COMP_LIVENESS) {
			int pos;
			gboolean changed;

			//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);

			if (!current->interval->range) {
				if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
					pos = ~0;
				else {
					/* Dead */
					inst->flags |= MONO_INST_IS_DEAD;
					continue;
				}
			}
			else
				pos = current->interval->range->from;

			LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
			if (current->interval->range)
				LSCAN_DEBUG (mono_linterval_print (current->interval));
			LSCAN_DEBUG (printf ("\n"));

			/* Check for intervals in active which expired or inactive */
			changed = TRUE;
			/* FIXME: Optimize this */
			while (changed) {
				changed = FALSE;
				for (l = slot_info->active; l != NULL; l = l->next) {
					MonoMethodVar *v = (MonoMethodVar*)l->data;

					if (v->interval->last_range->to < pos) {
						/* Expired: its slot becomes reusable */
						slot_info->active = g_list_delete_link (slot_info->active, l);
						slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
						changed = TRUE;
						break;
					}
					else if (!mono_linterval_covers (v->interval, pos)) {
						slot_info->inactive = g_list_append (slot_info->inactive, v);
						slot_info->active = g_list_delete_link (slot_info->active, l);
						LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
						changed = TRUE;
						break;
					}
				}
			}

			/* Check for intervals in inactive which expired or active */
			changed = TRUE;
			/* FIXME: Optimize this */
			while (changed) {
				changed = FALSE;
				for (l = slot_info->inactive; l != NULL; l = l->next) {
					MonoMethodVar *v = (MonoMethodVar*)l->data;

					if (v->interval->last_range->to < pos) {
						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
						// FIXME: Enabling this seems to cause impossible to debug crashes
						//slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
						changed = TRUE;
						break;
					}
					else if (mono_linterval_covers (v->interval, pos)) {
						slot_info->active = g_list_append (slot_info->active, v);
						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
						LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
						changed = TRUE;
						break;
					}
				}
			}

			/*
			 * This also handles the case when the variable is used in an
			 * exception region, as liveness info is not computed there.
			 */
			/*
			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
			 * opcodes.
			 */
			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
				if (slot_info->slots) {
					slot = GPOINTER_TO_INT (slot_info->slots->data);

					slot_info->slots = slot_info->slots->next;
				}

				/* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */

				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
			}
		}

#if 0
		{
			static int count = 0;
			count ++;

			if (count == atoi (g_getenv ("COUNT3")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (g_getenv ("COUNT3")))
				slot = 0xffffff;
			else
				mono_print_ins (inst);
		}
#endif

		LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));

		if (inst->flags & MONO_INST_LMF) {
			size = sizeof (MonoLMF);
			align = sizeof (mgreg_t);
			reuse_slot = FALSE;
		}

		if (!reuse_slot)
			slot = 0xffffff;

		if (slot == 0xffffff) {
			/*
			 * Allways allocate valuetypes to sizeof (gpointer) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignores alignment).
			 */
			if (MONO_TYPE_ISSTRUCT (t)) {
				align = MAX (align, sizeof (gpointer));
				align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
			}

			if (backward) {
				offset += size;
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
			}
			else {
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
				offset += size;
			}

			/* NOTE(review): only the alignment of the first allocated slot is
			 * recorded here, unlike the MAX () accumulation done in
			 * mono_allocate_stack_slots () — confirm this is intentional. */
			if (*stack_align == 0)
				*stack_align = align;
		}

		offsets [vmv->idx] = slot;
	}
	g_list_free (vars);
	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
		if (scalar_stack_slots [i].active)
			g_list_free (scalar_stack_slots [i].active);
	}
	for (i = 0; i < nvtypes; ++i) {
		if (vtype_stack_slots [i].active)
			g_list_free (vtype_stack_slots [i].active);
	}

	cfg->stat_locals_stack_size += offset;

	*stack_size = offset;
	return offsets;
}
/**
 * mono_allocate_stack_slots:
 *
 *  Allocate stack slots for all non register allocated variables using a
 * linear scan algorithm.
 * Returns: an array of stack offsets (-1 for variables without a slot).
 * STACK_SIZE is set to the amount of stack space needed.
 * STACK_ALIGN is set to the alignment needed by the locals area.
 */
gint32*
mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
	int i, slot, offset, size;
	guint32 align;
	MonoMethodVar *vmv;
	MonoInst *inst;
	gint32 *offsets;
	GList *vars = NULL, *l;
	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
	MonoType *t;
	int nvtypes;
	gboolean reuse_slot;

	/* When precise live intervals are available, use the interval based
	 * variant instead. */
	if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
		return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);

	scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
	vtype_stack_slots = NULL;
	nvtypes = 0;

	/* -1 means "no slot assigned" */
	offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
	for (i = 0; i < cfg->num_varinfo; ++i)
		offsets [i] = -1;

	/* Collect the locals which still need a stack slot */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		inst = cfg->varinfo [i];
		vmv = MONO_VARINFO (cfg, i);

		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
			continue;

		vars = g_list_prepend (vars, vmv);
	}

	vars = mono_varlist_sort (cfg, vars, 0);
	offset = 0;
	*stack_align = sizeof(mgreg_t);
	for (l = vars; l; l = l->next) {
		vmv = (MonoMethodVar *)l->data;
		inst = cfg->varinfo [vmv->idx];

		t = mono_type_get_underlying_type (inst->inst_vtype);
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
			continue;

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structures */
		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (t), &align);
		} else {
			int ialign;

			size = mini_type_stack_size (t, &ialign);
			align = ialign;

			if (mono_class_has_failure (mono_class_from_mono_type (t)))
				mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);

			if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
				align = 16;
		}

		reuse_slot = TRUE;
		if (cfg->disable_reuse_stack_slots)
			reuse_slot = FALSE;

		/* Pick the StackSlotInfo bucket for this variable's type */
		t = mini_get_underlying_type (t);
		switch (t->type) {
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (t)) {
				slot_info = &scalar_stack_slots [t->type];
				break;
			}
			/* Fall through */
		case MONO_TYPE_VALUETYPE:
			if (!vtype_stack_slots)
				vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
			for (i = 0; i < nvtypes; ++i)
				if (t->data.klass == vtype_stack_slots [i].vtype)
					break;
			if (i < nvtypes)
				slot_info = &vtype_stack_slots [i];
			else {
				g_assert (nvtypes < 256);
				vtype_stack_slots [nvtypes].vtype = t->data.klass;
				slot_info = &vtype_stack_slots [nvtypes];
				nvtypes ++;
			}
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;

		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
		case MONO_TYPE_I4:
#else
		case MONO_TYPE_I8:
#endif
			if (cfg->disable_ref_noref_stack_slot_share) {
				slot_info = &scalar_stack_slots [MONO_TYPE_I];
				break;
			}
			/* Fall through */

		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_STRING:
			/* Share non-float stack slots of the same size */
			slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
			if (cfg->disable_reuse_ref_stack_slots)
				reuse_slot = FALSE;
			break;
		case MONO_TYPE_VAR:
		case MONO_TYPE_MVAR:
			slot_info = &scalar_stack_slots [t->type];
			break;
		default:
			slot_info = &scalar_stack_slots [t->type];
			break;
		}

		slot = 0xffffff;
		if (cfg->comp_done & MONO_COMP_LIVENESS) {
			//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);

			/* expire old intervals in active */
			while (slot_info->active) {
				MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;

				if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
					break;

				//printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);

				slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
				slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
			}

			/*
			 * This also handles the case when the variable is used in an
			 * exception region, as liveness info is not computed there.
			 */
			/*
			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
			 * opcodes.
			 */
			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
				if (slot_info->slots) {
					slot = GPOINTER_TO_INT (slot_info->slots->data);

					slot_info->slots = slot_info->slots->next;
				}

				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
			}
		}

#if 0
		{
			static int count = 0;
			count ++;

			if (count == atoi (g_getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
			if (count > atoi (g_getenv ("COUNT")))
				slot = 0xffffff;
			else
				mono_print_ins (inst);
		}
#endif

		if (inst->flags & MONO_INST_LMF) {
			/*
			 * This variable represents a MonoLMF structure, which has no corresponding
			 * CLR type, so hard-code its size/alignment.
			 */
			size = sizeof (MonoLMF);
			align = sizeof (mgreg_t);
			reuse_slot = FALSE;
		}

		if (!reuse_slot)
			slot = 0xffffff;

		if (slot == 0xffffff) {
			/*
			 * Allways allocate valuetypes to sizeof (gpointer) to allow more
			 * efficient copying (and to work around the fact that OP_MEMCPY
			 * and OP_MEMSET ignores alignment).
			 */
			if (MONO_TYPE_ISSTRUCT (t)) {
				align = MAX (align, sizeof (gpointer));
				align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
				/*
				 * Align the size too so the code generated for passing vtypes in
				 * registers doesn't overwrite random locals.
				 */
				size = (size + (align - 1)) & ~(align -1);
			}

			if (backward) {
				offset += size;
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
			}
			else {
				offset += align - 1;
				offset &= ~(align - 1);
				slot = offset;
				offset += size;
			}

			*stack_align = MAX (*stack_align, align);
		}

		offsets [vmv->idx] = slot;
	}
	g_list_free (vars);
	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
		if (scalar_stack_slots [i].active)
			g_list_free (scalar_stack_slots [i].active);
	}
	for (i = 0; i < nvtypes; ++i) {
		if (vtype_stack_slots [i].active)
			g_list_free (vtype_stack_slots [i].active);
	}

	cfg->stat_locals_stack_size += offset;

	*stack_size = offset;
	return offsets;
}
#define EMUL_HIT_SHIFT 3
#define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
/* small hit bitmap cache */
/* A set bit may be a false positive (several opcodes share a bit), but a
 * clear bit guarantees no emulation is registered for the opcode, letting
 * mono_find_jit_opcode_emulation () skip the linear search. */
static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
/* Number of registered opcode emulations */
static short emul_opcode_num = 0;
/* Allocated capacity of the two parallel arrays below */
static short emul_opcode_alloced = 0;
/* Parallel arrays: emul_opcode_opcodes [i] is emulated by emul_opcode_map [i] */
static short *emul_opcode_opcodes;
static MonoJitICallInfo **emul_opcode_map;
1698 MonoJitICallInfo *
1699 mono_find_jit_opcode_emulation (int opcode)
1701 g_assert (opcode >= 0 && opcode <= OP_LAST);
1702 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
1703 int i;
1704 for (i = 0; i < emul_opcode_num; ++i) {
1705 if (emul_opcode_opcodes [i] == opcode)
1706 return emul_opcode_map [i];
1709 return NULL;
1712 void
1713 mini_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_wrapper)
1715 MonoJitICallInfo *info;
1716 MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
1718 g_assert (!sig->hasthis);
1719 g_assert (sig->param_count < 3);
1721 info = mono_register_jit_icall_full (func, name, sig, no_wrapper, symbol);
1723 if (emul_opcode_num >= emul_opcode_alloced) {
1724 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1725 emul_opcode_alloced += incr;
1726 emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1727 emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1729 emul_opcode_map [emul_opcode_num] = info;
1730 emul_opcode_opcodes [emul_opcode_num] = opcode;
1731 emul_opcode_num++;
1732 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
/*
 * print_dfn:
 *
 *   Debug helper: dump the IR of every basic block of CFG together with its
 * predecessor/successor lists, immediate dominator, dominator set and
 * dominance frontier.
 */
static void
print_dfn (MonoCompile *cfg)
{
	int i, j;
	char *code;
	MonoBasicBlock *bb;
	MonoInst *c;

	{
		char *method_name = mono_method_full_name (cfg->method, TRUE);
		g_print ("IR code for method %s\n", method_name);
		g_free (method_name);
	}

	for (i = 0; i < cfg->num_bblocks; ++i) {
		bb = cfg->bblocks [i];
		/*if (bb->cil_code) {
			char* code1, *code2;
			code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
			if (bb->last_ins->cil_code)
				code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
			else
				code2 = g_strdup ("");

			code1 [strlen (code1) - 1] = 0;
			code = g_strdup_printf ("%s -> %s", code1, code2);
			g_free (code1);
			g_free (code2);
		} else*/
			code = g_strdup ("\n");
		g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
		MONO_BB_FOR_EACH_INS (bb, c) {
			mono_print_ins_index (-1, c);
		}

		g_print ("\tprev:");
		for (j = 0; j < bb->in_count; ++j) {
			g_print (" BB%d", bb->in_bb [j]->block_num);
		}
		g_print ("\t\tsucc:");
		for (j = 0; j < bb->out_count; ++j) {
			g_print (" BB%d", bb->out_bb [j]->block_num);
		}
		g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);

		/* The immediate dominator must be part of the dominator set. */
		if (bb->idom)
			g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));

		if (bb->dominators)
			mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
		if (bb->dfrontier)
			mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);

		g_free (code);
	}

	g_print ("\n");
}
/* Append INST at the end of BB's instruction list. */
void
mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
{
	MONO_ADD_INS (bb, inst);
}
1799 void
1800 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1802 if (ins == NULL) {
1803 ins = bb->code;
1804 bb->code = ins_to_insert;
1806 /* Link with next */
1807 ins_to_insert->next = ins;
1808 if (ins)
1809 ins->prev = ins_to_insert;
1811 if (bb->last_ins == NULL)
1812 bb->last_ins = ins_to_insert;
1813 } else {
1814 /* Link with next */
1815 ins_to_insert->next = ins->next;
1816 if (ins->next)
1817 ins->next->prev = ins_to_insert;
1819 /* Link with previous */
1820 ins->next = ins_to_insert;
1821 ins_to_insert->prev = ins;
1823 if (bb->last_ins == ins)
1824 bb->last_ins = ins_to_insert;
1828 void
1829 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1831 if (ins == NULL) {
1832 ins = bb->code;
1833 if (ins)
1834 ins->prev = ins_to_insert;
1835 bb->code = ins_to_insert;
1836 ins_to_insert->next = ins;
1837 if (bb->last_ins == NULL)
1838 bb->last_ins = ins_to_insert;
1839 } else {
1840 /* Link with previous */
1841 if (ins->prev)
1842 ins->prev->next = ins_to_insert;
1843 ins_to_insert->prev = ins->prev;
1845 /* Link with next */
1846 ins->prev = ins_to_insert;
1847 ins_to_insert->next = ins;
1849 if (bb->code == ins)
1850 bb->code = ins_to_insert;
1855 * mono_verify_bblock:
1857 * Verify that the next and prev pointers are consistent inside the instructions in BB.
1859 void
1860 mono_verify_bblock (MonoBasicBlock *bb)
1862 MonoInst *ins, *prev;
1864 prev = NULL;
1865 for (ins = bb->code; ins; ins = ins->next) {
1866 g_assert (ins->prev == prev);
1867 prev = ins;
1869 if (bb->last_ins)
1870 g_assert (!bb->last_ins->next);
1874 * mono_verify_cfg:
1876 * Perform consistency checks on the JIT data structures and the IR
1878 void
1879 mono_verify_cfg (MonoCompile *cfg)
1881 MonoBasicBlock *bb;
1883 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
1884 mono_verify_bblock (bb);
1887 // This will free many fields in cfg to save
1888 // memory. Note that this must be safe to call
1889 // multiple times. It must be idempotent.
1890 void
1891 mono_empty_compile (MonoCompile *cfg)
1893 mono_free_loop_info (cfg);
1895 // These live in the mempool, and so must be freed
1896 // first
1897 for (GSList *l = cfg->headers_to_free; l; l = l->next) {
1898 mono_metadata_free_mh ((MonoMethodHeader *)l->data);
1900 cfg->headers_to_free = NULL;
1902 if (cfg->mempool) {
1903 //mono_mempool_stats (cfg->mempool);
1904 mono_mempool_destroy (cfg->mempool);
1905 cfg->mempool = NULL;
1908 g_free (cfg->varinfo);
1909 cfg->varinfo = NULL;
1911 g_free (cfg->vars);
1912 cfg->vars = NULL;
1914 if (cfg->rs) {
1915 mono_regstate_free (cfg->rs);
1916 cfg->rs = NULL;
/*
 * mono_destroy_compile:
 *
 *   Release CFG and everything it still owns, including CFG itself.
 * mono_empty_compile () frees the bulk of the data first and NULLs the
 * fields it releases, so the trailing g_free () calls on varinfo/vars
 * below are no-ops kept for safety.
 */
void
mono_destroy_compile (MonoCompile *cfg)
{
	mono_empty_compile (cfg);

	if (cfg->header)
		mono_metadata_free_mh (cfg->header);

	if (cfg->spvars)
		g_hash_table_destroy (cfg->spvars);
	if (cfg->exvars)
		g_hash_table_destroy (cfg->exvars);

	g_list_free (cfg->ldstr_list);

	if (cfg->token_info_hash)
		g_hash_table_destroy (cfg->token_info_hash);

	if (cfg->abs_patches)
		g_hash_table_destroy (cfg->abs_patches);

	mono_debug_free_method (cfg);

	/* Already freed and NULL-ed by mono_empty_compile (); harmless here. */
	g_free (cfg->varinfo);
	g_free (cfg->vars);
	g_free (cfg->exception_message);
	g_free (cfg);
}
1949 void
1950 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
1952 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1954 ji->ip.i = ip;
1955 ji->type = type;
1956 ji->data.target = target;
1957 ji->next = cfg->patch_info;
1959 cfg->patch_info = ji;
1962 void
1963 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
1965 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1967 ji->ip.i = ip;
1968 ji->type = type;
1969 ji->relocation = relocation;
1970 ji->data.target = target;
1971 ji->next = cfg->patch_info;
1973 cfg->patch_info = ji;
1976 void
1977 mono_remove_patch_info (MonoCompile *cfg, int ip)
1979 MonoJumpInfo **ji = &cfg->patch_info;
1981 while (*ji) {
1982 if ((*ji)->ip.i == ip)
1983 *ji = (*ji)->next;
1984 else
1985 ji = &((*ji)->next);
1989 void
1990 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
1992 ins->inst_offset = native_offset;
1993 g_ptr_array_add (cfg->seq_points, ins);
1994 if (bb) {
1995 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
1996 bb->last_seq_point = ins;
2000 void
2001 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
2003 MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
2005 if (is_reg)
2006 g_assert (offset == 0);
2008 entry->is_reg = is_reg;
2009 entry->reg = reg;
2010 entry->offset = offset;
2011 entry->from = from;
2012 entry->to = to;
2014 if (var == cfg->args [0])
2015 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
2016 else if (var == cfg->rgctx_var)
2017 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
/*
 * mono_compile_create_vars:
 *
 *   Create the MonoInst variables for the method being compiled: the
 * return value (if any), the 'this' argument, the formal parameters and
 * the IL locals.  Afterwards the backend (or LLVM) gets a chance to add
 * its own variables, and finally the LMF variable is created when the
 * method saves an LMF.
 */
static void
mono_compile_create_vars (MonoCompile *cfg)
{
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i;

	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	if (!MONO_TYPE_IS_VOID (sig->ret)) {
		cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
		/* Inhibit optimizations */
		cfg->ret->flags |= MONO_INST_VOLATILE;
	}
	if (cfg->verbose_level > 2)
		g_print ("creating vars\n");

	cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));

	if (sig->hasthis) {
		/* args [0] is the 'this' argument */
		cfg->args [0] = mono_compile_create_var (cfg, m_class_get_this_arg (cfg->method->klass), OP_ARG);
		cfg->this_arg = cfg->args [0];
	}

	for (i = 0; i < sig->param_count; ++i) {
		cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
	}

	if (cfg->verbose_level > 2) {
		if (cfg->ret) {
			printf ("\treturn : ");
			mono_print_ins (cfg->ret);
		}

		if (sig->hasthis) {
			printf ("\tthis: ");
			mono_print_ins (cfg->args [0]);
		}

		for (i = 0; i < sig->param_count; ++i) {
			printf ("\targ [%d]: ", i);
			mono_print_ins (cfg->args [i + sig->hasthis]);
		}
	}

	/* Locals are created after the arguments, starting at locals_start */
	cfg->locals_start = cfg->num_varinfo;
	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));

	if (cfg->verbose_level > 2)
		g_print ("creating locals\n");

	for (i = 0; i < header->num_locals; ++i) {
		if (cfg->verbose_level > 2)
			g_print ("\tlocal [%d]: ", i);
		cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
	}

	if (cfg->verbose_level > 2)
		g_print ("locals done\n");

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_create_vars (cfg);
	else
		mono_arch_create_vars (cfg);
#else
	mono_arch_create_vars (cfg);
#endif

	if (cfg->method->save_lmf && cfg->create_lmf_var) {
		MonoInst *lmf_var = mono_compile_create_var (cfg, m_class_get_byval_arg (mono_defaults.int_class), OP_LOCAL);
		lmf_var->flags |= MONO_INST_VOLATILE;
		lmf_var->flags |= MONO_INST_LMF;
		cfg->lmf_var = lmf_var;
	}
}
2099 void
2100 mono_print_code (MonoCompile *cfg, const char* msg)
2102 MonoBasicBlock *bb;
2104 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2105 mono_print_bb (bb, msg);
/*
 * mono_postprocess_patches:
 *
 *   Walk CFG's patch list after native code has been emitted and rewrite
 * the patches which need extra processing: ABS patches are converted into
 * descriptive patches where possible, SWITCH patches get a jump table
 * filled with the targets' native offsets, and METHOD_JUMP patches have
 * their jump site registered with the domain.
 */
static void
mono_postprocess_patches (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;

	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_ABS: {
			MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);

			/*
			 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
			 * absolute address.
			 */
			if (info) {
				//printf ("TEST %s %p\n", info->name, patch_info->data.target);
				/* for these array methods we currently register the same function pointer
				 * since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
				 * will return the incorrect one depending on the order they are registered.
				 * See tests/test-arr.cs
				 */
				if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
					patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
					patch_info->data.name = info->name;
				}
			}

			/* Still ABS: try the per-compile abs_patches table instead. */
			if (patch_info->type == MONO_PATCH_INFO_ABS) {
				if (cfg->abs_patches) {
					MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
					if (abs_ji) {
						patch_info->type = abs_ji->type;
						patch_info->data.target = abs_ji->data.target;
					}
				}
			}

			break;
		}
		case MONO_PATCH_INFO_SWITCH: {
			gpointer *table;
			/* Dynamic methods own their code memory, so the table goes there. */
			if (cfg->method->dynamic) {
				table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
			} else {
				table = (void **)mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
			}

			for (i = 0; i < patch_info->data.table->table_size; i++) {
				/* Might be NULL if the switch is eliminated */
				if (patch_info->data.table->table [i]) {
					g_assert (patch_info->data.table->table [i]->native_offset);
					table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
				} else {
					table [i] = NULL;
				}
			}
			patch_info->data.table->table = (MonoBasicBlock**)table;
			break;
		}
		case MONO_PATCH_INFO_METHOD_JUMP: {
			unsigned char *ip = cfg->native_code + patch_info->ip.i;

			mini_register_jump_site (cfg->domain, patch_info->data.method, ip);
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}
}
2181 void
2182 mono_codegen (MonoCompile *cfg)
2184 MonoBasicBlock *bb;
2185 int max_epilog_size;
2186 guint8 *code;
2187 MonoDomain *code_domain;
2188 guint unwindlen = 0;
2190 if (mono_using_xdebug)
2192 * Recent gdb versions have trouble processing symbol files containing
2193 * overlapping address ranges, so allocate all code from the code manager
2194 * of the root domain. (#666152).
2196 code_domain = mono_get_root_domain ();
2197 else
2198 code_domain = cfg->domain;
2200 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2201 cfg->spill_count = 0;
2202 /* we reuse dfn here */
2203 /* bb->dfn = bb_count++; */
2205 mono_arch_lowering_pass (cfg, bb);
2207 if (cfg->opt & MONO_OPT_PEEPHOLE)
2208 mono_arch_peephole_pass_1 (cfg, bb);
2210 mono_local_regalloc (cfg, bb);
2212 if (cfg->opt & MONO_OPT_PEEPHOLE)
2213 mono_arch_peephole_pass_2 (cfg, bb);
2215 if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
2216 mono_bb_deduplicate_op_il_seq_points (cfg, bb);
2219 code = mono_arch_emit_prolog (cfg);
2221 cfg->code_len = code - cfg->native_code;
2222 cfg->prolog_end = cfg->code_len;
2223 cfg->cfa_reg = cfg->cur_cfa_reg;
2224 cfg->cfa_offset = cfg->cur_cfa_offset;
2226 mono_debug_open_method (cfg);
2228 /* emit code all basic blocks */
2229 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2230 bb->native_offset = cfg->code_len;
2231 bb->real_native_offset = cfg->code_len;
2232 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2233 mono_arch_output_basic_block (cfg, bb);
2234 bb->native_length = cfg->code_len - bb->native_offset;
2236 if (bb == cfg->bb_exit) {
2237 cfg->epilog_begin = cfg->code_len;
2238 mono_arch_emit_epilog (cfg);
2239 cfg->epilog_end = cfg->code_len;
2242 if (bb->clause_holes) {
2243 GList *tmp;
2244 for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
2245 mono_cfg_add_try_hole (cfg, (MonoExceptionClause *)tmp->data, cfg->native_code + bb->native_offset, bb);
2249 mono_arch_emit_exceptions (cfg);
2251 max_epilog_size = 0;
2253 /* we always allocate code in cfg->domain->code_mp to increase locality */
2254 cfg->code_size = cfg->code_len + max_epilog_size;
2256 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2258 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2259 if (!cfg->compile_aot)
2260 unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
2261 #endif
2263 if (cfg->method->dynamic) {
2264 /* Allocate the code into a separate memory pool so it can be freed */
2265 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2266 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2267 mono_domain_lock (cfg->domain);
2268 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2269 mono_domain_unlock (cfg->domain);
2271 if (mono_using_xdebug)
2272 /* See the comment for cfg->code_domain */
2273 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2274 else
2275 code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
2276 } else {
2277 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2280 if (cfg->thunk_area) {
2281 cfg->thunks_offset = cfg->code_size + unwindlen;
2282 cfg->thunks = code + cfg->thunks_offset;
2283 memset (cfg->thunks, 0, cfg->thunk_area);
2286 g_assert (code);
2287 memcpy (code, cfg->native_code, cfg->code_len);
2288 g_free (cfg->native_code);
2289 cfg->native_code = code;
2290 code = cfg->native_code + cfg->code_len;
2292 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2293 mono_postprocess_patches (cfg);
2295 #ifdef VALGRIND_JIT_REGISTER_MAP
2296 if (valgrind_register){
2297 char* nm = mono_method_full_name (cfg->method, TRUE);
2298 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2299 g_free (nm);
2301 #endif
2303 if (cfg->verbose_level > 0) {
2304 char* nm = mono_method_get_full_name (cfg->method);
2305 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2306 nm,
2307 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
2308 g_free (nm);
2312 gboolean is_generic = FALSE;
2314 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2315 mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
2316 is_generic = TRUE;
2319 if (cfg->gshared)
2320 g_assert (is_generic);
2323 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2324 mono_arch_save_unwind_info (cfg);
2325 #endif
2327 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2329 MonoJumpInfo *ji;
2330 gpointer target;
2332 for (ji = cfg->patch_info; ji; ji = ji->next) {
2333 if (cfg->compile_aot) {
2334 switch (ji->type) {
2335 case MONO_PATCH_INFO_BB:
2336 case MONO_PATCH_INFO_LABEL:
2337 break;
2338 default:
2339 /* No need to patch these */
2340 continue;
2344 if (ji->type == MONO_PATCH_INFO_NONE)
2345 continue;
2347 target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, &cfg->error);
2348 if (!mono_error_ok (&cfg->error)) {
2349 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2350 return;
2352 mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
2355 #else
2356 mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors, &cfg->error);
2357 if (!is_ok (&cfg->error)) {
2358 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2359 return;
2361 #endif
2363 if (cfg->method->dynamic) {
2364 if (mono_using_xdebug)
2365 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2366 else
2367 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2368 } else {
2369 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2371 MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
2373 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2375 mono_debug_close_method (cfg);
2377 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2378 if (!cfg->compile_aot)
2379 mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
2380 #endif
2383 static void
2384 compute_reachable (MonoBasicBlock *bb)
2386 int i;
2388 if (!(bb->flags & BB_VISITED)) {
2389 bb->flags |= BB_VISITED;
2390 for (i = 0; i < bb->out_count; ++i)
2391 compute_reachable (bb->out_bb [i]);
/*
 * mono_bb_ordering:
 *
 *   Compute a depth-first ordering of the basic blocks into cfg->bblocks,
 * and, when the DFS did not reach every block, remove the code in the
 * unreachable blocks (their instructions may reference dead variables and
 * be inconsistent). Blocks reachable only from exception handlers are kept.
 */
static void mono_bb_ordering (MonoCompile *cfg)
{
	int dfn = 0;
	/* Depth-first ordering on basic blocks */
	cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));

	cfg->max_block_num = cfg->num_bblocks;

	df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
	/* If the DFS numbered fewer blocks than exist, some blocks were not
	 * reachable from the entry block. */
	if (cfg->num_bblocks != dfn + 1) {
		MonoBasicBlock *bb;

		cfg->num_bblocks = dfn + 1;

		/* remove unreachable code, because the code in them may be
		 * inconsistent (access to dead variables for example) */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
		compute_reachable (cfg->bb_entry);
		/* Exception handler landing blocks are entered by the EH machinery,
		 * not by normal flow edges, so mark them reachable explicitly. */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			if (bb->flags & BB_EXCEPTION_HANDLER)
				compute_reachable (bb);
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			if (!(bb->flags & BB_VISITED)) {
				if (cfg->verbose_level > 1)
					g_print ("found unreachable code in BB%d\n", bb->block_num);
				/* Drop the block's instructions and unlink all outgoing edges. */
				bb->code = bb->last_ins = NULL;
				while (bb->out_count)
					mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
			}
		}
		/* Leave BB_VISITED clear for later passes that reuse the flag. */
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
			bb->flags &= ~BB_VISITED;
	}
}
2431 static void
2432 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2434 MonoBasicBlock *bb;
2435 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2436 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2437 MonoInst *ins;
2438 MONO_INST_NEW (cfg, ins, OP_BR);
2439 MONO_ADD_INS (bb, ins);
2440 ins->inst_target_bb = bb->next_bb;
/*
 * create_jit_info:
 *
 *   Build the MonoJitInfo descriptor for the freshly compiled METHOD_TO_COMPILE:
 * allocate it with the right optional sections (generic sharing info, arch EH
 * info, thunk info, try-block holes), fill in the exception clause table from
 * the IL header (or from LLVM's info when compiling with LLVM), and attach the
 * cached unwind information.
 *
 * Returns: the new MonoJitInfo, allocated from the method's domain, or with
 * g_malloc0 for dynamic methods so it can be freed individually.
 */
static MonoJitInfo*
create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
{
	GSList *tmp;
	MonoMethodHeader *header;
	MonoJitInfo *jinfo;
	MonoJitInfoFlags flags = JIT_INFO_NONE;
	int num_clauses, num_holes = 0;
	guint32 stack_size = 0;

	g_assert (method_to_compile == cfg->method);
	header = cfg->header;

	if (cfg->gshared)
		flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_GENERIC_JIT_INFO);

	if (cfg->arch_eh_jit_info) {
		MonoJitArgumentInfo *arg_info;
		MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);

		/*
		 * This cannot be computed during stack walking, as
		 * mono_arch_get_argument_info () is not signal safe.
		 */
		arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
		stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);

		if (stack_size)
			flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
	}

	/* Epilog unwind info is stored in the arch EH section too. */
	if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
		flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);

	if (cfg->thunk_area)
		flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_THUNK_INFO);

	/* First pass over the holes just to count how many survive, so the
	 * jit info can be allocated with the right size. */
	if (cfg->try_block_holes) {
		for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
			TryBlockHole *hole = (TryBlockHole *)tmp->data;
			MonoExceptionClause *ec = hole->clause;
			int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
			MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
			g_assert (clause_last_bb);

			/* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
			if (clause_last_bb->native_offset != hole_end)
				++num_holes;
		}
		if (num_holes)
			flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_TRY_BLOCK_HOLES);
		if (G_UNLIKELY (cfg->verbose_level >= 4))
			printf ("Number of try block holes %d\n", num_holes);
	}

	if (COMPILE_LLVM (cfg))
		num_clauses = cfg->llvm_ex_info_len;
	else
		num_clauses = header->num_clauses;

	/* Dynamic methods can be freed, so their jit info must come from the
	 * GC-independent malloc heap instead of the domain pool. */
	if (cfg->method->dynamic)
		jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
	else
		jinfo = (MonoJitInfo *)mono_domain_alloc0 (cfg->domain, mono_jit_info_size (flags, num_clauses, num_holes));
	jinfo_try_holes_size += num_holes * sizeof (MonoTryBlockHoleJitInfo);

	mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
	jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;

	if (COMPILE_LLVM (cfg))
		jinfo->from_llvm = TRUE;

	if (cfg->gshared) {
		MonoInst *inst;
		MonoGenericJitInfo *gi;
		GSList *loclist = NULL;

		gi = mono_jit_info_get_generic_jit_info (jinfo);
		g_assert (gi);

		if (cfg->method->dynamic)
			gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
		else
			gi->generic_sharing_context = (MonoGenericSharingContext *)mono_domain_alloc0 (cfg->domain, sizeof (MonoGenericSharingContext));
		mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);

		/* Methods without a usable `this` (static, method-inst generic,
		 * valuetype receiver) must carry an rgctx variable instead. */
		if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
				mini_method_get_context (method_to_compile)->method_inst ||
				m_class_is_valuetype (method_to_compile->klass)) {
			g_assert (cfg->rgctx_var);
		}

		gi->has_this = 1;

		if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
				mini_method_get_context (method_to_compile)->method_inst ||
				m_class_is_valuetype (method_to_compile->klass)) {
			inst = cfg->rgctx_var;
			if (!COMPILE_LLVM (cfg))
				g_assert (inst->opcode == OP_REGOFFSET);
			loclist = cfg->rgctx_loclist;
		} else {
			inst = cfg->args [0];
			loclist = cfg->this_loclist;
		}

		if (loclist) {
			/* Needed to handle async exceptions */
			GSList *l;
			int i;

			gi->nlocs = g_slist_length (loclist);
			if (cfg->method->dynamic)
				gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
			else
				gi->locations = (MonoDwarfLocListEntry *)mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry));
			i = 0;
			for (l = loclist; l; l = l->next) {
				memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
				i ++;
			}
		}

		/* Record where `this`/rgctx lives: a fixed register/offset pair
		 * for LLVM, otherwise taken from the variable's allocation. */
		if (COMPILE_LLVM (cfg)) {
			g_assert (cfg->llvm_this_reg != -1);
			gi->this_in_reg = 0;
			gi->this_reg = cfg->llvm_this_reg;
			gi->this_offset = cfg->llvm_this_offset;
		} else if (inst->opcode == OP_REGVAR) {
			gi->this_in_reg = 1;
			gi->this_reg = inst->dreg;
		} else {
			g_assert (inst->opcode == OP_REGOFFSET);
#ifdef TARGET_X86
			g_assert (inst->inst_basereg == X86_EBP);
#elif defined(TARGET_AMD64)
			g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
#endif
			g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);

			gi->this_in_reg = 0;
			gi->this_reg = inst->inst_basereg;
			gi->this_offset = inst->inst_offset;
		}
	}

	/* Second pass over the holes: fill the hole table allocated above. */
	if (num_holes) {
		MonoTryBlockHoleTableJitInfo *table;
		int i;

		table = mono_jit_info_get_try_block_hole_table_info (jinfo);
		table->num_holes = (guint16)num_holes;
		i = 0;
		for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
			guint32 start_bb_offset;
			MonoTryBlockHoleJitInfo *hole;
			TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
			MonoExceptionClause *ec = hole_data->clause;
			int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
			MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
			g_assert (clause_last_bb);

			/* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
			if (clause_last_bb->native_offset == hole_end)
				continue;

			start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
			hole = &table->holes [i++];
			hole->clause = hole_data->clause - &header->clauses [0];
			hole->offset = (guint32)hole_data->start_offset;
			hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);

			if (G_UNLIKELY (cfg->verbose_level >= 4))
				printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
		}
		/* Both passes must agree on which holes survive. */
		g_assert (i == num_holes);
	}

	if (jinfo->has_arch_eh_info) {
		MonoArchEHJitInfo *info;

		info = mono_jit_info_get_arch_eh_info (jinfo);

		info->stack_size = stack_size;
	}

	if (cfg->thunk_area) {
		MonoThunkJitInfo *info;

		info = mono_jit_info_get_thunk_info (jinfo);
		info->thunks_offset = cfg->thunks_offset;
		info->thunks_size = cfg->thunk_area;
	}

	if (COMPILE_LLVM (cfg)) {
		/* LLVM already produced a finished clause array; just copy it. */
		if (num_clauses)
			memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
	} else if (header->num_clauses) {
		int i;

		/* Translate each IL exception clause to native code offsets. */
		for (i = 0; i < header->num_clauses; i++) {
			MonoExceptionClause *ec = &header->clauses [i];
			MonoJitExceptionInfo *ei = &jinfo->clauses [i];
			MonoBasicBlock *tblock;
			MonoInst *exvar;

			ei->flags = ec->flags;

			if (G_UNLIKELY (cfg->verbose_level >= 4))
				printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);

			exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
			ei->exvar_offset = exvar ? exvar->inst_offset : 0;

			if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
				tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
				g_assert (tblock);
				ei->data.filter = cfg->native_code + tblock->native_offset;
			} else {
				ei->data.catch_class = ec->data.catch_class;
			}

			tblock = cfg->cil_offset_to_bb [ec->try_offset];
			g_assert (tblock);
			g_assert (tblock->native_offset);
			ei->try_start = cfg->native_code + tblock->native_offset;
			if (tblock->extend_try_block) {
				/*
				 * Extend the try block backwards to include parts of the previous call
				 * instruction.
				 */
				ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
			}
			if (ec->try_offset + ec->try_len < header->code_size)
				tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
			else
				tblock = cfg->bb_exit;
			if (G_UNLIKELY (cfg->verbose_level >= 4))
				printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
			g_assert (tblock);
			/* The block at the end offset may not have been emitted;
			 * scan backwards for the last IL offset with native code. */
			if (!tblock->native_offset) {
				int j, end;
				for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
					MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
					if (bb && bb->native_offset) {
						tblock = bb;
						break;
					}
				}
			}
			ei->try_end = cfg->native_code + tblock->native_offset;
			g_assert (tblock->native_offset);
			tblock = cfg->cil_offset_to_bb [ec->handler_offset];
			g_assert (tblock);
			ei->handler_start = cfg->native_code + tblock->native_offset;

			/* A hole that touches the end of the try region shortens
			 * the region instead of being recorded in the hole table. */
			for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
				TryBlockHole *hole = (TryBlockHole *)tmp->data;
				gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
				if (hole->clause == ec && hole_end == ei->try_end) {
					if (G_UNLIKELY (cfg->verbose_level >= 4))
						printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);

					ei->try_end = cfg->native_code + hole->start_offset;
					break;
				}
			}

			if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
				int end_offset;
				if (ec->handler_offset + ec->handler_len < header->code_size) {
					tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
					if (tblock->native_offset) {
						end_offset = tblock->native_offset;
					} else {
						int j, end;

						/* Same backwards scan as for the try end. */
						for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
							MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
							if (bb && bb->native_offset) {
								tblock = bb;
								break;
							}
						}
						end_offset = tblock->native_offset + tblock->native_length;
					}
				} else {
					end_offset = cfg->epilog_begin;
				}
				ei->data.handler_end = cfg->native_code + end_offset;
			}
		}
	}

	if (G_UNLIKELY (cfg->verbose_level >= 4)) {
		int i;
		for (i = 0; i < jinfo->num_clauses; i++) {
			MonoJitExceptionInfo *ei = &jinfo->clauses [i];
			int start = (guint8*)ei->try_start - cfg->native_code;
			int end = (guint8*)ei->try_end - cfg->native_code;
			int handler = (guint8*)ei->handler_start - cfg->native_code;
			int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;

			printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
		}
	}

	if (cfg->encoded_unwind_ops) {
		/* Generated by LLVM */
		jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
		g_free (cfg->encoded_unwind_ops);
	} else if (cfg->unwind_ops) {
		guint32 info_len;
		guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
		guint32 unwind_desc;

		unwind_desc = mono_cache_unwind_info (unwind_info, info_len);

		if (cfg->has_unwind_info_for_epilog) {
			MonoArchEHJitInfo *info;

			info = mono_jit_info_get_arch_eh_info (jinfo);
			g_assert (info);
			info->epilog_size = cfg->code_len - cfg->epilog_begin;
		}
		jinfo->unwind_info = unwind_desc;
		g_free (unwind_info);
	} else {
		/* No unwind ops: the used integer registers stand in as the
		 * (legacy) unwind description. */
		jinfo->unwind_info = cfg->used_int_regs;
	}

	return jinfo;
}
2779 /* Return whenever METHOD is a gsharedvt method */
2780 static gboolean
2781 is_gsharedvt_method (MonoMethod *method)
2783 MonoGenericContext *context;
2784 MonoGenericInst *inst;
2785 int i;
2787 if (!method->is_inflated)
2788 return FALSE;
2789 context = mono_method_get_context (method);
2790 inst = context->class_inst;
2791 if (inst) {
2792 for (i = 0; i < inst->type_argc; ++i)
2793 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2794 return TRUE;
2796 inst = context->method_inst;
2797 if (inst) {
2798 for (i = 0; i < inst->type_argc; ++i)
2799 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2800 return TRUE;
2802 return FALSE;
2805 static gboolean
2806 is_open_method (MonoMethod *method)
2808 MonoGenericContext *context;
2810 if (!method->is_inflated)
2811 return FALSE;
2812 context = mono_method_get_context (method);
2813 if (context->class_inst && context->class_inst->is_open)
2814 return TRUE;
2815 if (context->method_inst && context->method_inst->is_open)
2816 return TRUE;
2817 return FALSE;
2820 static void
2821 mono_insert_nop_in_empty_bb (MonoCompile *cfg)
2823 MonoBasicBlock *bb;
2824 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2825 if (bb->code)
2826 continue;
2827 MonoInst *nop;
2828 MONO_INST_NEW (cfg, nop, OP_NOP);
2829 MONO_ADD_INS (bb, nop);
/*
 * mono_create_gc_safepoint:
 *
 *   Insert an OP_GC_SAFE_POINT (guarded by the mono_polling_required flag)
 * at the start of BBLOCK. In exception handler blocks the safepoint is
 * placed after the OP_START_HANDLER/OP_GET_EX_OBJ instructions; in the
 * entry block it goes after the existing instructions; everywhere else it
 * becomes the first instruction of the block.
 */
static void
mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
{
	MonoInst *poll_addr, *ins;

	if (cfg->disable_gc_safe_points)
		return;

	if (cfg->verbose_level > 1)
		printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);

	g_assert (mono_threads_are_safepoints_enabled ());
	/* Load the address of the global polling flag; the safepoint op tests it. */
	NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);

	MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
	ins->sreg1 = poll_addr->dreg;

	if (bblock->flags & BB_EXCEPTION_HANDLER) {
		MonoInst *eh_op = bblock->code;

		/* Find the last leading EH-setup instruction; the safepoint must
		 * be inserted after it (or at the block start when there is none,
		 * signalled by eh_op == NULL). */
		if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
			eh_op = NULL;
		} else {
			MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
			// skip all EH-related ops
			while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
				eh_op = next_eh_op;
				next_eh_op = eh_op->next;
			}
		}

		mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
	} else if (bblock == cfg->bb_entry) {
		mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
	} else {
		mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
		mono_bblock_insert_after_ins (bblock, poll_addr, ins);
	}
}
/*
This code inserts safepoints into managed code at important code paths.
Those are:

-the first basic block
-landing BB for exception handlers
-loop body starts.

*/
static void
mono_insert_safepoints (MonoCompile *cfg)
{
	MonoBasicBlock *bb;

	if (!mono_threads_are_safepoints_enabled ())
		return;

	/* Never instrument the wrapper around the polling function itself,
	 * otherwise the safepoint would recurse into it. */
	if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
		g_assert (mono_threads_are_safepoints_enabled ());
		gpointer poll_func = &mono_threads_state_poll;

		if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
			if (cfg->verbose_level > 1)
				printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
			return;
		}
	}

	if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		if (cfg->verbose_level > 1)
			printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
		return;
	}

	/* Second managed-to-native check: these icall wrappers are reached
	 * from the polling wrapper itself. */
	if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);

		if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
			(info->d.icall.func == mono_thread_interruption_checkpoint ||
			info->d.icall.func == mono_threads_exit_gc_safe_region_unbalanced)) {
			/* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
			if (cfg->verbose_level > 1)
				printf ("SKIPPING SAFEPOINTS for wrapper %s\n", cfg->method->name);
			return;
		}
	}

	if (cfg->verbose_level > 1)
		printf ("INSERTING SAFEPOINTS\n");
	if (cfg->verbose_level > 2)
		mono_print_code (cfg, "BEFORE SAFEPOINTS");

	/* Entry block, loop body starts and EH landing blocks get a safepoint. */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
			mono_create_gc_safepoint (cfg, bb);
	}

	if (cfg->verbose_level > 2)
		mono_print_code (cfg, "AFTER SAFEPOINTS");
}
/*
 * mono_insert_branches_between_bblocks:
 *
 *   Make fall-through into the false target explicit: for every block ending
 * in a conditional branch whose false target is not the next block, either
 * invert the branch (when the true target is the next block and the opcode
 * is a non-FP branch) or append an unconditional OP_BR to the false target.
 * Also sets bb->max_vreg on every block.
 */
static void
mono_insert_branches_between_bblocks (MonoCompile *cfg)
{
	MonoBasicBlock *bb;

	/* Add branches between non-consecutive bblocks */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
			bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
			/* we are careful when inverting, since bugs like #59580
			 * could show up when dealing with NaNs.
			 */
			if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
				/* Swap the targets and reverse the condition, so the
				 * (new) false target falls through. */
				MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
				bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
				bb->last_ins->inst_false_bb = tmp;

				bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
			} else {
				MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
				inst->opcode = OP_BR;
				inst->inst_target_bb = bb->last_ins->inst_false_bb;
				mono_bblock_add_inst (bb, inst);
			}
		}
	}

	if (cfg->verbose_level >= 4) {
		for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
			MonoInst *tree = bb->code;
			g_print ("DUMP BLOCK %d:\n", bb->block_num);
			if (!tree)
				continue;
			for (; tree; tree = tree->next) {
				mono_print_ins_index (-1, tree);
			}
		}
	}

	/* FIXME: */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		bb->max_vreg = cfg->next_vreg;
	}
}
/*
 * init_backend:
 *
 *   Fill BACKEND with the capabilities of the current architecture, derived
 * from the MONO_ARCH_* macros defined by the per-arch mini-<arch>.h header.
 * The rest of the JIT consults these flags instead of testing the macros
 * directly.
 */
static void
init_backend (MonoBackend *backend)
{
#ifdef MONO_ARCH_NEED_GOT_VAR
	backend->need_got_var = 1;
#endif
#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
	backend->have_card_table_wb = 1;
#endif
#ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
	backend->have_op_generic_class_init = 1;
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
	backend->emulate_mul_div = 1;
#endif
#ifdef MONO_ARCH_EMULATE_DIV
	backend->emulate_div = 1;
#endif
#if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	backend->emulate_long_shift_opts = 1;
#endif
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
	backend->have_objc_get_selector = 1;
#endif
#ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
	backend->have_generalized_imt_trampoline = 1;
#endif
#ifdef MONO_ARCH_GSHARED_SUPPORTED
	backend->gshared_supported = 1;
#endif
	if (MONO_ARCH_USE_FPSTACK)
		backend->use_fpstack = 1;
#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
	backend->have_op_tail_call = 1;
#endif
	// Does the ABI have a volatile non-parameter register, so tailcall
	// can pass context to generics or interfaces?
	backend->have_volatile_non_param_register = MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER;
#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL_MEMBASE
	backend->have_op_tail_call_membase = 1;
#endif
	/* Default to 1 byte when the arch does not override the adjustment. */
#ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
	backend->monitor_enter_adjustment = 1;
#else
	backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
#endif
#if defined(__mono_ilp32__)
	backend->ilp32 = 1;
#endif
#ifdef MONO_ARCH_NEED_DIV_CHECK
	backend->need_div_check = 1;
#endif
#ifdef NO_UNALIGNED_ACCESS
	backend->no_unaligned_access = 1;
#endif
#ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
	backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
#endif
#ifdef MONO_ARCH_NO_DIV_WITH_MUL
	backend->disable_div_with_mul = 1;
#endif
#ifdef MONO_ARCH_EXPLICIT_NULL_CHECKS
	backend->explicit_null_checks = 1;
#endif
}
3051 * mini_method_compile:
3052 * @method: the method to compile
3053 * @opts: the optimization flags to use
3054 * @domain: the domain where the method will be compiled in
3055 * @flags: compilation flags
3056 * @parts: debug flag
3058 * Returns: a MonoCompile* pointer. Caller must check the exception_type
 * field in the returned struct to see if compilation succeeded.
3061 MonoCompile*
3062 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
3064 MonoMethodHeader *header;
3065 MonoMethodSignature *sig;
3066 ERROR_DECL_VALUE (err);
3067 MonoCompile *cfg;
3068 int i;
3069 gboolean try_generic_shared, try_llvm = FALSE;
3070 MonoMethod *method_to_compile, *method_to_register;
3071 gboolean method_is_gshared = FALSE;
3072 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3073 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3074 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3075 gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
3076 gboolean gsharedvt_method = FALSE;
3077 #ifdef ENABLE_LLVM
3078 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3079 #endif
3080 static gboolean verbose_method_inited;
3081 static char **verbose_method_names;
3083 mono_atomic_inc_i32 (&mono_jit_stats.methods_compiled);
3084 MONO_PROFILER_RAISE (jit_begin, (method));
3085 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3086 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3088 gsharedvt_method = is_gsharedvt_method (method);
3091 * In AOT mode, method can be the following:
3092 * - a gsharedvt method.
3093 * - a method inflated with type parameters. This is for ref/partial sharing.
3094 * - a method inflated with concrete types.
3096 if (compile_aot) {
3097 if (is_open_method (method)) {
3098 try_generic_shared = TRUE;
3099 method_is_gshared = TRUE;
3100 } else {
3101 try_generic_shared = FALSE;
3103 g_assert (opts & MONO_OPT_GSHARED);
3104 } else {
3105 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3106 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_full (method, FALSE, FALSE, FALSE);
3107 if (mini_is_gsharedvt_sharable_method (method)) {
3109 if (!mono_debug_count ())
3110 try_generic_shared = FALSE;
3116 if (try_generic_shared && !mono_debug_count ())
3117 try_generic_shared = FALSE;
3120 if (opts & MONO_OPT_GSHARED) {
3121 if (try_generic_shared)
3122 mono_atomic_inc_i32 (&mono_stats.generics_sharable_methods);
3123 else if (mono_method_is_generic_impl (method))
3124 mono_atomic_inc_i32 (&mono_stats.generics_unsharable_methods);
3127 #ifdef ENABLE_LLVM
3128 try_llvm = mono_use_llvm || llvm;
3129 #endif
3131 restart_compile:
3132 if (method_is_gshared) {
3133 method_to_compile = method;
3134 } else {
3135 if (try_generic_shared) {
3136 ERROR_DECL (error);
3137 method_to_compile = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
3138 mono_error_assert_ok (error);
3139 } else {
3140 method_to_compile = method;
3144 cfg = g_new0 (MonoCompile, 1);
3145 cfg->method = method_to_compile;
3146 cfg->mempool = mono_mempool_new ();
3147 cfg->opt = opts;
3148 cfg->run_cctors = run_cctors;
3149 cfg->domain = domain;
3150 cfg->verbose_level = mini_verbose;
3151 cfg->compile_aot = compile_aot;
3152 cfg->full_aot = full_aot;
3153 cfg->disable_omit_fp = mini_debug_options.disable_omit_fp;
3154 cfg->skip_visibility = method->skip_visibility;
3155 cfg->orig_method = method;
3156 cfg->gen_seq_points = !mini_debug_options.no_seq_points_compact_data || mini_debug_options.gen_sdb_seq_points;
3157 cfg->gen_sdb_seq_points = mini_debug_options.gen_sdb_seq_points;
3158 cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
3159 cfg->backend = current_backend;
3161 #ifdef HOST_ANDROID
3162 if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
3163 /* FIXME: Why is this needed */
3164 cfg->gen_seq_points = FALSE;
3165 cfg->gen_sdb_seq_points = FALSE;
3167 #endif
3168 if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
3169 /* We can't have seq points inside gc critical regions */
3170 cfg->gen_seq_points = FALSE;
3171 cfg->gen_sdb_seq_points = FALSE;
3173 /* coop requires loop detection to happen */
3174 if (mono_threads_are_safepoints_enabled ())
3175 cfg->opt |= MONO_OPT_LOOP;
3176 if (cfg->backend->explicit_null_checks) {
3177 /* some platforms have null pages, so we can't SIGSEGV */
3178 cfg->explicit_null_checks = TRUE;
3179 } else {
3180 cfg->explicit_null_checks = mini_debug_options.explicit_null_checks || (flags & JIT_FLAG_EXPLICIT_NULL_CHECKS);
3182 cfg->soft_breakpoints = mini_debug_options.soft_breakpoints;
3183 cfg->check_pinvoke_callconv = mini_debug_options.check_pinvoke_callconv;
3184 cfg->disable_direct_icalls = disable_direct_icalls;
3185 cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
3186 if (try_generic_shared)
3187 cfg->gshared = TRUE;
3188 cfg->compile_llvm = try_llvm;
3189 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3190 if (cfg->compile_aot)
3191 cfg->method_index = aot_method_index;
3194 if (!mono_debug_count ())
3195 cfg->opt &= ~MONO_OPT_FLOAT32;
3197 if (cfg->llvm_only)
3198 cfg->opt &= ~MONO_OPT_SIMD;
3199 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3200 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3202 if (cfg->gen_seq_points)
3203 cfg->seq_points = g_ptr_array_new ();
3204 error_init (&cfg->error);
3206 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
3207 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3208 return cfg;
3211 if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3212 MonoMethodInflated *inflated;
3213 MonoGenericContext *context;
3215 if (gsharedvt_method) {
3216 g_assert (method->is_inflated);
3217 inflated = (MonoMethodInflated*)method;
3218 context = &inflated->context;
3220 /* We are compiling a gsharedvt method directly */
3221 g_assert (compile_aot);
3222 } else {
3223 g_assert (method_to_compile->is_inflated);
3224 inflated = (MonoMethodInflated*)method_to_compile;
3225 context = &inflated->context;
3228 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3229 cfg->gsctx_context = context;
3231 cfg->gsharedvt = TRUE;
3232 if (!cfg->llvm_only) {
3233 cfg->disable_llvm = TRUE;
3234 cfg->exception_message = g_strdup ("gsharedvt");
3238 if (cfg->gshared) {
3239 method_to_register = method_to_compile;
3240 } else {
3241 g_assert (method == method_to_compile);
3242 method_to_register = method;
3244 cfg->method_to_register = method_to_register;
3246 error_init (&err);
3247 sig = mono_method_signature_checked (cfg->method, &err);
3248 if (!sig) {
3249 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3250 cfg->exception_message = g_strdup (mono_error_get_message (&err));
3251 mono_error_cleanup (&err);
3252 if (MONO_METHOD_COMPILE_END_ENABLED ())
3253 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3254 return cfg;
3257 header = cfg->header = mono_method_get_header_checked (cfg->method, &cfg->error);
3258 if (!header) {
3259 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3260 if (MONO_METHOD_COMPILE_END_ENABLED ())
3261 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3262 return cfg;
3265 #ifdef ENABLE_LLVM
3267 static gboolean inited;
3269 if (!inited)
3270 inited = TRUE;
3273 * Check for methods which cannot be compiled by LLVM early, to avoid
3274 * the extra compilation pass.
3276 if (COMPILE_LLVM (cfg)) {
3277 mono_llvm_check_method_supported (cfg);
3278 if (cfg->disable_llvm) {
3279 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3280 //nm = mono_method_full_name (cfg->method, TRUE);
3281 printf ("LLVM failed for '%s.%s': %s\n", method->klass->name, method->name, cfg->exception_message);
3282 //g_free (nm);
3284 if (cfg->llvm_only) {
3285 g_free (cfg->exception_message);
3286 cfg->disable_aot = TRUE;
3287 return cfg;
3289 mono_destroy_compile (cfg);
3290 try_llvm = FALSE;
3291 goto restart_compile;
3295 #endif
3297 cfg->prof_flags = mono_profiler_get_call_instrumentation_flags (cfg->method);
3298 cfg->prof_coverage = mono_profiler_coverage_instrumentation_enabled (cfg->method);
3300 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3301 if (mini_debug_options.mdb_optimizations || MONO_CFG_PROFILE_CALL_CONTEXT (cfg)) {
3302 cfg->disable_reuse_registers = TRUE;
3303 cfg->disable_reuse_stack_slots = TRUE;
3305 * This decreases the change the debugger will read registers/stack slots which are
3306 * not yet initialized.
3308 cfg->disable_initlocals_opt = TRUE;
3310 cfg->extend_live_ranges = TRUE;
3312 /* The debugger needs all locals to be on the stack or in a global register */
3313 cfg->disable_vreg_to_lvreg = TRUE;
3315 /* Don't remove unused variables when running inside the debugger since the user
3316 * may still want to view them. */
3317 cfg->disable_deadce_vars = TRUE;
3319 cfg->opt &= ~MONO_OPT_DEADCE;
3320 cfg->opt &= ~MONO_OPT_INLINE;
3321 cfg->opt &= ~MONO_OPT_COPYPROP;
3322 cfg->opt &= ~MONO_OPT_CONSPROP;
3324 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3325 cfg->disable_out_of_line_bblocks = TRUE;
3328 if (mono_using_xdebug) {
3330 * Make each variable use its own register/stack slot and extend
3331 * their liveness to cover the whole method, making them displayable
3332 * in gdb even after they are dead.
3334 cfg->disable_reuse_registers = TRUE;
3335 cfg->disable_reuse_stack_slots = TRUE;
3336 cfg->extend_live_ranges = TRUE;
3337 cfg->compute_precise_live_ranges = TRUE;
3340 mini_gc_init_cfg (cfg);
3342 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
3343 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
3345 /* These wrappers are using linkonce linkage, so they can't access GOT slots */
3346 if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
3347 cfg->disable_gc_safe_points = TRUE;
3348 /* This is safe, these wrappers only store to the stack */
3349 cfg->gen_write_barriers = FALSE;
3353 if (COMPILE_LLVM (cfg)) {
3354 cfg->opt |= MONO_OPT_ABCREM;
3357 if (!verbose_method_inited) {
3358 char *env = g_getenv ("MONO_VERBOSE_METHOD");
3359 if (env != NULL)
3360 verbose_method_names = g_strsplit (env, ",", -1);
3362 verbose_method_inited = TRUE;
3364 if (verbose_method_names) {
3365 int i;
3367 for (i = 0; verbose_method_names [i] != NULL; i++){
3368 const char *name = verbose_method_names [i];
3370 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3371 MonoMethodDesc *desc;
3373 desc = mono_method_desc_new (name, TRUE);
3374 if (mono_method_desc_full_match (desc, cfg->method)) {
3375 cfg->verbose_level = 4;
3377 mono_method_desc_free (desc);
3378 } else {
3379 if (strcmp (cfg->method->name, name) == 0)
3380 cfg->verbose_level = 4;
3385 cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3387 if (cfg->verbose_level > 0) {
3388 char *method_name;
3390 method_name = mono_method_get_full_name (method);
3391 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
3393 if (COMPILE_LLVM (cfg))
3394 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3395 else if (cfg->gsharedvt)
3396 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3397 else if (cfg->gshared)
3398 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3399 else
3400 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3402 g_free (method_name);
3405 if (cfg->opt & MONO_OPT_ABCREM)
3406 cfg->opt |= MONO_OPT_SSA;
3408 cfg->rs = mono_regstate_new ();
3409 cfg->next_vreg = cfg->rs->next_vreg;
3411 /* FIXME: Fix SSA to handle branches inside bblocks */
3412 if (cfg->opt & MONO_OPT_SSA)
3413 cfg->enable_extended_bblocks = FALSE;
3416 * FIXME: This confuses liveness analysis because variables which are assigned after
3417 * a branch inside a bblock become part of the kill set, even though the assignment
3418 * might not get executed. This causes the optimize_initlocals pass to delete some
3419 * assignments which are needed.
3420 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3421 * created by this.
3423 //cfg->enable_extended_bblocks = TRUE;
3425 /*We must verify the method before doing any IR generation as mono_compile_create_vars can assert.*/
3426 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3427 if (mini_get_debug_options ()->break_on_unverified)
3428 G_BREAKPOINT ();
3429 return cfg;
3433 * create MonoInst* which represents arguments and local variables
3435 mono_compile_create_vars (cfg);
3437 mono_cfg_dump_create_context (cfg);
3438 mono_cfg_dump_begin_group (cfg);
3440 MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
3441 mono_cfg_dump_ir (cfg, "method-to-ir");
3443 if (cfg->gdump_ctx != NULL) {
3444 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3445 mono_insert_nop_in_empty_bb (cfg);
3446 mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
3449 if (i < 0) {
3450 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3451 if (compile_aot) {
3452 if (MONO_METHOD_COMPILE_END_ENABLED ())
3453 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3454 return cfg;
3456 mono_destroy_compile (cfg);
3457 try_generic_shared = FALSE;
3458 goto restart_compile;
3460 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3462 if (MONO_METHOD_COMPILE_END_ENABLED ())
3463 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3464 /* cfg contains the details of the failure, so let the caller cleanup */
3465 return cfg;
3468 cfg->stat_basic_blocks += cfg->num_bblocks;
3470 if (COMPILE_LLVM (cfg)) {
3471 MonoInst *ins;
3473 /* The IR has to be in SSA form for LLVM */
3474 cfg->opt |= MONO_OPT_SSA;
3476 // FIXME:
3477 if (cfg->ret) {
3478 // Allow SSA on the result value
3479 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3481 // Add an explicit return instruction referencing the return value
3482 MONO_INST_NEW (cfg, ins, OP_SETRET);
3483 ins->sreg1 = cfg->ret->dreg;
3485 MONO_ADD_INS (cfg->bb_exit, ins);
3488 cfg->opt &= ~MONO_OPT_LINEARS;
3490 /* FIXME: */
3491 cfg->opt &= ~MONO_OPT_BRANCH;
3494 /* todo: remove code when we have verified that the liveness for try/catch blocks
3495 * works perfectly
3498 * Currently, this can't be commented out since exception blocks are not
3499 * processed during liveness analysis.
3500 * It is also needed, because otherwise the local optimization passes would
3501 * delete assignments in cases like this:
3502 * r1 <- 1
3503 * <something which throws>
3504 * r1 <- 2
3505 * This also allows SSA to be run on methods containing exception clauses, since
3506 * SSA will ignore variables marked VOLATILE.
3508 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
3509 mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
3511 MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
3512 mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
3514 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3516 if (!COMPILE_LLVM (cfg)) {
3517 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
3518 mono_cfg_dump_ir (cfg, "decompose_long_opts");
3521 /* Should be done before branch opts */
3522 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
3523 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
3524 mono_cfg_dump_ir (cfg, "local_cprop");
3527 if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
3528 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
3529 if (cfg->gdump_ctx != NULL) {
3530 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3531 mono_insert_nop_in_empty_bb (cfg);
3533 mono_cfg_dump_ir (cfg, "decompose_typechecks");
3537 * Should be done after cprop which can do strength reduction on
3538 * some of these ops, after propagating immediates.
3540 if (cfg->has_emulated_ops) {
3541 MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
3542 mono_cfg_dump_ir (cfg, "local_emulate_ops");
3545 if (cfg->opt & MONO_OPT_BRANCH) {
3546 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
3547 mono_cfg_dump_ir (cfg, "optimize_branches");
3550 /* This must be done _before_ global reg alloc and _after_ decompose */
3551 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
3552 mono_cfg_dump_ir (cfg, "handle_global_vregs");
3553 if (cfg->opt & MONO_OPT_DEADCE) {
3554 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
3555 mono_cfg_dump_ir (cfg, "local_deadce");
3557 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
3558 MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
3559 mono_cfg_dump_ir (cfg, "local_alias_analysis");
3561 /* Disable this for LLVM to make the IR easier to handle */
3562 if (!COMPILE_LLVM (cfg)) {
3563 MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
3564 mono_cfg_dump_ir (cfg, "if_conversion");
3567 mono_threads_safepoint ();
3569 MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
3570 mono_cfg_dump_ir (cfg, "bb_ordering");
3572 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3574 * we disable some optimizations if there are too many variables
3575 * because JIT time may become too expensive. The actual number needs
3576 * to be tweaked and eventually the non-linear algorithms should be fixed.
3578 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3579 cfg->disable_ssa = TRUE;
3582 if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
3583 /* Disable llvm for overly complex methods */
3584 cfg->disable_ssa = TRUE;
3586 if (cfg->opt & MONO_OPT_LOOP) {
3587 MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
3588 MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
3591 MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, mono_insert_safepoints (cfg));
3592 mono_cfg_dump_ir (cfg, "insert_safepoints");
3594 /* after method_to_ir */
3595 if (parts == 1) {
3596 if (MONO_METHOD_COMPILE_END_ENABLED ())
3597 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3598 return cfg;
3602 if (header->num_clauses)
3603 cfg->disable_ssa = TRUE;
3606 //#define DEBUGSSA "logic_run"
3607 //#define DEBUGSSA_CLASS "Tests"
3608 #ifdef DEBUGSSA
3610 if (!cfg->disable_ssa) {
3611 mono_local_cprop (cfg);
3613 #ifndef DISABLE_SSA
3614 mono_ssa_compute (cfg);
3615 #endif
3617 #else
3618 if (cfg->opt & MONO_OPT_SSA) {
3619 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3620 #ifndef DISABLE_SSA
3621 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
3622 mono_cfg_dump_ir (cfg, "ssa_compute");
3623 #endif
3625 if (cfg->verbose_level >= 2) {
3626 print_dfn (cfg);
3630 #endif
3632 /* after SSA translation */
3633 if (parts == 2) {
3634 if (MONO_METHOD_COMPILE_END_ENABLED ())
3635 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3636 return cfg;
3639 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3640 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3641 #ifndef DISABLE_SSA
3642 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
3643 mono_cfg_dump_ir (cfg, "ssa_cprop");
3644 #endif
3648 #ifndef DISABLE_SSA
3649 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3650 //mono_ssa_strength_reduction (cfg);
3652 if (cfg->opt & MONO_OPT_DEADCE) {
3653 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
3654 mono_cfg_dump_ir (cfg, "ssa_deadce");
3657 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
3658 MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
3659 mono_cfg_dump_ir (cfg, "perform_abc_removal");
3662 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
3663 mono_cfg_dump_ir (cfg, "ssa_remove");
3664 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
3665 mono_cfg_dump_ir (cfg, "local_cprop2");
3666 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
3667 mono_cfg_dump_ir (cfg, "handle_global_vregs2");
3668 if (cfg->opt & MONO_OPT_DEADCE) {
3669 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
3670 mono_cfg_dump_ir (cfg, "local_deadce2");
3673 if (cfg->opt & MONO_OPT_BRANCH) {
3674 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
3675 mono_cfg_dump_ir (cfg, "optimize_branches2");
3678 #endif
3680 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3681 mono_ssa_loop_invariant_code_motion (cfg);
3682 mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
3683 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3684 if (cfg->opt & MONO_OPT_ABCREM) {
3685 mono_perform_abc_removal (cfg);
3686 mono_cfg_dump_ir (cfg, "abc_removal");
3690 /* after SSA removal */
3691 if (parts == 3) {
3692 if (MONO_METHOD_COMPILE_END_ENABLED ())
3693 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3694 return cfg;
3697 if (cfg->llvm_only && cfg->gsharedvt)
3698 mono_ssa_remove_gsharedvt (cfg);
3700 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3701 if (COMPILE_SOFT_FLOAT (cfg))
3702 mono_decompose_soft_float (cfg);
3703 #endif
3704 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
3705 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS) {
3706 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
3707 mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
3710 if (cfg->got_var) {
3711 #ifndef MONO_ARCH_GOT_REG
3712 GList *regs;
3713 #endif
3714 int got_reg;
3716 g_assert (cfg->got_var_allocated);
3719 * Allways allocate the GOT var to a register, because keeping it
3720 * in memory will increase the number of live temporaries in some
3721 * code created by inssel.brg, leading to the well known spills+
3722 * branches problem. Testcase: mcs crash in
3723 * System.MonoCustomAttrs:GetCustomAttributes.
3725 #ifdef MONO_ARCH_GOT_REG
3726 got_reg = MONO_ARCH_GOT_REG;
3727 #else
3728 regs = mono_arch_get_global_int_regs (cfg);
3729 g_assert (regs);
3730 got_reg = GPOINTER_TO_INT (regs->data);
3731 g_list_free (regs);
3732 #endif
3733 cfg->got_var->opcode = OP_REGVAR;
3734 cfg->got_var->dreg = got_reg;
3735 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3739 * Have to call this again to process variables added since the first call.
3741 MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
3743 if (cfg->opt & MONO_OPT_LINEARS) {
3744 GList *vars, *regs, *l;
3746 /* fixme: maybe we can avoid to compute livenesss here if already computed ? */
3747 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3748 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3749 MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
3751 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3752 regs = mono_arch_get_global_int_regs (cfg);
3753 /* Remove the reg reserved for holding the GOT address */
3754 if (cfg->got_var) {
3755 for (l = regs; l; l = l->next) {
3756 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3757 regs = g_list_delete_link (regs, l);
3758 break;
3762 MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
3763 mono_cfg_dump_ir (cfg, "linear_scan");
3767 //mono_print_code (cfg, "");
3769 //print_dfn (cfg);
3771 /* variables are allocated after decompose, since decompose could create temps */
3772 if (!COMPILE_LLVM (cfg)) {
3773 MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
3774 mono_cfg_dump_ir (cfg, "arch_allocate_vars");
3775 if (cfg->exception_type)
3776 return cfg;
3779 if (cfg->gsharedvt)
3780 mono_allocate_gsharedvt_vars (cfg);
3782 if (!COMPILE_LLVM (cfg)) {
3783 gboolean need_local_opts;
3784 MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
3785 mono_cfg_dump_ir (cfg, "spill_global_vars");
3787 if (need_local_opts || cfg->compile_aot) {
3788 /* To optimize code created by spill_global_vars */
3789 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
3790 if (cfg->opt & MONO_OPT_DEADCE)
3791 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
3792 mono_cfg_dump_ir (cfg, "needs_local_opts");
3796 mono_insert_branches_between_bblocks (cfg);
3798 if (COMPILE_LLVM (cfg)) {
3799 #ifdef ENABLE_LLVM
3800 char *nm;
3802 /* The IR has to be in SSA form for LLVM */
3803 if (!(cfg->comp_done & MONO_COMP_SSA)) {
3804 cfg->exception_message = g_strdup ("SSA disabled.");
3805 cfg->disable_llvm = TRUE;
3808 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3809 mono_decompose_array_access_opts (cfg);
3811 if (!cfg->disable_llvm)
3812 mono_llvm_emit_method (cfg);
3813 if (cfg->disable_llvm) {
3814 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3815 //nm = mono_method_full_name (cfg->method, TRUE);
3816 printf ("LLVM failed for '%s.%s': %s\n", method->klass->name, method->name, cfg->exception_message);
3817 //g_free (nm);
3819 if (cfg->llvm_only) {
3820 cfg->disable_aot = TRUE;
3821 return cfg;
3823 mono_destroy_compile (cfg);
3824 try_llvm = FALSE;
3825 goto restart_compile;
3828 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
3829 nm = mono_method_full_name (cfg->method, TRUE);
3830 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3831 nm,
3832 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
3833 g_free (nm);
3835 #endif
3836 } else {
3837 MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
3838 mono_cfg_dump_ir (cfg, "codegen");
3839 if (cfg->exception_type)
3840 return cfg;
3843 if (COMPILE_LLVM (cfg))
3844 mono_atomic_inc_i32 (&mono_jit_stats.methods_with_llvm);
3845 else
3846 mono_atomic_inc_i32 (&mono_jit_stats.methods_without_llvm);
3848 MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
3850 if (cfg->extend_live_ranges) {
3851 /* Extend live ranges to cover the whole method */
3852 for (i = 0; i < cfg->num_varinfo; ++i)
3853 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
3856 MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
3857 MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg));
3859 if (!cfg->compile_aot) {
3860 mono_save_xdebug_info (cfg);
3861 mono_lldb_save_method_info (cfg);
3864 if (cfg->verbose_level >= 2) {
3865 char *id = mono_method_full_name (cfg->method, FALSE);
3866 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
3867 g_free (id);
3870 if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
3871 mono_domain_lock (cfg->domain);
3872 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
3874 if (cfg->method->dynamic)
3875 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
3876 mono_domain_unlock (cfg->domain);
3879 #if 0
3880 if (cfg->gsharedvt)
3881 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
3882 #endif
3884 /* collect statistics */
3885 #ifndef DISABLE_PERFCOUNTERS
3886 mono_atomic_inc_i32 (&mono_perfcounters->jit_methods);
3887 mono_atomic_fetch_add_i32 (&mono_perfcounters->jit_bytes, header->code_size);
3888 #endif
3889 gint32 code_size_ratio = cfg->code_len;
3890 mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_code_size, code_size_ratio);
3891 mono_atomic_fetch_add_i32 (&mono_jit_stats.native_code_size, code_size_ratio);
3892 /* FIXME: use an explicit function to read booleans */
3893 if ((gboolean)mono_atomic_load_i32 ((gint32*)&mono_jit_stats.enabled)) {
3894 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.biggest_method_size)) {
3895 mono_atomic_store_i32 (&mono_jit_stats.biggest_method_size, code_size_ratio);
3896 char *biggest_method = g_strdup_printf ("%s::%s)", m_class_get_name (method->klass), method->name);
3897 biggest_method = mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.biggest_method, biggest_method);
3898 g_free (biggest_method);
3900 code_size_ratio = (code_size_ratio * 100) / header->code_size;
3901 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.max_code_size_ratio)) {
3902 mono_atomic_store_i32 (&mono_jit_stats.max_code_size_ratio, code_size_ratio);
3903 char *max_ratio_method = g_strdup_printf ("%s::%s)", m_class_get_name (method->klass), method->name);
3904 max_ratio_method = mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.max_ratio_method, max_ratio_method);
3905 g_free (max_ratio_method);
3909 if (MONO_METHOD_COMPILE_END_ENABLED ())
3910 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3912 mono_cfg_dump_close_group (cfg);
3914 return cfg;
3917 gboolean
3918 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3920 int i;
3921 MonoGenericContainer *container;
3922 MonoGenericInst *ginst;
3924 if (mono_class_is_ginst (klass)) {
3925 container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
3926 ginst = mono_class_get_generic_class (klass)->context.class_inst;
3927 } else if (mono_class_is_gtd (klass) && context_used) {
3928 container = mono_class_get_generic_container (klass);
3929 ginst = container->context.class_inst;
3930 } else {
3931 return FALSE;
3934 for (i = 0; i < container->type_argc; ++i) {
3935 MonoType *type;
3936 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3937 continue;
3938 type = ginst->type_argv [i];
3939 if (mini_type_is_reference (type))
3940 return TRUE;
3942 return FALSE;
3945 void
3946 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
3948 TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
3949 hole->clause = clause;
3950 hole->start_offset = start - cfg->native_code;
3951 hole->basic_block = bb;
3953 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
/*
 * mono_cfg_set_exception:
 *
 *   Mark the compilation CFG as failed with exception TYPE (a MONO_EXCEPTION_*
 * value). Callers inspect cfg->exception_type to detect and report the failure.
 */
void
mono_cfg_set_exception (MonoCompile *cfg, int type)
{
	cfg->exception_type = type;
}
3962 /* Assumes ownership of the MSG argument */
3963 void
3964 mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
3966 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3967 mono_error_set_generic_error (&cfg->error, "System", "InvalidProgramException", "%s", msg);
3970 #endif /* DISABLE_JIT */
3972 static MonoJitInfo*
3973 create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
3975 MonoDomain *domain = mono_get_root_domain ();
3976 MonoJitInfo *jinfo;
3977 guint8 *uw_info;
3978 guint32 info_len;
3980 if (info->uw_info) {
3981 uw_info = info->uw_info;
3982 info_len = info->uw_info_len;
3983 } else {
3984 uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
3987 jinfo = (MonoJitInfo *)mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
3988 jinfo->d.method = wrapper;
3989 jinfo->code_start = info->code;
3990 jinfo->code_size = info->code_size;
3991 jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
3993 if (!info->uw_info)
3994 g_free (uw_info);
3996 return jinfo;
/*
 * mono_time_track_start:
 *
 *   Start a wall-clock timer for use with mono_time_track_end (), which
 * also destroys the returned GTimer.
 */
GTimer *mono_time_track_start ()
{
	return g_timer_new ();
}
4005 * mono_time_track_end:
4007 * Uses UnlockedAddDouble () to update \param time.
4009 void mono_time_track_end (gdouble *time, GTimer *timer)
4011 g_timer_stop (timer);
4012 UnlockedAddDouble (time, g_timer_elapsed (timer, NULL));
4013 g_timer_destroy (timer);
4017 * mono_update_jit_stats:
4019 * Only call this function in locked environments to avoid data races.
4021 MONO_NO_SANITIZE_THREAD
4022 void
4023 mono_update_jit_stats (MonoCompile *cfg)
4025 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4026 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4027 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4028 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4029 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4030 mono_jit_stats.regvars += cfg->stat_n_regvars;
4031 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4032 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4033 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
4037 * mono_jit_compile_method_inner:
4039 * Main entry point for the JIT.
4041 gpointer
4042 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoError *error)
4044 MonoCompile *cfg;
4045 gpointer code = NULL;
4046 MonoJitInfo *jinfo, *info;
4047 MonoVTable *vtable;
4048 MonoException *ex = NULL;
4049 GTimer *jit_timer;
4050 MonoMethod *prof_method, *shared;
4052 error_init (error);
4054 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4055 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
4056 MonoMethod *nm;
4057 MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;
4059 if (!piinfo->addr) {
4060 if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
4061 piinfo->addr = mono_lookup_internal_call (method);
4062 else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
4063 #ifdef HOST_WIN32
4064 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name);
4065 #else
4066 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), m_class_get_image (method->klass)->name);
4067 #endif
4068 else
4069 mono_lookup_pinvoke_call (method, NULL, NULL);
4071 nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
4072 gpointer compiled_method = mono_compile_method_checked (nm, error);
4073 return_val_if_nok (error, NULL);
4074 code = mono_get_addr_from_ftnptr (compiled_method);
4075 jinfo = mono_jit_info_table_find (target_domain, code);
4076 if (!jinfo)
4077 jinfo = mono_jit_info_table_find (mono_domain_get (), code);
4078 if (jinfo)
4079 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4080 return code;
4081 } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
4082 const char *name = method->name;
4083 char *full_name, *msg;
4084 MonoMethod *nm;
4086 if (m_class_get_parent (method->klass) == mono_defaults.multicastdelegate_class) {
4087 if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
4088 MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("ves_icall_mono_delegate_ctor");
4089 g_assert (mi);
4091 * We need to make sure this wrapper
4092 * is compiled because it might end up
4093 * in an (M)RGCTX if generic sharing
4094 * is enabled, and would be called
4095 * indirectly. If it were a
4096 * trampoline we'd try to patch that
4097 * indirect call, which is not
4098 * possible.
4100 return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
4101 } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
4102 if (mono_llvm_only) {
4103 nm = mono_marshal_get_delegate_invoke (method, NULL);
4104 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4105 mono_error_assert_ok (error);
4106 return mono_get_addr_from_ftnptr (compiled_ptr);
4108 return mono_create_delegate_trampoline (target_domain, method->klass);
4109 } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
4110 nm = mono_marshal_get_delegate_begin_invoke (method);
4111 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4112 mono_error_assert_ok (error);
4113 return mono_get_addr_from_ftnptr (compiled_ptr);
4114 } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
4115 nm = mono_marshal_get_delegate_end_invoke (method);
4116 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4117 mono_error_assert_ok (error);
4118 return mono_get_addr_from_ftnptr (compiled_ptr);
4122 full_name = mono_method_full_name (method, TRUE);
4123 msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
4124 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
4125 mono_error_set_exception_instance (error, ex);
4126 g_free (full_name);
4127 g_free (msg);
4128 return NULL;
4131 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
4132 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
4134 if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) {
4135 static MonoTrampInfo *in_tinfo, *out_tinfo;
4136 MonoTrampInfo *tinfo;
4137 MonoJitInfo *jinfo;
4138 gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN;
4140 if (is_in && in_tinfo)
4141 return in_tinfo->code;
4142 else if (!is_in && out_tinfo)
4143 return out_tinfo->code;
4146 * This is a special wrapper whose body is implemented in assembly, like a trampoline. We use a wrapper so EH
4147 * works.
4148 * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms
4150 if (mono_aot_only)
4151 mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo);
4152 else
4153 mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
4154 jinfo = create_jit_info_for_trampoline (method, tinfo);
4155 mono_jit_info_table_add (mono_get_root_domain (), jinfo);
4156 if (is_in)
4157 in_tinfo = tinfo;
4158 else
4159 out_tinfo = tinfo;
4160 return tinfo->code;
4164 if (mono_aot_only) {
4165 char *fullname = mono_method_full_name (method, TRUE);
4166 mono_error_set_execution_engine (error, "Attempting to JIT compile method '%s' while running in aot-only mode. See https://developer.xamarin.com/guides/ios/advanced_topics/limitations/ for more information.\n", fullname);
4167 g_free (fullname);
4169 return NULL;
4172 jit_timer = mono_time_track_start ();
4173 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
4174 gdouble jit_time = 0.0;
4175 mono_time_track_end (&jit_time, jit_timer);
4176 UnlockedAddDouble (&mono_jit_stats.jit_time, jit_time);
4178 prof_method = cfg->method;
4180 switch (cfg->exception_type) {
4181 case MONO_EXCEPTION_NONE:
4182 break;
4183 case MONO_EXCEPTION_TYPE_LOAD:
4184 case MONO_EXCEPTION_MISSING_FIELD:
4185 case MONO_EXCEPTION_MISSING_METHOD:
4186 case MONO_EXCEPTION_FILE_NOT_FOUND:
4187 case MONO_EXCEPTION_BAD_IMAGE:
4188 case MONO_EXCEPTION_INVALID_PROGRAM: {
4189 /* Throw a type load exception if needed */
4190 if (cfg->exception_ptr) {
4191 ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
4192 } else {
4193 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4194 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4195 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4196 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4197 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4198 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4199 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4200 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
4201 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4202 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4203 else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
4204 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
4205 else
4206 g_assert_not_reached ();
4208 break;
4210 case MONO_EXCEPTION_MONO_ERROR:
4211 // FIXME: MonoError has no copy ctor
4212 g_assert (!mono_error_ok (&cfg->error));
4213 ex = mono_error_convert_to_exception (&cfg->error);
4214 break;
4215 default:
4216 g_assert_not_reached ();
4219 if (ex) {
4220 MONO_PROFILER_RAISE (jit_failed, (method));
4222 mono_destroy_compile (cfg);
4223 mono_error_set_exception_instance (error, ex);
4225 return NULL;
4228 if (mono_method_is_generic_sharable (method, FALSE)) {
4229 shared = mini_get_shared_method_full (method, SHARE_MODE_NONE, error);
4230 if (!is_ok (error)) {
4231 MONO_PROFILER_RAISE (jit_failed, (method));
4232 mono_destroy_compile (cfg);
4233 return NULL;
4235 } else {
4236 shared = NULL;
4239 mono_domain_lock (target_domain);
4241 /* Check if some other thread already did the job. In this case, we can
4242 discard the code this thread generated. */
4244 info = mini_lookup_method (target_domain, method, shared);
4245 if (info) {
4246 /* We can't use a domain specific method in another domain */
4247 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4248 code = info->code_start;
4249 discarded_code ++;
4250 discarded_jit_time += jit_time;
4253 if (code == NULL) {
4254 /* The lookup + insert is atomic since this is done inside the domain lock */
4255 mono_domain_jit_code_hash_lock (target_domain);
4256 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4257 mono_domain_jit_code_hash_unlock (target_domain);
4259 code = cfg->native_code;
4261 if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
4262 mono_atomic_inc_i32 (&mono_stats.generics_shared_methods);
4263 if (cfg->gsharedvt)
4264 mono_atomic_inc_i32 (&mono_stats.gsharedvt_methods);
4267 jinfo = cfg->jit_info;
4270 * Update global stats while holding a lock, instead of doing many
4271 * mono_atomic_inc_i32 operations during JITting.
4273 mono_update_jit_stats (cfg);
4275 mono_destroy_compile (cfg);
4277 #ifndef DISABLE_JIT
4278 /* Update llvm callees */
4279 if (domain_jit_info (target_domain)->llvm_jit_callees) {
4280 GSList *callees = g_hash_table_lookup (domain_jit_info (target_domain)->llvm_jit_callees, method);
4281 GSList *l;
4283 for (l = callees; l; l = l->next) {
4284 gpointer *addr = (gpointer*)l->data;
4286 *addr = code;
4290 mono_emit_jit_map (jinfo);
4291 #endif
4292 mono_domain_unlock (target_domain);
4294 if (!mono_error_ok (error))
4295 return NULL;
4297 vtable = mono_class_vtable_checked (target_domain, method->klass, error);
4298 return_val_if_nok (error, NULL);
4300 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4301 if (mono_marshal_method_from_wrapper (method)) {
4302 /* Native func wrappers have no method */
4303 /* The profiler doesn't know about wrappers, so pass the original icall method */
4304 MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
4307 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4308 if (prof_method != method)
4309 MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
4311 if (!(method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE ||
4312 method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK ||
4313 method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE)) {
4314 if (!mono_runtime_class_init_full (vtable, error))
4315 return NULL;
4317 return code;
/*
 * mini_get_underlying_type:
 *
 *   Return the type the JIT will use during compilation for TYPE.
 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
 * For gsharedvt types, it will return the original VAR/MVAR.
 * Thin wrapper so callers don't depend on the internal helper directly.
 */
MonoType*
mini_get_underlying_type (MonoType *type)
{
	return mini_type_get_underlying_type (type);
}
4333 void
4334 mini_jit_init (void)
4336 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
4337 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &discarded_jit_time);
4338 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &jinfo_try_holes_size);
4340 mono_os_mutex_init_recursive (&jit_mutex);
4341 #ifndef DISABLE_JIT
4342 current_backend = g_new0 (MonoBackend, 1);
4343 init_backend (current_backend);
4344 #endif
4347 void
4348 mini_jit_cleanup (void)
4350 #ifndef DISABLE_JIT
4351 g_free (emul_opcode_map);
4352 g_free (emul_opcode_opcodes);
4353 #endif
#ifndef ENABLE_LLVM
/* Stubs used when the runtime is built without LLVM support; they must never be reached. */
void
mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
{
	g_assert_not_reached ();
}

void mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
{
	g_assert_not_reached ();
}
#endif
#if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
/* Stub used when neither the LLVM backend nor the LLVM runtime is built in; must never be reached. */
void
mono_llvm_cpp_throw_exception (void)
{
	g_assert_not_reached ();
}
#endif
#ifdef DISABLE_JIT

/* Stubs for builds without the JIT compiler (e.g. full-AOT/interpreter-only); none may be reached at runtime. */
MonoCompile*
mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_destroy_compile (MonoCompile *cfg)
{
	g_assert_not_reached ();
}

void
mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
{
	g_assert_not_reached ();
}

#endif /* DISABLE_JIT */
4403 gboolean
4404 mini_class_is_system_array (MonoClass *klass)
4406 if (m_class_get_parent (klass) == mono_defaults.array_class)
4407 return TRUE;
4408 else
4409 return FALSE;