[jit] Disable partial sharing during JITting, it's only been tested in full AOT mode...
mono/mini/mini.c
1 /**
2 * \file
3 * The new Mono code generator.
5 * Authors:
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * Copyright 2002-2003 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc.
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <config.h>
16 #ifdef HAVE_ALLOCA_H
17 #include <alloca.h>
18 #endif
19 #ifdef HAVE_UNISTD_H
20 #include <unistd.h>
21 #endif
22 #include <math.h>
23 #ifdef HAVE_SYS_TIME_H
24 #include <sys/time.h>
25 #endif
27 #include <mono/utils/memcheck.h>
29 #include <mono/metadata/assembly.h>
30 #include <mono/metadata/loader.h>
31 #include <mono/metadata/tabledefs.h>
32 #include <mono/metadata/class.h>
33 #include <mono/metadata/object.h>
34 #include <mono/metadata/tokentype.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/threads.h>
37 #include <mono/metadata/appdomain.h>
38 #include <mono/metadata/debug-helpers.h>
39 #include <mono/metadata/profiler-private.h>
40 #include <mono/metadata/mono-config.h>
41 #include <mono/metadata/environment.h>
42 #include <mono/metadata/mono-debug.h>
43 #include <mono/metadata/gc-internals.h>
44 #include <mono/metadata/threads-types.h>
45 #include <mono/metadata/verify.h>
46 #include <mono/metadata/verify-internals.h>
47 #include <mono/metadata/mempool-internals.h>
48 #include <mono/metadata/attach.h>
49 #include <mono/metadata/runtime.h>
50 #include <mono/metadata/attrdefs.h>
51 #include <mono/utils/mono-math.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/utils/mono-counters.h>
54 #include <mono/utils/mono-error-internals.h>
55 #include <mono/utils/mono-logger-internals.h>
56 #include <mono/utils/mono-mmap.h>
57 #include <mono/utils/mono-path.h>
58 #include <mono/utils/mono-tls.h>
59 #include <mono/utils/mono-hwcap.h>
60 #include <mono/utils/dtrace.h>
61 #include <mono/utils/mono-threads.h>
62 #include <mono/utils/mono-threads-coop.h>
63 #include <mono/utils/unlocked.h>
65 #include "mini.h"
66 #include "seq-points.h"
67 #include "tasklets.h"
68 #include <string.h>
69 #include <ctype.h>
70 #include "trace.h"
71 #include "version.h"
72 #include "ir-emit.h"
74 #include "jit-icalls.h"
76 #include "mini-gc.h"
77 #include "debugger-agent.h"
78 #include "llvm-runtime.h"
79 #include "mini-llvm.h"
80 #include "lldb.h"
81 #include "aot-runtime.h"
82 #include "mini-runtime.h"
84 MonoCallSpec *mono_jit_trace_calls;
85 MonoMethodDesc *mono_inject_async_exc_method;
86 int mono_inject_async_exc_pos;
87 MonoMethodDesc *mono_break_at_bb_method;
88 int mono_break_at_bb_bb_num;
89 gboolean mono_do_x86_stack_align = TRUE;
90 gboolean mono_using_xdebug;
92 /* Counters */
93 static guint32 discarded_code;
94 static double discarded_jit_time;
95 static guint32 jinfo_try_holes_size;
97 #define mono_jit_lock() mono_os_mutex_lock (&jit_mutex)
98 #define mono_jit_unlock() mono_os_mutex_unlock (&jit_mutex)
99 static mono_mutex_t jit_mutex;
101 MonoBackend *current_backend;
103 #ifndef DISABLE_JIT
105 gpointer
106 mono_realloc_native_code (MonoCompile *cfg)
108 return g_realloc (cfg->native_code, cfg->code_size);
111 typedef struct {
112 MonoExceptionClause *clause;
113 MonoBasicBlock *basic_block;
114 int start_offset;
115 } TryBlockHole;
118 * mono_emit_unwind_op:
120 * Add an unwind op with the given parameters to the list of unwind ops stored in
121 * cfg->unwind_ops.
123 void
124 mono_emit_unwind_op (MonoCompile *cfg, int when, int tag, int reg, int val)
126 MonoUnwindOp *op = (MonoUnwindOp *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoUnwindOp));
128 op->op = tag;
129 op->reg = reg;
130 op->val = val;
131 op->when = when;
133 cfg->unwind_ops = g_slist_append_mempool (cfg->mempool, cfg->unwind_ops, op);
134 if (cfg->verbose_level > 1) {
135 switch (tag) {
136 case DW_CFA_def_cfa:
137 printf ("CFA: [%x] def_cfa: %s+0x%x\n", when, mono_arch_regname (reg), val);
138 break;
139 case DW_CFA_def_cfa_register:
140 printf ("CFA: [%x] def_cfa_reg: %s\n", when, mono_arch_regname (reg));
141 break;
142 case DW_CFA_def_cfa_offset:
143 printf ("CFA: [%x] def_cfa_offset: 0x%x\n", when, val);
144 break;
145 case DW_CFA_offset:
146 printf ("CFA: [%x] offset: %s at cfa-0x%x\n", when, mono_arch_regname (reg), -val);
147 break;
153 * mono_unlink_bblock:
155 * Unlink two basic blocks.
157 void
158 mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
160 int i, pos;
161 gboolean found;
163 found = FALSE;
164 for (i = 0; i < from->out_count; ++i) {
165 if (to == from->out_bb [i]) {
166 found = TRUE;
167 break;
170 if (found) {
171 pos = 0;
172 for (i = 0; i < from->out_count; ++i) {
173 if (from->out_bb [i] != to)
174 from->out_bb [pos ++] = from->out_bb [i];
176 g_assert (pos == from->out_count - 1);
177 from->out_count--;
180 found = FALSE;
181 for (i = 0; i < to->in_count; ++i) {
182 if (from == to->in_bb [i]) {
183 found = TRUE;
184 break;
187 if (found) {
188 pos = 0;
189 for (i = 0; i < to->in_count; ++i) {
190 if (to->in_bb [i] != from)
191 to->in_bb [pos ++] = to->in_bb [i];
193 g_assert (pos == to->in_count - 1);
194 to->in_count--;
199 * mono_bblocks_linked:
201 * Return whether BB1 and BB2 are linked in the CFG.
203 gboolean
204 mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
206 int i;
208 for (i = 0; i < bb1->out_count; ++i) {
209 if (bb1->out_bb [i] == bb2)
210 return TRUE;
213 return FALSE;
216 static int
217 mono_find_block_region_notry (MonoCompile *cfg, int offset)
219 MonoMethodHeader *header = cfg->header;
220 MonoExceptionClause *clause;
221 int i;
223 for (i = 0; i < header->num_clauses; ++i) {
224 clause = &header->clauses [i];
225 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
226 (offset < (clause->handler_offset)))
227 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
229 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
230 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
231 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
232 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
233 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
234 else
235 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
239 return -1;
243 * mono_get_block_region_notry:
245 * Return the region corresponding to REGION, ignoring try clauses nested inside
246 * finally clauses.
249 mono_get_block_region_notry (MonoCompile *cfg, int region)
251 if ((region & (0xf << 4)) == MONO_REGION_TRY) {
252 MonoMethodHeader *header = cfg->header;
255 * This can happen if a try clause is nested inside a finally clause.
257 int clause_index = (region >> 8) - 1;
258 g_assert (clause_index >= 0 && clause_index < header->num_clauses);
260 region = mono_find_block_region_notry (cfg, header->clauses [clause_index].try_offset);
263 return region;
266 MonoInst *
267 mono_find_spvar_for_region (MonoCompile *cfg, int region)
269 region = mono_get_block_region_notry (cfg, region);
271 return (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
274 static void
275 df_visit (MonoBasicBlock *start, int *dfn, MonoBasicBlock **array)
277 int i;
279 array [*dfn] = start;
280 /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
281 for (i = 0; i < start->out_count; ++i) {
282 if (start->out_bb [i]->dfn)
283 continue;
284 (*dfn)++;
285 start->out_bb [i]->dfn = *dfn;
286 start->out_bb [i]->df_parent = start;
287 array [*dfn] = start->out_bb [i];
288 df_visit (start->out_bb [i], dfn, array);
292 guint32
293 mono_reverse_branch_op (guint32 opcode)
295 static const int reverse_map [] = {
296 CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
297 CEE_BEQ, CEE_BLT_UN, CEE_BLE_UN, CEE_BGT_UN, CEE_BGE_UN
299 static const int reverse_fmap [] = {
300 OP_FBNE_UN, OP_FBLT, OP_FBLE, OP_FBGT, OP_FBGE,
301 OP_FBEQ, OP_FBLT_UN, OP_FBLE_UN, OP_FBGT_UN, OP_FBGE_UN
303 static const int reverse_lmap [] = {
304 OP_LBNE_UN, OP_LBLT, OP_LBLE, OP_LBGT, OP_LBGE,
305 OP_LBEQ, OP_LBLT_UN, OP_LBLE_UN, OP_LBGT_UN, OP_LBGE_UN
307 static const int reverse_imap [] = {
308 OP_IBNE_UN, OP_IBLT, OP_IBLE, OP_IBGT, OP_IBGE,
309 OP_IBEQ, OP_IBLT_UN, OP_IBLE_UN, OP_IBGT_UN, OP_IBGE_UN
312 if (opcode >= CEE_BEQ && opcode <= CEE_BLT_UN) {
313 opcode = reverse_map [opcode - CEE_BEQ];
314 } else if (opcode >= OP_FBEQ && opcode <= OP_FBLT_UN) {
315 opcode = reverse_fmap [opcode - OP_FBEQ];
316 } else if (opcode >= OP_LBEQ && opcode <= OP_LBLT_UN) {
317 opcode = reverse_lmap [opcode - OP_LBEQ];
318 } else if (opcode >= OP_IBEQ && opcode <= OP_IBLT_UN) {
319 opcode = reverse_imap [opcode - OP_IBEQ];
320 } else
321 g_assert_not_reached ();
323 return opcode;
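/*
 * Illustrative sketch, not part of the original mini.c: when the true/false
 * targets of a conditional branch are swapped (e.g. while reordering basic
 * blocks), the condition has to be inverted with mono_reverse_branch_op.
 */
static void
example_swap_branch_targets (MonoInst *branch)
{
	MonoBasicBlock *tmp = branch->inst_true_bb;

	branch->opcode = mono_reverse_branch_op (branch->opcode);
	branch->inst_true_bb = branch->inst_false_bb;
	branch->inst_false_bb = tmp;
}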
326 guint
327 mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
329 type = mini_get_underlying_type (type);
331 handle_enum:
332 switch (type->type) {
333 case MONO_TYPE_I1:
334 case MONO_TYPE_U1:
335 return OP_STOREI1_MEMBASE_REG;
336 case MONO_TYPE_I2:
337 case MONO_TYPE_U2:
338 return OP_STOREI2_MEMBASE_REG;
339 case MONO_TYPE_I4:
340 case MONO_TYPE_U4:
341 return OP_STOREI4_MEMBASE_REG;
342 case MONO_TYPE_I:
343 case MONO_TYPE_U:
344 case MONO_TYPE_PTR:
345 case MONO_TYPE_FNPTR:
346 return OP_STORE_MEMBASE_REG;
347 case MONO_TYPE_CLASS:
348 case MONO_TYPE_STRING:
349 case MONO_TYPE_OBJECT:
350 case MONO_TYPE_SZARRAY:
351 case MONO_TYPE_ARRAY:
352 return OP_STORE_MEMBASE_REG;
353 case MONO_TYPE_I8:
354 case MONO_TYPE_U8:
355 return OP_STOREI8_MEMBASE_REG;
356 case MONO_TYPE_R4:
357 return OP_STORER4_MEMBASE_REG;
358 case MONO_TYPE_R8:
359 return OP_STORER8_MEMBASE_REG;
360 case MONO_TYPE_VALUETYPE:
361 if (type->data.klass->enumtype) {
362 type = mono_class_enum_basetype (type->data.klass);
363 goto handle_enum;
365 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
366 return OP_STOREX_MEMBASE;
367 return OP_STOREV_MEMBASE;
368 case MONO_TYPE_TYPEDBYREF:
369 return OP_STOREV_MEMBASE;
370 case MONO_TYPE_GENERICINST:
371 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
372 return OP_STOREX_MEMBASE;
373 type = &type->data.generic_class->container_class->byval_arg;
374 goto handle_enum;
375 case MONO_TYPE_VAR:
376 case MONO_TYPE_MVAR:
377 g_assert (mini_type_var_is_vt (type));
378 return OP_STOREV_MEMBASE;
379 default:
380 g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
382 return -1;
385 guint
386 mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
388 type = mini_get_underlying_type (type);
390 switch (type->type) {
391 case MONO_TYPE_I1:
392 return OP_LOADI1_MEMBASE;
393 case MONO_TYPE_U1:
394 return OP_LOADU1_MEMBASE;
395 case MONO_TYPE_I2:
396 return OP_LOADI2_MEMBASE;
397 case MONO_TYPE_U2:
398 return OP_LOADU2_MEMBASE;
399 case MONO_TYPE_I4:
400 return OP_LOADI4_MEMBASE;
401 case MONO_TYPE_U4:
402 return OP_LOADU4_MEMBASE;
403 case MONO_TYPE_I:
404 case MONO_TYPE_U:
405 case MONO_TYPE_PTR:
406 case MONO_TYPE_FNPTR:
407 return OP_LOAD_MEMBASE;
408 case MONO_TYPE_CLASS:
409 case MONO_TYPE_STRING:
410 case MONO_TYPE_OBJECT:
411 case MONO_TYPE_SZARRAY:
412 case MONO_TYPE_ARRAY:
413 return OP_LOAD_MEMBASE;
414 case MONO_TYPE_I8:
415 case MONO_TYPE_U8:
416 return OP_LOADI8_MEMBASE;
417 case MONO_TYPE_R4:
418 return OP_LOADR4_MEMBASE;
419 case MONO_TYPE_R8:
420 return OP_LOADR8_MEMBASE;
421 case MONO_TYPE_VALUETYPE:
422 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
423 return OP_LOADX_MEMBASE;
424 case MONO_TYPE_TYPEDBYREF:
425 return OP_LOADV_MEMBASE;
426 case MONO_TYPE_GENERICINST:
427 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
428 return OP_LOADX_MEMBASE;
429 if (mono_type_generic_inst_is_valuetype (type))
430 return OP_LOADV_MEMBASE;
431 else
432 return OP_LOAD_MEMBASE;
433 break;
434 case MONO_TYPE_VAR:
435 case MONO_TYPE_MVAR:
436 g_assert (cfg->gshared);
437 g_assert (mini_type_var_is_vt (type));
438 return OP_LOADV_MEMBASE;
439 default:
440 g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
442 return -1;
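/*
 * Illustrative sketch, not part of the original mini.c: the store/load opcode
 * helpers above are used when lowering memory accesses. A scalar field load
 * could be emitted like this (all parameters are hypothetical).
 */
static void
example_emit_field_load (MonoCompile *cfg, MonoBasicBlock *bb, MonoType *field_type, int dreg, int obj_reg, int field_offset)
{
	MonoInst *load;

	MONO_INST_NEW (cfg, load, mono_type_to_load_membase (cfg, field_type));
	load->dreg = dreg;
	load->inst_basereg = obj_reg;
	load->inst_offset = field_offset;
	MONO_ADD_INS (bb, load);
}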
445 guint
446 mini_type_to_stind (MonoCompile* cfg, MonoType *type)
448 type = mini_get_underlying_type (type);
449 if (cfg->gshared && !type->byref && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR)) {
450 g_assert (mini_type_var_is_vt (type));
451 return CEE_STOBJ;
453 return mono_type_to_stind (type);
457 mono_op_imm_to_op (int opcode)
459 switch (opcode) {
460 case OP_ADD_IMM:
461 #if SIZEOF_REGISTER == 4
462 return OP_IADD;
463 #else
464 return OP_LADD;
465 #endif
466 case OP_IADD_IMM:
467 return OP_IADD;
468 case OP_LADD_IMM:
469 return OP_LADD;
470 case OP_ISUB_IMM:
471 return OP_ISUB;
472 case OP_LSUB_IMM:
473 return OP_LSUB;
474 case OP_IMUL_IMM:
475 return OP_IMUL;
476 case OP_LMUL_IMM:
477 return OP_LMUL;
478 case OP_AND_IMM:
479 #if SIZEOF_REGISTER == 4
480 return OP_IAND;
481 #else
482 return OP_LAND;
483 #endif
484 case OP_OR_IMM:
485 #if SIZEOF_REGISTER == 4
486 return OP_IOR;
487 #else
488 return OP_LOR;
489 #endif
490 case OP_XOR_IMM:
491 #if SIZEOF_REGISTER == 4
492 return OP_IXOR;
493 #else
494 return OP_LXOR;
495 #endif
496 case OP_IAND_IMM:
497 return OP_IAND;
498 case OP_LAND_IMM:
499 return OP_LAND;
500 case OP_IOR_IMM:
501 return OP_IOR;
502 case OP_LOR_IMM:
503 return OP_LOR;
504 case OP_IXOR_IMM:
505 return OP_IXOR;
506 case OP_LXOR_IMM:
507 return OP_LXOR;
508 case OP_ISHL_IMM:
509 return OP_ISHL;
510 case OP_LSHL_IMM:
511 return OP_LSHL;
512 case OP_ISHR_IMM:
513 return OP_ISHR;
514 case OP_LSHR_IMM:
515 return OP_LSHR;
516 case OP_ISHR_UN_IMM:
517 return OP_ISHR_UN;
518 case OP_LSHR_UN_IMM:
519 return OP_LSHR_UN;
520 case OP_IDIV_IMM:
521 return OP_IDIV;
522 case OP_LDIV_IMM:
523 return OP_LDIV;
524 case OP_IDIV_UN_IMM:
525 return OP_IDIV_UN;
526 case OP_LDIV_UN_IMM:
527 return OP_LDIV_UN;
528 case OP_IREM_UN_IMM:
529 return OP_IREM_UN;
530 case OP_LREM_UN_IMM:
531 return OP_LREM_UN;
532 case OP_IREM_IMM:
533 return OP_IREM;
534 case OP_LREM_IMM:
535 return OP_LREM;
536 case OP_DIV_IMM:
537 #if SIZEOF_REGISTER == 4
538 return OP_IDIV;
539 #else
540 return OP_LDIV;
541 #endif
542 case OP_REM_IMM:
543 #if SIZEOF_REGISTER == 4
544 return OP_IREM;
545 #else
546 return OP_LREM;
547 #endif
548 case OP_ADDCC_IMM:
549 return OP_ADDCC;
550 case OP_ADC_IMM:
551 return OP_ADC;
552 case OP_SUBCC_IMM:
553 return OP_SUBCC;
554 case OP_SBB_IMM:
555 return OP_SBB;
556 case OP_IADC_IMM:
557 return OP_IADC;
558 case OP_ISBB_IMM:
559 return OP_ISBB;
560 case OP_COMPARE_IMM:
561 return OP_COMPARE;
562 case OP_ICOMPARE_IMM:
563 return OP_ICOMPARE;
564 case OP_LOCALLOC_IMM:
565 return OP_LOCALLOC;
568 return -1;
572 * mono_decompose_op_imm:
574 * Replace the OP_.._IMM INS with its non-IMM variant.
576 void
577 mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
579 int opcode2 = mono_op_imm_to_op (ins->opcode);
580 MonoInst *temp;
581 guint32 dreg;
582 const char *spec = INS_INFO (ins->opcode);
584 if (spec [MONO_INST_SRC2] == 'l') {
585 dreg = mono_alloc_lreg (cfg);
587 /* Load the 64bit constant using decomposed ops */
588 MONO_INST_NEW (cfg, temp, OP_ICONST);
589 temp->inst_c0 = ins->inst_ls_word;
590 temp->dreg = MONO_LVREG_LS (dreg);
591 mono_bblock_insert_before_ins (bb, ins, temp);
593 MONO_INST_NEW (cfg, temp, OP_ICONST);
594 temp->inst_c0 = ins->inst_ms_word;
595 temp->dreg = MONO_LVREG_MS (dreg);
596 } else {
597 dreg = mono_alloc_ireg (cfg);
599 MONO_INST_NEW (cfg, temp, OP_ICONST);
600 temp->inst_c0 = ins->inst_imm;
601 temp->dreg = dreg;
604 mono_bblock_insert_before_ins (bb, ins, temp);
606 if (opcode2 == -1)
607 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins->opcode));
608 ins->opcode = opcode2;
610 if (ins->opcode == OP_LOCALLOC)
611 ins->sreg1 = dreg;
612 else
613 ins->sreg2 = dreg;
615 bb->max_vreg = MAX (bb->max_vreg, cfg->next_vreg);
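/*
 * Illustrative sketch, not part of the original mini.c: a backend without an
 * immediate form of some operation can rewrite such instructions in its
 * lowering pass, turning e.g. OP_IADD_IMM into OP_ICONST + OP_IADD.
 */
static void
example_lower_add_imm (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		if (ins->opcode == OP_IADD_IMM)
			mono_decompose_op_imm (cfg, bb, ins);
	}
}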
618 static void
619 set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
621 if (vreg >= cfg->vreg_to_inst_len) {
622 MonoInst **tmp = cfg->vreg_to_inst;
623 int size = cfg->vreg_to_inst_len;
625 while (vreg >= cfg->vreg_to_inst_len)
626 cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
627 cfg->vreg_to_inst = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
628 if (size)
629 memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
631 cfg->vreg_to_inst [vreg] = inst;
634 #define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
635 #define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
637 MonoInst*
638 mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
640 MonoInst *inst;
641 int num = cfg->num_varinfo;
642 gboolean regpair;
644 type = mini_get_underlying_type (type);
646 if ((num + 1) >= cfg->varinfo_count) {
647 int orig_count = cfg->varinfo_count;
648 cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 32;
649 cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
650 cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
651 memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
654 cfg->stat_allocate_var++;
656 MONO_INST_NEW (cfg, inst, opcode);
657 inst->inst_c0 = num;
658 inst->inst_vtype = type;
659 inst->klass = mono_class_from_mono_type (type);
660 type_to_eval_stack_type (cfg, type, inst);
661 /* if set to 1 the variable is native */
662 inst->backend.is_pinvoke = 0;
663 inst->dreg = vreg;
665 if (mono_class_has_failure (inst->klass))
666 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
668 if (cfg->compute_gc_maps) {
669 if (type->byref) {
670 mono_mark_vreg_as_mp (cfg, vreg);
671 } else {
672 if ((MONO_TYPE_ISSTRUCT (type) && inst->klass->has_references) || mini_type_is_reference (type)) {
673 inst->flags |= MONO_INST_GC_TRACK;
674 mono_mark_vreg_as_ref (cfg, vreg);
679 cfg->varinfo [num] = inst;
681 cfg->vars [num].idx = num;
682 cfg->vars [num].vreg = vreg;
683 cfg->vars [num].range.first_use.pos.bid = 0xffff;
684 cfg->vars [num].reg = -1;
686 if (vreg != -1)
687 set_vreg_to_inst (cfg, vreg, inst);
689 #if SIZEOF_REGISTER == 4
690 if (mono_arch_is_soft_float ()) {
691 regpair = mono_type_is_long (type) || mono_type_is_float (type);
692 } else {
693 regpair = mono_type_is_long (type);
695 #else
696 regpair = FALSE;
697 #endif
699 if (regpair) {
700 MonoInst *tree;
703 * These two cannot be allocated using create_var_for_vreg since that would
704 * put them into the cfg->varinfo array, confusing many parts of the JIT.
708 * Set flags to VOLATILE so SSA skips it.
711 if (cfg->verbose_level >= 4) {
712 printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, MONO_LVREG_LS (inst->dreg), MONO_LVREG_MS (inst->dreg));
715 if (mono_arch_is_soft_float () && cfg->opt & MONO_OPT_SSA) {
716 if (mono_type_is_float (type))
717 inst->flags = MONO_INST_VOLATILE;
720 /* Allocate a dummy MonoInst for the first vreg */
721 MONO_INST_NEW (cfg, tree, OP_LOCAL);
722 tree->dreg = MONO_LVREG_LS (inst->dreg);
723 if (cfg->opt & MONO_OPT_SSA)
724 tree->flags = MONO_INST_VOLATILE;
725 tree->inst_c0 = num;
726 tree->type = STACK_I4;
727 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
728 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
730 set_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg), tree);
732 /* Allocate a dummy MonoInst for the second vreg */
733 MONO_INST_NEW (cfg, tree, OP_LOCAL);
734 tree->dreg = MONO_LVREG_MS (inst->dreg);
735 if (cfg->opt & MONO_OPT_SSA)
736 tree->flags = MONO_INST_VOLATILE;
737 tree->inst_c0 = num;
738 tree->type = STACK_I4;
739 tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
740 tree->klass = mono_class_from_mono_type (tree->inst_vtype);
742 set_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg), tree);
745 cfg->num_varinfo++;
746 if (cfg->verbose_level > 2)
747 g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
748 return inst;
751 MonoInst*
752 mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
754 int dreg;
755 type = mini_get_underlying_type (type);
757 if (mono_type_is_long (type))
758 dreg = mono_alloc_dreg (cfg, STACK_I8);
759 else if (mono_arch_is_soft_float () && mono_type_is_float (type))
760 dreg = mono_alloc_dreg (cfg, STACK_R8);
761 else
762 /* All the others are unified */
763 dreg = mono_alloc_preg (cfg);
765 return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
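/*
 * Illustrative sketch, not part of the original mini.c: typical usage of
 * mono_compile_create_var, here creating an int32 temporary that must stay
 * addressable (the same pattern mini_get_int_to_float_spill_area uses below).
 */
static MonoInst*
example_create_int_temp (MonoCompile *cfg)
{
	MonoInst *var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);

	/* Keep it out of the register allocator so its address stays valid */
	var->flags |= MONO_INST_VOLATILE;
	return var;
}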
768 MonoInst*
769 mini_get_int_to_float_spill_area (MonoCompile *cfg)
771 #ifdef TARGET_X86
772 if (!cfg->iconv_raw_var) {
773 cfg->iconv_raw_var = mono_compile_create_var (cfg, &mono_defaults.int32_class->byval_arg, OP_LOCAL);
774 cfg->iconv_raw_var->flags |= MONO_INST_VOLATILE; /*FIXME, use the don't regalloc flag*/
776 return cfg->iconv_raw_var;
777 #else
778 return NULL;
779 #endif
782 void
783 mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
785 if (vreg >= cfg->vreg_is_ref_len) {
786 gboolean *tmp = cfg->vreg_is_ref;
787 int size = cfg->vreg_is_ref_len;
789 while (vreg >= cfg->vreg_is_ref_len)
790 cfg->vreg_is_ref_len = cfg->vreg_is_ref_len ? cfg->vreg_is_ref_len * 2 : 32;
791 cfg->vreg_is_ref = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_ref_len);
792 if (size)
793 memcpy (cfg->vreg_is_ref, tmp, size * sizeof (gboolean));
795 cfg->vreg_is_ref [vreg] = TRUE;
798 void
799 mono_mark_vreg_as_mp (MonoCompile *cfg, int vreg)
801 if (vreg >= cfg->vreg_is_mp_len) {
802 gboolean *tmp = cfg->vreg_is_mp;
803 int size = cfg->vreg_is_mp_len;
805 while (vreg >= cfg->vreg_is_mp_len)
806 cfg->vreg_is_mp_len = cfg->vreg_is_mp_len ? cfg->vreg_is_mp_len * 2 : 32;
807 cfg->vreg_is_mp = (gboolean *)mono_mempool_alloc0 (cfg->mempool, sizeof (gboolean) * cfg->vreg_is_mp_len);
808 if (size)
809 memcpy (cfg->vreg_is_mp, tmp, size * sizeof (gboolean));
811 cfg->vreg_is_mp [vreg] = TRUE;
814 static MonoType*
815 type_from_stack_type (MonoInst *ins)
817 switch (ins->type) {
818 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
819 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
820 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
821 case STACK_R8: return &mono_defaults.double_class->byval_arg;
822 case STACK_MP:
824 * This 'if' used to be commented out without any specific reason, but
825 * commenting it out breaks #80235.
827 if (ins->klass)
828 return &ins->klass->this_arg;
829 else
830 return &mono_defaults.object_class->this_arg;
831 case STACK_OBJ:
832 /* ins->klass may not be set for ldnull.
833 * Also, if we have a boxed valuetype, we want an object class,
834 * not the valuetype class
836 if (ins->klass && !ins->klass->valuetype)
837 return &ins->klass->byval_arg;
838 return &mono_defaults.object_class->byval_arg;
839 case STACK_VTYPE: return &ins->klass->byval_arg;
840 default:
841 g_error ("stack type %d to montype not handled\n", ins->type);
843 return NULL;
846 MonoType*
847 mono_type_from_stack_type (MonoInst *ins)
849 return type_from_stack_type (ins);
853 * mono_add_ins_to_end:
855 * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
857 void
858 mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
860 int opcode;
862 if (!bb->code) {
863 MONO_ADD_INS (bb, inst);
864 return;
867 switch (bb->last_ins->opcode) {
868 case OP_BR:
869 case OP_BR_REG:
870 case CEE_BEQ:
871 case CEE_BGE:
872 case CEE_BGT:
873 case CEE_BLE:
874 case CEE_BLT:
875 case CEE_BNE_UN:
876 case CEE_BGE_UN:
877 case CEE_BGT_UN:
878 case CEE_BLE_UN:
879 case CEE_BLT_UN:
880 case OP_SWITCH:
881 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
882 break;
883 default:
884 if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
885 /* Need to insert the ins before the compare */
886 if (bb->code == bb->last_ins) {
887 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
888 return;
891 if (bb->code->next == bb->last_ins) {
892 /* Only two instructions */
893 opcode = bb->code->opcode;
895 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
896 /* NEW IR */
897 mono_bblock_insert_before_ins (bb, bb->code, inst);
898 } else {
899 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
901 } else {
902 opcode = bb->last_ins->prev->opcode;
904 if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM) || (opcode == OP_RCOMPARE)) {
905 /* NEW IR */
906 mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
907 } else {
908 mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
912 else
913 MONO_ADD_INS (bb, inst);
914 break;
918 void
919 mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks)
921 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfo));
922 MonoJumpInfoBBTable *table;
924 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
925 table->table = bbs;
926 table->table_size = num_blocks;
928 ji->ip.label = label;
929 ji->type = MONO_PATCH_INFO_SWITCH;
930 ji->data.table = table;
931 ji->next = cfg->patch_info;
932 cfg->patch_info = ji;
935 static MonoMethodSignature *
936 mono_get_array_new_va_signature (int arity)
938 static GHashTable *sighash;
939 MonoMethodSignature *res;
940 int i;
942 mono_jit_lock ();
943 if (!sighash) {
944 sighash = g_hash_table_new (NULL, NULL);
946 else if ((res = (MonoMethodSignature *)g_hash_table_lookup (sighash, GINT_TO_POINTER (arity)))) {
947 mono_jit_unlock ();
948 return res;
951 res = mono_metadata_signature_alloc (mono_defaults.corlib, arity + 1);
953 res->pinvoke = 1;
954 if (ARCH_VARARG_ICALLS)
955 /* Only set this on some archs since not all backends can handle varargs+pinvoke */
956 res->call_convention = MONO_CALL_VARARG;
958 #ifdef TARGET_WIN32
959 res->call_convention = MONO_CALL_C;
960 #endif
962 res->params [0] = &mono_defaults.int_class->byval_arg;
963 for (i = 0; i < arity; i++)
964 res->params [i + 1] = &mono_defaults.int_class->byval_arg;
966 res->ret = &mono_defaults.object_class->byval_arg;
968 g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
969 mono_jit_unlock ();
971 return res;
974 MonoJitICallInfo *
975 mono_get_array_new_va_icall (int rank)
977 MonoMethodSignature *esig;
978 char icall_name [256];
979 char *name;
980 MonoJitICallInfo *info;
982 /* Need to register the icall so it gets an icall wrapper */
983 sprintf (icall_name, "ves_array_new_va_%d", rank);
985 mono_jit_lock ();
986 info = mono_find_jit_icall_by_name (icall_name);
987 if (info == NULL) {
988 esig = mono_get_array_new_va_signature (rank);
989 name = g_strdup (icall_name);
990 info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
992 mono_jit_unlock ();
994 return info;
997 gboolean
998 mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
1000 MonoAssembly *assembly = method->klass->image->assembly;
1001 if (method->wrapper_type != MONO_WRAPPER_NONE && method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
1002 return FALSE;
1003 if (assembly->in_gac || assembly->image == mono_defaults.corlib)
1004 return FALSE;
1005 return mono_assembly_has_skip_verification (assembly);
1009 * mini_method_verify:
1011 * Verify the method using the verifier.
1013 * Returns true if the method is invalid.
1015 static gboolean
1016 mini_method_verify (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1018 GSList *tmp, *res;
1019 gboolean is_fulltrust;
1021 if (method->verification_success)
1022 return FALSE;
1024 if (!mono_verifier_is_enabled_for_method (method))
1025 return FALSE;
1027 /* Being able to skip verification implies the assembly is fully trusted */
1028 is_fulltrust = mono_verifier_is_method_full_trust (method) || mini_assembly_can_skip_verification (cfg->domain, method);
1030 res = mono_method_verify_with_current_settings (method, cfg->skip_visibility, is_fulltrust);
1032 if (res) {
1033 for (tmp = res; tmp; tmp = tmp->next) {
1034 MonoVerifyInfoExtended *info = (MonoVerifyInfoExtended *)tmp->data;
1035 if (info->info.status == MONO_VERIFY_ERROR) {
1036 if (fail_compile) {
1037 char *method_name = mono_method_full_name (method, TRUE);
1038 cfg->exception_type = info->exception_type;
1039 cfg->exception_message = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1040 g_free (method_name);
1042 mono_free_verify_list (res);
1043 return TRUE;
1045 if (info->info.status == MONO_VERIFY_NOT_VERIFIABLE && (!is_fulltrust || info->exception_type == MONO_EXCEPTION_METHOD_ACCESS || info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)) {
1046 if (fail_compile) {
1047 char *method_name = mono_method_full_name (method, TRUE);
1048 char *msg = g_strdup_printf ("Error verifying %s: %s", method_name, info->info.message);
1050 if (info->exception_type == MONO_EXCEPTION_METHOD_ACCESS)
1051 mono_error_set_generic_error (&cfg->error, "System", "MethodAccessException", "%s", msg);
1052 else if (info->exception_type == MONO_EXCEPTION_FIELD_ACCESS)
1053 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "%s", msg);
1054 else if (info->exception_type == MONO_EXCEPTION_UNVERIFIABLE_IL)
1055 mono_error_set_generic_error (&cfg->error, "System.Security", "VerificationException", "%s", msg);
1056 if (!mono_error_ok (&cfg->error)) {
1057 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
1058 g_free (msg);
1059 } else {
1060 cfg->exception_type = info->exception_type;
1061 cfg->exception_message = msg;
1063 g_free (method_name);
1065 mono_free_verify_list (res);
1066 return TRUE;
1069 mono_free_verify_list (res);
1071 method->verification_success = 1;
1072 return FALSE;
1075 /*Returns true if something went wrong*/
1076 gboolean
1077 mono_compile_is_broken (MonoCompile *cfg, MonoMethod *method, gboolean fail_compile)
1079 MonoMethod *method_definition = method;
1080 gboolean dont_verify = method->klass->image->assembly->corlib_internal;
1082 while (method_definition->is_inflated) {
1083 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
1084 method_definition = imethod->declaring;
1087 return !dont_verify && mini_method_verify (cfg, method_definition, fail_compile);
1090 static void
1091 mono_dynamic_code_hash_insert (MonoDomain *domain, MonoMethod *method, MonoJitDynamicMethodInfo *ji)
1093 if (!domain_jit_info (domain)->dynamic_code_hash)
1094 domain_jit_info (domain)->dynamic_code_hash = g_hash_table_new (NULL, NULL);
1095 g_hash_table_insert (domain_jit_info (domain)->dynamic_code_hash, method, ji);
1098 static MonoJitDynamicMethodInfo*
1099 mono_dynamic_code_hash_lookup (MonoDomain *domain, MonoMethod *method)
1101 MonoJitDynamicMethodInfo *res;
1103 if (domain_jit_info (domain)->dynamic_code_hash)
1104 res = (MonoJitDynamicMethodInfo *)g_hash_table_lookup (domain_jit_info (domain)->dynamic_code_hash, method);
1105 else
1106 res = NULL;
1107 return res;
1110 typedef struct {
1111 MonoClass *vtype;
1112 GList *active, *inactive;
1113 GSList *slots;
1114 } StackSlotInfo;
1116 static gint
1117 compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
1119 MonoMethodVar *v1 = (MonoMethodVar*)a;
1120 MonoMethodVar *v2 = (MonoMethodVar*)b;
1122 if (v1 == v2)
1123 return 0;
1124 else if (v1->interval->range && v2->interval->range)
1125 return v1->interval->range->from - v2->interval->range->from;
1126 else if (v1->interval->range)
1127 return -1;
1128 else
1129 return 1;
1132 #if 0
1133 #define LSCAN_DEBUG(a) do { a; } while (0)
1134 #else
1135 #define LSCAN_DEBUG(a)
1136 #endif
1138 static gint32*
1139 mono_allocate_stack_slots2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1141 int i, slot, offset, size;
1142 guint32 align;
1143 MonoMethodVar *vmv;
1144 MonoInst *inst;
1145 gint32 *offsets;
1146 GList *vars = NULL, *l, *unhandled;
1147 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1148 MonoType *t;
1149 int nvtypes;
1150 gboolean reuse_slot;
1152 LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
1154 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1155 vtype_stack_slots = NULL;
1156 nvtypes = 0;
1158 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1159 for (i = 0; i < cfg->num_varinfo; ++i)
1160 offsets [i] = -1;
1162 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1163 inst = cfg->varinfo [i];
1164 vmv = MONO_VARINFO (cfg, i);
1166 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1167 continue;
1169 vars = g_list_prepend (vars, vmv);
1172 vars = g_list_sort (vars, compare_by_interval_start_pos_func);
1174 /* Sanity check */
1176 i = 0;
1177 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1178 MonoMethodVar *current = unhandled->data;
1180 if (current->interval->range) {
1181 g_assert (current->interval->range->from >= i);
1182 i = current->interval->range->from;
1187 offset = 0;
1188 *stack_align = 0;
1189 for (unhandled = vars; unhandled; unhandled = unhandled->next) {
1190 MonoMethodVar *current = (MonoMethodVar *)unhandled->data;
1192 vmv = current;
1193 inst = cfg->varinfo [vmv->idx];
1195 t = mono_type_get_underlying_type (inst->inst_vtype);
1196 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1197 continue;
1199 /* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
1200 * pinvoke wrappers when they call functions returning structures */
1201 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1202 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1204 else {
1205 int ialign;
1207 size = mini_type_stack_size (t, &ialign);
1208 align = ialign;
1210 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1211 align = 16;
1214 reuse_slot = TRUE;
1215 if (cfg->disable_reuse_stack_slots)
1216 reuse_slot = FALSE;
1218 t = mini_get_underlying_type (t);
1219 switch (t->type) {
1220 case MONO_TYPE_GENERICINST:
1221 if (!mono_type_generic_inst_is_valuetype (t)) {
1222 slot_info = &scalar_stack_slots [t->type];
1223 break;
1225 /* Fall through */
1226 case MONO_TYPE_VALUETYPE:
1227 if (!vtype_stack_slots)
1228 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1229 for (i = 0; i < nvtypes; ++i)
1230 if (t->data.klass == vtype_stack_slots [i].vtype)
1231 break;
1232 if (i < nvtypes)
1233 slot_info = &vtype_stack_slots [i];
1234 else {
1235 g_assert (nvtypes < 256);
1236 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1237 slot_info = &vtype_stack_slots [nvtypes];
1238 nvtypes ++;
1240 if (cfg->disable_reuse_ref_stack_slots)
1241 reuse_slot = FALSE;
1242 break;
1244 case MONO_TYPE_PTR:
1245 case MONO_TYPE_I:
1246 case MONO_TYPE_U:
1247 #if SIZEOF_VOID_P == 4
1248 case MONO_TYPE_I4:
1249 #else
1250 case MONO_TYPE_I8:
1251 #endif
1252 if (cfg->disable_ref_noref_stack_slot_share) {
1253 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1254 break;
1256 /* Fall through */
1258 case MONO_TYPE_CLASS:
1259 case MONO_TYPE_OBJECT:
1260 case MONO_TYPE_ARRAY:
1261 case MONO_TYPE_SZARRAY:
1262 case MONO_TYPE_STRING:
1263 /* Share non-float stack slots of the same size */
1264 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1265 if (cfg->disable_reuse_ref_stack_slots)
1266 reuse_slot = FALSE;
1267 break;
1269 default:
1270 slot_info = &scalar_stack_slots [t->type];
1273 slot = 0xffffff;
1274 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1275 int pos;
1276 gboolean changed;
1278 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1280 if (!current->interval->range) {
1281 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
1282 pos = ~0;
1283 else {
1284 /* Dead */
1285 inst->flags |= MONO_INST_IS_DEAD;
1286 continue;
1289 else
1290 pos = current->interval->range->from;
1292 LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
1293 if (current->interval->range)
1294 LSCAN_DEBUG (mono_linterval_print (current->interval));
1295 LSCAN_DEBUG (printf ("\n"));
1297 /* Check for intervals in active which expired or inactive */
1298 changed = TRUE;
1299 /* FIXME: Optimize this */
1300 while (changed) {
1301 changed = FALSE;
1302 for (l = slot_info->active; l != NULL; l = l->next) {
1303 MonoMethodVar *v = (MonoMethodVar*)l->data;
1305 if (v->interval->last_range->to < pos) {
1306 slot_info->active = g_list_delete_link (slot_info->active, l);
1307 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1308 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1309 changed = TRUE;
1310 break;
1312 else if (!mono_linterval_covers (v->interval, pos)) {
1313 slot_info->inactive = g_list_append (slot_info->inactive, v);
1314 slot_info->active = g_list_delete_link (slot_info->active, l);
1315 LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
1316 changed = TRUE;
1317 break;
1322 /* Check for intervals in inactive which expired or active */
1323 changed = TRUE;
1324 /* FIXME: Optimize this */
1325 while (changed) {
1326 changed = FALSE;
1327 for (l = slot_info->inactive; l != NULL; l = l->next) {
1328 MonoMethodVar *v = (MonoMethodVar*)l->data;
1330 if (v->interval->last_range->to < pos) {
1331 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1332 // FIXME: Enabling this seems to cause impossible to debug crashes
1333 //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
1334 LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
1335 changed = TRUE;
1336 break;
1338 else if (mono_linterval_covers (v->interval, pos)) {
1339 slot_info->active = g_list_append (slot_info->active, v);
1340 slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
1341 LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
1342 changed = TRUE;
1343 break;
1349 * This also handles the case when the variable is used in an
1350 * exception region, as liveness info is not computed there.
1353 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1354 * opcodes.
1356 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1357 if (slot_info->slots) {
1358 slot = GPOINTER_TO_INT (slot_info->slots->data);
1360 slot_info->slots = slot_info->slots->next;
1363 /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
1365 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1369 #if 0
1371 static int count = 0;
1372 count ++;
1374 if (count == atoi (g_getenv ("COUNT3")))
1375 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1376 if (count > atoi (g_getenv ("COUNT3")))
1377 slot = 0xffffff;
1378 else
1379 mono_print_ins (inst);
1381 #endif
1383 LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
1385 if (inst->flags & MONO_INST_LMF) {
1386 size = sizeof (MonoLMF);
1387 align = sizeof (mgreg_t);
1388 reuse_slot = FALSE;
1391 if (!reuse_slot)
1392 slot = 0xffffff;
1394 if (slot == 0xffffff) {
1396 * Always allocate valuetypes to sizeof (gpointer) to allow more
1397 * efficient copying (and to work around the fact that OP_MEMCPY
1398 * and OP_MEMSET ignore alignment).
1400 if (MONO_TYPE_ISSTRUCT (t)) {
1401 align = MAX (align, sizeof (gpointer));
1402 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1405 if (backward) {
1406 offset += size;
1407 offset += align - 1;
1408 offset &= ~(align - 1);
1409 slot = offset;
1411 else {
1412 offset += align - 1;
1413 offset &= ~(align - 1);
1414 slot = offset;
1415 offset += size;
1418 if (*stack_align == 0)
1419 *stack_align = align;
1422 offsets [vmv->idx] = slot;
1424 g_list_free (vars);
1425 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1426 if (scalar_stack_slots [i].active)
1427 g_list_free (scalar_stack_slots [i].active);
1429 for (i = 0; i < nvtypes; ++i) {
1430 if (vtype_stack_slots [i].active)
1431 g_list_free (vtype_stack_slots [i].active);
1434 cfg->stat_locals_stack_size += offset;
1436 *stack_size = offset;
1437 return offsets;
1441 * mono_allocate_stack_slots:
1443 * Allocate stack slots for all non-register-allocated variables using a
1444 * linear scan algorithm.
1445 * Returns: an array of stack offsets.
1446 * STACK_SIZE is set to the amount of stack space needed.
1447 * STACK_ALIGN is set to the alignment needed by the locals area.
1449 gint32*
1450 mono_allocate_stack_slots (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
1452 int i, slot, offset, size;
1453 guint32 align;
1454 MonoMethodVar *vmv;
1455 MonoInst *inst;
1456 gint32 *offsets;
1457 GList *vars = NULL, *l;
1458 StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
1459 MonoType *t;
1460 int nvtypes;
1461 gboolean reuse_slot;
1463 if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
1464 return mono_allocate_stack_slots2 (cfg, backward, stack_size, stack_align);
1466 scalar_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
1467 vtype_stack_slots = NULL;
1468 nvtypes = 0;
1470 offsets = (gint32 *)mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
1471 for (i = 0; i < cfg->num_varinfo; ++i)
1472 offsets [i] = -1;
1474 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1475 inst = cfg->varinfo [i];
1476 vmv = MONO_VARINFO (cfg, i);
1478 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
1479 continue;
1481 vars = g_list_prepend (vars, vmv);
1484 vars = mono_varlist_sort (cfg, vars, 0);
1485 offset = 0;
1486 *stack_align = sizeof(mgreg_t);
1487 for (l = vars; l; l = l->next) {
1488 vmv = (MonoMethodVar *)l->data;
1489 inst = cfg->varinfo [vmv->idx];
1491 t = mono_type_get_underlying_type (inst->inst_vtype);
1492 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (t))
1493 continue;
1495 /* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
1496 * pinvoke wrappers when they call functions returning structures */
1497 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1498 size = mono_class_native_size (mono_class_from_mono_type (t), &align);
1499 } else {
1500 int ialign;
1502 size = mini_type_stack_size (t, &ialign);
1503 align = ialign;
1505 if (mono_class_has_failure (mono_class_from_mono_type (t)))
1506 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
1508 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (t)))
1509 align = 16;
1512 reuse_slot = TRUE;
1513 if (cfg->disable_reuse_stack_slots)
1514 reuse_slot = FALSE;
1516 t = mini_get_underlying_type (t);
1517 switch (t->type) {
1518 case MONO_TYPE_GENERICINST:
1519 if (!mono_type_generic_inst_is_valuetype (t)) {
1520 slot_info = &scalar_stack_slots [t->type];
1521 break;
1523 /* Fall through */
1524 case MONO_TYPE_VALUETYPE:
1525 if (!vtype_stack_slots)
1526 vtype_stack_slots = (StackSlotInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
1527 for (i = 0; i < nvtypes; ++i)
1528 if (t->data.klass == vtype_stack_slots [i].vtype)
1529 break;
1530 if (i < nvtypes)
1531 slot_info = &vtype_stack_slots [i];
1532 else {
1533 g_assert (nvtypes < 256);
1534 vtype_stack_slots [nvtypes].vtype = t->data.klass;
1535 slot_info = &vtype_stack_slots [nvtypes];
1536 nvtypes ++;
1538 if (cfg->disable_reuse_ref_stack_slots)
1539 reuse_slot = FALSE;
1540 break;
1542 case MONO_TYPE_PTR:
1543 case MONO_TYPE_I:
1544 case MONO_TYPE_U:
1545 #if SIZEOF_VOID_P == 4
1546 case MONO_TYPE_I4:
1547 #else
1548 case MONO_TYPE_I8:
1549 #endif
1550 if (cfg->disable_ref_noref_stack_slot_share) {
1551 slot_info = &scalar_stack_slots [MONO_TYPE_I];
1552 break;
1554 /* Fall through */
1556 case MONO_TYPE_CLASS:
1557 case MONO_TYPE_OBJECT:
1558 case MONO_TYPE_ARRAY:
1559 case MONO_TYPE_SZARRAY:
1560 case MONO_TYPE_STRING:
1561 /* Share non-float stack slots of the same size */
1562 slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
1563 if (cfg->disable_reuse_ref_stack_slots)
1564 reuse_slot = FALSE;
1565 break;
1566 case MONO_TYPE_VAR:
1567 case MONO_TYPE_MVAR:
1568 slot_info = &scalar_stack_slots [t->type];
1569 break;
1570 default:
1571 slot_info = &scalar_stack_slots [t->type];
1572 break;
1575 slot = 0xffffff;
1576 if (cfg->comp_done & MONO_COMP_LIVENESS) {
1577 //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
1579 /* expire old intervals in active */
1580 while (slot_info->active) {
1581 MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
1583 if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
1584 break;
1586 //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
1588 slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
1589 slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
1593 * This also handles the case when the variable is used in an
1594 * exception region, as liveness info is not computed there.
1597 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
1598 * opcodes.
1600 if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
1601 if (slot_info->slots) {
1602 slot = GPOINTER_TO_INT (slot_info->slots->data);
1604 slot_info->slots = slot_info->slots->next;
1607 slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
1611 #if 0
1613 static int count = 0;
1614 count ++;
1616 if (count == atoi (g_getenv ("COUNT")))
1617 printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
1618 if (count > atoi (g_getenv ("COUNT")))
1619 slot = 0xffffff;
1620 else
1621 mono_print_ins (inst);
1623 #endif
1625 if (inst->flags & MONO_INST_LMF) {
1627 * This variable represents a MonoLMF structure, which has no corresponding
1628 * CLR type, so hard-code its size/alignment.
1630 size = sizeof (MonoLMF);
1631 align = sizeof (mgreg_t);
1632 reuse_slot = FALSE;
1635 if (!reuse_slot)
1636 slot = 0xffffff;
1638 if (slot == 0xffffff) {
1640 * Always allocate valuetypes to sizeof (gpointer) to allow more
1641 * efficient copying (and to work around the fact that OP_MEMCPY
1642 * and OP_MEMSET ignore alignment).
1644 if (MONO_TYPE_ISSTRUCT (t)) {
1645 align = MAX (align, sizeof (gpointer));
1646 align = MAX (align, mono_class_min_align (mono_class_from_mono_type (t)));
1648 * Align the size too so the code generated for passing vtypes in
1649 * registers doesn't overwrite random locals.
1651 size = (size + (align - 1)) & ~(align -1);
1654 if (backward) {
1655 offset += size;
1656 offset += align - 1;
1657 offset &= ~(align - 1);
1658 slot = offset;
1660 else {
1661 offset += align - 1;
1662 offset &= ~(align - 1);
1663 slot = offset;
1664 offset += size;
1667 *stack_align = MAX (*stack_align, align);
1670 offsets [vmv->idx] = slot;
1672 g_list_free (vars);
1673 for (i = 0; i < MONO_TYPE_PINNED; ++i) {
1674 if (scalar_stack_slots [i].active)
1675 g_list_free (scalar_stack_slots [i].active);
1677 for (i = 0; i < nvtypes; ++i) {
1678 if (vtype_stack_slots [i].active)
1679 g_list_free (vtype_stack_slots [i].active);
1682 cfg->stat_locals_stack_size += offset;
1684 *stack_size = offset;
1685 return offsets;
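/*
 * Illustrative sketch, not part of the original mini.c: an arch backend would
 * consume the offsets computed by mono_allocate_stack_slots roughly like this
 * when laying out its frame. The downward-growing frame and the use of
 * cfg->frame_reg are assumptions about the backend, not taken from this file.
 */
static void
example_assign_local_offsets (MonoCompile *cfg)
{
	guint32 locals_stack_size, locals_stack_align;
	gint32 *offsets;
	MonoInst *ins;
	int i;

	offsets = mono_allocate_stack_slots (cfg, TRUE, &locals_stack_size, &locals_stack_align);
	/* The backend would reserve locals_stack_size bytes in the frame,
	 * aligned to locals_stack_align. */
	for (i = cfg->locals_start; i < cfg->num_varinfo; ++i) {
		if (offsets [i] == -1)
			continue;
		ins = cfg->varinfo [i];
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = - (gint32) offsets [i];
	}
}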
1688 #define EMUL_HIT_SHIFT 3
1689 #define EMUL_HIT_MASK ((1 << EMUL_HIT_SHIFT) - 1)
1690 /* small hit bitmap cache */
1691 static mono_byte emul_opcode_hit_cache [(OP_LAST>>EMUL_HIT_SHIFT) + 1] = {0};
1692 static short emul_opcode_num = 0;
1693 static short emul_opcode_alloced = 0;
1694 static short *emul_opcode_opcodes;
1695 static MonoJitICallInfo **emul_opcode_map;
1697 MonoJitICallInfo *
1698 mono_find_jit_opcode_emulation (int opcode)
1700 g_assert (opcode >= 0 && opcode <= OP_LAST);
1701 if (emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] & (1 << (opcode & EMUL_HIT_MASK))) {
1702 int i;
1703 for (i = 0; i < emul_opcode_num; ++i) {
1704 if (emul_opcode_opcodes [i] == opcode)
1705 return emul_opcode_map [i];
1708 return NULL;
1711 void
1712 mini_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, const char *symbol, gboolean no_wrapper)
1714 MonoJitICallInfo *info;
1715 MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
1717 g_assert (!sig->hasthis);
1718 g_assert (sig->param_count < 3);
1720 info = mono_register_jit_icall_full (func, name, sig, no_wrapper, symbol);
1722 if (emul_opcode_num >= emul_opcode_alloced) {
1723 int incr = emul_opcode_alloced? emul_opcode_alloced/2: 16;
1724 emul_opcode_alloced += incr;
1725 emul_opcode_map = (MonoJitICallInfo **)g_realloc (emul_opcode_map, sizeof (emul_opcode_map [0]) * emul_opcode_alloced);
1726 emul_opcode_opcodes = (short *)g_realloc (emul_opcode_opcodes, sizeof (emul_opcode_opcodes [0]) * emul_opcode_alloced);
1728 emul_opcode_map [emul_opcode_num] = info;
1729 emul_opcode_opcodes [emul_opcode_num] = opcode;
1730 emul_opcode_num++;
1731 emul_opcode_hit_cache [opcode >> (EMUL_HIT_SHIFT + 3)] |= (1 << (opcode & EMUL_HIT_MASK));
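/*
 * Illustrative sketch, not part of the original mini.c: runtime startup code
 * registers emulations roughly like this for opcodes a target cannot
 * implement natively. "example_fdiv" and the icall name are hypothetical.
 */
static double
example_fdiv (double a, double b)
{
	return a / b;
}

static void
example_register_opcode_emulations (void)
{
	mini_register_opcode_emulation (OP_FDIV, "__emul_example_fdiv", "double double double",
									(gpointer) example_fdiv, "example_fdiv", FALSE);
}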
1734 static void
1735 print_dfn (MonoCompile *cfg)
1737 int i, j;
1738 char *code;
1739 MonoBasicBlock *bb;
1740 MonoInst *c;
1743 char *method_name = mono_method_full_name (cfg->method, TRUE);
1744 g_print ("IR code for method %s\n", method_name);
1745 g_free (method_name);
1748 for (i = 0; i < cfg->num_bblocks; ++i) {
1749 bb = cfg->bblocks [i];
1750 /*if (bb->cil_code) {
1751 char* code1, *code2;
1752 code1 = mono_disasm_code_one (NULL, cfg->method, bb->cil_code, NULL);
1753 if (bb->last_ins->cil_code)
1754 code2 = mono_disasm_code_one (NULL, cfg->method, bb->last_ins->cil_code, NULL);
1755 else
1756 code2 = g_strdup ("");
1758 code1 [strlen (code1) - 1] = 0;
1759 code = g_strdup_printf ("%s -> %s", code1, code2);
1760 g_free (code1);
1761 g_free (code2);
1762 } else*/
1763 code = g_strdup ("\n");
1764 g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
1765 MONO_BB_FOR_EACH_INS (bb, c) {
1766 mono_print_ins_index (-1, c);
1769 g_print ("\tprev:");
1770 for (j = 0; j < bb->in_count; ++j) {
1771 g_print (" BB%d", bb->in_bb [j]->block_num);
1773 g_print ("\t\tsucc:");
1774 for (j = 0; j < bb->out_count; ++j) {
1775 g_print (" BB%d", bb->out_bb [j]->block_num);
1777 g_print ("\n\tidom: BB%d\n", bb->idom? bb->idom->block_num: -1);
1779 if (bb->idom)
1780 g_assert (mono_bitset_test_fast (bb->dominators, bb->idom->dfn));
1782 if (bb->dominators)
1783 mono_blockset_print (cfg, bb->dominators, "\tdominators", bb->idom? bb->idom->dfn: -1);
1784 if (bb->dfrontier)
1785 mono_blockset_print (cfg, bb->dfrontier, "\tdfrontier", -1);
1786 g_free (code);
1789 g_print ("\n");
1792 void
1793 mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst)
1795 MONO_ADD_INS (bb, inst);
1798 void
1799 mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1801 if (ins == NULL) {
1802 ins = bb->code;
1803 bb->code = ins_to_insert;
1805 /* Link with next */
1806 ins_to_insert->next = ins;
1807 if (ins)
1808 ins->prev = ins_to_insert;
1810 if (bb->last_ins == NULL)
1811 bb->last_ins = ins_to_insert;
1812 } else {
1813 /* Link with next */
1814 ins_to_insert->next = ins->next;
1815 if (ins->next)
1816 ins->next->prev = ins_to_insert;
1818 /* Link with previous */
1819 ins->next = ins_to_insert;
1820 ins_to_insert->prev = ins;
1822 if (bb->last_ins == ins)
1823 bb->last_ins = ins_to_insert;
1827 void
1828 mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
1830 if (ins == NULL) {
1831 ins = bb->code;
1832 if (ins)
1833 ins->prev = ins_to_insert;
1834 bb->code = ins_to_insert;
1835 ins_to_insert->next = ins;
1836 if (bb->last_ins == NULL)
1837 bb->last_ins = ins_to_insert;
1838 } else {
1839 /* Link with previous */
1840 if (ins->prev)
1841 ins->prev->next = ins_to_insert;
1842 ins_to_insert->prev = ins->prev;
1844 /* Link with next */
1845 ins->prev = ins_to_insert;
1846 ins_to_insert->next = ins;
1848 if (bb->code == ins)
1849 bb->code = ins_to_insert;
1854 * mono_verify_bblock:
1856 * Verify that the next and prev pointers are consistent across the instructions in BB.
1858 void
1859 mono_verify_bblock (MonoBasicBlock *bb)
1861 MonoInst *ins, *prev;
1863 prev = NULL;
1864 for (ins = bb->code; ins; ins = ins->next) {
1865 g_assert (ins->prev == prev);
1866 prev = ins;
1868 if (bb->last_ins)
1869 g_assert (!bb->last_ins->next);
1873 * mono_verify_cfg:
1875 * Perform consistency checks on the JIT data structures and the IR
1877 void
1878 mono_verify_cfg (MonoCompile *cfg)
1880 MonoBasicBlock *bb;
1882 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
1883 mono_verify_bblock (bb);
1886 // This will free many fields in cfg to save
1887 // memory. Note that this must be safe to call
1888 // multiple times. It must be idempotent.
1889 void
1890 mono_empty_compile (MonoCompile *cfg)
1892 mono_free_loop_info (cfg);
1894 // These live in the mempool, and so must be freed
1895 // first
1896 for (GSList *l = cfg->headers_to_free; l; l = l->next) {
1897 mono_metadata_free_mh ((MonoMethodHeader *)l->data);
1899 cfg->headers_to_free = NULL;
1901 if (cfg->mempool) {
1902 //mono_mempool_stats (cfg->mempool);
1903 mono_mempool_destroy (cfg->mempool);
1904 cfg->mempool = NULL;
1907 g_free (cfg->varinfo);
1908 cfg->varinfo = NULL;
1910 g_free (cfg->vars);
1911 cfg->vars = NULL;
1913 if (cfg->rs) {
1914 mono_regstate_free (cfg->rs);
1915 cfg->rs = NULL;
1919 void
1920 mono_destroy_compile (MonoCompile *cfg)
1922 mono_empty_compile (cfg);
1924 if (cfg->header)
1925 mono_metadata_free_mh (cfg->header);
1927 if (cfg->spvars)
1928 g_hash_table_destroy (cfg->spvars);
1929 if (cfg->exvars)
1930 g_hash_table_destroy (cfg->exvars);
1932 g_list_free (cfg->ldstr_list);
1934 if (cfg->token_info_hash)
1935 g_hash_table_destroy (cfg->token_info_hash);
1937 if (cfg->abs_patches)
1938 g_hash_table_destroy (cfg->abs_patches);
1940 mono_debug_free_method (cfg);
1942 g_free (cfg->varinfo);
1943 g_free (cfg->vars);
1944 g_free (cfg->exception_message);
1945 g_free (cfg);
1948 void
1949 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
1951 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1953 ji->ip.i = ip;
1954 ji->type = type;
1955 ji->data.target = target;
1956 ji->next = cfg->patch_info;
1958 cfg->patch_info = ji;
1961 void
1962 mono_add_patch_info_rel (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target, int relocation)
1964 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfo));
1966 ji->ip.i = ip;
1967 ji->type = type;
1968 ji->relocation = relocation;
1969 ji->data.target = target;
1970 ji->next = cfg->patch_info;
1972 cfg->patch_info = ji;
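/*
 * Illustrative sketch, not part of the original mini.c: backends record a
 * patch while emitting code whose final target is not yet known, e.g. a call
 * to another managed method. The code/native_code offset arithmetic is the
 * usual pattern, not taken from this file.
 */
static void
example_add_call_patch (MonoCompile *cfg, guint8 *code, MonoMethod *target)
{
	/* The instruction at this native offset will be patched with the
	 * address of TARGET once TARGET has been compiled. */
	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD, target);
}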
1975 void
1976 mono_remove_patch_info (MonoCompile *cfg, int ip)
1978 MonoJumpInfo **ji = &cfg->patch_info;
1980 while (*ji) {
1981 if ((*ji)->ip.i == ip)
1982 *ji = (*ji)->next;
1983 else
1984 ji = &((*ji)->next);
1988 void
1989 mono_add_seq_point (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, int native_offset)
1991 ins->inst_offset = native_offset;
1992 g_ptr_array_add (cfg->seq_points, ins);
1993 if (bb) {
1994 bb->seq_points = g_slist_prepend_mempool (cfg->mempool, bb->seq_points, ins);
1995 bb->last_seq_point = ins;
1999 void
2000 mono_add_var_location (MonoCompile *cfg, MonoInst *var, gboolean is_reg, int reg, int offset, int from, int to)
2002 MonoDwarfLocListEntry *entry = (MonoDwarfLocListEntry *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDwarfLocListEntry));
2004 if (is_reg)
2005 g_assert (offset == 0);
2007 entry->is_reg = is_reg;
2008 entry->reg = reg;
2009 entry->offset = offset;
2010 entry->from = from;
2011 entry->to = to;
2013 if (var == cfg->args [0])
2014 cfg->this_loclist = g_slist_append_mempool (cfg->mempool, cfg->this_loclist, entry);
2015 else if (var == cfg->rgctx_var)
2016 cfg->rgctx_loclist = g_slist_append_mempool (cfg->mempool, cfg->rgctx_loclist, entry);
2019 static void
2020 mono_compile_create_vars (MonoCompile *cfg)
2022 MonoMethodSignature *sig;
2023 MonoMethodHeader *header;
2024 int i;
2026 header = cfg->header;
2028 sig = mono_method_signature (cfg->method);
2030 if (!MONO_TYPE_IS_VOID (sig->ret)) {
2031 cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
2032 /* Inhibit optimizations */
2033 cfg->ret->flags |= MONO_INST_VOLATILE;
2035 if (cfg->verbose_level > 2)
2036 g_print ("creating vars\n");
2038 cfg->args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, (sig->param_count + sig->hasthis) * sizeof (MonoInst*));
2040 if (sig->hasthis) {
2041 cfg->args [0] = mono_compile_create_var (cfg, &cfg->method->klass->this_arg, OP_ARG);
2042 cfg->this_arg = cfg->args [0];
2045 for (i = 0; i < sig->param_count; ++i) {
2046 cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
2049 if (cfg->verbose_level > 2) {
2050 if (cfg->ret) {
2051 printf ("\treturn : ");
2052 mono_print_ins (cfg->ret);
2055 if (sig->hasthis) {
2056 printf ("\tthis: ");
2057 mono_print_ins (cfg->args [0]);
2060 for (i = 0; i < sig->param_count; ++i) {
2061 printf ("\targ [%d]: ", i);
2062 mono_print_ins (cfg->args [i + sig->hasthis]);
2066 cfg->locals_start = cfg->num_varinfo;
2067 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
2069 if (cfg->verbose_level > 2)
2070 g_print ("creating locals\n");
2072 for (i = 0; i < header->num_locals; ++i) {
2073 if (cfg->verbose_level > 2)
2074 g_print ("\tlocal [%d]: ", i);
2075 cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
2078 if (cfg->verbose_level > 2)
2079 g_print ("locals done\n");
2081 #ifdef ENABLE_LLVM
2082 if (COMPILE_LLVM (cfg))
2083 mono_llvm_create_vars (cfg);
2084 else
2085 mono_arch_create_vars (cfg);
2086 #else
2087 mono_arch_create_vars (cfg);
2088 #endif
2090 if (cfg->method->save_lmf && cfg->create_lmf_var) {
2091 MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2092 lmf_var->flags |= MONO_INST_VOLATILE;
2093 lmf_var->flags |= MONO_INST_LMF;
2094 cfg->lmf_var = lmf_var;
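/*
 * Illustrative layout for a hypothetical instance method "int Foo (object a, int b)":
 * the variables created above are
 *
 *   cfg->ret        OP_ARG    int return value, marked MONO_INST_VOLATILE
 *   cfg->args [0]   OP_ARG    the "this" pointer (also cfg->this_arg)
 *   cfg->args [1]   OP_ARG    a
 *   cfg->args [2]   OP_ARG    b
 *   cfg->locals [i] OP_LOCAL  one per entry in header->locals
 */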
2098 void
2099 mono_print_code (MonoCompile *cfg, const char* msg)
2101 MonoBasicBlock *bb;
2103 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2104 mono_print_bb (bb, msg);
2107 static void
2108 mono_postprocess_patches (MonoCompile *cfg)
2110 MonoJumpInfo *patch_info;
2111 int i;
2113 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
2114 switch (patch_info->type) {
2115 case MONO_PATCH_INFO_ABS: {
2116 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
2119 * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
2120 * absolute address.
2122 if (info) {
2123 //printf ("TEST %s %p\n", info->name, patch_info->data.target);
2124 /* for these array methods we currently register the same function pointer,
2125 * since they are vararg functions. But this means that mono_find_jit_icall_by_addr ()
2126 * can return the wrong one depending on the order in which they are registered.
2127 * See tests/test-arr.cs
2129 if (strstr (info->name, "ves_array_new_va_") == NULL && strstr (info->name, "ves_array_element_address_") == NULL) {
2130 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
2131 patch_info->data.name = info->name;
2135 if (patch_info->type == MONO_PATCH_INFO_ABS) {
2136 if (cfg->abs_patches) {
2137 MonoJumpInfo *abs_ji = (MonoJumpInfo *)g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
2138 if (abs_ji) {
2139 patch_info->type = abs_ji->type;
2140 patch_info->data.target = abs_ji->data.target;
2145 break;
2147 case MONO_PATCH_INFO_SWITCH: {
2148 gpointer *table;
2149 if (cfg->method->dynamic) {
2150 table = (void **)mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
2151 } else {
2152 table = (void **)mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
2155 for (i = 0; i < patch_info->data.table->table_size; i++) {
2156 /* Might be NULL if the switch is eliminated */
2157 if (patch_info->data.table->table [i]) {
2158 g_assert (patch_info->data.table->table [i]->native_offset);
2159 table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
2160 } else {
2161 table [i] = NULL;
2164 patch_info->data.table->table = (MonoBasicBlock**)table;
2165 break;
2167 case MONO_PATCH_INFO_METHOD_JUMP: {
2168 MonoJumpList *jlist;
2169 MonoDomain *domain = cfg->domain;
2170 unsigned char *ip = cfg->native_code + patch_info->ip.i;
2172 mono_domain_lock (domain);
2173 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (domain)->jump_target_hash, patch_info->data.method);
2174 if (!jlist) {
2175 jlist = (MonoJumpList *)mono_domain_alloc0 (domain, sizeof (MonoJumpList));
2176 g_hash_table_insert (domain_jit_info (domain)->jump_target_hash, patch_info->data.method, jlist);
2178 jlist->list = g_slist_prepend (jlist->list, ip);
2179 mono_domain_unlock (domain);
2180 break;
2182 default:
2183 /* do nothing */
2184 break;
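/*
 * Illustrative sketch of the rewrite above (values are schematic, not real
 * addresses): an ABS patch that points at the raw address of a registered
 * icall becomes a symbolic patch, so the emitted code no longer depends on
 * the absolute pointer:
 *
 *   before:  type = MONO_PATCH_INFO_ABS              data.target = <icall address>
 *   after:   type = MONO_PATCH_INFO_INTERNAL_METHOD  data.name   = <icall name>
 */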
2189 void
2190 mono_codegen (MonoCompile *cfg)
2192 MonoBasicBlock *bb;
2193 int max_epilog_size;
2194 guint8 *code;
2195 MonoDomain *code_domain;
2196 guint unwindlen = 0;
2198 if (mono_using_xdebug)
2200 * Recent gdb versions have trouble processing symbol files containing
2201 * overlapping address ranges, so allocate all code from the code manager
2202 * of the root domain. (#666152).
2204 code_domain = mono_get_root_domain ();
2205 else
2206 code_domain = cfg->domain;
2208 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2209 cfg->spill_count = 0;
2210 /* we reuse dfn here */
2211 /* bb->dfn = bb_count++; */
2213 mono_arch_lowering_pass (cfg, bb);
2215 if (cfg->opt & MONO_OPT_PEEPHOLE)
2216 mono_arch_peephole_pass_1 (cfg, bb);
2218 mono_local_regalloc (cfg, bb);
2220 if (cfg->opt & MONO_OPT_PEEPHOLE)
2221 mono_arch_peephole_pass_2 (cfg, bb);
2223 if (cfg->gen_seq_points && !cfg->gen_sdb_seq_points)
2224 mono_bb_deduplicate_op_il_seq_points (cfg, bb);
2227 code = mono_arch_emit_prolog (cfg);
2229 cfg->code_len = code - cfg->native_code;
2230 cfg->prolog_end = cfg->code_len;
2231 cfg->cfa_reg = cfg->cur_cfa_reg;
2232 cfg->cfa_offset = cfg->cur_cfa_offset;
2234 mono_debug_open_method (cfg);
2236 /* emit code for all basic blocks */
2237 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2238 bb->native_offset = cfg->code_len;
2239 bb->real_native_offset = cfg->code_len;
2240 //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
2241 mono_arch_output_basic_block (cfg, bb);
2242 bb->native_length = cfg->code_len - bb->native_offset;
2244 if (bb == cfg->bb_exit) {
2245 cfg->epilog_begin = cfg->code_len;
2246 mono_arch_emit_epilog (cfg);
2247 cfg->epilog_end = cfg->code_len;
2250 if (bb->clause_holes) {
2251 GList *tmp;
2252 for (tmp = bb->clause_holes; tmp; tmp = tmp->prev)
2253 mono_cfg_add_try_hole (cfg, (MonoExceptionClause *)tmp->data, cfg->native_code + bb->native_offset, bb);
2257 mono_arch_emit_exceptions (cfg);
2259 max_epilog_size = 0;
2261 /* we always allocate code in cfg->domain->code_mp to increase locality */
2262 cfg->code_size = cfg->code_len + max_epilog_size;
2264 /* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
2266 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2267 if (!cfg->compile_aot)
2268 unwindlen = mono_arch_unwindinfo_init_method_unwind_info (cfg);
2269 #endif
2271 if (cfg->method->dynamic) {
2272 /* Allocate the code into a separate memory pool so it can be freed */
2273 cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
2274 cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
2275 mono_domain_lock (cfg->domain);
2276 mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
2277 mono_domain_unlock (cfg->domain);
2279 if (mono_using_xdebug)
2280 /* See the comment for cfg->code_domain */
2281 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2282 else
2283 code = (guint8 *)mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + cfg->thunk_area + unwindlen);
2284 } else {
2285 code = (guint8 *)mono_domain_code_reserve (code_domain, cfg->code_size + cfg->thunk_area + unwindlen);
2288 if (cfg->thunk_area) {
2289 cfg->thunks_offset = cfg->code_size + unwindlen;
2290 cfg->thunks = code + cfg->thunks_offset;
2291 memset (cfg->thunks, 0, cfg->thunk_area);
2294 g_assert (code);
2295 memcpy (code, cfg->native_code, cfg->code_len);
2296 g_free (cfg->native_code);
2297 cfg->native_code = code;
2298 code = cfg->native_code + cfg->code_len;
2300 /* g_assert (((int)cfg->native_code & (MONO_ARCH_CODE_ALIGNMENT - 1)) == 0); */
2301 mono_postprocess_patches (cfg);
2303 #ifdef VALGRIND_JIT_REGISTER_MAP
2304 if (valgrind_register){
2305 char* nm = mono_method_full_name (cfg->method, TRUE);
2306 VALGRIND_JIT_REGISTER_MAP (nm, cfg->native_code, cfg->native_code + cfg->code_len);
2307 g_free (nm);
2309 #endif
2311 if (cfg->verbose_level > 0) {
2312 char* nm = mono_method_get_full_name (cfg->method);
2313 g_print ("Method %s emitted at %p to %p (code length %d) [%s]\n",
2314 nm,
2315 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
2316 g_free (nm);
2320 gboolean is_generic = FALSE;
2322 if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
2323 mono_class_is_gtd (cfg->method->klass) || mono_class_is_ginst (cfg->method->klass)) {
2324 is_generic = TRUE;
2327 if (cfg->gshared)
2328 g_assert (is_generic);
2331 #ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
2332 mono_arch_save_unwind_info (cfg);
2333 #endif
2335 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
2337 MonoJumpInfo *ji;
2338 gpointer target;
2340 for (ji = cfg->patch_info; ji; ji = ji->next) {
2341 if (cfg->compile_aot) {
2342 switch (ji->type) {
2343 case MONO_PATCH_INFO_BB:
2344 case MONO_PATCH_INFO_LABEL:
2345 break;
2346 default:
2347 /* No need to patch these */
2348 continue;
2352 if (ji->type == MONO_PATCH_INFO_NONE)
2353 continue;
2355 target = mono_resolve_patch_target (cfg->method, cfg->domain, cfg->native_code, ji, cfg->run_cctors, &cfg->error);
2356 if (!mono_error_ok (&cfg->error)) {
2357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2358 return;
2360 mono_arch_patch_code_new (cfg, cfg->domain, cfg->native_code, ji, target);
2363 #else
2364 mono_arch_patch_code (cfg, cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors, &cfg->error);
2365 if (!is_ok (&cfg->error)) {
2366 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
2367 return;
2369 #endif
2371 if (cfg->method->dynamic) {
2372 if (mono_using_xdebug)
2373 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2374 else
2375 mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
2376 } else {
2377 mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
2379 MONO_PROFILER_RAISE (jit_code_buffer, (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method));
2381 mono_arch_flush_icache (cfg->native_code, cfg->code_len);
2383 mono_debug_close_method (cfg);
2385 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
2386 if (!cfg->compile_aot)
2387 mono_arch_unwindinfo_install_method_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
2388 #endif
2391 static void
2392 compute_reachable (MonoBasicBlock *bb)
2394 int i;
2396 if (!(bb->flags & BB_VISITED)) {
2397 bb->flags |= BB_VISITED;
2398 for (i = 0; i < bb->out_count; ++i)
2399 compute_reachable (bb->out_bb [i]);
2403 static void mono_bb_ordering (MonoCompile *cfg)
2405 int dfn = 0;
2406 /* Depth-first ordering on basic blocks */
2407 cfg->bblocks = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
2409 cfg->max_block_num = cfg->num_bblocks;
2411 df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
2412 if (cfg->num_bblocks != dfn + 1) {
2413 MonoBasicBlock *bb;
2415 cfg->num_bblocks = dfn + 1;
2417 /* remove unreachable code, because the code in unreachable blocks may be
2418 * inconsistent (it can access dead variables, for example) */
2419 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2420 bb->flags &= ~BB_VISITED;
2421 compute_reachable (cfg->bb_entry);
2422 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2423 if (bb->flags & BB_EXCEPTION_HANDLER)
2424 compute_reachable (bb);
2425 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2426 if (!(bb->flags & BB_VISITED)) {
2427 if (cfg->verbose_level > 1)
2428 g_print ("found unreachable code in BB%d\n", bb->block_num);
2429 bb->code = bb->last_ins = NULL;
2430 while (bb->out_count)
2431 mono_unlink_bblock (cfg, bb, bb->out_bb [0]);
2434 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
2435 bb->flags &= ~BB_VISITED;
2439 static void
2440 mono_handle_out_of_line_bblock (MonoCompile *cfg)
2442 MonoBasicBlock *bb;
2443 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2444 if (bb->next_bb && bb->next_bb->out_of_line && bb->last_ins && !MONO_IS_BRANCH_OP (bb->last_ins)) {
2445 MonoInst *ins;
2446 MONO_INST_NEW (cfg, ins, OP_BR);
2447 MONO_ADD_INS (bb, ins);
2448 ins->inst_target_bb = bb->next_bb;
2453 static MonoJitInfo*
2454 create_jit_info (MonoCompile *cfg, MonoMethod *method_to_compile)
2456 GSList *tmp;
2457 MonoMethodHeader *header;
2458 MonoJitInfo *jinfo;
2459 MonoJitInfoFlags flags = JIT_INFO_NONE;
2460 int num_clauses, num_holes = 0;
2461 guint32 stack_size = 0;
2463 g_assert (method_to_compile == cfg->method);
2464 header = cfg->header;
2466 if (cfg->gshared)
2467 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_GENERIC_JIT_INFO);
2469 if (cfg->arch_eh_jit_info) {
2470 MonoJitArgumentInfo *arg_info;
2471 MonoMethodSignature *sig = mono_method_signature (cfg->method_to_register);
2474 * This cannot be computed during stack walking, as
2475 * mono_arch_get_argument_info () is not signal safe.
2477 arg_info = g_newa (MonoJitArgumentInfo, sig->param_count + 1);
2478 stack_size = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
2480 if (stack_size)
2481 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2484 if (cfg->has_unwind_info_for_epilog && !(flags & JIT_INFO_HAS_ARCH_EH_INFO))
2485 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_ARCH_EH_INFO);
2487 if (cfg->thunk_area)
2488 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_THUNK_INFO);
2490 if (cfg->try_block_holes) {
2491 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2492 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2493 MonoExceptionClause *ec = hole->clause;
2494 int hole_end = hole->basic_block->native_offset + hole->basic_block->native_length;
2495 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2496 g_assert (clause_last_bb);
2498 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2499 if (clause_last_bb->native_offset != hole_end)
2500 ++num_holes;
2502 if (num_holes)
2503 flags = (MonoJitInfoFlags)(flags | JIT_INFO_HAS_TRY_BLOCK_HOLES);
2504 if (G_UNLIKELY (cfg->verbose_level >= 4))
2505 printf ("Number of try block holes %d\n", num_holes);
2508 if (COMPILE_LLVM (cfg))
2509 num_clauses = cfg->llvm_ex_info_len;
2510 else
2511 num_clauses = header->num_clauses;
2513 if (cfg->method->dynamic)
2514 jinfo = (MonoJitInfo *)g_malloc0 (mono_jit_info_size (flags, num_clauses, num_holes));
2515 else
2516 jinfo = (MonoJitInfo *)mono_domain_alloc0 (cfg->domain, mono_jit_info_size (flags, num_clauses, num_holes));
2517 jinfo_try_holes_size += num_holes * sizeof (MonoTryBlockHoleJitInfo);
2519 mono_jit_info_init (jinfo, cfg->method_to_register, cfg->native_code, cfg->code_len, flags, num_clauses, num_holes);
2520 jinfo->domain_neutral = (cfg->opt & MONO_OPT_SHARED) != 0;
2522 if (COMPILE_LLVM (cfg))
2523 jinfo->from_llvm = TRUE;
2525 if (cfg->gshared) {
2526 MonoInst *inst;
2527 MonoGenericJitInfo *gi;
2528 GSList *loclist = NULL;
2530 gi = mono_jit_info_get_generic_jit_info (jinfo);
2531 g_assert (gi);
2533 if (cfg->method->dynamic)
2534 gi->generic_sharing_context = g_new0 (MonoGenericSharingContext, 1);
2535 else
2536 gi->generic_sharing_context = (MonoGenericSharingContext *)mono_domain_alloc0 (cfg->domain, sizeof (MonoGenericSharingContext));
2537 mini_init_gsctx (cfg->method->dynamic ? NULL : cfg->domain, NULL, cfg->gsctx_context, gi->generic_sharing_context);
2539 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2540 mini_method_get_context (method_to_compile)->method_inst ||
2541 method_to_compile->klass->valuetype) {
2542 g_assert (cfg->rgctx_var);
2545 gi->has_this = 1;
2547 if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
2548 mini_method_get_context (method_to_compile)->method_inst ||
2549 method_to_compile->klass->valuetype) {
2550 inst = cfg->rgctx_var;
2551 if (!COMPILE_LLVM (cfg))
2552 g_assert (inst->opcode == OP_REGOFFSET);
2553 loclist = cfg->rgctx_loclist;
2554 } else {
2555 inst = cfg->args [0];
2556 loclist = cfg->this_loclist;
2559 if (loclist) {
2560 /* Needed to handle async exceptions */
2561 GSList *l;
2562 int i;
2564 gi->nlocs = g_slist_length (loclist);
2565 if (cfg->method->dynamic)
2566 gi->locations = (MonoDwarfLocListEntry *)g_malloc0 (gi->nlocs * sizeof (MonoDwarfLocListEntry));
2567 else
2568 gi->locations = (MonoDwarfLocListEntry *)mono_domain_alloc0 (cfg->domain, gi->nlocs * sizeof (MonoDwarfLocListEntry));
2569 i = 0;
2570 for (l = loclist; l; l = l->next) {
2571 memcpy (&(gi->locations [i]), l->data, sizeof (MonoDwarfLocListEntry));
2572 i ++;
2576 if (COMPILE_LLVM (cfg)) {
2577 g_assert (cfg->llvm_this_reg != -1);
2578 gi->this_in_reg = 0;
2579 gi->this_reg = cfg->llvm_this_reg;
2580 gi->this_offset = cfg->llvm_this_offset;
2581 } else if (inst->opcode == OP_REGVAR) {
2582 gi->this_in_reg = 1;
2583 gi->this_reg = inst->dreg;
2584 } else {
2585 g_assert (inst->opcode == OP_REGOFFSET);
2586 #ifdef TARGET_X86
2587 g_assert (inst->inst_basereg == X86_EBP);
2588 #elif defined(TARGET_AMD64)
2589 g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
2590 #endif
2591 g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
2593 gi->this_in_reg = 0;
2594 gi->this_reg = inst->inst_basereg;
2595 gi->this_offset = inst->inst_offset;
2599 if (num_holes) {
2600 MonoTryBlockHoleTableJitInfo *table;
2601 int i;
2603 table = mono_jit_info_get_try_block_hole_table_info (jinfo);
2604 table->num_holes = (guint16)num_holes;
2605 i = 0;
2606 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2607 guint32 start_bb_offset;
2608 MonoTryBlockHoleJitInfo *hole;
2609 TryBlockHole *hole_data = (TryBlockHole *)tmp->data;
2610 MonoExceptionClause *ec = hole_data->clause;
2611 int hole_end = hole_data->basic_block->native_offset + hole_data->basic_block->native_length;
2612 MonoBasicBlock *clause_last_bb = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2613 g_assert (clause_last_bb);
2615 /* Holes at the end of a try region can be represented by simply reducing the size of the block itself.*/
2616 if (clause_last_bb->native_offset == hole_end)
2617 continue;
2619 start_bb_offset = hole_data->start_offset - hole_data->basic_block->native_offset;
2620 hole = &table->holes [i++];
2621 hole->clause = hole_data->clause - &header->clauses [0];
2622 hole->offset = (guint32)hole_data->start_offset;
2623 hole->length = (guint16)(hole_data->basic_block->native_length - start_bb_offset);
2625 if (G_UNLIKELY (cfg->verbose_level >= 4))
2626 printf ("\tTry block hole at eh clause %d offset %x length %x\n", hole->clause, hole->offset, hole->length);
2628 g_assert (i == num_holes);
2631 if (jinfo->has_arch_eh_info) {
2632 MonoArchEHJitInfo *info;
2634 info = mono_jit_info_get_arch_eh_info (jinfo);
2636 info->stack_size = stack_size;
2639 if (cfg->thunk_area) {
2640 MonoThunkJitInfo *info;
2642 info = mono_jit_info_get_thunk_info (jinfo);
2643 info->thunks_offset = cfg->thunks_offset;
2644 info->thunks_size = cfg->thunk_area;
2647 if (COMPILE_LLVM (cfg)) {
2648 if (num_clauses)
2649 memcpy (&jinfo->clauses [0], &cfg->llvm_ex_info [0], num_clauses * sizeof (MonoJitExceptionInfo));
2650 } else if (header->num_clauses) {
2651 int i;
2653 for (i = 0; i < header->num_clauses; i++) {
2654 MonoExceptionClause *ec = &header->clauses [i];
2655 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2656 MonoBasicBlock *tblock;
2657 MonoInst *exvar;
2659 ei->flags = ec->flags;
2661 if (G_UNLIKELY (cfg->verbose_level >= 4))
2662 printf ("IL clause: try 0x%x-0x%x handler 0x%x-0x%x filter 0x%x\n", ec->try_offset, ec->try_offset + ec->try_len, ec->handler_offset, ec->handler_offset + ec->handler_len, ec->flags == MONO_EXCEPTION_CLAUSE_FILTER ? ec->data.filter_offset : 0);
2664 exvar = mono_find_exvar_for_offset (cfg, ec->handler_offset);
2665 ei->exvar_offset = exvar ? exvar->inst_offset : 0;
2667 if (ei->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
2668 tblock = cfg->cil_offset_to_bb [ec->data.filter_offset];
2669 g_assert (tblock);
2670 ei->data.filter = cfg->native_code + tblock->native_offset;
2671 } else {
2672 ei->data.catch_class = ec->data.catch_class;
2675 tblock = cfg->cil_offset_to_bb [ec->try_offset];
2676 g_assert (tblock);
2677 g_assert (tblock->native_offset);
2678 ei->try_start = cfg->native_code + tblock->native_offset;
2679 if (tblock->extend_try_block) {
2681 * Extend the try block backwards to include parts of the previous call
2682 * instruction.
2684 ei->try_start = (guint8*)ei->try_start - cfg->backend->monitor_enter_adjustment;
2686 if (ec->try_offset + ec->try_len < header->code_size)
2687 tblock = cfg->cil_offset_to_bb [ec->try_offset + ec->try_len];
2688 else
2689 tblock = cfg->bb_exit;
2690 if (G_UNLIKELY (cfg->verbose_level >= 4))
2691 printf ("looking for end of try [%d, %d] -> %p (code size %d)\n", ec->try_offset, ec->try_len, tblock, header->code_size);
2692 g_assert (tblock);
2693 if (!tblock->native_offset) {
2694 int j, end;
2695 for (j = ec->try_offset + ec->try_len, end = ec->try_offset; j >= end; --j) {
2696 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2697 if (bb && bb->native_offset) {
2698 tblock = bb;
2699 break;
2703 ei->try_end = cfg->native_code + tblock->native_offset;
2704 g_assert (tblock->native_offset);
2705 tblock = cfg->cil_offset_to_bb [ec->handler_offset];
2706 g_assert (tblock);
2707 ei->handler_start = cfg->native_code + tblock->native_offset;
2709 for (tmp = cfg->try_block_holes; tmp; tmp = tmp->next) {
2710 TryBlockHole *hole = (TryBlockHole *)tmp->data;
2711 gpointer hole_end = cfg->native_code + (hole->basic_block->native_offset + hole->basic_block->native_length);
2712 if (hole->clause == ec && hole_end == ei->try_end) {
2713 if (G_UNLIKELY (cfg->verbose_level >= 4))
2714 printf ("\tShortening try block %d from %x to %x\n", i, (int)((guint8*)ei->try_end - cfg->native_code), hole->start_offset);
2716 ei->try_end = cfg->native_code + hole->start_offset;
2717 break;
2721 if (ec->flags == MONO_EXCEPTION_CLAUSE_FINALLY) {
2722 int end_offset;
2723 if (ec->handler_offset + ec->handler_len < header->code_size) {
2724 tblock = cfg->cil_offset_to_bb [ec->handler_offset + ec->handler_len];
2725 if (tblock->native_offset) {
2726 end_offset = tblock->native_offset;
2727 } else {
2728 int j, end;
2730 for (j = ec->handler_offset + ec->handler_len, end = ec->handler_offset; j >= end; --j) {
2731 MonoBasicBlock *bb = cfg->cil_offset_to_bb [j];
2732 if (bb && bb->native_offset) {
2733 tblock = bb;
2734 break;
2737 end_offset = tblock->native_offset + tblock->native_length;
2739 } else {
2740 end_offset = cfg->epilog_begin;
2742 ei->data.handler_end = cfg->native_code + end_offset;
2747 if (G_UNLIKELY (cfg->verbose_level >= 4)) {
2748 int i;
2749 for (i = 0; i < jinfo->num_clauses; i++) {
2750 MonoJitExceptionInfo *ei = &jinfo->clauses [i];
2751 int start = (guint8*)ei->try_start - cfg->native_code;
2752 int end = (guint8*)ei->try_end - cfg->native_code;
2753 int handler = (guint8*)ei->handler_start - cfg->native_code;
2754 int handler_end = (guint8*)ei->data.handler_end - cfg->native_code;
2756 printf ("JitInfo EH clause %d flags %x try %x-%x handler %x-%x\n", i, ei->flags, start, end, handler, handler_end);
2760 if (cfg->encoded_unwind_ops) {
2761 /* Generated by LLVM */
2762 jinfo->unwind_info = mono_cache_unwind_info (cfg->encoded_unwind_ops, cfg->encoded_unwind_ops_len);
2763 g_free (cfg->encoded_unwind_ops);
2764 } else if (cfg->unwind_ops) {
2765 guint32 info_len;
2766 guint8 *unwind_info = mono_unwind_ops_encode (cfg->unwind_ops, &info_len);
2767 guint32 unwind_desc;
2769 unwind_desc = mono_cache_unwind_info (unwind_info, info_len);
2771 if (cfg->has_unwind_info_for_epilog) {
2772 MonoArchEHJitInfo *info;
2774 info = mono_jit_info_get_arch_eh_info (jinfo);
2775 g_assert (info);
2776 info->epilog_size = cfg->code_len - cfg->epilog_begin;
2778 jinfo->unwind_info = unwind_desc;
2779 g_free (unwind_info);
2780 } else {
2781 jinfo->unwind_info = cfg->used_int_regs;
2784 return jinfo;
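/*
 * Illustrative sketch (not from the original code): a try-block hole describes
 * a range inside the native span of a try region that must not be covered by
 * it, e.g. a cold bblock that was moved into the middle of the try:
 *
 *   try_start ... [hot path] ... [out-of-line bblock = hole] ... try_end
 *
 * Each hole is encoded as (clause index, native offset, length); holes that
 * end exactly at try_end are instead folded into a shorter ei->try_end above.
 */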
2787 /* Return whether METHOD is a gsharedvt method */
2788 static gboolean
2789 is_gsharedvt_method (MonoMethod *method)
2791 MonoGenericContext *context;
2792 MonoGenericInst *inst;
2793 int i;
2795 if (!method->is_inflated)
2796 return FALSE;
2797 context = mono_method_get_context (method);
2798 inst = context->class_inst;
2799 if (inst) {
2800 for (i = 0; i < inst->type_argc; ++i)
2801 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2802 return TRUE;
2804 inst = context->method_inst;
2805 if (inst) {
2806 for (i = 0; i < inst->type_argc; ++i)
2807 if (mini_is_gsharedvt_gparam (inst->type_argv [i]))
2808 return TRUE;
2810 return FALSE;
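/*
 * Illustrative example (an assumption about a typical caller, not from the
 * original code): a method inflated with a gsharedvt type parameter, e.g. a
 * generic List<T>.Add () instantiation whose T is only known by size at
 * runtime, is reported as gsharedvt here so a single shared native body can
 * serve it.
 */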
2813 static gboolean
2814 is_open_method (MonoMethod *method)
2816 MonoGenericContext *context;
2818 if (!method->is_inflated)
2819 return FALSE;
2820 context = mono_method_get_context (method);
2821 if (context->class_inst && context->class_inst->is_open)
2822 return TRUE;
2823 if (context->method_inst && context->method_inst->is_open)
2824 return TRUE;
2825 return FALSE;
2828 static void
2829 mono_insert_nop_in_empty_bb (MonoCompile *cfg)
2831 MonoBasicBlock *bb;
2832 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2833 if (bb->code)
2834 continue;
2835 MonoInst *nop;
2836 MONO_INST_NEW (cfg, nop, OP_NOP);
2837 MONO_ADD_INS (bb, nop);
2840 static void
2841 mono_create_gc_safepoint (MonoCompile *cfg, MonoBasicBlock *bblock)
2843 MonoInst *poll_addr, *ins;
2845 if (cfg->disable_gc_safe_points)
2846 return;
2848 if (cfg->verbose_level > 1)
2849 printf ("ADDING SAFE POINT TO BB %d\n", bblock->block_num);
2851 g_assert (mono_threads_is_coop_enabled ());
2852 NEW_AOTCONST (cfg, poll_addr, MONO_PATCH_INFO_GC_SAFE_POINT_FLAG, (gpointer)&mono_polling_required);
2854 MONO_INST_NEW (cfg, ins, OP_GC_SAFE_POINT);
2855 ins->sreg1 = poll_addr->dreg;
2857 if (bblock->flags & BB_EXCEPTION_HANDLER) {
2858 MonoInst *eh_op = bblock->code;
2860 if (eh_op && eh_op->opcode != OP_START_HANDLER && eh_op->opcode != OP_GET_EX_OBJ) {
2861 eh_op = NULL;
2862 } else {
2863 MonoInst *next_eh_op = eh_op ? eh_op->next : NULL;
2864 // skip all EH-related ops
2865 while (next_eh_op && (next_eh_op->opcode == OP_START_HANDLER || next_eh_op->opcode == OP_GET_EX_OBJ)) {
2866 eh_op = next_eh_op;
2867 next_eh_op = eh_op->next;
2871 mono_bblock_insert_after_ins (bblock, eh_op, poll_addr);
2872 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2873 } else if (bblock == cfg->bb_entry) {
2874 mono_bblock_insert_after_ins (bblock, bblock->last_ins, poll_addr);
2875 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2877 } else {
2878 mono_bblock_insert_before_ins (bblock, NULL, poll_addr);
2879 mono_bblock_insert_after_ins (bblock, poll_addr, ins);
2884 This code inserts safepoints into managed code at important code paths.
2885 Those are:
2887 - the first basic block
2888 - landing BBs for exception handlers
2889 - loop body starts (see the illustrative sketch after mono_insert_safepoints () below).
2892 static void
2893 mono_insert_safepoints (MonoCompile *cfg)
2895 MonoBasicBlock *bb;
2897 if (!mono_threads_is_coop_enabled ())
2898 return;
2900 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2901 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2902 g_assert (mono_threads_is_coop_enabled ());
2903 gpointer poll_func = &mono_threads_state_poll;
2905 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER && info->d.icall.func == poll_func) {
2906 if (cfg->verbose_level > 1)
2907 printf ("SKIPPING SAFEPOINTS for the polling function icall\n");
2908 return;
2912 if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
2913 if (cfg->verbose_level > 1)
2914 printf ("SKIPPING SAFEPOINTS for native-to-managed wrappers.\n");
2915 return;
2918 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2919 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2921 if (info && info->subtype == WRAPPER_SUBTYPE_ICALL_WRAPPER &&
2922 (info->d.icall.func == mono_thread_interruption_checkpoint ||
2923 info->d.icall.func == mono_threads_exit_gc_safe_region_unbalanced)) {
2924 /* These wrappers are called from the wrapper for the polling function, leading to potential stack overflow */
2925 if (cfg->verbose_level > 1)
2926 printf ("SKIPPING SAFEPOINTS for wrapper %s\n", cfg->method->name);
2927 return;
2931 if (cfg->verbose_level > 1)
2932 printf ("INSERTING SAFEPOINTS\n");
2933 if (cfg->verbose_level > 2)
2934 mono_print_code (cfg, "BEFORE SAFEPOINTS");
2936 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2937 if (bb->loop_body_start || bb == cfg->bb_entry || bb->flags & BB_EXCEPTION_HANDLER)
2938 mono_create_gc_safepoint (cfg, bb);
2941 if (cfg->verbose_level > 2)
2942 mono_print_code (cfg, "AFTER SAFEPOINTS");
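/*
 * Illustrative sketch of where the safepoints land for a simple method with a
 * loop and a catch handler (block numbers are hypothetical):
 *
 *   BB0 (entry)          OP_GC_SAFE_POINT   <- first basic block
 *   BB1 (loop body)      OP_GC_SAFE_POINT   <- bb->loop_body_start
 *   BB2 (catch handler)  OP_GC_SAFE_POINT   <- BB_EXCEPTION_HANDLER, placed after
 *                                              the OP_START_HANDLER/OP_GET_EX_OBJ ops
 *
 * Each safepoint materializes the address of mono_polling_required via
 * NEW_AOTCONST and feeds it to an OP_GC_SAFE_POINT instruction.
 */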
2947 static void
2948 mono_insert_branches_between_bblocks (MonoCompile *cfg)
2950 MonoBasicBlock *bb;
2952 /* Add branches between non-consecutive bblocks */
2953 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2954 if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
2955 bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
2956 /* we are careful when inverting, since bugs like #59580
2957 * could show up when dealing with NaNs.
2959 if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
2960 MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
2961 bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
2962 bb->last_ins->inst_false_bb = tmp;
2964 bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
2965 } else {
2966 MonoInst *inst = (MonoInst *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
2967 inst->opcode = OP_BR;
2968 inst->inst_target_bb = bb->last_ins->inst_false_bb;
2969 mono_bblock_add_inst (bb, inst);
2974 if (cfg->verbose_level >= 4) {
2975 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2976 MonoInst *tree = bb->code;
2977 g_print ("DUMP BLOCK %d:\n", bb->block_num);
2978 if (!tree)
2979 continue;
2980 for (; tree; tree = tree->next) {
2981 mono_print_ins_index (-1, tree);
2986 /* FIXME: */
2987 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
2988 bb->max_vreg = cfg->next_vreg;
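/*
 * Illustrative sketch (block numbers are hypothetical): if BB1 ends with a
 * conditional branch "true -> BB3, false -> BB7" and BB3 is the textually
 * following block, the (non-FP) branch is inverted so BB7 becomes the taken
 * target and BB3 the fallthrough; otherwise an explicit OP_BR to the false
 * target is appended, since silently falling through to the wrong block would
 * be incorrect.
 */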
2992 static void
2993 init_backend (MonoBackend *backend)
2995 #ifdef MONO_ARCH_NEED_GOT_VAR
2996 backend->need_got_var = 1;
2997 #endif
2998 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2999 backend->have_card_table_wb = 1;
3000 #endif
3001 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
3002 backend->have_op_generic_class_init = 1;
3003 #endif
3004 #ifdef MONO_ARCH_EMULATE_MUL_DIV
3005 backend->emulate_mul_div = 1;
3006 #endif
3007 #ifdef MONO_ARCH_EMULATE_DIV
3008 backend->emulate_div = 1;
3009 #endif
3010 #if !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
3011 backend->emulate_long_shift_opts = 1;
3012 #endif
3013 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
3014 backend->have_objc_get_selector = 1;
3015 #endif
3016 #ifdef MONO_ARCH_HAVE_GENERALIZED_IMT_TRAMPOLINE
3017 backend->have_generalized_imt_trampoline = 1;
3018 #endif
3019 #ifdef MONO_ARCH_GSHARED_SUPPORTED
3020 backend->gshared_supported = 1;
3021 #endif
3022 if (MONO_ARCH_USE_FPSTACK)
3023 backend->use_fpstack = 1;
3024 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3025 backend->have_liverange_ops = 1;
3026 #endif
3027 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
3028 backend->have_op_tail_call = 1;
3029 #endif
3030 #ifndef MONO_ARCH_MONITOR_ENTER_ADJUSTMENT
3031 backend->monitor_enter_adjustment = 1;
3032 #else
3033 backend->monitor_enter_adjustment = MONO_ARCH_MONITOR_ENTER_ADJUSTMENT;
3034 #endif
3035 #if defined(__mono_ilp32__)
3036 backend->ilp32 = 1;
3037 #endif
3038 #ifdef MONO_ARCH_HAVE_DUMMY_INIT
3039 backend->have_dummy_init = 1;
3040 #endif
3041 #ifdef MONO_ARCH_NEED_DIV_CHECK
3042 backend->need_div_check = 1;
3043 #endif
3044 #ifdef NO_UNALIGNED_ACCESS
3045 backend->no_unaligned_access = 1;
3046 #endif
3047 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
3048 backend->dyn_call_param_area = MONO_ARCH_DYN_CALL_PARAM_AREA;
3049 #endif
3050 #ifdef MONO_ARCH_NO_DIV_WITH_MUL
3051 backend->disable_div_with_mul = 1;
3052 #endif
3056 * mini_method_compile:
3057 * @method: the method to compile
3058 * @opts: the optimization flags to use
3059 * @domain: the domain where the method will be compiled in
3060 * @flags: compilation flags
3061 * @parts: debug flag
3063 * Returns: a MonoCompile* pointer. The caller must check the exception_type
3064 * field in the returned struct to see if compilation succeeded.
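*
* Illustrative usage sketch ("opts" stands for whatever optimization mask the
* caller has computed; the flag and index values are one plausible choice):
*
*   MonoCompile *cfg = mini_method_compile (method, opts, mono_domain_get (),
*                                           JIT_FLAG_RUN_CCTORS, 0, -1);
*   if (cfg->exception_type)
*       ... report the failure recorded in cfg ...
*   mono_destroy_compile (cfg);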
3066 MonoCompile*
3067 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
3069 MonoMethodHeader *header;
3070 MonoMethodSignature *sig;
3071 ERROR_DECL_VALUE (err);
3072 MonoCompile *cfg;
3073 int i;
3074 gboolean try_generic_shared, try_llvm = FALSE;
3075 MonoMethod *method_to_compile, *method_to_register;
3076 gboolean method_is_gshared = FALSE;
3077 gboolean run_cctors = (flags & JIT_FLAG_RUN_CCTORS) ? 1 : 0;
3078 gboolean compile_aot = (flags & JIT_FLAG_AOT) ? 1 : 0;
3079 gboolean full_aot = (flags & JIT_FLAG_FULL_AOT) ? 1 : 0;
3080 gboolean disable_direct_icalls = (flags & JIT_FLAG_NO_DIRECT_ICALLS) ? 1 : 0;
3081 gboolean gsharedvt_method = FALSE;
3082 #ifdef ENABLE_LLVM
3083 gboolean llvm = (flags & JIT_FLAG_LLVM) ? 1 : 0;
3084 #endif
3085 static gboolean verbose_method_inited;
3086 static char **verbose_method_names;
3088 mono_atomic_inc_i32 (&mono_jit_stats.methods_compiled);
3089 MONO_PROFILER_RAISE (jit_begin, (method));
3090 if (MONO_METHOD_COMPILE_BEGIN_ENABLED ())
3091 MONO_PROBE_METHOD_COMPILE_BEGIN (method);
3093 gsharedvt_method = is_gsharedvt_method (method);
3096 * In AOT mode, METHOD can be one of the following:
3097 * - a gsharedvt method.
3098 * - a method inflated with type parameters. This is for ref/partial sharing.
3099 * - a method inflated with concrete types.
3101 if (compile_aot) {
3102 if (is_open_method (method)) {
3103 try_generic_shared = TRUE;
3104 method_is_gshared = TRUE;
3105 } else {
3106 try_generic_shared = FALSE;
3108 g_assert (opts & MONO_OPT_GSHARED);
3109 } else {
3110 try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
3111 (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_full (method, FALSE, FALSE, FALSE);
3112 if (mini_is_gsharedvt_sharable_method (method)) {
3114 if (!mono_debug_count ())
3115 try_generic_shared = FALSE;
3121 if (try_generic_shared && !mono_debug_count ())
3122 try_generic_shared = FALSE;
3125 if (opts & MONO_OPT_GSHARED) {
3126 if (try_generic_shared)
3127 mono_atomic_inc_i32 (&mono_stats.generics_sharable_methods);
3128 else if (mono_method_is_generic_impl (method))
3129 mono_atomic_inc_i32 (&mono_stats.generics_unsharable_methods);
3132 #ifdef ENABLE_LLVM
3133 try_llvm = mono_use_llvm || llvm;
3134 #endif
3136 restart_compile:
3137 if (method_is_gshared) {
3138 method_to_compile = method;
3139 } else {
3140 if (try_generic_shared) {
3141 method_to_compile = mini_get_shared_method (method);
3142 g_assert (method_to_compile);
3143 } else {
3144 method_to_compile = method;
3148 cfg = g_new0 (MonoCompile, 1);
3149 cfg->method = method_to_compile;
3150 cfg->mempool = mono_mempool_new ();
3151 cfg->opt = opts;
3152 cfg->run_cctors = run_cctors;
3153 cfg->domain = domain;
3154 cfg->verbose_level = mini_verbose;
3155 cfg->compile_aot = compile_aot;
3156 cfg->full_aot = full_aot;
3157 cfg->disable_omit_fp = debug_options.disable_omit_fp;
3158 cfg->skip_visibility = method->skip_visibility;
3159 cfg->orig_method = method;
3160 cfg->gen_seq_points = !debug_options.no_seq_points_compact_data || debug_options.gen_sdb_seq_points;
3161 cfg->gen_sdb_seq_points = debug_options.gen_sdb_seq_points;
3162 cfg->llvm_only = (flags & JIT_FLAG_LLVM_ONLY) != 0;
3163 cfg->backend = current_backend;
3165 #ifdef HOST_ANDROID
3166 if (cfg->method->wrapper_type != MONO_WRAPPER_NONE) {
3167 /* FIXME: Why is this needed */
3168 cfg->gen_seq_points = FALSE;
3169 cfg->gen_sdb_seq_points = FALSE;
3171 #endif
3172 if (cfg->method->wrapper_type == MONO_WRAPPER_ALLOC) {
3173 /* We can't have seq points inside gc critical regions */
3174 cfg->gen_seq_points = FALSE;
3175 cfg->gen_sdb_seq_points = FALSE;
3177 /* coop requires loop detection to happen */
3178 if (mono_threads_is_coop_enabled ())
3179 cfg->opt |= MONO_OPT_LOOP;
3180 cfg->explicit_null_checks = debug_options.explicit_null_checks || (flags & JIT_FLAG_EXPLICIT_NULL_CHECKS);
3181 cfg->soft_breakpoints = debug_options.soft_breakpoints;
3182 cfg->check_pinvoke_callconv = debug_options.check_pinvoke_callconv;
3183 cfg->disable_direct_icalls = disable_direct_icalls;
3184 cfg->direct_pinvoke = (flags & JIT_FLAG_DIRECT_PINVOKE) != 0;
3185 if (try_generic_shared)
3186 cfg->gshared = TRUE;
3187 cfg->compile_llvm = try_llvm;
3188 cfg->token_info_hash = g_hash_table_new (NULL, NULL);
3189 if (cfg->compile_aot)
3190 cfg->method_index = aot_method_index;
3193 if (!mono_debug_count ())
3194 cfg->opt &= ~MONO_OPT_FLOAT32;
3196 if (cfg->llvm_only)
3197 cfg->opt &= ~MONO_OPT_SIMD;
3198 cfg->r4fp = (cfg->opt & MONO_OPT_FLOAT32) ? 1 : 0;
3199 cfg->r4_stack_type = cfg->r4fp ? STACK_R4 : STACK_R8;
3201 if (cfg->gen_seq_points)
3202 cfg->seq_points = g_ptr_array_new ();
3203 error_init (&cfg->error);
3205 if (cfg->compile_aot && !try_generic_shared && (method->is_generic || mono_class_is_gtd (method->klass) || method_is_gshared)) {
3206 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
3207 return cfg;
3210 if (cfg->gshared && (gsharedvt_method || mini_is_gsharedvt_sharable_method (method))) {
3211 MonoMethodInflated *inflated;
3212 MonoGenericContext *context;
3214 if (gsharedvt_method) {
3215 g_assert (method->is_inflated);
3216 inflated = (MonoMethodInflated*)method;
3217 context = &inflated->context;
3219 /* We are compiling a gsharedvt method directly */
3220 g_assert (compile_aot);
3221 } else {
3222 g_assert (method_to_compile->is_inflated);
3223 inflated = (MonoMethodInflated*)method_to_compile;
3224 context = &inflated->context;
3227 mini_init_gsctx (NULL, cfg->mempool, context, &cfg->gsctx);
3228 cfg->gsctx_context = context;
3230 cfg->gsharedvt = TRUE;
3231 if (!cfg->llvm_only) {
3232 cfg->disable_llvm = TRUE;
3233 cfg->exception_message = g_strdup ("gsharedvt");
3237 if (cfg->gshared) {
3238 method_to_register = method_to_compile;
3239 } else {
3240 g_assert (method == method_to_compile);
3241 method_to_register = method;
3243 cfg->method_to_register = method_to_register;
3245 error_init (&err);
3246 sig = mono_method_signature_checked (cfg->method, &err);
3247 if (!sig) {
3248 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3249 cfg->exception_message = g_strdup (mono_error_get_message (&err));
3250 mono_error_cleanup (&err);
3251 if (MONO_METHOD_COMPILE_END_ENABLED ())
3252 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3253 return cfg;
3256 header = cfg->header = mono_method_get_header_checked (cfg->method, &cfg->error);
3257 if (!header) {
3258 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3259 if (MONO_METHOD_COMPILE_END_ENABLED ())
3260 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3261 return cfg;
3264 #ifdef ENABLE_LLVM
3266 static gboolean inited;
3268 if (!inited)
3269 inited = TRUE;
3272 * Check early for methods which cannot be compiled by LLVM, to avoid
3273 * the extra compilation pass.
3275 if (COMPILE_LLVM (cfg)) {
3276 mono_llvm_check_method_supported (cfg);
3277 if (cfg->disable_llvm) {
3278 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3279 //nm = mono_method_full_name (cfg->method, TRUE);
3280 printf ("LLVM failed for '%s.%s': %s\n", method->klass->name, method->name, cfg->exception_message);
3281 //g_free (nm);
3283 if (cfg->llvm_only) {
3284 g_free (cfg->exception_message);
3285 cfg->disable_aot = TRUE;
3286 return cfg;
3288 mono_destroy_compile (cfg);
3289 try_llvm = FALSE;
3290 goto restart_compile;
3294 #endif
3296 cfg->prof_flags = mono_profiler_get_call_instrumentation_flags (cfg->method);
3298 /* The debugger has no liveness information, so avoid sharing registers/stack slots */
3299 if (debug_options.mdb_optimizations || MONO_CFG_PROFILE_CALL_CONTEXT (cfg)) {
3300 cfg->disable_reuse_registers = TRUE;
3301 cfg->disable_reuse_stack_slots = TRUE;
3303 * This decreases the chance that the debugger will read registers/stack slots which are
3304 * not yet initialized.
3306 cfg->disable_initlocals_opt = TRUE;
3308 cfg->extend_live_ranges = TRUE;
3310 /* The debugger needs all locals to be on the stack or in a global register */
3311 cfg->disable_vreg_to_lvreg = TRUE;
3313 /* Don't remove unused variables when running inside the debugger since the user
3314 * may still want to view them. */
3315 cfg->disable_deadce_vars = TRUE;
3317 cfg->opt &= ~MONO_OPT_DEADCE;
3318 cfg->opt &= ~MONO_OPT_INLINE;
3319 cfg->opt &= ~MONO_OPT_COPYPROP;
3320 cfg->opt &= ~MONO_OPT_CONSPROP;
3322 /* This is needed for the soft debugger, which doesn't like code after the epilog */
3323 cfg->disable_out_of_line_bblocks = TRUE;
3326 if (mono_using_xdebug) {
3328 * Make each variable use its own register/stack slot and extend
3329 * its live range to cover the whole method, making variables displayable
3330 * in gdb even after they are dead.
3332 cfg->disable_reuse_registers = TRUE;
3333 cfg->disable_reuse_stack_slots = TRUE;
3334 cfg->extend_live_ranges = TRUE;
3335 cfg->compute_precise_live_ranges = TRUE;
3338 mini_gc_init_cfg (cfg);
3340 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
3341 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
3343 /* These wrappers use linkonce linkage, so they can't access GOT slots */
3344 if ((info && (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN_SIG || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT_SIG))) {
3345 cfg->disable_gc_safe_points = TRUE;
3346 /* This is safe, these wrappers only store to the stack */
3347 cfg->gen_write_barriers = FALSE;
3351 if (COMPILE_LLVM (cfg)) {
3352 cfg->opt |= MONO_OPT_ABCREM;
3355 if (!verbose_method_inited) {
3356 char *env = g_getenv ("MONO_VERBOSE_METHOD");
3357 if (env != NULL)
3358 verbose_method_names = g_strsplit (env, ",", -1);
3360 verbose_method_inited = TRUE;
3362 if (verbose_method_names) {
3363 int i;
3365 for (i = 0; verbose_method_names [i] != NULL; i++){
3366 const char *name = verbose_method_names [i];
3368 if ((strchr (name, '.') > name) || strchr (name, ':')) {
3369 MonoMethodDesc *desc;
3371 desc = mono_method_desc_new (name, TRUE);
3372 if (mono_method_desc_full_match (desc, cfg->method)) {
3373 cfg->verbose_level = 4;
3375 mono_method_desc_free (desc);
3376 } else {
3377 if (strcmp (cfg->method->name, name) == 0)
3378 cfg->verbose_level = 4;
3383 cfg->intvars = (guint16 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
3385 if (cfg->verbose_level > 0) {
3386 char *method_name;
3388 method_name = mono_method_get_full_name (method);
3389 g_print ("converting %s%s%smethod %s\n", COMPILE_LLVM (cfg) ? "llvm " : "", cfg->gsharedvt ? "gsharedvt " : "", (cfg->gshared && !cfg->gsharedvt) ? "gshared " : "", method_name);
3390 /*
3391 if (COMPILE_LLVM (cfg))
3392 g_print ("converting llvm method %s\n", method_name = mono_method_full_name (method, TRUE));
3393 else if (cfg->gsharedvt)
3394 g_print ("converting gsharedvt method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3395 else if (cfg->gshared)
3396 g_print ("converting shared method %s\n", method_name = mono_method_full_name (method_to_compile, TRUE));
3397 else
3398 g_print ("converting method %s\n", method_name = mono_method_full_name (method, TRUE));
3399 */
3400 g_free (method_name);
3403 if (cfg->opt & MONO_OPT_ABCREM)
3404 cfg->opt |= MONO_OPT_SSA;
3406 cfg->rs = mono_regstate_new ();
3407 cfg->next_vreg = cfg->rs->next_vreg;
3409 /* FIXME: Fix SSA to handle branches inside bblocks */
3410 if (cfg->opt & MONO_OPT_SSA)
3411 cfg->enable_extended_bblocks = FALSE;
3414 * FIXME: This confuses liveness analysis because variables which are assigned after
3415 * a branch inside a bblock become part of the kill set, even though the assignment
3416 * might not get executed. This causes the optimize_initlocals pass to delete some
3417 * assignments which are needed.
3418 * Also, the mono_if_conversion pass needs to be modified to recognize the code
3419 * created by this.
3421 //cfg->enable_extended_bblocks = TRUE;
3423 /* We must verify the method before doing any IR generation, as mono_compile_create_vars () can assert. */
3424 if (mono_compile_is_broken (cfg, cfg->method, TRUE)) {
3425 if (mini_get_debug_options ()->break_on_unverified)
3426 G_BREAKPOINT ();
3427 return cfg;
3431 * create the MonoInst*s which represent arguments and local variables
3433 mono_compile_create_vars (cfg);
3435 mono_cfg_dump_create_context (cfg);
3436 mono_cfg_dump_begin_group (cfg);
3438 MONO_TIME_TRACK (mono_jit_stats.jit_method_to_ir, i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, NULL, NULL, 0, FALSE));
3439 mono_cfg_dump_ir (cfg, "method-to-ir");
3441 if (cfg->gdump_ctx != NULL) {
3442 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3443 mono_insert_nop_in_empty_bb (cfg);
3444 mono_cfg_dump_ir (cfg, "mono_insert_nop_in_empty_bb");
3447 if (i < 0) {
3448 if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
3449 if (compile_aot) {
3450 if (MONO_METHOD_COMPILE_END_ENABLED ())
3451 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3452 return cfg;
3454 mono_destroy_compile (cfg);
3455 try_generic_shared = FALSE;
3456 goto restart_compile;
3458 g_assert (cfg->exception_type != MONO_EXCEPTION_GENERIC_SHARING_FAILED);
3460 if (MONO_METHOD_COMPILE_END_ENABLED ())
3461 MONO_PROBE_METHOD_COMPILE_END (method, FALSE);
3462 /* cfg contains the details of the failure, so let the caller clean up */
3463 return cfg;
3466 cfg->stat_basic_blocks += cfg->num_bblocks;
3468 if (COMPILE_LLVM (cfg)) {
3469 MonoInst *ins;
3471 /* The IR has to be in SSA form for LLVM */
3472 cfg->opt |= MONO_OPT_SSA;
3474 // FIXME:
3475 if (cfg->ret) {
3476 // Allow SSA on the result value
3477 cfg->ret->flags &= ~MONO_INST_VOLATILE;
3479 // Add an explicit return instruction referencing the return value
3480 MONO_INST_NEW (cfg, ins, OP_SETRET);
3481 ins->sreg1 = cfg->ret->dreg;
3483 MONO_ADD_INS (cfg->bb_exit, ins);
3486 cfg->opt &= ~MONO_OPT_LINEARS;
3488 /* FIXME: */
3489 cfg->opt &= ~MONO_OPT_BRANCH;
3492 /* todo: remove code when we have verified that the liveness for try/catch blocks
3493 * works perfectly
3496 * Currently, this can't be commented out since exception blocks are not
3497 * processed during liveness analysis.
3498 * It is also needed, because otherwise the local optimization passes would
3499 * delete assignments in cases like this:
3500 * r1 <- 1
3501 * <something which throws>
3502 * r1 <- 2
3503 * This also allows SSA to be run on methods containing exception clauses, since
3504 * SSA will ignore variables marked VOLATILE.
3506 MONO_TIME_TRACK (mono_jit_stats.jit_liveness_handle_exception_clauses, mono_liveness_handle_exception_clauses (cfg));
3507 mono_cfg_dump_ir (cfg, "liveness_handle_exception_clauses");
3509 MONO_TIME_TRACK (mono_jit_stats.jit_handle_out_of_line_bblock, mono_handle_out_of_line_bblock (cfg));
3510 mono_cfg_dump_ir (cfg, "handle_out_of_line_bblock");
3512 /*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
3514 if (!COMPILE_LLVM (cfg)) {
3515 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_long_opts, mono_decompose_long_opts (cfg));
3516 mono_cfg_dump_ir (cfg, "decompose_long_opts");
3519 /* Should be done before branch opts */
3520 if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) {
3521 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop, mono_local_cprop (cfg));
3522 mono_cfg_dump_ir (cfg, "local_cprop");
3525 if (cfg->flags & MONO_CFG_HAS_TYPE_CHECK) {
3526 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_typechecks, mono_decompose_typechecks (cfg));
3527 if (cfg->gdump_ctx != NULL) {
3528 /* workaround for graph visualization, as it doesn't handle empty basic blocks properly */
3529 mono_insert_nop_in_empty_bb (cfg);
3531 mono_cfg_dump_ir (cfg, "decompose_typechecks");
3535 * Should be done after cprop which can do strength reduction on
3536 * some of these ops, after propagating immediates.
3538 if (cfg->has_emulated_ops) {
3539 MONO_TIME_TRACK (mono_jit_stats.jit_local_emulate_ops, mono_local_emulate_ops (cfg));
3540 mono_cfg_dump_ir (cfg, "local_emulate_ops");
3543 if (cfg->opt & MONO_OPT_BRANCH) {
3544 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches, mono_optimize_branches (cfg));
3545 mono_cfg_dump_ir (cfg, "optimize_branches");
3548 /* This must be done _before_ global reg alloc and _after_ decompose */
3549 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs, mono_handle_global_vregs (cfg));
3550 mono_cfg_dump_ir (cfg, "handle_global_vregs");
3551 if (cfg->opt & MONO_OPT_DEADCE) {
3552 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce, mono_local_deadce (cfg));
3553 mono_cfg_dump_ir (cfg, "local_deadce");
3555 if (cfg->opt & MONO_OPT_ALIAS_ANALYSIS) {
3556 MONO_TIME_TRACK (mono_jit_stats.jit_local_alias_analysis, mono_local_alias_analysis (cfg));
3557 mono_cfg_dump_ir (cfg, "local_alias_analysis");
3559 /* Disable this for LLVM to make the IR easier to handle */
3560 if (!COMPILE_LLVM (cfg)) {
3561 MONO_TIME_TRACK (mono_jit_stats.jit_if_conversion, mono_if_conversion (cfg));
3562 mono_cfg_dump_ir (cfg, "if_conversion");
3565 mono_threads_safepoint ();
3567 MONO_TIME_TRACK (mono_jit_stats.jit_bb_ordering, mono_bb_ordering (cfg));
3568 mono_cfg_dump_ir (cfg, "bb_ordering");
3570 if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
3572 * we disable some optimizations if there are too many variables
3573 * because JIT time may become too expensive. The actual number needs
3574 * to be tweaked and eventually the non-linear algorithms should be fixed.
3576 cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
3577 cfg->disable_ssa = TRUE;
3580 if (cfg->num_varinfo > 10000 && !cfg->llvm_only)
3581 /* Disable SSA (and thus LLVM, which requires it) for overly complex methods */
3582 cfg->disable_ssa = TRUE;
3584 if (cfg->opt & MONO_OPT_LOOP) {
3585 MONO_TIME_TRACK (mono_jit_stats.jit_compile_dominator_info, mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM));
3586 MONO_TIME_TRACK (mono_jit_stats.jit_compute_natural_loops, mono_compute_natural_loops (cfg));
3589 MONO_TIME_TRACK (mono_jit_stats.jit_insert_safepoints, mono_insert_safepoints (cfg));
3590 mono_cfg_dump_ir (cfg, "insert_safepoints");
3592 /* after method_to_ir */
3593 if (parts == 1) {
3594 if (MONO_METHOD_COMPILE_END_ENABLED ())
3595 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3596 return cfg;
3600 if (header->num_clauses)
3601 cfg->disable_ssa = TRUE;
3604 //#define DEBUGSSA "logic_run"
3605 //#define DEBUGSSA_CLASS "Tests"
3606 #ifdef DEBUGSSA
3608 if (!cfg->disable_ssa) {
3609 mono_local_cprop (cfg);
3611 #ifndef DISABLE_SSA
3612 mono_ssa_compute (cfg);
3613 #endif
3615 #else
3616 if (cfg->opt & MONO_OPT_SSA) {
3617 if (!(cfg->comp_done & MONO_COMP_SSA) && !cfg->disable_ssa) {
3618 #ifndef DISABLE_SSA
3619 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_compute, mono_ssa_compute (cfg));
3620 mono_cfg_dump_ir (cfg, "ssa_compute");
3621 #endif
3623 if (cfg->verbose_level >= 2) {
3624 print_dfn (cfg);
3628 #endif
3630 /* after SSA translation */
3631 if (parts == 2) {
3632 if (MONO_METHOD_COMPILE_END_ENABLED ())
3633 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3634 return cfg;
3637 if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
3638 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3639 #ifndef DISABLE_SSA
3640 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_cprop, mono_ssa_cprop (cfg));
3641 mono_cfg_dump_ir (cfg, "ssa_cprop");
3642 #endif
3646 #ifndef DISABLE_SSA
3647 if (cfg->comp_done & MONO_COMP_SSA && !COMPILE_LLVM (cfg)) {
3648 //mono_ssa_strength_reduction (cfg);
3650 if (cfg->opt & MONO_OPT_DEADCE) {
3651 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_deadce, mono_ssa_deadce (cfg));
3652 mono_cfg_dump_ir (cfg, "ssa_deadce");
3655 if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM)) {
3656 MONO_TIME_TRACK (mono_jit_stats.jit_perform_abc_removal, mono_perform_abc_removal (cfg));
3657 mono_cfg_dump_ir (cfg, "perform_abc_removal");
3660 MONO_TIME_TRACK (mono_jit_stats.jit_ssa_remove, mono_ssa_remove (cfg));
3661 mono_cfg_dump_ir (cfg, "ssa_remove");
3662 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop2, mono_local_cprop (cfg));
3663 mono_cfg_dump_ir (cfg, "local_cprop2");
3664 MONO_TIME_TRACK (mono_jit_stats.jit_handle_global_vregs2, mono_handle_global_vregs (cfg));
3665 mono_cfg_dump_ir (cfg, "handle_global_vregs2");
3666 if (cfg->opt & MONO_OPT_DEADCE) {
3667 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce2, mono_local_deadce (cfg));
3668 mono_cfg_dump_ir (cfg, "local_deadce2");
3671 if (cfg->opt & MONO_OPT_BRANCH) {
3672 MONO_TIME_TRACK (mono_jit_stats.jit_optimize_branches2, mono_optimize_branches (cfg));
3673 mono_cfg_dump_ir (cfg, "optimize_branches2");
3676 #endif
3678 if (cfg->comp_done & MONO_COMP_SSA && COMPILE_LLVM (cfg)) {
3679 mono_ssa_loop_invariant_code_motion (cfg);
3680 mono_cfg_dump_ir (cfg, "loop_invariant_code_motion");
3681 /* This removes MONO_INST_FAULT flags too so perform it unconditionally */
3682 if (cfg->opt & MONO_OPT_ABCREM) {
3683 mono_perform_abc_removal (cfg);
3684 mono_cfg_dump_ir (cfg, "abc_removal");
3688 /* after SSA removal */
3689 if (parts == 3) {
3690 if (MONO_METHOD_COMPILE_END_ENABLED ())
3691 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3692 return cfg;
3695 if (cfg->llvm_only && cfg->gsharedvt)
3696 mono_ssa_remove_gsharedvt (cfg);
3698 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
3699 if (COMPILE_SOFT_FLOAT (cfg))
3700 mono_decompose_soft_float (cfg);
3701 #endif
3702 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_vtype_opts, mono_decompose_vtype_opts (cfg));
3703 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS) {
3704 MONO_TIME_TRACK (mono_jit_stats.jit_decompose_array_access_opts, mono_decompose_array_access_opts (cfg));
3705 mono_cfg_dump_ir (cfg, "decompose_array_access_opts");
3708 if (cfg->got_var) {
3709 #ifndef MONO_ARCH_GOT_REG
3710 GList *regs;
3711 #endif
3712 int got_reg;
3714 g_assert (cfg->got_var_allocated);
3717 * Always allocate the GOT var to a register, because keeping it
3718 * in memory will increase the number of live temporaries in some
3719 * code created by inssel.brg, leading to the well known spills+
3720 * branches problem. Testcase: mcs crash in
3721 * System.MonoCustomAttrs:GetCustomAttributes.
3723 #ifdef MONO_ARCH_GOT_REG
3724 got_reg = MONO_ARCH_GOT_REG;
3725 #else
3726 regs = mono_arch_get_global_int_regs (cfg);
3727 g_assert (regs);
3728 got_reg = GPOINTER_TO_INT (regs->data);
3729 g_list_free (regs);
3730 #endif
3731 cfg->got_var->opcode = OP_REGVAR;
3732 cfg->got_var->dreg = got_reg;
3733 cfg->used_int_regs |= 1LL << cfg->got_var->dreg;
3737 * Have to call this again to process variables added since the first call.
3739 MONO_TIME_TRACK(mono_jit_stats.jit_liveness_handle_exception_clauses2, mono_liveness_handle_exception_clauses (cfg));
3741 if (cfg->opt & MONO_OPT_LINEARS) {
3742 GList *vars, *regs, *l;
3744 /* fixme: maybe we can avoid computing liveness here if it was already computed? */
3745 cfg->comp_done &= ~MONO_COMP_LIVENESS;
3746 if (!(cfg->comp_done & MONO_COMP_LIVENESS))
3747 MONO_TIME_TRACK (mono_jit_stats.jit_analyze_liveness, mono_analyze_liveness (cfg));
3749 if ((vars = mono_arch_get_allocatable_int_vars (cfg))) {
3750 regs = mono_arch_get_global_int_regs (cfg);
3751 /* Remove the reg reserved for holding the GOT address */
3752 if (cfg->got_var) {
3753 for (l = regs; l; l = l->next) {
3754 if (GPOINTER_TO_UINT (l->data) == cfg->got_var->dreg) {
3755 regs = g_list_delete_link (regs, l);
3756 break;
3760 MONO_TIME_TRACK (mono_jit_stats.jit_linear_scan, mono_linear_scan (cfg, vars, regs, &cfg->used_int_regs));
3761 mono_cfg_dump_ir (cfg, "linear_scan");
3765 //mono_print_code (cfg, "");
3767 //print_dfn (cfg);
3769 /* variables are allocated after decompose, since decompose could create temps */
3770 if (!COMPILE_LLVM (cfg)) {
3771 MONO_TIME_TRACK (mono_jit_stats.jit_arch_allocate_vars, mono_arch_allocate_vars (cfg));
3772 mono_cfg_dump_ir (cfg, "arch_allocate_vars");
3773 if (cfg->exception_type)
3774 return cfg;
3777 if (cfg->gsharedvt)
3778 mono_allocate_gsharedvt_vars (cfg);
3780 if (!COMPILE_LLVM (cfg)) {
3781 gboolean need_local_opts;
3782 MONO_TIME_TRACK (mono_jit_stats.jit_spill_global_vars, mono_spill_global_vars (cfg, &need_local_opts));
3783 mono_cfg_dump_ir (cfg, "spill_global_vars");
3785 if (need_local_opts || cfg->compile_aot) {
3786 /* To optimize code created by spill_global_vars */
3787 MONO_TIME_TRACK (mono_jit_stats.jit_local_cprop3, mono_local_cprop (cfg));
3788 if (cfg->opt & MONO_OPT_DEADCE)
3789 MONO_TIME_TRACK (mono_jit_stats.jit_local_deadce3, mono_local_deadce (cfg));
3790 mono_cfg_dump_ir (cfg, "needs_local_opts");
3794 mono_insert_branches_between_bblocks (cfg);
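/*
 * Final native code generation: when COMPILE_LLVM () is true the IR is handed
 * to mono_llvm_emit_method (), otherwise the built-in code generator runs.
 * If LLVM rejects the method and we are not in llvm-only mode, compilation is
 * restarted without LLVM (see the restart_compile label).
 */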
3796 if (COMPILE_LLVM (cfg)) {
3797 #ifdef ENABLE_LLVM
3798 char *nm;
3800 /* The IR has to be in SSA form for LLVM */
3801 if (!(cfg->comp_done & MONO_COMP_SSA)) {
3802 cfg->exception_message = g_strdup ("SSA disabled.");
3803 cfg->disable_llvm = TRUE;
3806 if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
3807 mono_decompose_array_access_opts (cfg);
3809 if (!cfg->disable_llvm)
3810 mono_llvm_emit_method (cfg);
3811 if (cfg->disable_llvm) {
3812 if (cfg->verbose_level >= (cfg->llvm_only ? 0 : 1)) {
3813 //nm = mono_method_full_name (cfg->method, TRUE);
3814 printf ("LLVM failed for '%s.%s': %s\n", method->klass->name, method->name, cfg->exception_message);
3815 //g_free (nm);
3817 if (cfg->llvm_only) {
3818 cfg->disable_aot = TRUE;
3819 return cfg;
3821 mono_destroy_compile (cfg);
3822 try_llvm = FALSE;
3823 goto restart_compile;
3826 if (cfg->verbose_level > 0 && !cfg->compile_aot) {
3827 nm = mono_method_full_name (cfg->method, TRUE);
3828 g_print ("LLVM Method %s emitted at %p to %p (code length %d) [%s]\n",
3829 nm,
3830 cfg->native_code, cfg->native_code + cfg->code_len, cfg->code_len, cfg->domain->friendly_name);
3831 g_free (nm);
3833 #endif
3834 } else {
3835 MONO_TIME_TRACK (mono_jit_stats.jit_codegen, mono_codegen (cfg));
3836 mono_cfg_dump_ir (cfg, "codegen");
3837 if (cfg->exception_type)
3838 return cfg;
3841 if (COMPILE_LLVM (cfg))
3842 mono_atomic_inc_i32 (&mono_jit_stats.methods_with_llvm);
3843 else
3844 mono_atomic_inc_i32 (&mono_jit_stats.methods_without_llvm);
3846 MONO_TIME_TRACK (mono_jit_stats.jit_create_jit_info, cfg->jit_info = create_jit_info (cfg, method_to_compile));
3848 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
3849 if (cfg->extend_live_ranges) {
3850 /* Extend live ranges to cover the whole method */
3851 for (i = 0; i < cfg->num_varinfo; ++i)
3852 MONO_VARINFO (cfg, i)->live_range_end = cfg->code_len;
3854 #endif
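/* Emit auxiliary per-method data: the GC map used for precise stack scanning and the sequence point info. */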
3856 MONO_TIME_TRACK (mono_jit_stats.jit_gc_create_gc_map, mini_gc_create_gc_map (cfg));
3857 MONO_TIME_TRACK (mono_jit_stats.jit_save_seq_point_info, mono_save_seq_point_info (cfg));
3859 if (!cfg->compile_aot) {
3860 mono_save_xdebug_info (cfg);
3861 mono_lldb_save_method_info (cfg);
3864 if (cfg->verbose_level >= 2) {
3865 char *id = mono_method_full_name (cfg->method, FALSE);
3866 mono_disassemble_code (cfg, cfg->native_code, cfg->code_len, id + 3);
3867 g_free (id);
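/*
 * Register the generated code in the domain's JIT info table so that stack
 * walks and exception handling can map addresses back to this method. This is
 * skipped for AOT compilation and when JIT_FLAG_DISCARD_RESULTS is set.
 */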
3870 if (!cfg->compile_aot && !(flags & JIT_FLAG_DISCARD_RESULTS)) {
3871 mono_domain_lock (cfg->domain);
3872 mono_jit_info_table_add (cfg->domain, cfg->jit_info);
3874 if (cfg->method->dynamic)
3875 mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = cfg->jit_info;
3876 mono_domain_unlock (cfg->domain);
3879 #if 0
3880 if (cfg->gsharedvt)
3881 printf ("GSHAREDVT: %s\n", mono_method_full_name (cfg->method, TRUE));
3882 #endif
3884 /* collect statistics */
3885 #ifndef DISABLE_PERFCOUNTERS
3886 mono_atomic_inc_i32 (&mono_perfcounters->jit_methods);
3887 mono_atomic_fetch_add_i32 (&mono_perfcounters->jit_bytes, header->code_size);
3888 #endif
3889 gint32 code_size_ratio = cfg->code_len;
3890 mono_atomic_fetch_add_i32 (&mono_jit_stats.allocated_code_size, code_size_ratio);
3891 mono_atomic_fetch_add_i32 (&mono_jit_stats.native_code_size, code_size_ratio);
3892 /* FIXME: use an explicit function to read booleans */
3893 if ((gboolean)mono_atomic_load_i32 ((gint32*)&mono_jit_stats.enabled)) {
3894 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.biggest_method_size)) {
3895 mono_atomic_store_i32 (&mono_jit_stats.biggest_method_size, code_size_ratio);
3896 char *biggest_method = g_strdup_printf ("%s::%s", method->klass->name, method->name);
3897 biggest_method = mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.biggest_method, biggest_method);
3898 g_free (biggest_method);
3900 code_size_ratio = (code_size_ratio * 100) / header->code_size;
3901 if (code_size_ratio > mono_atomic_load_i32 (&mono_jit_stats.max_code_size_ratio)) {
3902 mono_atomic_store_i32 (&mono_jit_stats.max_code_size_ratio, code_size_ratio);
3903 char *max_ratio_method = g_strdup_printf ("%s::%s", method->klass->name, method->name);
3904 max_ratio_method = mono_atomic_xchg_ptr ((gpointer*)&mono_jit_stats.max_ratio_method, max_ratio_method);
3905 g_free (max_ratio_method);
3909 if (MONO_METHOD_COMPILE_END_ENABLED ())
3910 MONO_PROBE_METHOD_COMPILE_END (method, TRUE);
3912 mono_cfg_dump_close_group (cfg);
3914 return cfg;
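/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return TRUE if KLASS is a generic instance (or, when CONTEXT_USED is
 * non-zero, a generic type definition) with at least one variant type
 * parameter instantiated with a reference type.
 */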
3917 gboolean
3918 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3920 int i;
3921 MonoGenericContainer *container;
3922 MonoGenericInst *ginst;
3924 if (mono_class_is_ginst (klass)) {
3925 container = mono_class_get_generic_container (mono_class_get_generic_class (klass)->container_class);
3926 ginst = mono_class_get_generic_class (klass)->context.class_inst;
3927 } else if (mono_class_is_gtd (klass) && context_used) {
3928 container = mono_class_get_generic_container (klass);
3929 ginst = container->context.class_inst;
3930 } else {
3931 return FALSE;
3934 for (i = 0; i < container->type_argc; ++i) {
3935 MonoType *type;
3936 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3937 continue;
3938 type = ginst->type_argv [i];
3939 if (mini_type_is_reference (type))
3940 return TRUE;
3942 return FALSE;
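/*
 * mono_cfg_add_try_hole:
 *
 * Record a hole for CLAUSE starting at the native offset of START and owned
 * by basic block BB. The holes are accumulated in cfg->try_block_holes.
 */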
3945 void
3946 mono_cfg_add_try_hole (MonoCompile *cfg, MonoExceptionClause *clause, guint8 *start, MonoBasicBlock *bb)
3948 TryBlockHole *hole = (TryBlockHole *)mono_mempool_alloc (cfg->mempool, sizeof (TryBlockHole));
3949 hole->clause = clause;
3950 hole->start_offset = start - cfg->native_code;
3951 hole->basic_block = bb;
3953 cfg->try_block_holes = g_slist_append_mempool (cfg->mempool, cfg->try_block_holes, hole);
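/*
 * mono_cfg_set_exception:
 *
 * Mark the compilation as failed with the given MONO_EXCEPTION_ type; the
 * caller of mini_method_compile () converts this into a managed exception.
 */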
3956 void
3957 mono_cfg_set_exception (MonoCompile *cfg, int type)
3959 cfg->exception_type = type;
3962 /* Assumes ownership of the MSG argument */
3963 void
3964 mono_cfg_set_exception_invalid_program (MonoCompile *cfg, char *msg)
3966 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
3967 mono_error_set_generic_error (&cfg->error, "System", "InvalidProgramException", "%s", msg);
3970 #endif /* DISABLE_JIT */
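/*
 * create_jit_info_for_trampoline:
 *
 * Build a minimal MonoJitInfo for WRAPPER describing the code and unwind info
 * in INFO, so the trampoline can participate in stack unwinding.
 */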
3972 static MonoJitInfo*
3973 create_jit_info_for_trampoline (MonoMethod *wrapper, MonoTrampInfo *info)
3975 MonoDomain *domain = mono_get_root_domain ();
3976 MonoJitInfo *jinfo;
3977 guint8 *uw_info;
3978 guint32 info_len;
3980 if (info->uw_info) {
3981 uw_info = info->uw_info;
3982 info_len = info->uw_info_len;
3983 } else {
3984 uw_info = mono_unwind_ops_encode (info->unwind_ops, &info_len);
3987 jinfo = (MonoJitInfo *)mono_domain_alloc0 (domain, MONO_SIZEOF_JIT_INFO);
3988 jinfo->d.method = wrapper;
3989 jinfo->code_start = info->code;
3990 jinfo->code_size = info->code_size;
3991 jinfo->unwind_info = mono_cache_unwind_info (uw_info, info_len);
3993 if (!info->uw_info)
3994 g_free (uw_info);
3996 return jinfo;
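/*
 * mono_time_track_start:
 *
 * Start a timing interval. Pair with mono_time_track_end () to accumulate the
 * elapsed time into a counter, e.g.:
 *
 *   GTimer *timer = mono_time_track_start ();
 *   ... do the work to be measured ...
 *   mono_time_track_end (&mono_jit_stats.jit_time, timer);
 */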
3999 GTimer *mono_time_track_start ()
4001 return g_timer_new ();
/**
4005 * mono_time_track_end:
 *
4007 * Uses UnlockedAddDouble () to update \param time.
 */
4009 void mono_time_track_end (gdouble *time, GTimer *timer)
4011 g_timer_stop (timer);
4012 UnlockedAddDouble (time, g_timer_elapsed (timer, NULL));
4013 g_timer_destroy (timer);
/**
4017 * mono_update_jit_stats:
 *
4019 * Only call this function in locked environments to avoid data races.
 */
4021 MONO_NO_SANITIZE_THREAD
4022 void
4023 mono_update_jit_stats (MonoCompile *cfg)
4025 mono_jit_stats.allocate_var += cfg->stat_allocate_var;
4026 mono_jit_stats.locals_stack_size += cfg->stat_locals_stack_size;
4027 mono_jit_stats.basic_blocks += cfg->stat_basic_blocks;
4028 mono_jit_stats.max_basic_blocks = MAX (cfg->stat_basic_blocks, mono_jit_stats.max_basic_blocks);
4029 mono_jit_stats.cil_code_size += cfg->stat_cil_code_size;
4030 mono_jit_stats.regvars += cfg->stat_n_regvars;
4031 mono_jit_stats.inlineable_methods += cfg->stat_inlineable_methods;
4032 mono_jit_stats.inlined_methods += cfg->stat_inlined_methods;
4033 mono_jit_stats.code_reallocs += cfg->stat_code_reallocs;
/**
4037 * mono_jit_compile_method_inner:
 *
4039 * Main entry point for the JIT.
 */
4041 gpointer
4042 mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt, MonoError *error)
4044 MonoCompile *cfg;
4045 gpointer code = NULL;
4046 MonoJitInfo *jinfo, *info;
4047 MonoVTable *vtable;
4048 MonoException *ex = NULL;
4049 GTimer *jit_timer;
4050 MonoMethod *prof_method, *shared;
4052 error_init (error);
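/*
 * Methods without IL (internal calls and pinvokes) are not JITted directly:
 * a managed-to-native wrapper is generated and compiled instead, and its code
 * is returned.
 */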
4054 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4055 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
4056 MonoMethod *nm;
4057 MonoMethodPInvoke* piinfo = (MonoMethodPInvoke *) method;
4059 if (!piinfo->addr) {
4060 if (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL)
4061 piinfo->addr = mono_lookup_internal_call (method);
4062 else if (method->iflags & METHOD_IMPL_ATTRIBUTE_NATIVE)
4063 #ifdef HOST_WIN32
4064 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono in modules loaded from byte arrays. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4065 #else
4066 g_warning ("Method '%s' in assembly '%s' contains native code that cannot be executed by Mono on this platform. The assembly was probably created using C++/CLI.\n", mono_method_full_name (method, TRUE), method->klass->image->name);
4067 #endif
4068 else
4069 mono_lookup_pinvoke_call (method, NULL, NULL);
4071 nm = mono_marshal_get_native_wrapper (method, TRUE, mono_aot_only);
4072 gpointer compiled_method = mono_compile_method_checked (nm, error);
4073 return_val_if_nok (error, NULL);
4074 code = mono_get_addr_from_ftnptr (compiled_method);
4075 jinfo = mono_jit_info_table_find (target_domain, code);
4076 if (!jinfo)
4077 jinfo = mono_jit_info_table_find (mono_domain_get (), code);
4078 if (jinfo)
4079 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4080 return code;
4081 } else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
4082 const char *name = method->name;
4083 char *full_name, *msg;
4084 MonoMethod *nm;
4086 if (method->klass->parent == mono_defaults.multicastdelegate_class) {
4087 if (*name == '.' && (strcmp (name, ".ctor") == 0)) {
4088 MonoJitICallInfo *mi = mono_find_jit_icall_by_name ("ves_icall_mono_delegate_ctor");
4089 g_assert (mi);
/*
4091 * We need to make sure this wrapper
4092 * is compiled because it might end up
4093 * in an (M)RGCTX if generic sharing
4094 * is enabled, and would be called
4095 * indirectly. If it were a
4096 * trampoline we'd try to patch that
4097 * indirect call, which is not
4098 * possible.
 */
4100 return mono_get_addr_from_ftnptr ((gpointer)mono_icall_get_wrapper_full (mi, TRUE));
4101 } else if (*name == 'I' && (strcmp (name, "Invoke") == 0)) {
4102 if (mono_llvm_only) {
4103 nm = mono_marshal_get_delegate_invoke (method, NULL);
4104 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4105 mono_error_assert_ok (error);
4106 return mono_get_addr_from_ftnptr (compiled_ptr);
4108 return mono_create_delegate_trampoline (target_domain, method->klass);
4109 } else if (*name == 'B' && (strcmp (name, "BeginInvoke") == 0)) {
4110 nm = mono_marshal_get_delegate_begin_invoke (method);
4111 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4112 mono_error_assert_ok (error);
4113 return mono_get_addr_from_ftnptr (compiled_ptr);
4114 } else if (*name == 'E' && (strcmp (name, "EndInvoke") == 0)) {
4115 nm = mono_marshal_get_delegate_end_invoke (method);
4116 gpointer compiled_ptr = mono_compile_method_checked (nm, error);
4117 mono_error_assert_ok (error);
4118 return mono_get_addr_from_ftnptr (compiled_ptr);
4122 full_name = mono_method_full_name (method, TRUE);
4123 msg = g_strdup_printf ("Unrecognizable runtime implemented method '%s'", full_name);
4124 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", msg);
4125 mono_error_set_exception_instance (error, ex);
4126 g_free (full_name);
4127 g_free (msg);
4128 return NULL;
4131 if (method->wrapper_type == MONO_WRAPPER_UNKNOWN) {
4132 WrapperInfo *info = mono_marshal_get_wrapper_info (method);
4134 if (info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN || info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_OUT) {
4135 static MonoTrampInfo *in_tinfo, *out_tinfo;
4136 MonoTrampInfo *tinfo;
4137 MonoJitInfo *jinfo;
4138 gboolean is_in = info->subtype == WRAPPER_SUBTYPE_GSHAREDVT_IN;
4140 if (is_in && in_tinfo)
4141 return in_tinfo->code;
4142 else if (!is_in && out_tinfo)
4143 return out_tinfo->code;
/*
4146 * This is a special wrapper whose body is implemented in assembly, like a trampoline. We use a wrapper so EH
4147 * works.
4148 * FIXME: The caller signature doesn't match the callee, which might cause problems on some platforms
 */
4150 if (mono_aot_only)
4151 mono_aot_get_trampoline_full (is_in ? "gsharedvt_trampoline" : "gsharedvt_out_trampoline", &tinfo);
4152 else
4153 mono_arch_get_gsharedvt_trampoline (&tinfo, FALSE);
4154 jinfo = create_jit_info_for_trampoline (method, tinfo);
4155 mono_jit_info_table_add (mono_get_root_domain (), jinfo);
4156 if (is_in)
4157 in_tinfo = tinfo;
4158 else
4159 out_tinfo = tinfo;
4160 return tinfo->code;
4164 if (mono_aot_only) {
4165 char *fullname = mono_method_full_name (method, TRUE);
4166 mono_error_set_execution_engine (error, "Attempting to JIT compile method '%s' while running in aot-only mode. See https://developer.xamarin.com/guides/ios/advanced_topics/limitations/ for more information.\n", fullname);
4167 g_free (fullname);
4169 return NULL;
4172 jit_timer = mono_time_track_start ();
4173 cfg = mini_method_compile (method, opt, target_domain, JIT_FLAG_RUN_CCTORS, 0, -1);
4174 gdouble jit_time = 0.0;
4175 mono_time_track_end (&jit_time, jit_timer);
4176 UnlockedAddDouble (&mono_jit_stats.jit_time, jit_time);
4178 prof_method = cfg->method;
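/* Convert any failure recorded by mini_method_compile () into a managed exception. */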
4180 switch (cfg->exception_type) {
4181 case MONO_EXCEPTION_NONE:
4182 break;
4183 case MONO_EXCEPTION_TYPE_LOAD:
4184 case MONO_EXCEPTION_MISSING_FIELD:
4185 case MONO_EXCEPTION_MISSING_METHOD:
4186 case MONO_EXCEPTION_FILE_NOT_FOUND:
4187 case MONO_EXCEPTION_BAD_IMAGE:
4188 case MONO_EXCEPTION_INVALID_PROGRAM: {
4189 /* Throw a type load exception if needed */
4190 if (cfg->exception_ptr) {
4191 ex = mono_class_get_exception_for_failure ((MonoClass *)cfg->exception_ptr);
4192 } else {
4193 if (cfg->exception_type == MONO_EXCEPTION_MISSING_FIELD)
4194 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingFieldException", cfg->exception_message);
4195 else if (cfg->exception_type == MONO_EXCEPTION_MISSING_METHOD)
4196 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "MissingMethodException", cfg->exception_message);
4197 else if (cfg->exception_type == MONO_EXCEPTION_TYPE_LOAD)
4198 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
4199 else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
4200 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System.IO", "FileNotFoundException", cfg->exception_message);
4201 else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
4202 ex = mono_get_exception_bad_image_format (cfg->exception_message);
4203 else if (cfg->exception_type == MONO_EXCEPTION_INVALID_PROGRAM)
4204 ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "InvalidProgramException", cfg->exception_message);
4205 else
4206 g_assert_not_reached ();
4208 break;
4210 case MONO_EXCEPTION_MONO_ERROR:
4211 // FIXME: MonoError has no copy ctor
4212 g_assert (!mono_error_ok (&cfg->error));
4213 ex = mono_error_convert_to_exception (&cfg->error);
4214 break;
4215 default:
4216 g_assert_not_reached ();
4219 if (ex) {
4220 MONO_PROFILER_RAISE (jit_failed, (method));
4222 mono_destroy_compile (cfg);
4223 mono_error_set_exception_instance (error, ex);
4225 return NULL;
4228 if (mono_method_is_generic_sharable (method, FALSE))
4229 shared = mini_get_shared_method (method);
4230 else
4231 shared = NULL;
4233 mono_domain_lock (target_domain);
4235 /* Check if some other thread already did the job. In this case, we can
4236 discard the code this thread generated. */
4238 info = mini_lookup_method (target_domain, method, shared);
4239 if (info) {
4240 /* We can't use a domain specific method in another domain */
4241 if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
4242 code = info->code_start;
4243 discarded_code ++;
4244 discarded_jit_time += jit_time;
4247 if (code == NULL) {
4248 /* The lookup + insert is atomic since this is done inside the domain lock */
4249 mono_domain_jit_code_hash_lock (target_domain);
4250 mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->d.method, cfg->jit_info);
4251 mono_domain_jit_code_hash_unlock (target_domain);
4253 code = cfg->native_code;
4255 if (cfg->gshared && mono_method_is_generic_sharable (method, FALSE))
4256 mono_atomic_inc_i32 (&mono_stats.generics_shared_methods);
4257 if (cfg->gsharedvt)
4258 mono_atomic_inc_i32 (&mono_stats.gsharedvt_methods);
4261 jinfo = cfg->jit_info;
/*
4264 * Update global stats while holding a lock, instead of doing many
4265 * mono_atomic_inc_i32 operations during JITting.
 */
4267 mono_update_jit_stats (cfg);
4269 mono_destroy_compile (cfg);
4271 #ifndef DISABLE_JIT
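/*
 * Patch pending jump sites which were emitted before this method's code
 * existed (MONO_PATCH_INFO_METHOD_JUMP entries collected in the domain's
 * jump_target_hash).
 */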
4272 if (domain_jit_info (target_domain)->jump_target_hash) {
4273 MonoJumpInfo patch_info;
4274 MonoJumpList *jlist;
4275 GSList *tmp;
4276 jlist = (MonoJumpList *)g_hash_table_lookup (domain_jit_info (target_domain)->jump_target_hash, method);
4277 if (jlist) {
4278 patch_info.next = NULL;
4279 patch_info.ip.i = 0;
4280 patch_info.type = MONO_PATCH_INFO_METHOD_JUMP;
4281 patch_info.data.method = method;
4282 g_hash_table_remove (domain_jit_info (target_domain)->jump_target_hash, method);
4284 #ifdef MONO_ARCH_HAVE_PATCH_CODE_NEW
4285 for (tmp = jlist->list; tmp; tmp = tmp->next) {
4286 gpointer target = mono_resolve_patch_target (NULL, target_domain, (guint8 *)tmp->data, &patch_info, TRUE, error);
4287 if (!mono_error_ok (error))
4288 break;
4289 mono_arch_patch_code_new (NULL, target_domain, (guint8 *)tmp->data, &patch_info, target);
4291 #else
4292 for (tmp = jlist->list; tmp; tmp = tmp->next) {
4293 mono_arch_patch_code (NULL, NULL, target_domain, tmp->data, &patch_info, TRUE, error);
4294 if (!is_ok (error))
4295 break;
4297 #endif
4301 /* Update llvm callees */
4302 if (domain_jit_info (target_domain)->llvm_jit_callees) {
4303 GSList *callees = g_hash_table_lookup (domain_jit_info (target_domain)->llvm_jit_callees, method);
4304 GSList *l;
4306 for (l = callees; l; l = l->next) {
4307 gpointer *addr = (gpointer*)l->data;
4309 *addr = code;
4313 mono_emit_jit_map (jinfo);
4314 #endif
4315 mono_domain_unlock (target_domain);
4317 if (!mono_error_ok (error))
4318 return NULL;
4320 vtable = mono_class_vtable_checked (target_domain, method->klass, error);
4321 return_val_if_nok (error, NULL);
4323 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
4324 if (mono_marshal_method_from_wrapper (method)) {
4325 /* Native func wrappers have no method */
4326 /* The profiler doesn't know about wrappers, so pass the original icall method */
4327 MONO_PROFILER_RAISE (jit_done, (mono_marshal_method_from_wrapper (method), jinfo));
4330 MONO_PROFILER_RAISE (jit_done, (method, jinfo));
4331 if (prof_method != method)
4332 MONO_PROFILER_RAISE (jit_done, (prof_method, jinfo));
4334 if (!(method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE ||
4335 method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK ||
4336 method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE)) {
4337 if (!mono_runtime_class_init_full (vtable, error))
4338 return NULL;
4340 return code;
/**
4344 * mini_get_underlying_type:
 *
4346 * Return the type the JIT will use during compilation.
4347 * Handles: byref, enums, native types, bool/char, ref types, generic sharing.
4348 * For gsharedvt types, it will return the original VAR/MVAR.
 */
4350 MonoType*
4351 mini_get_underlying_type (MonoType *type)
4353 return mini_type_get_underlying_type (type);
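/*
 * mini_jit_init:
 *
 * Initialize JIT-global state: the statistics counters registered below, the
 * global JIT lock and the backend descriptor.
 */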
4356 void
4357 mini_jit_init (void)
4359 mono_counters_register ("Discarded method code", MONO_COUNTER_JIT | MONO_COUNTER_INT, &discarded_code);
4360 mono_counters_register ("Time spent JITting discarded code", MONO_COUNTER_JIT | MONO_COUNTER_DOUBLE, &discarded_jit_time);
4361 mono_counters_register ("Try holes memory size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &jinfo_try_holes_size);
4363 mono_os_mutex_init_recursive (&jit_mutex);
4364 #ifndef DISABLE_JIT
4365 current_backend = g_new0 (MonoBackend, 1);
4366 init_backend (current_backend);
4367 #endif
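/*
 * mini_jit_cleanup:
 *
 * Free JIT-global tables; currently only the opcode emulation maps.
 */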
4370 void
4371 mini_jit_cleanup (void)
4373 #ifndef DISABLE_JIT
4374 g_free (emul_opcode_map);
4375 g_free (emul_opcode_opcodes);
4376 #endif
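/* Stubs used when the runtime is built without the LLVM backend; they must never be reached. */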
4379 #ifndef ENABLE_LLVM
4380 void
4381 mono_llvm_emit_aot_file_info (MonoAotFileInfo *info, gboolean has_jitted_code)
4383 g_assert_not_reached ();
4386 void mono_llvm_emit_aot_data (const char *symbol, guint8 *data, int data_len)
4388 g_assert_not_reached ();
4391 #endif
4393 #if !defined(ENABLE_LLVM_RUNTIME) && !defined(ENABLE_LLVM)
4395 void
4396 mono_llvm_cpp_throw_exception (void)
4398 g_assert_not_reached ();
4401 #endif
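/* Stubs used when the JIT itself is compiled out (e.g. in full-AOT-only configurations). */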
4403 #ifdef DISABLE_JIT
4405 MonoCompile*
4406 mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, JitFlags flags, int parts, int aot_method_index)
4408 g_assert_not_reached ();
4409 return NULL;
4412 void
4413 mono_destroy_compile (MonoCompile *cfg)
4415 g_assert_not_reached ();
4418 void
4419 mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target)
4421 g_assert_not_reached ();
4424 #endif /* DISABLE_JIT */
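/*
 * mini_class_is_system_array:
 *
 * Return TRUE if KLASS derives directly from System.Array.
 */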
4426 gboolean
4427 mini_class_is_system_array (MonoClass *klass)
4429 if (klass->parent == mono_defaults.array_class)
4430 return TRUE;
4431 else
4432 return FALSE;