[amd64] Remove the callee saved registers from MonoLMF, save/restore them normally...
[mono-project.git] / mono / mini / method-to-ir.c
bloba186c9849332574c3ab4cf49598961275e40c2f6
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
13 #include <config.h>
15 #ifndef DISABLE_JIT
17 #include <signal.h>
19 #ifdef HAVE_UNISTD_H
20 #include <unistd.h>
21 #endif
23 #include <math.h>
24 #include <string.h>
25 #include <ctype.h>
27 #ifdef HAVE_SYS_TIME_H
28 #include <sys/time.h>
29 #endif
31 #ifdef HAVE_ALLOCA_H
32 #include <alloca.h>
33 #endif
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
63 #include "mini.h"
64 #include "trace.h"
66 #include "ir-emit.h"
68 #include "jit-icalls.h"
69 #include "jit.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
79 } \
80 } while (0)
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
83 goto exception_exit;\
84 } while (0)
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
93 } while (0)
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
102 } while (0)
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
110 } while (0)
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
119 } while (0)
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
123 } while (0)
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
128 } while (0)
130 /* Determine whenever 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
153 #ifdef MINI_OP
154 #undef MINI_OP
155 #endif
156 #ifdef MINI_OP3
157 #undef MINI_OP3
158 #endif
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
161 #define NONE ' '
162 #define IREG 'i'
163 #define FREG 'f'
164 #define VREG 'v'
165 #define XREG 'x'
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
167 #define LREG IREG
168 #else
169 #define LREG 'l'
170 #endif
171 /* keep in sync with the enum in mini.h */
172 const char
173 ins_info[] = {
174 #include "mini-ops.h"
176 #undef MINI_OP
177 #undef MINI_OP3
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
188 #undef MINI_OP
189 #undef MINI_OP3
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
193 (vi)->reg = -1; \
194 (vi)->idx = (id); \
195 } while (0)
197 void
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
205 guint32
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
211 guint32
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
217 guint32
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
223 guint32
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
229 guint32
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
240 guint32
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
251 guint32
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
262 guint32
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
269 else
270 return alloc_ireg (cfg);
273 guint
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
276 if (type->byref)
277 return OP_MOVE;
279 type = mini_replace_type (type);
280 handle_enum:
281 switch (type->type) {
282 case MONO_TYPE_I1:
283 case MONO_TYPE_U1:
284 case MONO_TYPE_BOOLEAN:
285 return OP_MOVE;
286 case MONO_TYPE_I2:
287 case MONO_TYPE_U2:
288 case MONO_TYPE_CHAR:
289 return OP_MOVE;
290 case MONO_TYPE_I4:
291 case MONO_TYPE_U4:
292 return OP_MOVE;
293 case MONO_TYPE_I:
294 case MONO_TYPE_U:
295 case MONO_TYPE_PTR:
296 case MONO_TYPE_FNPTR:
297 return OP_MOVE;
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
303 return OP_MOVE;
304 case MONO_TYPE_I8:
305 case MONO_TYPE_U8:
306 #if SIZEOF_REGISTER == 8
307 return OP_MOVE;
308 #else
309 return OP_LMOVE;
310 #endif
311 case MONO_TYPE_R4:
312 return OP_FMOVE;
313 case MONO_TYPE_R8:
314 return OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
318 goto handle_enum;
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
321 return OP_XMOVE;
322 return OP_VMOVE;
323 case MONO_TYPE_TYPEDBYREF:
324 return OP_VMOVE;
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
327 goto handle_enum;
328 case MONO_TYPE_VAR:
329 case MONO_TYPE_MVAR:
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
332 return OP_VMOVE;
333 else
334 return OP_MOVE;
335 default:
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
338 return -1;
341 void
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 int i;
345 MonoInst *tree;
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 printf (", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 printf (" ]\n");
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
358 void
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
371 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
372 * foo<T> (int i) { ldarg.0; box T; }
374 #define UNVERIFIED do { \
375 if (cfg->gsharedvt) { \
376 if (cfg->verbose_level > 2) \
377 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
379 goto exception_exit; \
381 if (mini_get_debug_options ()->break_on_unverified) \
382 G_BREAKPOINT (); \
383 else \
384 goto unverified; \
385 } while (0)
387 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
389 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
391 #define GET_BBLOCK(cfg,tblock,ip) do { \
392 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
393 if (!(tblock)) { \
394 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
395 NEW_BBLOCK (cfg, (tblock)); \
396 (tblock)->cil_code = (ip); \
397 ADD_BBLOCK (cfg, (tblock)); \
399 } while (0)
401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
402 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
403 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
404 (dest)->dreg = alloc_ireg_mp ((cfg)); \
405 (dest)->sreg1 = (sr1); \
406 (dest)->sreg2 = (sr2); \
407 (dest)->inst_imm = (imm); \
408 (dest)->backend.shift_amount = (shift); \
409 MONO_ADD_INS ((cfg)->cbb, (dest)); \
410 } while (0)
411 #endif
413 #if SIZEOF_REGISTER == 8
414 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
415 /* FIXME: Need to add many more cases */ \
416 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
417 MonoInst *widen; \
418 int dr = alloc_preg (cfg); \
419 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
420 (ins)->sreg2 = widen->dreg; \
422 } while (0)
423 #else
424 #define ADD_WIDEN_OP(ins, arg1, arg2)
425 #endif
427 #define ADD_BINOP(op) do { \
428 MONO_INST_NEW (cfg, ins, (op)); \
429 sp -= 2; \
430 ins->sreg1 = sp [0]->dreg; \
431 ins->sreg2 = sp [1]->dreg; \
432 type_from_op (ins, sp [0], sp [1]); \
433 CHECK_TYPE (ins); \
434 /* Have to insert a widening op */ \
435 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
436 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
437 MONO_ADD_INS ((cfg)->cbb, (ins)); \
438 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
439 } while (0)
441 #define ADD_UNOP(op) do { \
442 MONO_INST_NEW (cfg, ins, (op)); \
443 sp--; \
444 ins->sreg1 = sp [0]->dreg; \
445 type_from_op (ins, sp [0], NULL); \
446 CHECK_TYPE (ins); \
447 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
448 MONO_ADD_INS ((cfg)->cbb, (ins)); \
449 *sp++ = mono_decompose_opcode (cfg, ins); \
450 } while (0)
452 #define ADD_BINCOND(next_block) do { \
453 MonoInst *cmp; \
454 sp -= 2; \
455 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
456 cmp->sreg1 = sp [0]->dreg; \
457 cmp->sreg2 = sp [1]->dreg; \
458 type_from_op (cmp, sp [0], sp [1]); \
459 CHECK_TYPE (cmp); \
460 type_from_op (ins, sp [0], sp [1]); \
461 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
462 GET_BBLOCK (cfg, tblock, target); \
463 link_bblock (cfg, bblock, tblock); \
464 ins->inst_true_bb = tblock; \
465 if ((next_block)) { \
466 link_bblock (cfg, bblock, (next_block)); \
467 ins->inst_false_bb = (next_block); \
468 start_new_bblock = 1; \
469 } else { \
470 GET_BBLOCK (cfg, tblock, ip); \
471 link_bblock (cfg, bblock, tblock); \
472 ins->inst_false_bb = tblock; \
473 start_new_bblock = 2; \
475 if (sp != stack_start) { \
476 handle_stack_args (cfg, stack_start, sp - stack_start); \
477 CHECK_UNVERIFIABLE (cfg); \
479 MONO_ADD_INS (bblock, cmp); \
480 MONO_ADD_INS (bblock, ins); \
481 } while (0)
483 /* *
484 * link_bblock: Links two basic blocks
486 * links two basic blocks in the control flow graph, the 'from'
487 * argument is the starting block and the 'to' argument is the block
488 * the control flow ends to after 'from'.
490 static void
491 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
493 MonoBasicBlock **newa;
494 int i, found;
496 #if 0
497 if (from->cil_code) {
498 if (to->cil_code)
499 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
500 else
501 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
502 } else {
503 if (to->cil_code)
504 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
505 else
506 printf ("edge from entry to exit\n");
508 #endif
510 found = FALSE;
511 for (i = 0; i < from->out_count; ++i) {
512 if (to == from->out_bb [i]) {
513 found = TRUE;
514 break;
517 if (!found) {
518 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
519 for (i = 0; i < from->out_count; ++i) {
520 newa [i] = from->out_bb [i];
522 newa [i] = to;
523 from->out_count++;
524 from->out_bb = newa;
527 found = FALSE;
528 for (i = 0; i < to->in_count; ++i) {
529 if (from == to->in_bb [i]) {
530 found = TRUE;
531 break;
534 if (!found) {
535 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
536 for (i = 0; i < to->in_count; ++i) {
537 newa [i] = to->in_bb [i];
539 newa [i] = from;
540 to->in_count++;
541 to->in_bb = newa;
545 void
546 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
548 link_bblock (cfg, from, to);
552 * mono_find_block_region:
554 * We mark each basic block with a region ID. We use that to avoid BB
555 * optimizations when blocks are in different regions.
557 * Returns:
558 * A region token that encodes where this region is, and information
559 * about the clause owner for this block.
561 * The region encodes the try/catch/filter clause that owns this block
562 * as well as the type. -1 is a special value that represents a block
563 * that is in none of try/catch/filter.
565 static int
566 mono_find_block_region (MonoCompile *cfg, int offset)
568 MonoMethodHeader *header = cfg->header;
569 MonoExceptionClause *clause;
570 int i;
572 for (i = 0; i < header->num_clauses; ++i) {
573 clause = &header->clauses [i];
574 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
575 (offset < (clause->handler_offset)))
576 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
578 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
579 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
580 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
581 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
582 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
583 else
584 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
587 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
588 return ((i + 1) << 8) | clause->flags;
591 return -1;
594 static GList*
595 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
597 MonoMethodHeader *header = cfg->header;
598 MonoExceptionClause *clause;
599 int i;
600 GList *res = NULL;
602 for (i = 0; i < header->num_clauses; ++i) {
603 clause = &header->clauses [i];
604 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
605 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
606 if (clause->flags == type)
607 res = g_list_append (res, clause);
610 return res;
613 static void
614 mono_create_spvar_for_region (MonoCompile *cfg, int region)
616 MonoInst *var;
618 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
619 if (var)
620 return;
622 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
623 /* prevent it from being register allocated */
624 var->flags |= MONO_INST_VOLATILE;
626 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
629 MonoInst *
630 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
632 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
635 static MonoInst*
636 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
638 MonoInst *var;
640 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
641 if (var)
642 return var;
644 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
645 /* prevent it from being register allocated */
646 var->flags |= MONO_INST_VOLATILE;
648 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
650 return var;
654 * Returns the type used in the eval stack when @type is loaded.
655 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
657 void
658 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
660 MonoClass *klass;
662 type = mini_replace_type (type);
663 inst->klass = klass = mono_class_from_mono_type (type);
664 if (type->byref) {
665 inst->type = STACK_MP;
666 return;
669 handle_enum:
670 switch (type->type) {
671 case MONO_TYPE_VOID:
672 inst->type = STACK_INV;
673 return;
674 case MONO_TYPE_I1:
675 case MONO_TYPE_U1:
676 case MONO_TYPE_BOOLEAN:
677 case MONO_TYPE_I2:
678 case MONO_TYPE_U2:
679 case MONO_TYPE_CHAR:
680 case MONO_TYPE_I4:
681 case MONO_TYPE_U4:
682 inst->type = STACK_I4;
683 return;
684 case MONO_TYPE_I:
685 case MONO_TYPE_U:
686 case MONO_TYPE_PTR:
687 case MONO_TYPE_FNPTR:
688 inst->type = STACK_PTR;
689 return;
690 case MONO_TYPE_CLASS:
691 case MONO_TYPE_STRING:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_SZARRAY:
694 case MONO_TYPE_ARRAY:
695 inst->type = STACK_OBJ;
696 return;
697 case MONO_TYPE_I8:
698 case MONO_TYPE_U8:
699 inst->type = STACK_I8;
700 return;
701 case MONO_TYPE_R4:
702 case MONO_TYPE_R8:
703 inst->type = STACK_R8;
704 return;
705 case MONO_TYPE_VALUETYPE:
706 if (type->data.klass->enumtype) {
707 type = mono_class_enum_basetype (type->data.klass);
708 goto handle_enum;
709 } else {
710 inst->klass = klass;
711 inst->type = STACK_VTYPE;
712 return;
714 case MONO_TYPE_TYPEDBYREF:
715 inst->klass = mono_defaults.typed_reference_class;
716 inst->type = STACK_VTYPE;
717 return;
718 case MONO_TYPE_GENERICINST:
719 type = &type->data.generic_class->container_class->byval_arg;
720 goto handle_enum;
721 case MONO_TYPE_VAR:
722 case MONO_TYPE_MVAR:
723 g_assert (cfg->generic_sharing_context);
724 if (mini_is_gsharedvt_type (cfg, type)) {
725 g_assert (cfg->gsharedvt);
726 inst->type = STACK_VTYPE;
727 } else {
728 inst->type = STACK_OBJ;
730 return;
731 default:
732 g_error ("unknown type 0x%02x in eval stack type", type->type);
737 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result eval-stack type of an arithmetic binop, indexed as [src1->type][src2->type]; STACK_INV marks invalid IL. */
739 static const char
740 bin_num_table [STACK_MAX] [STACK_MAX] = {
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation, indexed by the operand's eval-stack type. */
751 static const char
752 neg_table [] = {
753 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
756 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor/div.un/rem.un), indexed as [src1->type][src2->type]. */
757 static const char
758 bin_int_table [STACK_MAX] [STACK_MAX] = {
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
761 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
762 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
763 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
764 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
765 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
766 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity, indexed as [src1->type][src2->type]; 0 means the pair is not comparable. */
769 static const char
770 bin_comp_table [STACK_MAX] [STACK_MAX] = {
771 /* Inv i L p F & O vt */
772 {0},
773 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
774 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
775 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
776 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
777 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
778 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
779 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
782 /* reduce the size of this table */
/* Result type of a shift, indexed as [value type][shift amount type]. */
783 static const char
784 shift_table [STACK_MAX] [STACK_MAX] = {
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
787 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
796 * Tables to map from the non-specific opcode to the matching
797 * type-specific opcode.
/* Each *_op_map entry is an opcode DELTA added to the generic opcode, indexed by eval-stack type. */
799 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
800 static const guint16
801 binops_op_map [STACK_MAX] = {
802 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
805 /* handles from CEE_NEG to CEE_CONV_U8 */
806 static const guint16
807 unops_op_map [STACK_MAX] = {
808 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
811 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
812 static const guint16
813 ovfops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
817 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
818 static const guint16
819 ovf2ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
823 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
824 static const guint16
825 ovf3ops_op_map [STACK_MAX] = {
826 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
829 /* handles from CEE_BEQ to CEE_BLT_UN */
830 static const guint16
831 beqops_op_map [STACK_MAX] = {
832 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
835 /* handles from CEE_CEQ to CEE_CLT_UN */
836 static const guint16
837 ceqops_op_map [STACK_MAX] = {
838 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
842 * Sets ins->type (the type on the eval stack) according to the
843 * type of the opcode and the arguments to it.
844 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
846 * FIXME: this function sets ins->type unconditionally in some cases, but
847 * it should set it to invalid for some types (a conv.x on an object)
849 static void
850 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
852 switch (ins->opcode) {
853 /* binops */
854 case CEE_ADD:
855 case CEE_SUB:
856 case CEE_MUL:
857 case CEE_DIV:
858 case CEE_REM:
859 /* FIXME: check unverifiable args for STACK_MP */
860 ins->type = bin_num_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
862 break;
863 case CEE_DIV_UN:
864 case CEE_REM_UN:
865 case CEE_AND:
866 case CEE_OR:
867 case CEE_XOR:
868 ins->type = bin_int_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
870 break;
871 case CEE_SHL:
872 case CEE_SHR:
873 case CEE_SHR_UN:
874 ins->type = shift_table [src1->type] [src2->type];
875 ins->opcode += binops_op_map [ins->type];
876 break;
877 case OP_COMPARE:
878 case OP_LCOMPARE:
879 case OP_ICOMPARE:
880 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
881 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
882 ins->opcode = OP_LCOMPARE;
883 else if (src1->type == STACK_R8)
884 ins->opcode = OP_FCOMPARE;
885 else
886 ins->opcode = OP_ICOMPARE;
887 break;
888 case OP_ICOMPARE_IMM:
889 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
890 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
891 ins->opcode = OP_LCOMPARE_IMM;
892 break;
893 case CEE_BEQ:
894 case CEE_BGE:
895 case CEE_BGT:
896 case CEE_BLE:
897 case CEE_BLT:
898 case CEE_BNE_UN:
899 case CEE_BGE_UN:
900 case CEE_BGT_UN:
901 case CEE_BLE_UN:
902 case CEE_BLT_UN:
903 ins->opcode += beqops_op_map [src1->type];
904 break;
905 case OP_CEQ:
906 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
907 ins->opcode += ceqops_op_map [src1->type];
908 break;
909 case OP_CGT:
910 case OP_CGT_UN:
911 case OP_CLT:
912 case OP_CLT_UN:
913 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
914 ins->opcode += ceqops_op_map [src1->type];
915 break;
916 /* unops */
917 case CEE_NEG:
918 ins->type = neg_table [src1->type];
919 ins->opcode += unops_op_map [ins->type];
920 break;
921 case CEE_NOT:
922 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
923 ins->type = src1->type;
924 else
925 ins->type = STACK_INV;
926 ins->opcode += unops_op_map [ins->type];
927 break;
928 case CEE_CONV_I1:
929 case CEE_CONV_I2:
930 case CEE_CONV_I4:
931 case CEE_CONV_U4:
932 ins->type = STACK_I4;
933 ins->opcode += unops_op_map [src1->type];
934 break;
935 case CEE_CONV_R_UN:
936 ins->type = STACK_R8;
937 switch (src1->type) {
938 case STACK_I4:
939 case STACK_PTR:
940 ins->opcode = OP_ICONV_TO_R_UN;
941 break;
942 case STACK_I8:
943 ins->opcode = OP_LCONV_TO_R_UN;
944 break;
946 break;
947 case CEE_CONV_OVF_I1:
948 case CEE_CONV_OVF_U1:
949 case CEE_CONV_OVF_I2:
950 case CEE_CONV_OVF_U2:
951 case CEE_CONV_OVF_I4:
952 case CEE_CONV_OVF_U4:
953 ins->type = STACK_I4;
954 ins->opcode += ovf3ops_op_map [src1->type];
955 break;
956 case CEE_CONV_OVF_I_UN:
957 case CEE_CONV_OVF_U_UN:
958 ins->type = STACK_PTR;
959 ins->opcode += ovf2ops_op_map [src1->type];
960 break;
961 case CEE_CONV_OVF_I1_UN:
962 case CEE_CONV_OVF_I2_UN:
963 case CEE_CONV_OVF_I4_UN:
964 case CEE_CONV_OVF_U1_UN:
965 case CEE_CONV_OVF_U2_UN:
966 case CEE_CONV_OVF_U4_UN:
967 ins->type = STACK_I4;
968 ins->opcode += ovf2ops_op_map [src1->type];
969 break;
970 case CEE_CONV_U:
971 ins->type = STACK_PTR;
972 switch (src1->type) {
973 case STACK_I4:
974 ins->opcode = OP_ICONV_TO_U;
975 break;
976 case STACK_PTR:
977 case STACK_MP:
978 #if SIZEOF_VOID_P == 8
979 ins->opcode = OP_LCONV_TO_U;
980 #else
981 ins->opcode = OP_MOVE;
982 #endif
983 break;
984 case STACK_I8:
985 ins->opcode = OP_LCONV_TO_U;
986 break;
987 case STACK_R8:
988 ins->opcode = OP_FCONV_TO_U;
989 break;
991 break;
992 case CEE_CONV_I8:
993 case CEE_CONV_U8:
994 ins->type = STACK_I8;
995 ins->opcode += unops_op_map [src1->type];
996 break;
997 case CEE_CONV_OVF_I8:
998 case CEE_CONV_OVF_U8:
999 ins->type = STACK_I8;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1001 break;
1002 case CEE_CONV_OVF_U8_UN:
1003 case CEE_CONV_OVF_I8_UN:
1004 ins->type = STACK_I8;
1005 ins->opcode += ovf2ops_op_map [src1->type];
1006 break;
1007 case CEE_CONV_R4:
1008 case CEE_CONV_R8:
1009 ins->type = STACK_R8;
1010 ins->opcode += unops_op_map [src1->type];
1011 break;
1012 case OP_CKFINITE:
1013 ins->type = STACK_R8;
1014 break;
1015 case CEE_CONV_U2:
1016 case CEE_CONV_U1:
1017 ins->type = STACK_I4;
1018 ins->opcode += ovfops_op_map [src1->type];
1019 break;
1020 case CEE_CONV_I:
1021 case CEE_CONV_OVF_I:
1022 case CEE_CONV_OVF_U:
1023 ins->type = STACK_PTR;
1024 ins->opcode += ovfops_op_map [src1->type];
1025 break;
1026 case CEE_ADD_OVF:
1027 case CEE_ADD_OVF_UN:
1028 case CEE_MUL_OVF:
1029 case CEE_MUL_OVF_UN:
1030 case CEE_SUB_OVF:
1031 case CEE_SUB_OVF_UN:
1032 ins->type = bin_num_table [src1->type] [src2->type];
1033 ins->opcode += ovfops_op_map [src1->type];
1034 if (ins->type == STACK_R8)
1035 ins->type = STACK_INV;
1036 break;
1037 case OP_LOAD_MEMBASE:
1038 ins->type = STACK_PTR;
1039 break;
1040 case OP_LOADI1_MEMBASE:
1041 case OP_LOADU1_MEMBASE:
1042 case OP_LOADI2_MEMBASE:
1043 case OP_LOADU2_MEMBASE:
1044 case OP_LOADI4_MEMBASE:
1045 case OP_LOADU4_MEMBASE:
1046 ins->type = STACK_PTR;
1047 break;
1048 case OP_LOADI8_MEMBASE:
1049 ins->type = STACK_I8;
1050 break;
1051 case OP_LOADR4_MEMBASE:
1052 case OP_LOADR8_MEMBASE:
1053 ins->type = STACK_R8;
1054 break;
1055 default:
1056 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1057 break;
1060 if (ins->type == STACK_MP)
1061 ins->klass = mono_defaults.object_class;
/*
 * Maps a CEE_LDIND_* opcode (indexed relative to the first ldind opcode —
 * presumably CEE_LDIND_I1; confirm against callers) to the evaluation stack
 * type (STACK_*) of the loaded value. Sub-int32 loads all widen to STACK_I4.
 */
static const char
ldind_type [] = {
	STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
};
#if 0

/*
 * NOTE(review): this whole region is compiled out (#if 0) and kept for
 * reference only. param_table was intended as a STACK_* x MONO_TYPE_*
 * compatibility matrix; check_values_to_signature () verifies that the
 * values in ARGS are compatible with SIG (returns 1 on success, 0 on
 * mismatch).
 */
static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			/* the 'this' argument must be a reference or managed pointer */
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			/* managed pointers are only valid for byref parameters */
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}

#endif
1133 * When we need a pointer to the current domain many times in a method, we
1134 * call mono_domain_get() once and we store the result in a local variable.
1135 * This function returns the variable that represents the MonoDomain*.
1137 inline static MonoInst *
1138 mono_get_domainvar (MonoCompile *cfg)
1140 if (!cfg->domainvar)
1141 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1142 return cfg->domainvar;
1146 * The got_var contains the address of the Global Offset Table when AOT
1147 * compiling.
1149 MonoInst *
1150 mono_get_got_var (MonoCompile *cfg)
1152 #ifdef MONO_ARCH_NEED_GOT_VAR
1153 if (!cfg->compile_aot)
1154 return NULL;
1155 if (!cfg->got_var) {
1156 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1158 return cfg->got_var;
1159 #else
1160 return NULL;
1161 #endif
1164 static MonoInst *
1165 mono_get_vtable_var (MonoCompile *cfg)
1167 g_assert (cfg->generic_sharing_context);
1169 if (!cfg->rgctx_var) {
1170 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1171 /* force the var to be stack allocated */
1172 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1175 return cfg->rgctx_var;
1178 static MonoType*
1179 type_from_stack_type (MonoInst *ins) {
1180 switch (ins->type) {
1181 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1182 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1183 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1184 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1185 case STACK_MP:
1186 return &ins->klass->this_arg;
1187 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1188 case STACK_VTYPE: return &ins->klass->byval_arg;
1189 default:
1190 g_error ("stack type %d to monotype not handled\n", ins->type);
1192 return NULL;
1195 static G_GNUC_UNUSED int
1196 type_to_stack_type (MonoType *t)
1198 t = mono_type_get_underlying_type (t);
1199 switch (t->type) {
1200 case MONO_TYPE_I1:
1201 case MONO_TYPE_U1:
1202 case MONO_TYPE_BOOLEAN:
1203 case MONO_TYPE_I2:
1204 case MONO_TYPE_U2:
1205 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_I4:
1207 case MONO_TYPE_U4:
1208 return STACK_I4;
1209 case MONO_TYPE_I:
1210 case MONO_TYPE_U:
1211 case MONO_TYPE_PTR:
1212 case MONO_TYPE_FNPTR:
1213 return STACK_PTR;
1214 case MONO_TYPE_CLASS:
1215 case MONO_TYPE_STRING:
1216 case MONO_TYPE_OBJECT:
1217 case MONO_TYPE_SZARRAY:
1218 case MONO_TYPE_ARRAY:
1219 return STACK_OBJ;
1220 case MONO_TYPE_I8:
1221 case MONO_TYPE_U8:
1222 return STACK_I8;
1223 case MONO_TYPE_R4:
1224 case MONO_TYPE_R8:
1225 return STACK_R8;
1226 case MONO_TYPE_VALUETYPE:
1227 case MONO_TYPE_TYPEDBYREF:
1228 return STACK_VTYPE;
1229 case MONO_TYPE_GENERICINST:
1230 if (mono_type_generic_inst_is_valuetype (t))
1231 return STACK_VTYPE;
1232 else
1233 return STACK_OBJ;
1234 break;
1235 default:
1236 g_assert_not_reached ();
1239 return -1;
1242 static MonoClass*
1243 array_access_to_klass (int opcode)
1245 switch (opcode) {
1246 case CEE_LDELEM_U1:
1247 return mono_defaults.byte_class;
1248 case CEE_LDELEM_U2:
1249 return mono_defaults.uint16_class;
1250 case CEE_LDELEM_I:
1251 case CEE_STELEM_I:
1252 return mono_defaults.int_class;
1253 case CEE_LDELEM_I1:
1254 case CEE_STELEM_I1:
1255 return mono_defaults.sbyte_class;
1256 case CEE_LDELEM_I2:
1257 case CEE_STELEM_I2:
1258 return mono_defaults.int16_class;
1259 case CEE_LDELEM_I4:
1260 case CEE_STELEM_I4:
1261 return mono_defaults.int32_class;
1262 case CEE_LDELEM_U4:
1263 return mono_defaults.uint32_class;
1264 case CEE_LDELEM_I8:
1265 case CEE_STELEM_I8:
1266 return mono_defaults.int64_class;
1267 case CEE_LDELEM_R4:
1268 case CEE_STELEM_R4:
1269 return mono_defaults.single_class;
1270 case CEE_LDELEM_R8:
1271 case CEE_STELEM_R8:
1272 return mono_defaults.double_class;
1273 case CEE_LDELEM_REF:
1274 case CEE_STELEM_REF:
1275 return mono_defaults.object_class;
1276 default:
1277 g_assert_not_reached ();
1279 return NULL;
1283 * We try to share variables when possible
1285 static MonoInst *
1286 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1288 MonoInst *res;
1289 int pos, vnum;
1291 /* inlining can result in deeper stacks */
1292 if (slot >= cfg->header->max_stack)
1293 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1295 pos = ins->type - 1 + slot * STACK_MAX;
1297 switch (ins->type) {
1298 case STACK_I4:
1299 case STACK_I8:
1300 case STACK_R8:
1301 case STACK_PTR:
1302 case STACK_MP:
1303 case STACK_OBJ:
1304 if ((vnum = cfg->intvars [pos]))
1305 return cfg->varinfo [vnum];
1306 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1307 cfg->intvars [pos] = res->inst_c0;
1308 break;
1309 default:
1310 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1312 return res;
1315 static void
1316 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1319 * Don't use this if a generic_context is set, since that means AOT can't
1320 * look up the method using just the image+token.
1321 * table == 0 means this is a reference made from a wrapper.
1323 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1324 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1325 jump_info_token->image = image;
1326 jump_info_token->token = token;
1327 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First exit from this bblock: decide which locals hold the stack items. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		/* Prefer reusing the in_stack of a successor, so no extra stores are needed. */
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate this bblock's out_stack to successors which have none yet;
	 * a successor with a different stack depth makes the method unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store each stack item into its variable and replace it on the stack. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1457 /* Emit code which loads interface_offsets [klass->interface_id]
1458 * The array is stored in memory before vtable.
1460 static void
1461 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1463 if (cfg->compile_aot) {
1464 int ioffset_reg = alloc_preg (cfg);
1465 int iid_reg = alloc_preg (cfg);
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1471 else {
1472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap located at BASE_REG + OFFSET has the bit corresponding to
 * klass->interface_id set. The result is only meaningful as zero/nonzero.
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmap: delegate the lookup to an icall helper. */
	MonoInst *args [2];
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	args [0] = ins;
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
	else
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* interface_id is unknown until load time: compute byte index and bit mask in IR */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		/* byte index = iid >> 3 */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		/* bit mask = 1 << (iid & 7) */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* interface_id is a JIT-time constant: byte index and mask fold to immediates */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
#endif
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
}
1540 * Emit code which checks whenever the interface id of @klass is smaller than
1541 * than the value given by max_iid_reg.
1543 static void
1544 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1545 MonoBasicBlock *false_target)
1547 if (cfg->compile_aot) {
1548 int iid_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1552 else
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1554 if (false_target)
1555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1556 else
1557 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1560 /* Same as above, but obtains max_iid from a vtable */
1561 static void
1562 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1571 /* Same as above, but obtains max_iid from a klass */
1572 static void
1573 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1574 MonoBasicBlock *false_target)
1576 int max_iid_reg = alloc_preg (cfg);
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1579 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass check of the class in KLASS_REG against
 * KLASS (or against the run-time class in KLASS_INS when non-NULL), using
 * the supertypes array: branch to TRUE_TARGET when the supertype slot at
 * KLASS's idepth matches, to FALSE_TARGET when the candidate's idepth is
 * too small; falls through when the slot holds a different class.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	mono_class_setup_supertypes (klass);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* idepth only needs a run-time check when it can exceed the statically sized supertable */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		/* compare against a class computed at run time */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/* Same as mini_emit_isninst_cast_inst () with the class known at compile time. */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
}
1616 static void
1617 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1619 int intf_reg = alloc_preg (cfg);
1621 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1622 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1624 if (true_target)
1625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1626 else
1627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1631 * Variant of the above that takes a register to the class, not the vtable.
1633 static void
1634 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1636 int intf_bit_reg = alloc_preg (cfg);
1638 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1639 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1641 if (true_target)
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1643 else
1644 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1647 static inline void
1648 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1650 if (klass_inst) {
1651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1652 } else if (cfg->compile_aot) {
1653 int const_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1656 } else {
1657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1659 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Emit code which throws InvalidCastException unless the class in KLASS_REG is exactly KLASS. */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
}
1668 static inline void
1669 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1671 if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1675 } else {
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);

/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS (or the
 * run-time class in KLASS_INST when non-NULL), throwing
 * InvalidCastException on mismatch. For array classes the rank and element
 * class are checked recursively; OBJ_REG (when not -1) is additionally
 * checked to be a vector for SZARRAY casts. OBJECT_IS_NULL is branched to
 * on paths where the cast trivially succeeds.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* the rank must match exactly */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[]: anything but an enum element type succeeds */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		/* non-array: check the supertypes array at KLASS's idepth */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		mono_class_setup_supertypes (klass);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* idepth only needs a run-time check when it can exceed the static supertable */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
/* Emit a castclass check of KLASS_REG against the compile-time class KLASS. */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
}
1745 static void
1746 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1748 int val_reg;
1750 g_assert (val == 0);
1752 if (align == 0)
1753 align = 4;
1755 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1756 switch (size) {
1757 case 1:
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1759 return;
1760 case 2:
1761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1762 return;
1763 case 4:
1764 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1765 return;
1766 #if SIZEOF_REGISTER == 8
1767 case 8:
1768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1769 return;
1770 #endif
1774 val_reg = alloc_preg (cfg);
1776 if (SIZEOF_REGISTER == 8)
1777 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1778 else
1779 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1781 if (align < 4) {
1782 /* This could be optimized further if neccesary */
1783 while (size >= 1) {
1784 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 offset += 1;
1786 size -= 1;
1788 return;
1791 #if !NO_UNALIGNED_ACCESS
1792 if (SIZEOF_REGISTER == 8) {
1793 if (offset % 8) {
1794 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1795 offset += 4;
1796 size -= 4;
1798 while (size >= 8) {
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1800 offset += 8;
1801 size -= 8;
1804 #endif
1806 while (size >= 4) {
1807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1808 offset += 4;
1809 size -= 4;
1811 while (size >= 2) {
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1813 offset += 2;
1814 size -= 2;
1816 while (size >= 1) {
1817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1818 offset += 1;
1819 size -= 1;
1823 void
1824 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1826 int cur_reg;
1828 if (align == 0)
1829 align = 4;
1831 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1832 g_assert (size < 10000);
1834 if (align < 4) {
1835 /* This could be optimized further if neccesary */
1836 while (size >= 1) {
1837 cur_reg = alloc_preg (cfg);
1838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 doffset += 1;
1841 soffset += 1;
1842 size -= 1;
1846 #if !NO_UNALIGNED_ACCESS
1847 if (SIZEOF_REGISTER == 8) {
1848 while (size >= 8) {
1849 cur_reg = alloc_preg (cfg);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1852 doffset += 8;
1853 soffset += 8;
1854 size -= 8;
1857 #endif
1859 while (size >= 4) {
1860 cur_reg = alloc_preg (cfg);
1861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1863 doffset += 4;
1864 soffset += 4;
1865 size -= 4;
1867 while (size >= 2) {
1868 cur_reg = alloc_preg (cfg);
1869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1871 doffset += 2;
1872 soffset += 2;
1873 size -= 2;
1875 while (size >= 1) {
1876 cur_reg = alloc_preg (cfg);
1877 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1879 doffset += 1;
1880 soffset += 1;
1881 size -= 1;
1885 static void
1886 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1888 MonoInst *ins, *c;
1890 if (cfg->compile_aot) {
1891 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1892 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1893 ins->sreg1 = sreg1;
1894 ins->sreg2 = c->dreg;
1895 MONO_ADD_INS (cfg->cbb, ins);
1896 } else {
1897 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1898 ins->sreg1 = sreg1;
1899 ins->inst_offset = mini_get_tls_offset (tls_key);
1900 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 *   Emit IR to push the current LMF onto the LMF stack. No-op unless the
 * method uses IR-based LMF handling (cfg->lmf_ir).
 */
static void
emit_push_lmf (MonoCompile *cfg)
{
	/*
	 * Emit IR to push the LMF:
	 * lmf_addr = <lmf_addr from tls>
	 * lmf->lmf_addr = lmf_addr
	 * lmf->prev_lmf = *lmf_addr
	 * *lmf_addr = lmf
	 */
	int lmf_reg, prev_lmf_reg;
	MonoInst *ins, *lmf_ins;

	if (!cfg->lmf_ir)
		return;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the LMF itself lives in a TLS slot, no lmf_addr indirection needed. */
		/* Load current lmf */
		lmf_ins = mono_get_lmf_intrinsic (cfg);
		g_assert (lmf_ins);
		MONO_ADD_INS (cfg->cbb, lmf_ins);
		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;
		/* Save previous_lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
		/* Set new LMF */
		emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
	} else {
		/*
		 * Store lmf_addr in a variable, so it can be allocated to a global register.
		 */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

#ifdef HOST_WIN32
		ins = mono_get_jit_tls_intrinsic (cfg);
		if (ins) {
			int jit_tls_dreg = ins->dreg;

			MONO_ADD_INS (cfg->cbb, ins);
			lmf_reg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
		} else {
			/* no TLS intrinsic available: fall back to the icall */
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
		}
#else
		lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
		if (lmf_ins) {
			MONO_ADD_INS (cfg->cbb, lmf_ins);
		} else {
#ifdef TARGET_IOS
			MonoInst *args [16], *jit_tls_ins, *ins;

			/* Inline mono_get_lmf_addr () */
			/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */

			/* Load mono_jit_tls_id */
			EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
			/* call pthread_getspecific () */
			jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
			/* lmf_addr = &jit_tls->lmf */
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
			lmf_ins = ins;
#else
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
#endif
		}
#endif
		lmf_ins->dreg = cfg->lmf_addr_var->dreg;

		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		/* Save previous_lmf */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
		/* Set new lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
	}
}
/*
 * emit_pop_lmf:
 *
 *   Emit IR to pop the current LMF from the LMF stack. No-op unless the
 * method uses IR-based LMF handling (cfg->lmf_ir). Mirrors emit_push_lmf ().
 */
static void
emit_pop_lmf (MonoCompile *cfg)
{
	int lmf_reg, lmf_addr_reg, prev_lmf_reg;
	MonoInst *ins;

	if (!cfg->lmf_ir)
		return;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the LMF lives in a TLS slot. */
		/* Load previous_lmf */
		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* Set new LMF */
		emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
	} else {
		/*
		 * Emit IR to pop the LMF:
		 * *(lmf->lmf_addr) = lmf->prev_lmf
		 */
		/* This could be called before emit_push_lmf () */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		lmf_addr_reg = cfg->lmf_addr_var->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
	}
}
2030 static void
2031 emit_instrumentation_call (MonoCompile *cfg, void *func)
2033 MonoInst *iargs [1];
2036 * Avoid instrumenting inlined methods since it can
2037 * distort profiling results.
2039 if (cfg->method != cfg->current_method)
2040 return;
2042 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2043 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2044 mono_emit_jit_icall (cfg, func, iargs);
2048 static int
2049 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2051 if (type->byref)
2052 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2054 handle_enum:
2055 type = mini_get_basic_type_from_generic (gsctx, type);
2056 type = mini_replace_type (type);
2057 switch (type->type) {
2058 case MONO_TYPE_VOID:
2059 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2060 case MONO_TYPE_I1:
2061 case MONO_TYPE_U1:
2062 case MONO_TYPE_BOOLEAN:
2063 case MONO_TYPE_I2:
2064 case MONO_TYPE_U2:
2065 case MONO_TYPE_CHAR:
2066 case MONO_TYPE_I4:
2067 case MONO_TYPE_U4:
2068 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2069 case MONO_TYPE_I:
2070 case MONO_TYPE_U:
2071 case MONO_TYPE_PTR:
2072 case MONO_TYPE_FNPTR:
2073 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2074 case MONO_TYPE_CLASS:
2075 case MONO_TYPE_STRING:
2076 case MONO_TYPE_OBJECT:
2077 case MONO_TYPE_SZARRAY:
2078 case MONO_TYPE_ARRAY:
2079 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2080 case MONO_TYPE_I8:
2081 case MONO_TYPE_U8:
2082 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2083 case MONO_TYPE_R4:
2084 case MONO_TYPE_R8:
2085 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2086 case MONO_TYPE_VALUETYPE:
2087 if (type->data.klass->enumtype) {
2088 type = mono_class_enum_basetype (type->data.klass);
2089 goto handle_enum;
2090 } else
2091 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2092 case MONO_TYPE_TYPEDBYREF:
2093 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2094 case MONO_TYPE_GENERICINST:
2095 type = &type->data.generic_class->container_class->byval_arg;
2096 goto handle_enum;
2097 case MONO_TYPE_VAR:
2098 case MONO_TYPE_MVAR:
2099 /* gsharedvt */
2100 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2101 default:
2102 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2104 return -1;
2108 * target_type_is_incompatible:
2109 * @cfg: MonoCompile context
2111 * Check that the item @arg on the evaluation stack can be stored
2112 * in the target type (can be a local, or field, etc).
2113 * The cfg arg can be used to check if we need verification or just
2114 * validity checks.
2116 * Returns: non-0 value if arg can't be stored on a target.
2118 static int
2119 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2121 MonoType *simple_type;
2122 MonoClass *klass;
2124 target = mini_replace_type (target);
2125 if (target->byref) {
2126 /* FIXME: check that the pointed to types match */
2127 if (arg->type == STACK_MP)
2128 return arg->klass != mono_class_from_mono_type (target);
2129 if (arg->type == STACK_PTR)
2130 return 0;
2131 return 1;
2134 simple_type = mono_type_get_underlying_type (target);
2135 switch (simple_type->type) {
2136 case MONO_TYPE_VOID:
2137 return 1;
2138 case MONO_TYPE_I1:
2139 case MONO_TYPE_U1:
2140 case MONO_TYPE_BOOLEAN:
2141 case MONO_TYPE_I2:
2142 case MONO_TYPE_U2:
2143 case MONO_TYPE_CHAR:
2144 case MONO_TYPE_I4:
2145 case MONO_TYPE_U4:
2146 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2147 return 1;
2148 return 0;
2149 case MONO_TYPE_PTR:
2150 /* STACK_MP is needed when setting pinned locals */
2151 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2152 return 1;
2153 return 0;
2154 case MONO_TYPE_I:
2155 case MONO_TYPE_U:
2156 case MONO_TYPE_FNPTR:
2158 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2159 * in native int. (#688008).
2161 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2162 return 1;
2163 return 0;
2164 case MONO_TYPE_CLASS:
2165 case MONO_TYPE_STRING:
2166 case MONO_TYPE_OBJECT:
2167 case MONO_TYPE_SZARRAY:
2168 case MONO_TYPE_ARRAY:
2169 if (arg->type != STACK_OBJ)
2170 return 1;
2171 /* FIXME: check type compatibility */
2172 return 0;
2173 case MONO_TYPE_I8:
2174 case MONO_TYPE_U8:
2175 if (arg->type != STACK_I8)
2176 return 1;
2177 return 0;
2178 case MONO_TYPE_R4:
2179 case MONO_TYPE_R8:
2180 if (arg->type != STACK_R8)
2181 return 1;
2182 return 0;
2183 case MONO_TYPE_VALUETYPE:
2184 if (arg->type != STACK_VTYPE)
2185 return 1;
2186 klass = mono_class_from_mono_type (simple_type);
2187 if (klass != arg->klass)
2188 return 1;
2189 return 0;
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (arg->type != STACK_VTYPE)
2192 return 1;
2193 klass = mono_class_from_mono_type (simple_type);
2194 if (klass != arg->klass)
2195 return 1;
2196 return 0;
2197 case MONO_TYPE_GENERICINST:
2198 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2199 if (arg->type != STACK_VTYPE)
2200 return 1;
2201 klass = mono_class_from_mono_type (simple_type);
2202 if (klass != arg->klass)
2203 return 1;
2204 return 0;
2205 } else {
2206 if (arg->type != STACK_OBJ)
2207 return 1;
2208 /* FIXME: check type compatibility */
2209 return 0;
2211 case MONO_TYPE_VAR:
2212 case MONO_TYPE_MVAR:
2213 g_assert (cfg->generic_sharing_context);
2214 if (mini_type_var_is_vt (cfg, simple_type)) {
2215 if (arg->type != STACK_VTYPE)
2216 return 1;
2217 } else {
2218 if (arg->type != STACK_OBJ)
2219 return 1;
2221 return 0;
2222 default:
2223 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2225 return 1;
2229 * Prepare arguments for passing to a function call.
2230 * Return a non-zero value if the arguments can't be passed to the given
2231 * signature.
2232 * The type checks are not yet complete and some conversions may need
2233 * casts on 32 or 64 bit architectures.
2235 * FIXME: implement this using target_type_is_incompatible ()
2237 static int
2238 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2240 MonoType *simple_type;
2241 int i;
2243 if (sig->hasthis) {
2244 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2245 return 1;
2246 args++;
2248 for (i = 0; i < sig->param_count; ++i) {
2249 if (sig->params [i]->byref) {
2250 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2251 return 1;
2252 continue;
2254 simple_type = sig->params [i];
2255 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2256 handle_enum:
2257 switch (simple_type->type) {
2258 case MONO_TYPE_VOID:
2259 return 1;
2260 continue;
2261 case MONO_TYPE_I1:
2262 case MONO_TYPE_U1:
2263 case MONO_TYPE_BOOLEAN:
2264 case MONO_TYPE_I2:
2265 case MONO_TYPE_U2:
2266 case MONO_TYPE_CHAR:
2267 case MONO_TYPE_I4:
2268 case MONO_TYPE_U4:
2269 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2270 return 1;
2271 continue;
2272 case MONO_TYPE_I:
2273 case MONO_TYPE_U:
2274 case MONO_TYPE_PTR:
2275 case MONO_TYPE_FNPTR:
2276 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2277 return 1;
2278 continue;
2279 case MONO_TYPE_CLASS:
2280 case MONO_TYPE_STRING:
2281 case MONO_TYPE_OBJECT:
2282 case MONO_TYPE_SZARRAY:
2283 case MONO_TYPE_ARRAY:
2284 if (args [i]->type != STACK_OBJ)
2285 return 1;
2286 continue;
2287 case MONO_TYPE_I8:
2288 case MONO_TYPE_U8:
2289 if (args [i]->type != STACK_I8)
2290 return 1;
2291 continue;
2292 case MONO_TYPE_R4:
2293 case MONO_TYPE_R8:
2294 if (args [i]->type != STACK_R8)
2295 return 1;
2296 continue;
2297 case MONO_TYPE_VALUETYPE:
2298 if (simple_type->data.klass->enumtype) {
2299 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2300 goto handle_enum;
2302 if (args [i]->type != STACK_VTYPE)
2303 return 1;
2304 continue;
2305 case MONO_TYPE_TYPEDBYREF:
2306 if (args [i]->type != STACK_VTYPE)
2307 return 1;
2308 continue;
2309 case MONO_TYPE_GENERICINST:
2310 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2311 goto handle_enum;
2312 case MONO_TYPE_VAR:
2313 case MONO_TYPE_MVAR:
2314 /* gsharedvt */
2315 if (args [i]->type != STACK_VTYPE)
2316 return 1;
2317 continue;
2318 default:
2319 g_error ("unknown type 0x%02x in check_call_signature",
2320 simple_type->type);
2323 return 0;
2326 static int
2327 callvirt_to_call (int opcode)
2329 switch (opcode) {
2330 case OP_CALL_MEMBASE:
2331 return OP_CALL;
2332 case OP_VOIDCALL_MEMBASE:
2333 return OP_VOIDCALL;
2334 case OP_FCALL_MEMBASE:
2335 return OP_FCALL;
2336 case OP_VCALL_MEMBASE:
2337 return OP_VCALL;
2338 case OP_LCALL_MEMBASE:
2339 return OP_LCALL;
2340 default:
2341 g_assert_not_reached ();
2344 return -1;
2347 #ifdef MONO_ARCH_HAVE_IMT
2348 /* Either METHOD or IMT_ARG needs to be set */
/* Pass the IMT (interface dispatch) argument to CALL: either the interface
 * METHOD itself, or an already computed vreg in IMT_ARG. */
2349 static void
2350 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2352 int method_reg;
/* LLVM path: materialize the argument into a vreg and record it on the call. */
2354 if (COMPILE_LLVM (cfg)) {
2355 method_reg = alloc_preg (cfg);
2357 if (imt_arg) {
2358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2359 } else if (cfg->compile_aot) {
/* Under AOT the method address is only known at load time, so emit a patch. */
2360 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2361 } else {
2362 MonoInst *ins;
2363 MONO_INST_NEW (cfg, ins, OP_PCONST);
2364 ins->inst_p0 = method;
2365 ins->dreg = method_reg;
2366 MONO_ADD_INS (cfg->cbb, ins);
2369 #ifdef ENABLE_LLVM
2370 call->imt_arg_reg = method_reg;
2371 #endif
2372 #ifdef MONO_ARCH_IMT_REG
2373 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2374 #else
2375 /* Need this to keep the IMT arg alive */
2376 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2377 #endif
2378 return;
/* JIT path: same materialization, passed in the architecture's dedicated IMT
 * register when there is one, otherwise via the backend hook. */
2381 #ifdef MONO_ARCH_IMT_REG
2382 method_reg = alloc_preg (cfg);
2384 if (imt_arg) {
2385 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2386 } else if (cfg->compile_aot) {
2387 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2388 } else {
2389 MonoInst *ins;
2390 MONO_INST_NEW (cfg, ins, OP_PCONST);
2391 ins->inst_p0 = method;
2392 ins->dreg = method_reg;
2393 MONO_ADD_INS (cfg->cbb, ins);
2396 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2397 #else
2398 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2399 #endif
2401 #endif
2403 static MonoJumpInfo *
2404 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2406 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2408 ji->ip.i = ip;
2409 ji->type = type;
2410 ji->data.target = target;
2412 return ji;
2415 static int
2416 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2418 if (cfg->generic_sharing_context)
2419 return mono_class_check_context_used (klass);
2420 else
2421 return 0;
2424 static int
2425 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2427 if (cfg->generic_sharing_context)
2428 return mono_method_check_context_used (method);
2429 else
2430 return 0;
2434 * check_method_sharing:
2436 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2438 static void
2439 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2441 gboolean pass_vtable = FALSE;
2442 gboolean pass_mrgctx = FALSE;
2444 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2445 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2446 gboolean sharable = FALSE;
2448 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2449 sharable = TRUE;
2450 } else {
2451 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2452 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2453 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2455 sharable = sharing_enabled && context_sharable;
2459 * Pass vtable iff target method might
2460 * be shared, which means that sharing
2461 * is enabled for its class and its
2462 * context is sharable (and it's not a
2463 * generic method).
2465 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2466 pass_vtable = TRUE;
2469 if (mini_method_get_context (cmethod) &&
2470 mini_method_get_context (cmethod)->method_inst) {
2471 g_assert (!pass_vtable);
2473 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2474 pass_mrgctx = TRUE;
2475 } else {
2476 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2477 MonoGenericContext *context = mini_method_get_context (cmethod);
2478 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2480 if (sharing_enabled && context_sharable)
2481 pass_mrgctx = TRUE;
2482 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2483 pass_mrgctx = TRUE;
2487 if (out_pass_vtable)
2488 *out_pass_vtable = pass_vtable;
2489 if (out_pass_mrgctx)
2490 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects an indirect call, VIRTUAL a membase (vtable) call, TAIL a tail
 * call, RGCTX whether an rgctx argument is passed, and UNBOX_TRAMPOLINE
 * whether the callee must be reached through an unbox trampoline.
 * The returned call instruction is NOT added to a basic block; callers do that.
 */
2493 inline static MonoCallInst *
2494 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2495 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2497 MonoType *sig_ret;
2498 MonoCallInst *call;
2499 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2500 int i;
2501 #endif
/* Tail calls use a dedicated opcode; the profiler leave event must be emitted
 * now, since control never returns to this frame. */
2503 if (tail) {
2504 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2506 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2507 } else
2508 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2510 call->args = args;
2511 call->signature = sig;
2512 call->rgctx_reg = rgctx;
2513 sig_ret = mini_replace_type (sig->ret);
2515 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype returns need a hidden return-address argument (vret). */
2517 if (tail) {
2518 if (mini_type_is_vtype (cfg, sig_ret)) {
2519 call->vret_var = cfg->vret_addr;
2520 //g_assert_not_reached ();
2522 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2523 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2524 MonoInst *loada;
2526 temp->backend.is_pinvoke = sig->pinvoke;
/*
2529 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2530 * address of return value to increase optimization opportunities.
2531 * Before vtype decomposition, the dreg of the call ins itself represents the
2532 * fact the call modifies the return value. After decomposition, the call will
2533 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2534 * will be transformed into an LDADDR.
 */
2536 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2537 loada->dreg = alloc_preg (cfg);
2538 loada->inst_p0 = temp;
2539 /* We reference the call too since call->dreg could change during optimization */
2540 loada->inst_p1 = call;
2541 MONO_ADD_INS (cfg->cbb, loada);
2543 call->inst.dreg = temp->dreg;
2545 call->vret_var = loada;
2546 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2547 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2549 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2550 if (COMPILE_SOFT_FLOAT (cfg)) {
/*
2552 * If the call has a float argument, we would need to do an r8->r4 conversion using
2553 * an icall, but that cannot be done during the call sequence since it would clobber
2554 * the call registers + the stack. So we do it before emitting the call.
 */
2556 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2557 MonoType *t;
2558 MonoInst *in = call->args [i];
2560 if (i >= sig->hasthis)
2561 t = sig->params [i - sig->hasthis];
2562 else
2563 t = &mono_defaults.int_class->byval_arg;
2564 t = mono_type_get_underlying_type (t);
2566 if (!t->byref && t->type == MONO_TYPE_R4) {
2567 MonoInst *iargs [1];
2568 MonoInst *conv;
2570 iargs [0] = in;
2571 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2573 /* The result will be in an int vreg */
2574 call->args [i] = conv;
2578 #endif
2580 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (or LLVM) lower the argument passing. */
2582 #ifdef ENABLE_LLVM
2583 if (COMPILE_LLVM (cfg))
2584 mono_llvm_emit_call (cfg, call);
2585 else
2586 mono_arch_emit_call (cfg, call);
2587 #else
2588 mono_arch_emit_call (cfg, call);
2589 #endif
/* Track the largest outgoing-args area and remember the method makes calls. */
2591 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2592 cfg->flags |= MONO_CFG_HAS_CALLS;
2594 return call;
2597 static void
2598 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2600 #ifdef MONO_ARCH_RGCTX_REG
2601 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2602 cfg->uses_rgctx_reg = TRUE;
2603 call->rgctx_reg = TRUE;
2604 #ifdef ENABLE_LLVM
2605 call->rgctx_arg_reg = rgctx_reg;
2606 #endif
2607 #else
2608 NOT_IMPLEMENTED;
2609 #endif
2612 inline static MonoInst*
2613 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2615 MonoCallInst *call;
2616 MonoInst *ins;
2617 int rgctx_reg = -1;
2618 gboolean check_sp = FALSE;
2620 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2621 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2623 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2624 check_sp = TRUE;
2627 if (rgctx_arg) {
2628 rgctx_reg = mono_alloc_preg (cfg);
2629 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2632 if (check_sp) {
2633 if (!cfg->stack_inbalance_var)
2634 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2636 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2637 ins->dreg = cfg->stack_inbalance_var->dreg;
2638 MONO_ADD_INS (cfg->cbb, ins);
2641 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2643 call->inst.sreg1 = addr->dreg;
2645 if (imt_arg)
2646 emit_imt_argument (cfg, call, NULL, imt_arg);
2648 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2650 if (check_sp) {
2651 int sp_reg;
2653 sp_reg = mono_alloc_preg (cfg);
2655 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2656 ins->dreg = sp_reg;
2657 MONO_ADD_INS (cfg->cbb, ins);
2659 /* Restore the stack so we don't crash when throwing the exception */
2660 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2661 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2662 MONO_ADD_INS (cfg->cbb, ins);
2664 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2665 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2668 if (rgctx_arg)
2669 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2671 return (MonoInst*)call;
/* Forward declarations: these helpers are defined later in this file. */
2674 static MonoInst*
2675 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2677 static MonoInst*
2678 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2679 static MonoInst*
2680 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly tail) call to METHOD with signature SIG
 * and arguments ARGS. THIS being non-NULL selects virtual dispatch; IMT_ARG
 * and RGCTX_ARG are optional hidden arguments. Returns the call instruction.
 */
2682 static MonoInst*
2683 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2684 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2686 #ifndef DISABLE_REMOTING
2687 gboolean might_be_remote = FALSE;
2688 #endif
2689 gboolean virtual = this != NULL;
2690 gboolean enable_for_aot = TRUE;
2691 int context_used;
2692 MonoCallInst *call;
2693 int rgctx_reg = 0;
2694 gboolean need_unbox_trampoline;
2696 if (!sig)
2697 sig = mono_method_signature (method);
2699 if (rgctx_arg) {
2700 rgctx_reg = mono_alloc_preg (cfg);
2701 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared void but actually return the new string. */
2704 if (method->string_ctor) {
2705 /* Create the real signature */
2706 /* FIXME: Cache these */
2707 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2708 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2710 sig = ctor_sig;
2713 context_used = mini_method_check_context_used (cfg, method);
/* A non-virtual call on a MarshalByRef (or object) 'this' may have to go
 * through the remoting machinery; gshared methods use an indirect call since
 * they can't have remoting wrappers. */
2715 #ifndef DISABLE_REMOTING
2716 might_be_remote = this && sig->hasthis &&
2717 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2718 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2720 if (might_be_remote && context_used) {
2721 MonoInst *addr;
2723 g_assert (cfg->generic_sharing_context);
2725 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2727 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2729 #endif
/* Object/interface methods may be invoked on a boxed valuetype receiver. */
2731 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2733 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2735 #ifndef DISABLE_REMOTING
2736 if (might_be_remote)
2737 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2738 else
2739 #endif
2740 call->method = method;
2741 call->inst.flags |= MONO_INST_HAS_METHOD;
2742 call->inst.inst_left = this;
2743 call->tail_call = tail;
2745 if (virtual) {
2746 int vtable_reg, slot_reg, this_reg;
2747 int offset;
2749 this_reg = this->dreg;
/* Delegate Invoke: call straight through delegate->invoke_impl. */
2751 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2752 MonoInst *dummy_use;
2754 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2756 /* Make a call to delegate->invoke_impl */
2757 call->inst.inst_basereg = this_reg;
2758 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2759 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2761 /* We must emit a dummy use here because the delegate trampoline will
2762 replace the 'this' argument with the delegate target making this activation
2763 no longer a root for the delegate.
2764 This is an issue for delegates that target collectible code such as dynamic
2765 methods of GC'able assemblies.
2767 For a test case look into #667921.
2769 FIXME: a dummy use is not the best way to do it as the local register allocator
2770 will put it on a caller save register and spill it around the call.
2771 Ideally, we would either put it on a callee save register or only do the store part.
2773 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2775 return (MonoInst*)call;
/* Devirtualize when the callee is non-virtual, or virtual-but-final. */
2778 if ((!cfg->compile_aot || enable_for_aot) &&
2779 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2780 (MONO_METHOD_IS_FINAL (method) &&
2781 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2782 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
/*
2784 * the method is not virtual, we just need to ensure this is not null
2785 * and then we can call the method directly.
 */
2787 #ifndef DISABLE_REMOTING
2788 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
/*
2790 * The check above ensures method is not gshared, this is needed since
2791 * gshared methods can't have wrappers.
 */
2793 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2795 #endif
2797 if (!method->string_ctor)
2798 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2800 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2801 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
/*
2803 * the method is virtual, but we can statically dispatch since either
2804 * it's class or the method itself are sealed.
2805 * But first we need to ensure it's not a null reference.
 */
2807 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2809 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2810 } else {
/* True virtual dispatch: load the vtable and the slot to call through. */
2811 vtable_reg = alloc_preg (cfg);
2812 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2813 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2814 slot_reg = -1;
2815 #ifdef MONO_ARCH_HAVE_IMT
2816 if (mono_use_imt) {
/* IMT slots live at negative offsets from the vtable. */
2817 guint32 imt_slot = mono_method_get_imt_slot (method);
2818 emit_imt_argument (cfg, call, call->method, imt_arg);
2819 slot_reg = vtable_reg;
2820 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2822 #endif
2823 if (slot_reg == -1) {
2824 slot_reg = alloc_preg (cfg);
2825 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2826 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2828 } else {
2829 slot_reg = vtable_reg;
2830 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2831 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2832 #ifdef MONO_ARCH_HAVE_IMT
2833 if (imt_arg) {
2834 g_assert (mono_method_signature (method)->generic_param_count);
2835 emit_imt_argument (cfg, call, call->method, imt_arg);
2837 #endif
2840 call->inst.sreg1 = slot_reg;
2841 call->inst.inst_offset = offset;
2842 call->virtual = TRUE;
2846 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2848 if (rgctx_arg)
2849 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2851 return (MonoInst*)call;
2854 MonoInst*
2855 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2857 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
2860 MonoInst*
2861 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2862 MonoInst **args)
2864 MonoCallInst *call;
2866 g_assert (sig);
2868 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2869 call->fptr = func;
2871 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2873 return (MonoInst*)call;
2876 MonoInst*
2877 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2879 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2881 g_assert (info);
2883 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2887 * mono_emit_abs_call:
2889 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2891 inline static MonoInst*
2892 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2893 MonoMethodSignature *sig, MonoInst **args)
2895 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2896 MonoInst *ins;
2899 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2900 * handle it.
2902 if (cfg->abs_patches == NULL)
2903 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2904 g_hash_table_insert (cfg->abs_patches, ji, ji);
2905 ins = mono_emit_native_call (cfg, ji, sig, args);
2906 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2907 return ins;
2910 static MonoInst*
2911 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2913 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2914 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2915 int widen_op = -1;
2918 * Native code might return non register sized integers
2919 * without initializing the upper bits.
2921 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2922 case OP_LOADI1_MEMBASE:
2923 widen_op = OP_ICONV_TO_I1;
2924 break;
2925 case OP_LOADU1_MEMBASE:
2926 widen_op = OP_ICONV_TO_U1;
2927 break;
2928 case OP_LOADI2_MEMBASE:
2929 widen_op = OP_ICONV_TO_I2;
2930 break;
2931 case OP_LOADU2_MEMBASE:
2932 widen_op = OP_ICONV_TO_U2;
2933 break;
2934 default:
2935 break;
2938 if (widen_op != -1) {
2939 int dreg = alloc_preg (cfg);
2940 MonoInst *widen;
2942 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2943 widen->type = ins->type;
2944 ins = widen;
2949 return ins;
2952 static MonoMethod*
2953 get_memcpy_method (void)
2955 static MonoMethod *memcpy_method = NULL;
2956 if (!memcpy_method) {
2957 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2958 if (!memcpy_method)
2959 g_error ("Old corlib found. Install a new one");
2961 return memcpy_method;
2964 static void
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 MonoClassField *field;
2968 gpointer iter = NULL;
2970 while ((field = mono_class_get_fields (klass, &iter))) {
2971 int foffset;
2973 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2974 continue;
2975 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2977 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2979 } else {
2980 MonoClass *field_class = mono_class_from_mono_type (field->type);
2981 if (field_class->has_references)
2982 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Picks, in order of
 * preference: a dedicated arch card-table opcode, inline card marking, or a
 * call to the generic GC write-barrier method. No-op when the GC does not
 * require barriers.
 */
2987 static void
2988 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2990 int card_table_shift_bits;
2991 gpointer card_table_mask;
2992 guint8 *card_table;
2993 MonoInst *dummy_use;
2994 int nursery_shift_bits;
2995 size_t nursery_size;
2996 gboolean has_card_table_wb = FALSE;
2998 if (!cfg->gen_write_barriers)
2999 return;
3001 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3003 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3005 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3006 has_card_table_wb = TRUE;
3007 #endif
/* Fast path: single arch-specific opcode which marks the card itself. */
3009 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3010 MonoInst *wbarrier;
3012 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3013 wbarrier->sreg1 = ptr->dreg;
3014 wbarrier->sreg2 = value->dreg;
3015 MONO_ADD_INS (cfg->cbb, wbarrier);
3016 } else if (card_table) {
/* Inline card marking: card_table [ptr >> shift (& mask)] = 1 */
3017 int offset_reg = alloc_preg (cfg);
3018 int card_reg = alloc_preg (cfg);
3019 MonoInst *ins;
3021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3022 if (card_table_mask)
3023 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3025 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3026 * IMM's larger than 32bits.
 */
3028 if (cfg->compile_aot) {
3029 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3030 } else {
3031 MONO_INST_NEW (cfg, ins, OP_PCONST);
3032 ins->inst_p0 = card_table;
3033 ins->dreg = card_reg;
3034 MONO_ADD_INS (cfg->cbb, ins);
3037 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3038 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3039 } else {
/* Slow path: call the GC-provided write barrier method. */
3040 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3041 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator/GC maps. */
3044 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of KLASS
 * from the address in iargs [1] to the address in iargs [0].  Returns TRUE if
 * code was emitted, FALSE if the caller must fall back to a generic copy
 * (alignment smaller than a pointer, or the type too large for the 32-slot
 * write-barrier bitmap).
 * NOTE(review): iargs [0], [2] and [3] are reused as scratch instructions
 * below — callers appear to treat iargs as clobberable; confirm.
 */
3047 static gboolean
3048 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3050 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3051 unsigned need_wb = 0;
3053 if (align == 0)
3054 align = 4;
3056 /*types with references can't have alignment smaller than sizeof(void*) */
3057 if (align < SIZEOF_VOID_P)
3058 return FALSE;
3060 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3061 if (size > 32 * SIZEOF_VOID_P)
3062 return FALSE;
/* need_wb gets one bit per pointer-sized slot that may hold a reference */
3064 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3066 /* We don't unroll more than 5 stores to avoid code bloat. */
3067 if (size > 5 * SIZEOF_VOID_P) {
3068 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3069 size += (SIZEOF_VOID_P - 1);
3070 size &= ~(SIZEOF_VOID_P - 1);
/* Large copy: delegate to the bitmap-driven icall instead of unrolling */
3072 EMIT_NEW_ICONST (cfg, iargs [2], size);
3073 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3074 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3075 return TRUE;
3078 destreg = iargs [0]->dreg;
3079 srcreg = iargs [1]->dreg;
3080 offset = 0;
3082 dest_ptr_reg = alloc_preg (cfg);
3083 tmp_reg = alloc_preg (cfg);
3085 /*tmp = dreg*/
3086 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copies; a barrier is emitted only for slots whose
 * bit is set in need_wb */
3088 while (size >= SIZEOF_VOID_P) {
3089 MonoInst *load_inst;
3090 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3091 load_inst->dreg = tmp_reg;
3092 load_inst->inst_basereg = srcreg;
3093 load_inst->inst_offset = offset;
3094 MONO_ADD_INS (cfg->cbb, load_inst);
3096 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3098 if (need_wb & 0x1)
3099 emit_write_barrier (cfg, iargs [0], load_inst);
3101 offset += SIZEOF_VOID_P;
3102 size -= SIZEOF_VOID_P;
3103 need_wb >>= 1;
3105 /*tmp += sizeof (void*)*/
3106 if (size >= SIZEOF_VOID_P) {
3107 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3108 MONO_ADD_INS (cfg->cbb, iargs [0]);
3112 /* Those cannot be references since size < sizeof (void*) */
3113 while (size >= 4) {
3114 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3115 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3116 offset += 4;
3117 size -= 4;
3120 while (size >= 2) {
3121 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3122 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3123 offset += 2;
3124 size -= 2;
3127 while (size >= 1) {
3128 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3129 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3130 offset += 1;
3131 size -= 1;
3134 return TRUE;
/*
3138 * Emit code to copy a valuetype of type @klass whose address is stored in
3139 * @src->dreg to memory whose address is stored at @dest->dreg.
 *
 * If @native, the native (marshalled) size/layout is used and the struct must
 * contain no references.  Under gsharedvt the size comes from the rgctx at
 * runtime.
 */
3141 void
3142 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3144 MonoInst *iargs [4];
3145 int context_used, n;
3146 guint32 align = 0;
3147 MonoMethod *memcpy_method;
3148 MonoInst *size_ins = NULL;
3149 MonoInst *memcpy_ins = NULL;
3151 g_assert (klass);
/*
3153 * This check breaks with spilled vars... need to handle it during verification anyway.
3154 * g_assert (klass && klass == src->klass && klass == dest->klass);
 */
3157 if (mini_is_gsharedvt_klass (cfg, klass)) {
3158 g_assert (!native);
3159 context_used = mini_class_check_context_used (cfg, klass);
/* Size and memcpy routine are only known at runtime for gsharedvt types */
3160 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3161 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3164 if (native)
3165 n = mono_class_native_size (klass, &align);
3166 else
3167 n = mono_class_value_size (klass, &align);
3169 /* if native is true there should be no references in the struct */
3170 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3171 /* Avoid barriers when storing to the stack */
3172 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3173 (dest->opcode == OP_LDADDR))) {
3174 int context_used;
3176 iargs [0] = dest;
3177 iargs [1] = src;
3179 context_used = mini_class_check_context_used (cfg, klass);
3181 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3182 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3183 return;
3184 } else if (context_used) {
3185 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3186 } else {
3187 if (cfg->compile_aot) {
3188 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3189 } else {
3190 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the gc descriptor computed up front */
3191 mono_class_compute_gc_descriptor (klass);
3195 if (size_ins)
3196 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3197 else
3198 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3199 return;
/* No-barrier path: small copies are inlined, otherwise call memcpy */
3203 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3204 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3205 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3206 } else {
3207 iargs [0] = dest;
3208 iargs [1] = src;
3209 if (size_ins)
3210 iargs [2] = size_ins;
3211 else
3212 EMIT_NEW_ICONST (cfg, iargs [2], n);
3214 memcpy_method = get_memcpy_method ();
3215 if (memcpy_ins)
3216 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3217 else
3218 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3222 static MonoMethod*
3223 get_memset_method (void)
3225 static MonoMethod *memset_method = NULL;
3226 if (!memset_method) {
3227 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3228 if (!memset_method)
3229 g_error ("Old corlib found. Install a new one");
3231 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at the address in
 * @dest->dreg.  Small types are cleared inline; larger ones go through the
 * managed memset helper; gsharedvt types call a runtime bzero routine whose
 * size argument comes from the rgctx.
 */
3235 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3237 MonoInst *iargs [3];
3238 int n, context_used;
3239 guint32 align;
3240 MonoMethod *memset_method;
3241 MonoInst *size_ins = NULL;
3242 MonoInst *bzero_ins = NULL;
3243 static MonoMethod *bzero_method;
3245 /* FIXME: Optimize this for the case when dest is an LDADDR */
3247 mono_class_init (klass);
3248 if (mini_is_gsharedvt_klass (cfg, klass)) {
3249 context_used = mini_class_check_context_used (cfg, klass);
3250 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3251 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3252 if (!bzero_method)
3253 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3254 g_assert (bzero_method);
3255 iargs [0] = dest;
3256 iargs [1] = size_ins;
/* Indirect call through the rgctx-provided bzero implementation */
3257 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3258 return;
3261 n = mono_class_value_size (klass, &align);
/* Same 5-pointer inlining threshold as mini_emit_stobj */
3263 if (n <= sizeof (gpointer) * 5) {
3264 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3266 else {
3267 memset_method = get_memset_method ();
3268 iargs [0] = dest;
3269 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3270 EMIT_NEW_ICONST (cfg, iargs [2], n);
3271 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR computing the runtime generic context to use for METHOD:
 * the MRGCTX saved by the prolog when the method context is used, the class
 * vtable for static/valuetype methods, or a load of the vtable from 'this'
 * otherwise.  Only valid under generic sharing.
 */
3276 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3278 MonoInst *this = NULL;
3280 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types can recover the context from 'this' */
3282 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3283 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3284 !method->klass->valuetype)
3285 EMIT_NEW_ARGLOAD (cfg, this, 0);
3287 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3288 MonoInst *mrgctx_loc, *mrgctx_var;
3290 g_assert (!this);
3291 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The prolog saved the MRGCTX into the vtable variable */
3293 mrgctx_loc = mono_get_vtable_var (cfg);
3294 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3296 return mrgctx_var;
3297 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3298 MonoInst *vtable_loc, *vtable_var;
3300 g_assert (!this);
3302 vtable_loc = mono_get_vtable_var (cfg);
3303 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3305 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3306 MonoInst *mrgctx_var = vtable_var;
3307 int vtable_reg;
/* What was saved is actually an MRGCTX; extract its class vtable */
3309 vtable_reg = alloc_preg (cfg);
3310 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3311 vtable_var->type = STACK_PTR;
3314 return vtable_var;
3315 } else {
3316 MonoInst *ins;
3317 int vtable_reg;
3319 vtable_reg = alloc_preg (cfg);
3320 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3321 return ins;
3325 static MonoJumpInfoRgctxEntry *
3326 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3328 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3329 res->method = method;
3330 res->in_mrgctx = in_mrgctx;
3331 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3332 res->data->type = patch_type;
3333 res->data->data.target = patch_data;
3334 res->info_type = info_type;
3336 return res;
3339 static inline MonoInst*
3340 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3342 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
3345 static MonoInst*
3346 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3347 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3349 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3350 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3352 return emit_rgctx_fetch (cfg, rgctx, entry);
3355 static MonoInst*
3356 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3357 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3359 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3360 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3362 return emit_rgctx_fetch (cfg, rgctx, entry);
3365 static MonoInst*
3366 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3367 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3369 MonoJumpInfoGSharedVtCall *call_info;
3370 MonoJumpInfoRgctxEntry *entry;
3371 MonoInst *rgctx;
3373 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3374 call_info->sig = sig;
3375 call_info->method = cmethod;
3377 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3378 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3380 return emit_rgctx_fetch (cfg, rgctx, entry);
3384 static MonoInst*
3385 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3386 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3388 MonoJumpInfoRgctxEntry *entry;
3389 MonoInst *rgctx;
3391 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3392 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3394 return emit_rgctx_fetch (cfg, rgctx, entry);
3398 * emit_get_rgctx_method:
3400 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3401 * normal constants, else emit a load from the rgctx.
3403 static MonoInst*
3404 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3405 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3407 if (!context_used) {
3408 MonoInst *ins;
3410 switch (rgctx_type) {
3411 case MONO_RGCTX_INFO_METHOD:
3412 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3413 return ins;
3414 case MONO_RGCTX_INFO_METHOD_RGCTX:
3415 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3416 return ins;
3417 default:
3418 g_assert_not_reached ();
3420 } else {
3421 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3422 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3424 return emit_rgctx_fetch (cfg, rgctx, entry);
3428 static MonoInst*
3429 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3430 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3432 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3433 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3435 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the slot index in cfg->gsharedvt_info's entry table holding
 * (DATA, RGCTX_TYPE), deduplicating existing entries (except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a fresh slot) and growing
 * the mempool-backed table on demand.
 */
3439 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3441 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3442 MonoRuntimeGenericContextInfoTemplate *template;
3443 int i, idx;
3445 g_assert (info);
/* Linear search for an existing matching entry */
3447 for (i = 0; i < info->num_entries; ++i) {
3448 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3450 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3451 return i;
/* Table full: double capacity (starting at 16); old storage stays in the mempool */
3454 if (info->num_entries == info->count_entries) {
3455 MonoRuntimeGenericContextInfoTemplate *new_entries;
3456 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3458 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3460 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3461 info->entries = new_entries;
3462 info->count_entries = new_count_entries;
3465 idx = info->num_entries;
3466 template = &info->entries [idx];
3467 template->info_type = rgctx_type;
3468 template->data = data;
3470 info->num_entries ++;
3472 return idx;
3476 * emit_get_gsharedvt_info:
3478 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3480 static MonoInst*
3481 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3483 MonoInst *ins;
3484 int idx, dreg;
3486 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3487 /* Load info->entries [idx] */
3488 dreg = alloc_preg (cfg);
3489 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3491 return ins;
3494 static MonoInst*
3495 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3497 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
3501 * On return the caller must check @klass for load errors.
 *
 * Emit a call to the generic class init trampoline for KLASS's vtable.
 * Returns silently (leaving klass marked with a load error) when the vtable
 * cannot be created in the non-shared case.
 */
3503 static void
3504 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3506 MonoInst *vtable_arg;
3507 MonoCallInst *call;
3508 int context_used;
3510 context_used = mini_class_check_context_used (cfg, klass);
3512 if (context_used) {
/* Shared code: the vtable must come from the rgctx at runtime */
3513 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3514 klass, MONO_RGCTX_INFO_VTABLE);
3515 } else {
3516 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3518 if (!vtable)
3519 return;
3520 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different calling convention */
3523 if (COMPILE_LLVM (cfg))
3524 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3525 else
3526 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3527 #ifdef MONO_ARCH_VTABLE_REG
/* The trampoline expects the vtable in a fixed architecture register */
3528 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3529 cfg->uses_vtable_reg = TRUE;
3530 #else
3531 NOT_IMPLEMENTED;
3532 #endif
3535 static void
3536 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3538 MonoInst *ins;
3540 if (cfg->gen_seq_points && cfg->method == method) {
3541 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3542 if (nonempty_stack)
3543 ins->flags |= MONO_INST_NONEMPTY_STACK;
3544 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code which records (in JIT TLS) the
 * source class of OBJ_REG and the target KLASS, so a failing cast can report
 * both types.  With NULL_CHECK, the recording is skipped for null objects;
 * in that case *OUT_BBLOCK (if non-NULL) receives the current bblock after
 * the emitted branch.
 */
3549 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3551 if (mini_get_debug_options ()->better_cast_details) {
3552 int to_klass_reg = alloc_preg (cfg);
3553 int vtable_reg = alloc_preg (cfg);
3554 int klass_reg = alloc_preg (cfg);
3555 MonoBasicBlock *is_null_bb = NULL;
3556 MonoInst *tls_get;
3558 if (null_check) {
3559 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the detail recording entirely when the object is null */
3561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3565 tls_get = mono_get_jit_tls_intrinsic (cfg);
3566 if (!tls_get) {
3567 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3568 exit (1);
3571 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the source class of the cast */
3572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3575 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3576 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3577 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3579 if (null_check) {
3580 MONO_START_BB (cfg, is_null_bb);
3581 if (out_bblock)
3582 *out_bblock = cfg->cbb;
3587 static void
3588 reset_cast_details (MonoCompile *cfg)
3590 /* Reset the variables holding the cast details */
3591 if (mini_get_debug_options ()->better_cast_details) {
3592 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3594 MONO_ADD_INS (cfg->cbb, tls_get);
3595 /* It is enough to reset the from field */
3596 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
3601 * On return the caller must check @array_class for load errors
 *
 * Emit code which throws ArrayTypeMismatchException unless OBJ's runtime
 * type equals ARRAY_CLASS.  The comparison is done on klass pointers under
 * MONO_OPT_SHARED, and on vtable pointers otherwise.
 */
3603 static void
3604 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3606 int vtable_reg = alloc_preg (cfg);
3607 int context_used;
3609 context_used = mini_class_check_context_used (cfg, array_class);
3611 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj */
3613 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3615 if (cfg->opt & MONO_OPT_SHARED) {
3616 int class_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3618 if (cfg->compile_aot) {
3619 int klass_reg = alloc_preg (cfg);
3620 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3621 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3622 } else {
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3625 } else if (context_used) {
/* Shared generic code: fetch the expected vtable from the rgctx */
3626 MonoInst *vtable_ins;
3628 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3630 } else {
3631 if (cfg->compile_aot) {
3632 int vt_reg;
3633 MonoVTable *vtable;
3635 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3636 return;
3637 vt_reg = alloc_preg (cfg);
3638 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3640 } else {
3641 MonoVTable *vtable;
3642 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3643 return;
3644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3650 reset_cast_details (cfg);
/*
3654 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3655 * generic code is generated.
 *
 * Emits a call to the Nullable<T>.Unbox managed helper, either indirectly
 * through an rgctx-provided address (shared case) or as a direct call.
 */
3657 static MonoInst*
3658 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3660 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3662 if (context_used) {
3663 MonoInst *rgctx, *addr;
3665 /* FIXME: What if the class is shared? We might not
3666 have to get the address of the method from the
3667 RGCTX. */
3668 addr = emit_get_rgctx_method (cfg, context_used, method,
3669 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3671 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3673 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3674 } else {
3675 gboolean pass_vtable, pass_mrgctx;
3676 MonoInst *rgctx_arg = NULL;
3678 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3679 g_assert (!pass_mrgctx);
/* The helper may itself be compiled shared and need its vtable */
3681 if (pass_vtable) {
3682 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3684 g_assert (vtable);
3685 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3688 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IL 'unbox' operation on sp [0]: type-check that the boxed
 * object's element class matches KLASS (throwing InvalidCastException
 * otherwise) and return an instruction holding the address of the unboxed
 * payload (object address + sizeof (MonoObject)).
 */
3692 static MonoInst*
3693 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3695 MonoInst *add;
3696 int obj_reg;
3697 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3698 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3699 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3700 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3702 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3703 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3706 /* FIXME: generics */
3707 g_assert (klass->rank == 0);
3709 // Check rank == 0
3710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3711 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3714 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3716 if (context_used) {
3717 MonoInst *element_class;
3719 /* This assertion is from the unboxcast insn */
3720 g_assert (klass->rank == 0);
/* Shared code: the expected element class comes from the rgctx */
3722 element_class = emit_get_rgctx_klass (cfg, context_used,
3723 klass->element_class, MONO_RGCTX_INFO_KLASS);
3725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3727 } else {
3728 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3729 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3730 reset_cast_details (cfg);
/* Address of the value: just past the MonoObject header */
3733 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3734 MONO_ADD_INS (cfg->cbb, add);
3735 add->type = STACK_MP;
3736 add->klass = klass;
3738 return add;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit unbox.any for a gsharedvt KLASS: after a castclass check, branch at
 * runtime on the rgctx-provided box type (1 = reference, 2 = nullable,
 * otherwise plain valuetype) and produce the loaded value via a LDOBJ from
 * a common address register.  *OUT_CBB receives the bblock current after
 * the merge point.
 */
3742 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3744 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3745 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3746 MonoInst *ins;
3747 int dreg, addr_reg;
3749 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3751 /* obj */
3752 args [0] = obj;
3754 /* klass */
3755 args [1] = klass_inst;
3757 /* CASTCLASS */
3758 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3760 NEW_BBLOCK (cfg, is_ref_bb);
3761 NEW_BBLOCK (cfg, is_nullable_bb);
3762 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the box type stored in the gsharedvt info */
3763 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3770 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3771 addr_reg = alloc_dreg (cfg, STACK_MP);
3773 /* Non-ref case */
3774 /* UNBOX */
3775 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3776 MONO_ADD_INS (cfg->cbb, addr);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3780 /* Ref case */
3781 MONO_START_BB (cfg, is_ref_bb);
3783 /* Save the ref to a temporary */
3784 dreg = alloc_ireg (cfg);
3785 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3786 addr->dreg = addr_reg;
3787 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3790 /* Nullable case */
3791 MONO_START_BB (cfg, is_nullable_bb);
/* Call the rgctx-provided Nullable<T>.Unbox through a hand-built signature */
3794 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3795 MonoInst *unbox_call;
3796 MonoMethodSignature *unbox_sig;
3797 MonoInst *var;
3799 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3801 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3802 unbox_sig->ret = &klass->byval_arg;
3803 unbox_sig->param_count = 1;
3804 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3805 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3807 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3808 addr->dreg = addr_reg;
3811 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3813 /* End */
3814 MONO_START_BB (cfg, end_bb);
3816 /* LDOBJ */
3817 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3819 *out_cbb = cfg->cbb;
3821 return ins;
/*
3825 * Returns NULL and set the cfg exception on error.
 *
 * handle_alloc: emit IR which allocates an object of KLASS, choosing between
 * the managed allocator, mono_object_new (appdomain-aware, MONO_OPT_SHARED),
 * mono_object_new_specific, a corlib-index helper (AOT out-of-line paths), or
 * a vtable-specific allocation function.
 */
3827 static MonoInst*
3828 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3830 MonoInst *iargs [2];
3831 void *alloc_ftn;
3833 if (context_used) {
3834 MonoInst *data;
3835 int rgctx_info;
3836 MonoInst *iargs [2];
3838 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* Under MONO_OPT_SHARED the icall takes a klass, otherwise a vtable */
3840 if (cfg->opt & MONO_OPT_SHARED)
3841 rgctx_info = MONO_RGCTX_INFO_KLASS;
3842 else
3843 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3844 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3846 if (cfg->opt & MONO_OPT_SHARED) {
3847 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3848 iargs [1] = data;
3849 alloc_ftn = mono_object_new;
3850 } else {
3851 iargs [0] = data;
3852 alloc_ftn = mono_object_new_specific;
3855 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3856 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3858 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3861 if (cfg->opt & MONO_OPT_SHARED) {
3862 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3863 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3865 alloc_ftn = mono_object_new;
3866 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3867 /* This happens often in argument checking code, eg. throw new FooException... */
3868 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3869 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3870 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3871 } else {
3872 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3873 MonoMethod *managed_alloc = NULL;
3874 gboolean pass_lw;
3876 if (!vtable) {
3877 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3878 cfg->exception_ptr = klass;
3879 return NULL;
3882 #ifndef MONO_CROSS_COMPILE
3883 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3884 #endif
3886 if (managed_alloc) {
3887 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3888 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* pass_lw: some allocators want the instance size (in pointer words) first */
3890 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3891 if (pass_lw) {
3892 guint32 lw = vtable->klass->instance_size;
3893 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3894 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3895 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3897 else {
3898 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3902 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
3906 * Returns NULL and set the cfg exception on error.
 *
 * handle_box: emit IR which boxes VAL of type KLASS.  Nullable<T> goes
 * through the managed Nullable.Box helper; gsharedvt types branch at runtime
 * on the box type (reference / nullable / plain vtype); everything else is
 * alloc + store of the value after the object header.
 */
3908 static MonoInst*
3909 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3911 MonoInst *alloc, *ins;
3913 *out_cbb = cfg->cbb;
3915 if (mono_class_is_nullable (klass)) {
3916 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3918 if (context_used) {
3919 /* FIXME: What if the class is shared? We might not
3920 have to get the method address from the RGCTX. */
3921 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3922 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3923 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3925 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3926 } else {
3927 gboolean pass_vtable, pass_mrgctx;
3928 MonoInst *rgctx_arg = NULL;
3930 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3931 g_assert (!pass_mrgctx);
3933 if (pass_vtable) {
3934 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3936 g_assert (vtable);
3937 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3940 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3944 if (mini_is_gsharedvt_klass (cfg, klass)) {
3945 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3946 MonoInst *res, *is_ref, *src_var, *addr;
3947 int addr_reg, dreg;
3949 dreg = alloc_ireg (cfg);
3951 NEW_BBLOCK (cfg, is_ref_bb);
3952 NEW_BBLOCK (cfg, is_nullable_bb);
3953 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the rgctx-provided box type (1 = ref, 2 = nullable) */
3954 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3958 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3961 /* Non-ref case */
3962 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3963 if (!alloc)
3964 return NULL;
3965 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3966 ins->opcode = OP_STOREV_MEMBASE;
3968 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3969 res->type = STACK_OBJ;
3970 res->klass = klass;
3971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3973 /* Ref case */
3974 MONO_START_BB (cfg, is_ref_bb);
3975 addr_reg = alloc_ireg (cfg);
3977 /* val is a vtype, so has to load the value manually */
3978 src_var = get_vreg_to_inst (cfg, val->dreg);
3979 if (!src_var)
3980 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3981 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3983 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3985 /* Nullable case */
3986 MONO_START_BB (cfg, is_nullable_bb);
3989 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3990 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3991 MonoInst *box_call;
3992 MonoMethodSignature *box_sig;
/*
3995 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3996 * construct that method at JIT time, so have to do things by hand.
 */
3998 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3999 box_sig->ret = &mono_defaults.object_class->byval_arg;
4000 box_sig->param_count = 1;
4001 box_sig->params [0] = &klass->byval_arg;
4002 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4003 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4004 res->type = STACK_OBJ;
4005 res->klass = klass;
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4010 MONO_START_BB (cfg, end_bb);
4012 *out_cbb = cfg->cbb;
4014 return res;
4015 } else {
4016 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4017 if (!alloc)
4018 return NULL;
/* Store the value right after the MonoObject header */
4020 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4021 return alloc;
4026 static gboolean
4027 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4029 int i;
4030 MonoGenericContainer *container;
4031 MonoGenericInst *ginst;
4033 if (klass->generic_class) {
4034 container = klass->generic_class->container_class->generic_container;
4035 ginst = klass->generic_class->context.class_inst;
4036 } else if (klass->generic_container && context_used) {
4037 container = klass->generic_container;
4038 ginst = container->context.class_inst;
4039 } else {
4040 return FALSE;
4043 for (i = 0; i < container->type_argc; ++i) {
4044 MonoType *type;
4045 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4046 continue;
4047 type = ginst->type_argv [i];
4048 if (mini_type_is_reference (cfg, type))
4049 return TRUE;
4051 return FALSE;
4054 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading "TRUE ||" below makes this macro unconditionally TRUE, so
 * every shared isinst/castclass currently takes the cache-based "complex"
 * path; the remaining operands are dead code until the FIXME above is
 * resolved. */
4055 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4057 static MonoInst*
4058 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4060 MonoMethod *mono_castclass;
4061 MonoInst *res;
4063 mono_castclass = mono_marshal_get_castclass_with_cache ();
4065 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4066 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4067 reset_cast_details (cfg);
4069 return res;
4073 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CASTCLASS opcode for SRC (an object reference).
 * A null reference passes the check unchanged.  CONTEXT_USED != 0 means KLASS
 * is only known through the runtime generic context.  On success the original
 * SRC is returned (castclass leaves the object on the stack); on error NULL
 * is returned with the cfg exception set.
 */
4075 static MonoInst*
4076 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4078 MonoBasicBlock *is_null_bb;
4079 int obj_reg = src->dreg;
4080 int vtable_reg = alloc_preg (cfg);
4081 MonoInst *klass_inst = NULL;
4083 if (context_used) {
4084 MonoInst *args [3];
/* is_complex_isinst () is currently hardcoded TRUE, so shared code always
 * goes through the castclass-with-cache wrapper. */
4086 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4087 MonoInst *cache_ins;
4089 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4091 /* obj */
4092 args [0] = src;
4094 /* klass - it's the second element of the cache entry*/
4095 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4097 /* cache */
4098 args [2] = cache_ins;
4100 return emit_castclass_with_cache (cfg, klass, args, NULL);
/* Non-complex shared case: fetch the MonoClass* from the rgctx and check inline. */
4103 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* A null object always casts successfully; branch straight past the checks. */
4106 NEW_BBLOCK (cfg, is_null_bb);
4108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Record cast details so a failing cast can build a descriptive exception. */
4111 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4113 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4115 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4116 } else {
4117 int klass_reg = alloc_preg (cfg);
4119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single class-pointer comparison suffices. */
4121 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4122 /* the remoting code is broken, access the class for now */
4123 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4124 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4125 if (!vt) {
4126 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4127 cfg->exception_ptr = klass;
4128 return NULL;
4130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4131 } else {
4132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4135 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4136 } else {
/* General case: walk the class hierarchy via the helper. */
4137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4138 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4142 MONO_START_BB (cfg, is_null_bb);
4144 reset_cast_details (cfg);
/* castclass leaves the (unchanged) object reference on the stack. */
4146 return src;
4150 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the ISINST opcode for SRC.  The result instruction
 * holds SRC when the object is an instance of KLASS (or is null), and NULL
 * otherwise.  CONTEXT_USED != 0 means KLASS is only known through the runtime
 * generic context, in which case either the isinst-with-cache wrapper is
 * called or the MonoClass* is fetched from the rgctx.
 */
4152 static MonoInst*
4153 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4155 MonoInst *ins;
4156 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4157 int obj_reg = src->dreg;
4158 int vtable_reg = alloc_preg (cfg);
4159 int res_reg = alloc_ireg_ref (cfg);
4160 MonoInst *klass_inst = NULL;
4162 if (context_used) {
4163 MonoInst *args [3];
/* is_complex_isinst () is currently hardcoded TRUE, so shared code always
 * calls the isinst-with-cache wrapper. */
4165 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4166 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4167 MonoInst *cache_ins;
4169 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4171 /* obj */
4172 args [0] = src;
4174 /* klass - it's the second element of the cache entry*/
4175 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4177 /* cache */
4178 args [2] = cache_ins;
4180 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4183 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4186 NEW_BBLOCK (cfg, is_null_bb);
4187 NEW_BBLOCK (cfg, false_bb);
4188 NEW_BBLOCK (cfg, end_bb);
4190 /* Do the assignment at the beginning, so the other assignment can be if converted */
4191 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4192 ins->type = STACK_OBJ;
4193 ins->klass = klass;
/* null is an instance of every class; fall through to is_null_bb with res_reg == obj. */
4195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4196 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4200 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4201 g_assert (!context_used);
4202 /* the is_null_bb target simply copies the input register to the output */
4203 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4204 } else {
4205 int klass_reg = alloc_preg (cfg);
4207 if (klass->rank) {
/* Array case: compare ranks, then check the element class. */
4208 int rank_reg = alloc_preg (cfg);
4209 int eclass_reg = alloc_preg (cfg);
4211 g_assert (!context_used);
4212 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4213 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4214 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4215 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types related to System.Object / System.Enum,
 * mirroring the runtime's array covariance rules. */
4217 if (klass->cast_class == mono_defaults.object_class) {
4218 int parent_reg = alloc_preg (cfg);
4219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4220 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4221 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4223 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4224 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4225 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4226 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4227 } else if (klass->cast_class == mono_defaults.enum_class) {
4228 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4229 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4230 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4231 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4232 } else {
4233 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4234 /* Check that the object is a vector too */
4235 int bounds_reg = alloc_preg (cfg);
4236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4237 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4238 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4241 /* the is_null_bb target simply copies the input register to the output */
4242 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4244 } else if (mono_class_is_nullable (klass)) {
4245 g_assert (!context_used);
4246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4247 /* the is_null_bb target simply copies the input register to the output */
4248 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4249 } else {
/* Sealed non-generic class: a single pointer comparison decides the result. */
4250 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4251 g_assert (!context_used);
4252 /* the remoting code is broken, access the class for now */
4253 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4254 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4255 if (!vt) {
4256 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4257 cfg->exception_ptr = klass;
4258 return NULL;
4260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4261 } else {
4262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4267 } else {
4268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4269 /* the is_null_bb target simply copies the input register to the output */
4270 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
4275 MONO_START_BB (cfg, false_bb);
4277 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4280 MONO_START_BB (cfg, is_null_bb);
4282 MONO_START_BB (cfg, end_bb);
4284 return ins;
/*
 * handle_cisinst:
 *
 *   Emit IR for the (remoting-aware) CISINST opcode; see the comment below
 * for the 0/1/2 result encoding.  With DISABLE_REMOTING the proxy paths are
 * compiled out and only the 0/1 results are produced.
 */
4287 static MonoInst*
4288 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4290 /* This opcode takes as input an object reference and a class, and returns:
4291 0) if the object is an instance of the class,
4292 1) if the object is not instance of the class,
4293 2) if the object is a proxy whose type cannot be determined */
4295 MonoInst *ins;
4296 #ifndef DISABLE_REMOTING
4297 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4298 #else
4299 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4300 #endif
4301 int obj_reg = src->dreg;
4302 int dreg = alloc_ireg (cfg);
4303 int tmp_reg;
4304 #ifndef DISABLE_REMOTING
4305 int klass_reg = alloc_preg (cfg);
4306 #endif
4308 NEW_BBLOCK (cfg, true_bb);
4309 NEW_BBLOCK (cfg, false_bb);
4310 NEW_BBLOCK (cfg, end_bb);
4311 #ifndef DISABLE_REMOTING
4312 NEW_BBLOCK (cfg, false2_bb);
4313 NEW_BBLOCK (cfg, no_proxy_bb);
4314 #endif
/* A null reference is "not an instance" (result 1). */
4316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4317 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4319 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4320 #ifndef DISABLE_REMOTING
4321 NEW_BBLOCK (cfg, interface_fail_bb);
4322 #endif
4324 tmp_reg = alloc_preg (cfg);
4325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4326 #ifndef DISABLE_REMOTING
/* Interface check failed: the object may still be a transparent proxy
 * without custom type info (result 2). */
4327 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4328 MONO_START_BB (cfg, interface_fail_bb);
4329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4331 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4333 tmp_reg = alloc_preg (cfg);
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4337 #else
4338 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4339 #endif
4340 } else {
4341 #ifndef DISABLE_REMOTING
/* Non-interface: if the object is a proxy, test its remote proxy_class;
 * a proxy without custom type info falls back to the normal check. */
4342 tmp_reg = alloc_preg (cfg);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4346 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4347 tmp_reg = alloc_preg (cfg);
4348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4351 tmp_reg = alloc_preg (cfg);
4352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4356 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4359 MONO_START_BB (cfg, no_proxy_bb);
4361 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4362 #else
4363 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4364 #endif
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
4367 MONO_START_BB (cfg, false_bb);
4369 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4370 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4372 #ifndef DISABLE_REMOTING
4373 MONO_START_BB (cfg, false2_bb);
4375 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4377 #endif
4379 MONO_START_BB (cfg, true_bb);
4381 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4383 MONO_START_BB (cfg, end_bb);
4385 /* FIXME: */
4386 MONO_INST_NEW (cfg, ins, OP_ICONST);
4387 ins->dreg = dreg;
4388 ins->type = STACK_I4;
4390 return ins;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the (remoting-aware) CCASTCLASS opcode; see the comment below
 * for the 0/1 result encoding.  A failed cast throws InvalidCastException.
 * With DISABLE_REMOTING the proxy paths are compiled out.
 */
4393 static MonoInst*
4394 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4396 /* This opcode takes as input an object reference and a class, and returns:
4397 0) if the object is an instance of the class,
4398 1) if the object is a proxy whose type cannot be determined
4399 an InvalidCastException exception is thrown otherwhise*/
4401 MonoInst *ins;
4402 #ifndef DISABLE_REMOTING
4403 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4404 #else
4405 MonoBasicBlock *ok_result_bb;
4406 #endif
4407 int obj_reg = src->dreg;
4408 int dreg = alloc_ireg (cfg);
4409 int tmp_reg = alloc_preg (cfg);
4411 #ifndef DISABLE_REMOTING
4412 int klass_reg = alloc_preg (cfg);
4413 NEW_BBLOCK (cfg, end_bb);
4414 #endif
4416 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can build a descriptive exception. */
4421 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4423 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4424 #ifndef DISABLE_REMOTING
4425 NEW_BBLOCK (cfg, interface_fail_bb);
4427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy with custom type info
 * is allowed through (result 1); anything else throws. */
4428 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4429 MONO_START_BB (cfg, interface_fail_bb);
4430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4432 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4434 tmp_reg = alloc_preg (cfg);
4435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4437 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4439 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4441 #else
4442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4443 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4445 #endif
4446 } else {
4447 #ifndef DISABLE_REMOTING
/* Non-interface: if the object is a proxy, test its remote proxy_class;
 * a proxy without custom type info falls back to the normal castclass. */
4448 NEW_BBLOCK (cfg, no_proxy_bb);
4450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4452 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4454 tmp_reg = alloc_preg (cfg);
4455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4458 tmp_reg = alloc_preg (cfg);
4459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4463 NEW_BBLOCK (cfg, fail_1_bb);
4465 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4467 MONO_START_BB (cfg, fail_1_bb);
4469 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4472 MONO_START_BB (cfg, no_proxy_bb);
4474 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4475 #else
4476 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4477 #endif
4480 MONO_START_BB (cfg, ok_result_bb);
4482 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4484 #ifndef DISABLE_REMOTING
4485 MONO_START_BB (cfg, end_bb);
4486 #endif
4488 /* FIXME: */
4489 MONO_INST_NEW (cfg, ins, OP_ICONST);
4490 ins->dreg = dreg;
4491 ins->type = STACK_I4;
4493 return ins;
4497 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR equivalent to mono_delegate_ctor (): allocate the
 * delegate object of type KLASS, store TARGET and METHOD into it, reserve a
 * per-domain code slot for the compiled code of METHOD, and install the
 * delegate trampoline as invoke_impl.  Returns the new object, or NULL with
 * the cfg exception set when allocation fails.
 */
4499 static G_GNUC_UNUSED MonoInst*
4500 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4502 MonoInst *ptr;
4503 int dreg;
4504 gpointer *trampoline;
4505 MonoInst *obj, *method_ins, *tramp_ins;
4506 MonoDomain *domain;
4507 guint8 **code_slot;
4509 obj = handle_alloc (cfg, klass, FALSE, 0);
4510 if (!obj)
4511 return NULL;
4513 /* Inline the contents of mono_delegate_ctor */
4515 /* Set target field */
4516 /* Optimize away setting of NULL target */
4517 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4518 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store is a ref store into a newly allocated object; emit a
 * write barrier when the GC configuration requires one. */
4519 if (cfg->gen_write_barriers) {
4520 dreg = alloc_preg (cfg);
4521 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4522 emit_write_barrier (cfg, ptr, target);
4526 /* Set method field */
4527 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4528 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4530 * To avoid looking up the compiled code belonging to the target method
4531 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4532 * store it, and we fill it after the method has been compiled.
4534 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4535 MonoInst *code_slot_ins;
4537 if (context_used) {
4538 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4539 } else {
/* Look up (or create) the code slot under the domain lock. */
4540 domain = mono_domain_get ();
4541 mono_domain_lock (domain);
4542 if (!domain_jit_info (domain)->method_code_hash)
4543 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4544 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4545 if (!code_slot) {
4546 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4547 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4549 mono_domain_unlock (domain);
4551 if (cfg->compile_aot)
4552 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4553 else
4554 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4556 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4559 /* Set invoke_impl field */
4560 if (cfg->compile_aot) {
4561 MonoClassMethodPair *del_tramp;
4563 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4564 del_tramp->klass = klass;
4565 del_tramp->method = context_used ? NULL : method;
4566 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4567 } else {
4568 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4569 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4571 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4573 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
4575 return obj;
4578 static MonoInst*
4579 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4581 MonoJitICallInfo *info;
4583 /* Need to register the icall so it gets an icall wrapper */
4584 info = mono_get_array_new_va_icall (rank);
4586 cfg->flags |= MONO_CFG_HAS_VARARGS;
4588 /* mono_array_new_va () needs a vararg calling convention */
4589 cfg->disable_llvm = TRUE;
4591 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4592 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Splice an OP_LOAD_GOTADDR instruction loading the GOT address into
 * cfg->got_var at the very start of the entry basic block, plus a dummy use
 * in the exit block to keep the variable alive for the whole method.
 * Idempotent: guarded by cfg->got_var_allocated.
 */
4595 static void
4596 mono_emit_load_got_addr (MonoCompile *cfg)
4598 MonoInst *getaddr, *dummy_use;
4600 if (!cfg->got_var || cfg->got_var_allocated)
4601 return;
4603 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4604 getaddr->cil_code = cfg->header->code;
4605 getaddr->dreg = cfg->got_var->dreg;
4607 /* Add it to the start of the first bblock */
4608 if (cfg->bb_entry->code) {
4609 getaddr->next = cfg->bb_entry->code;
4610 cfg->bb_entry->code = getaddr;
4612 else
4613 MONO_ADD_INS (cfg->bb_entry, getaddr);
4615 cfg->got_var_allocated = TRUE;
4618 * Add a dummy use to keep the got_var alive, since real uses might
4619 * only be generated by the back ends.
4620 * Add it to end_bblock, so the variable's lifetime covers the whole
4621 * method.
4622 * It would be better to make the usage of the got var explicit in all
4623 * cases when the backend needs it (i.e. calls, throw etc.), so this
4624 * wouldn't be needed.
4626 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4627 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size threshold above which methods are not inlined; lazily
 * initialized from the MONO_INLINELIMIT environment variable, defaulting to
 * INLINE_LENGTH_LIMIT (see mono_method_check_inlining ()). */
4630 static int inline_limit;
4631 static gboolean inline_limit_inited;
4633 static gboolean
4634 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4636 MonoMethodHeaderSummary header;
4637 MonoVTable *vtable;
4638 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4639 MonoMethodSignature *sig = mono_method_signature (method);
4640 int i;
4641 #endif
4643 if (cfg->generic_sharing_context)
4644 return FALSE;
4646 if (cfg->inline_depth > 10)
4647 return FALSE;
4649 #ifdef MONO_ARCH_HAVE_LMF_OPS
4650 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4651 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4652 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4653 return TRUE;
4654 #endif
4657 if (!mono_method_get_header_summary (method, &header))
4658 return FALSE;
4660 /*runtime, icall and pinvoke are checked by summary call*/
4661 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4662 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4663 (mono_class_is_marshalbyref (method->klass)) ||
4664 header.has_clauses)
4665 return FALSE;
4667 /* also consider num_locals? */
4668 /* Do the size check early to avoid creating vtables */
4669 if (!inline_limit_inited) {
4670 if (g_getenv ("MONO_INLINELIMIT"))
4671 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4672 else
4673 inline_limit = INLINE_LENGTH_LIMIT;
4674 inline_limit_inited = TRUE;
4676 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4677 return FALSE;
4680 * if we can initialize the class of the method right away, we do,
4681 * otherwise we don't allow inlining if the class needs initialization,
4682 * since it would mean inserting a call to mono_runtime_class_init()
4683 * inside the inlined code
4685 if (!(cfg->opt & MONO_OPT_SHARED)) {
4686 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4687 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4688 vtable = mono_class_vtable (cfg->domain, method->klass);
4689 if (!vtable)
4690 return FALSE;
4691 if (!cfg->compile_aot)
4692 mono_runtime_class_init (vtable);
4693 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4694 if (cfg->run_cctors && method->klass->has_cctor) {
4695 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4696 if (!method->klass->runtime_info)
4697 /* No vtable created yet */
4698 return FALSE;
4699 vtable = mono_class_vtable (cfg->domain, method->klass);
4700 if (!vtable)
4701 return FALSE;
4702 /* This makes so that inline cannot trigger */
4703 /* .cctors: too many apps depend on them */
4704 /* running with a specific order... */
4705 if (! vtable->initialized)
4706 return FALSE;
4707 mono_runtime_class_init (vtable);
4709 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4710 if (!method->klass->runtime_info)
4711 /* No vtable created yet */
4712 return FALSE;
4713 vtable = mono_class_vtable (cfg->domain, method->klass);
4714 if (!vtable)
4715 return FALSE;
4716 if (!vtable->initialized)
4717 return FALSE;
4719 } else {
4721 * If we're compiling for shared code
4722 * the cctor will need to be run at aot method load time, for example,
4723 * or at the end of the compilation of the inlining method.
4725 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4726 return FALSE;
4730 * CAS - do not inline methods with declarative security
4731 * Note: this has to be before any possible return TRUE;
4733 if (mono_security_method_has_declsec (method))
4734 return FALSE;
4736 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4737 if (mono_arch_is_soft_float ()) {
4738 /* FIXME: */
4739 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4740 return FALSE;
4741 for (i = 0; i < sig->param_count; ++i)
4742 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4743 return FALSE;
4745 #endif
4747 return TRUE;
4750 static gboolean
4751 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4753 if (!cfg->compile_aot) {
4754 g_assert (vtable);
4755 if (vtable->initialized)
4756 return FALSE;
4759 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4760 if (cfg->method == method)
4761 return FALSE;
4764 if (!mono_class_needs_cctor_run (klass, method))
4765 return FALSE;
4767 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4768 /* The initialization is already done before the method is called */
4769 return FALSE;
4771 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR with element type KLASS.  BCHECK controls whether a bounds check
 * is emitted.  For gsharedvt variable-size element types the element size is
 * fetched from the runtime generic context at run time.
 */
4774 static MonoInst*
4775 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4777 MonoInst *ins;
4778 guint32 size;
4779 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4780 int context_used;
/* size == -1 marks a gsharedvt element whose size is only known at run time. */
4782 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4783 size = -1;
4784 } else {
4785 mono_class_init (klass);
4786 size = mono_class_array_element_size (klass);
4789 mult_reg = alloc_preg (cfg);
4790 array_reg = arr->dreg;
4791 index_reg = index->dreg;
4793 #if SIZEOF_REGISTER == 8
4794 /* The array reg is 64 bits but the index reg is only 32 */
4795 if (COMPILE_LLVM (cfg)) {
4796 /* Not needed */
4797 index2_reg = index_reg;
4798 } else {
4799 index2_reg = alloc_preg (cfg);
4800 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4802 #else
4803 if (index->type == STACK_I8) {
4804 index2_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4806 } else {
4807 index2_reg = index_reg;
4809 #endif
4811 if (bcheck)
4812 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scale and vector offset into a single LEA. */
4814 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4815 if (size == 1 || size == 2 || size == 4 || size == 8) {
4816 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4818 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4819 ins->klass = mono_class_get_element_class (klass);
4820 ins->type = STACK_MP;
4822 return ins;
4824 #endif
4826 add_reg = alloc_ireg_mp (cfg);
4828 if (size == -1) {
4829 MonoInst *rgctx_ins;
4831 /* gsharedvt */
4832 g_assert (cfg->generic_sharing_context);
4833 context_used = mini_class_check_context_used (cfg, klass);
4834 g_assert (context_used);
4835 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4836 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4837 } else {
4838 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* address = array + index * size + offsetof (MonoArray, vector) */
4840 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4841 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4842 ins->klass = mono_class_get_element_class (klass);
4843 ins->type = STACK_MP;
4844 MONO_ADD_INS (cfg->cbb, ins);
4846 return ins;
4849 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] in the
 * two-dimensional array ARR with element type KLASS, including the per-
 * dimension lower-bound adjustment and range checks against the MonoArray
 * bounds records.  Only compiled on archs with native mul/div.
 */
4850 static MonoInst*
4851 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4853 int bounds_reg = alloc_preg (cfg);
4854 int add_reg = alloc_ireg_mp (cfg);
4855 int mult_reg = alloc_preg (cfg);
4856 int mult2_reg = alloc_preg (cfg);
4857 int low1_reg = alloc_preg (cfg);
4858 int low2_reg = alloc_preg (cfg);
4859 int high1_reg = alloc_preg (cfg);
4860 int high2_reg = alloc_preg (cfg);
4861 int realidx1_reg = alloc_preg (cfg);
4862 int realidx2_reg = alloc_preg (cfg);
4863 int sum_reg = alloc_preg (cfg);
4864 int index1, index2, tmpreg;
4865 MonoInst *ins;
4866 guint32 size;
4868 mono_class_init (klass);
4869 size = mono_class_array_element_size (klass);
4871 index1 = index_ins1->dreg;
4872 index2 = index_ins2->dreg;
4874 #if SIZEOF_REGISTER == 8
4875 /* The array reg is 64 bits but the index reg is only 32 */
4876 if (COMPILE_LLVM (cfg)) {
4877 /* Not needed */
4878 } else {
4879 tmpreg = alloc_preg (cfg);
4880 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4881 index1 = tmpreg;
4882 tmpreg = alloc_preg (cfg);
4883 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4884 index2 = tmpreg;
4886 #else
4887 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4888 tmpreg = -1;
4889 #endif
4891 /* range checking */
4892 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4893 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* realidx = index - lower_bound; unsigned compare against length covers both
 * the below-lower-bound and above-upper-bound cases in one check. */
4895 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4896 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4897 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4898 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4899 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4900 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4901 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: its bounds record follows the first one. */
4903 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4904 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4905 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4906 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4907 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4908 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4909 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * len2 + realidx2) * size + offsetof (MonoArray, vector) */
4911 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4912 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4914 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4915 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4917 ins->type = STACK_MP;
4918 ins->klass = klass;
4919 MONO_ADD_INS (cfg->cbb, ins);
4921 return ins;
4923 #endif
4925 static MonoInst*
4926 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4928 int rank;
4929 MonoInst *addr;
4930 MonoMethod *addr_method;
4931 int element_size;
4933 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4935 if (rank == 1)
4936 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4938 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4939 /* emit_ldelema_2 depends on OP_LMUL */
4940 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4941 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4943 #endif
4945 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4946 addr_method = mono_marshal_get_array_address (rank, element_size);
4947 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4949 return addr;
4952 static MonoBreakPolicy
4953 always_insert_breakpoint (MonoMethod *method)
4955 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; defaults to inserting every breakpoint. */
4958 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4961 * mono_set_break_policy:
4962 * policy_callback: the new callback function
4964  * Allow embedders to decide whether to actually obey breakpoint instructions
4965 * (both break IL instructions and Debugger.Break () method calls), for example
4966 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4967 * untrusted or semi-trusted code.
4969 * @policy_callback will be called every time a break point instruction needs to
4970 * be inserted with the method argument being the method that calls Debugger.Break()
4971 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4972 * if it wants the breakpoint to not be effective in the given method.
4973 * #MONO_BREAK_POLICY_ALWAYS is the default.
4975 void
4976 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4978 if (policy_callback)
4979 break_policy_func = policy_callback;
4980 else
4981 break_policy_func = always_insert_breakpoint;
4984 static gboolean
4985 should_insert_brekpoint (MonoMethod *method) {
4986 switch (break_policy_func (method)) {
4987 case MONO_BREAK_POLICY_ALWAYS:
4988 return TRUE;
4989 case MONO_BREAK_POLICY_NEVER:
4990 return FALSE;
4991 case MONO_BREAK_POLICY_ON_DBG:
4992 g_warning ("mdb no longer supported");
4993 return FALSE;
4994 default:
4995 g_warning ("Incorrect value returned from break policy callback");
4996 return FALSE;
5000 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * Inline the body of the icall: compute the element address, then copy the
 * value between the array slot and *args [2], in the direction selected by
 * IS_SET.  Returns the store instruction.
 */
5001 static MonoInst*
5002 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5004 	MonoInst *addr, *store, *load;
5005 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5007 	/* the bounds check is already done by the callers */
5008 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5009 	if (is_set) {
		/* array [index] = *args [2] */
5010 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5011 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		/* Reference stores into the heap need a GC write barrier. */
5012 		if (mini_type_is_reference (cfg, fsig->params [2]))
5013 			emit_write_barrier (cfg, addr, load);
5014 	} else {
		/* *args [2] = array [index] */
5015 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5016 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5018 	return store;
5022 static gboolean
5023 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5025 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into element SP [1] of array SP [0] of element
 * class KLASS.  With SAFETY_CHECKS, reference-type stores (other than storing
 * a NULL constant) go through the virtual stelemref helper which performs the
 * array covariance check; value types and unchecked stores are emitted inline.
 */
5028 static MonoInst*
5029 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5031 	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5032 		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5033 		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5034 		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5035 		MonoInst *iargs [3];
5037 		if (!helper->slot)
5038 			mono_class_setup_vtable (obj_array);
5039 		g_assert (helper->slot);
		/* Bail out (caller falls back to the generic path) on unexpected stack types. */
5041 		if (sp [0]->type != STACK_OBJ)
5042 			return NULL;
5043 		if (sp [2]->type != STACK_OBJ)
5044 			return NULL;
5046 		iargs [2] = sp [2];
5047 		iargs [1] = sp [1];
5048 		iargs [0] = sp [0];
5050 		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5051 	} else {
5052 		MonoInst *ins;
5054 		if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5055 			MonoInst *addr;
			/* Variable-size gsharedvt element: always compute the address and do a valuetype store. */
5057 			// FIXME-VT: OP_ICONST optimization
5058 			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5059 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5060 			ins->opcode = OP_STOREV_MEMBASE;
5061 		} else if (sp [1]->opcode == OP_ICONST) {
			/* Constant index: fold the offset into the store, keeping only the bounds check. */
5062 			int array_reg = sp [0]->dreg;
5063 			int index_reg = sp [1]->dreg;
5064 			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5066 			if (safety_checks)
5067 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5068 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5069 		} else {
5070 			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5071 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			/* Reference stores into the heap need a GC write barrier. */
5072 			if (generic_class_is_reference_type (cfg, klass))
5073 				emit_write_barrier (cfg, addr, sp [2]);
5075 		return ins;
5079 static MonoInst*
5080 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5082 MonoClass *eklass;
5084 if (is_set)
5085 eklass = mono_class_from_mono_type (fsig->params [2]);
5086 else
5087 eklass = mono_class_from_mono_type (fsig->ret);
5089 if (is_set) {
5090 return emit_array_store (cfg, eklass, args, FALSE);
5091 } else {
5092 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5093 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5094 return ins;
5098 static gboolean
5099 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5101 uint32_t align;
5103 //Only allow for valuetypes
5104 if (!param_klass->valuetype || !return_klass->valuetype)
5105 return FALSE;
5107 //That are blitable
5108 if (param_klass->has_references || return_klass->has_references)
5109 return FALSE;
5111 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5112 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5113 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5114 return FALSE;
5116 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5117 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5118 return FALSE;
5120 //And have the same size
5121 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5122 return FALSE;
5123 return TRUE;
5126 static MonoInst*
5127 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5129 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5130 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5132 //Valuetypes that are semantically equivalent
5133 if (is_unsafe_mov_compatible (param_klass, return_klass))
5134 return args [0];
5136 //Arrays of valuetypes that are semantically equivalent
5137 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
5138 return args [0];
5140 return NULL;
5143 static MonoInst*
5144 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5146 #ifdef MONO_ARCH_SIMD_INTRINSICS
5147 MonoInst *ins = NULL;
5149 if (cfg->opt & MONO_OPT_SIMD) {
5150 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5151 if (ins)
5152 return ins;
5154 #endif
5156 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5159 static MonoInst*
5160 emit_memory_barrier (MonoCompile *cfg, int kind)
5162 MonoInst *ins = NULL;
5163 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5164 MONO_ADD_INS (cfg->cbb, ins);
5165 ins->backend.memory_barrier_kind = kind;
5167 return ins;
5170 static MonoInst*
5171 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5173 MonoInst *ins = NULL;
5174 int opcode = 0;
5176 /* The LLVM backend supports these intrinsics */
5177 if (cmethod->klass == mono_defaults.math_class) {
5178 if (strcmp (cmethod->name, "Sin") == 0) {
5179 opcode = OP_SIN;
5180 } else if (strcmp (cmethod->name, "Cos") == 0) {
5181 opcode = OP_COS;
5182 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5183 opcode = OP_SQRT;
5184 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5185 opcode = OP_ABS;
5188 if (opcode) {
5189 MONO_INST_NEW (cfg, ins, opcode);
5190 ins->type = STACK_R8;
5191 ins->dreg = mono_alloc_freg (cfg);
5192 ins->sreg1 = args [0]->dreg;
5193 MONO_ADD_INS (cfg->cbb, ins);
5196 opcode = 0;
5197 if (cfg->opt & MONO_OPT_CMOV) {
5198 if (strcmp (cmethod->name, "Min") == 0) {
5199 if (fsig->params [0]->type == MONO_TYPE_I4)
5200 opcode = OP_IMIN;
5201 if (fsig->params [0]->type == MONO_TYPE_U4)
5202 opcode = OP_IMIN_UN;
5203 else if (fsig->params [0]->type == MONO_TYPE_I8)
5204 opcode = OP_LMIN;
5205 else if (fsig->params [0]->type == MONO_TYPE_U8)
5206 opcode = OP_LMIN_UN;
5207 } else if (strcmp (cmethod->name, "Max") == 0) {
5208 if (fsig->params [0]->type == MONO_TYPE_I4)
5209 opcode = OP_IMAX;
5210 if (fsig->params [0]->type == MONO_TYPE_U4)
5211 opcode = OP_IMAX_UN;
5212 else if (fsig->params [0]->type == MONO_TYPE_I8)
5213 opcode = OP_LMAX;
5214 else if (fsig->params [0]->type == MONO_TYPE_U8)
5215 opcode = OP_LMAX_UN;
5219 if (opcode) {
5220 MONO_INST_NEW (cfg, ins, opcode);
5221 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5222 ins->dreg = mono_alloc_ireg (cfg);
5223 ins->sreg1 = args [0]->dreg;
5224 ins->sreg2 = args [1]->dreg;
5225 MONO_ADD_INS (cfg->cbb, ins);
5229 return ins;
5232 static MonoInst*
5233 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5235 if (cmethod->klass == mono_defaults.array_class) {
5236 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5237 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5238 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5239 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5240 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5241 return emit_array_unsafe_mov (cfg, fsig, args);
5244 return NULL;
/*
 * mini_emit_inst_for_method:
 *
 *   Replace calls to well-known corlib methods (String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Environment,
 * Math, ObjC Selector) with inlined IR where possible.  Returns the
 * instruction holding the result, or NULL when the call is not handled
 * here and the normal call path must be used.
 */
5247 static MonoInst*
5248 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5250 	MonoInst *ins = NULL;
	/* Lazily resolved once and cached for the lifetime of the process. */
5252 	static MonoClass *runtime_helpers_class = NULL;
5253 	if (! runtime_helpers_class)
5254 		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5255 			"System.Runtime.CompilerServices", "RuntimeHelpers");
	/* --- System.String intrinsics --- */
5257 	if (cmethod->klass == mono_defaults.string_class) {
5258 		if (strcmp (cmethod->name, "get_Chars") == 0) {
5259 			int dreg = alloc_ireg (cfg);
5260 			int index_reg = alloc_preg (cfg);
5261 			int mult_reg = alloc_preg (cfg);
5262 			int add_reg = alloc_preg (cfg);
5264 #if SIZEOF_REGISTER == 8
5265 			/* The array reg is 64 bits but the index reg is only 32 */
5266 			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5267 #else
5268 			index_reg = args [1]->dreg;
5269 #endif
5270 			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5272 #if defined(TARGET_X86) || defined(TARGET_AMD64)
			/* x86/amd64: fold base + index*2 + offset into a single LEA. */
5273 			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5274 			add_reg = ins->dreg;
5275 			/* Avoid a warning */
5276 			mult_reg = 0;
5277 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
5278 								   add_reg, 0);
5279 #else
5280 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5281 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5282 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
5283 								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
5284 #endif
5285 			type_from_op (ins, NULL, NULL);
5286 			return ins;
5287 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
5288 			int dreg = alloc_ireg (cfg);
5289 			/* Decompose later to allow more optimizations */
5290 			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5291 			ins->type = STACK_I4;
5292 			ins->flags |= MONO_INST_FAULT;
5293 			cfg->cbb->has_array_access = TRUE;
5294 			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5296 			return ins;
5297 		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5298 			int mult_reg = alloc_preg (cfg);
5299 			int add_reg = alloc_preg (cfg);
5301 			/* The corlib functions check for oob already. */
5302 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5303 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5304 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5305 			return cfg->cbb->last_ins;
5306 		} else 
5307 			return NULL;
	/* --- System.Object intrinsics --- */
5308 	} else if (cmethod->klass == mono_defaults.object_class) {
5310 		if (strcmp (cmethod->name, "GetType") == 0) {
5311 			int dreg = alloc_ireg_ref (cfg);
5312 			int vt_reg = alloc_preg (cfg);
5313 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5314 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5315 			type_from_op (ins, NULL, NULL);
5317 			return ins;
5318 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		/* Address-derived hash; only valid when the GC does not move objects. */
5319 		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5320 			int dreg = alloc_ireg (cfg);
5321 			int t1 = alloc_ireg (cfg);
5323 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5324 			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5325 			ins->type = STACK_I4;
5327 			return ins;
5328 #endif
5329 		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor is empty; a NOP suffices. */
5330 			MONO_INST_NEW (cfg, ins, OP_NOP);
5331 			MONO_ADD_INS (cfg->cbb, ins);
5332 			return ins;
5333 		} else
5334 			return NULL;
	/* --- System.Array intrinsics --- */
5335 	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Matches both GetGenericValueImpl and SetGenericValueImpl; is_set from the leading letter. */
5336 		if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5337 			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5339 #ifndef MONO_BIG_ARRAYS
5341 		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5342 		 * Array methods.
5344 		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5345 			int dreg = alloc_ireg (cfg);
5346 			int bounds_reg = alloc_ireg_mp (cfg);
5347 			MonoBasicBlock *end_bb, *szarray_bb;
5348 			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5350 			NEW_BBLOCK (cfg, end_bb);
5351 			NEW_BBLOCK (cfg, szarray_bb);
			/* A NULL bounds pointer means a szarray (single dimension, zero lower bound). */
5353 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5354 										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5355 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5356 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5357 			/* Non-szarray case */
5358 			if (get_length)
5359 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5360 									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5361 			else
5362 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5363 									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5364 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5365 			MONO_START_BB (cfg, szarray_bb);
5366 			/* Szarray case */
5367 			if (get_length)
5368 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5369 									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5370 			else
5371 				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5372 			MONO_START_BB (cfg, end_bb);
5374 			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5375 			ins->type = STACK_I4;
5377 			return ins;
5379 #endif
		/* Remaining handled methods all start with 'g' (get_Rank/get_Length). */
5381 		if (cmethod->name [0] != 'g')
5382 			return NULL;
5384 		if (strcmp (cmethod->name, "get_Rank") == 0) {
5385 			int dreg = alloc_ireg (cfg);
5386 			int vtable_reg = alloc_preg (cfg);
5387 			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, 
5388 												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5389 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5390 								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5391 			type_from_op (ins, NULL, NULL);
5393 			return ins;
5394 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
5395 			int dreg = alloc_ireg (cfg);
5397 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg, 
5398 										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5399 			type_from_op (ins, NULL, NULL);
5401 			return ins;
5402 		} else
5403 			return NULL;
	/* --- System.Runtime.CompilerServices.RuntimeHelpers --- */
5404 	} else if (cmethod->klass == runtime_helpers_class) {
5406 		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5407 			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5408 			return ins;
5409 		} else
5410 			return NULL;
	/* --- System.Threading.Thread --- */
5411 	} else if (cmethod->klass == mono_defaults.thread_class) {
5412 		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5413 			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5414 			MONO_ADD_INS (cfg->cbb, ins);
5415 			return ins;
5416 		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5417 			return emit_memory_barrier (cfg, FullBarrier);
	/* --- System.Threading.Monitor fast paths (arch dependent) --- */
5419 	} else if (cmethod->klass == mono_defaults.monitor_class) {
5421 		/* FIXME this should be integrated to the check below once we support the trampoline version */
5422 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5423 		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5424 			MonoMethod *fast_method = NULL;
5426 			/* Avoid infinite recursion */
5427 			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5428 				return NULL;
5430 			fast_method = mono_monitor_get_fast_path (cmethod);
5431 			if (!fast_method)
5432 				return NULL;
5434 			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5436 #endif
5438 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5439 		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5440 			MonoCallInst *call;
5442 			if (COMPILE_LLVM (cfg)) {
5444 				 * Pass the argument normally, the LLVM backend will handle the
5445 				 * calling convention problems.
5447 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5448 			} else {
				/* The trampoline expects the object in a fixed register, not on the stack. */
5449 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5450 					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5451 				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5452 											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5455 			return (MonoInst*)call;
5456 		} else if (strcmp (cmethod->name, "Exit") == 0) {
5457 			MonoCallInst *call;
5459 			if (COMPILE_LLVM (cfg)) {
5460 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5461 			} else {
5462 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5463 					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5464 				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5465 											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5468 			return (MonoInst*)call;
5470 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5472 		MonoMethod *fast_method = NULL;
5474 		/* Avoid infinite recursion */
5475 		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5476 				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5477 				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5478 			return NULL;
5480 		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5481 				strcmp (cmethod->name, "Exit") == 0)
5482 			fast_method = mono_monitor_get_fast_path (cmethod);
5483 		if (!fast_method)
5484 			return NULL;
5486 		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5488 #endif
	/* --- System.Threading.Interlocked (arch dependent atomics) --- */
5489 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5490 			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5491 			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5492 		ins = NULL;
5494 #if SIZEOF_REGISTER == 8
5495 		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5496 			/* 64 bit reads are already atomic */
5497 			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5498 			ins->dreg = mono_alloc_preg (cfg);
5499 			ins->inst_basereg = args [0]->dreg;
5500 			ins->inst_offset = 0;
5501 			MONO_ADD_INS (cfg->cbb, ins);
5503 #endif
5505 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		/* Increment/Decrement are lowered to atomic add of +1/-1. */
5506 		if (strcmp (cmethod->name, "Increment") == 0) {
5507 			MonoInst *ins_iconst;
5508 			guint32 opcode = 0;
5510 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5511 				opcode = OP_ATOMIC_ADD_NEW_I4;
5512 				cfg->has_atomic_add_new_i4 = TRUE;
5514 #if SIZEOF_REGISTER == 8
5515 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5516 				opcode = OP_ATOMIC_ADD_NEW_I8;
5517 #endif
5518 			if (opcode) {
5519 				if (!mono_arch_opcode_supported (opcode))
5520 					return NULL;
5521 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5522 				ins_iconst->inst_c0 = 1;
5523 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5524 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5526 				MONO_INST_NEW (cfg, ins, opcode);
5527 				ins->dreg = mono_alloc_ireg (cfg);
5528 				ins->inst_basereg = args [0]->dreg;
5529 				ins->inst_offset = 0;
5530 				ins->sreg2 = ins_iconst->dreg;
5531 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5532 				MONO_ADD_INS (cfg->cbb, ins);
5534 		} else if (strcmp (cmethod->name, "Decrement") == 0) {
5535 			MonoInst *ins_iconst;
5536 			guint32 opcode = 0;
5538 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5539 				opcode = OP_ATOMIC_ADD_NEW_I4;
5540 				cfg->has_atomic_add_new_i4 = TRUE;
5542 #if SIZEOF_REGISTER == 8
5543 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5544 				opcode = OP_ATOMIC_ADD_NEW_I8;
5545 #endif
5546 			if (opcode) {
5547 				if (!mono_arch_opcode_supported (opcode))
5548 					return NULL;
5549 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5550 				ins_iconst->inst_c0 = -1;
5551 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5552 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5554 				MONO_INST_NEW (cfg, ins, opcode);
5555 				ins->dreg = mono_alloc_ireg (cfg);
5556 				ins->inst_basereg = args [0]->dreg;
5557 				ins->inst_offset = 0;
5558 				ins->sreg2 = ins_iconst->dreg;
5559 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5560 				MONO_ADD_INS (cfg->cbb, ins);
5562 		} else if (strcmp (cmethod->name, "Add") == 0) {
5563 			guint32 opcode = 0;
5565 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5566 				opcode = OP_ATOMIC_ADD_NEW_I4;
5567 				cfg->has_atomic_add_new_i4 = TRUE;
5569 #if SIZEOF_REGISTER == 8
5570 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5571 				opcode = OP_ATOMIC_ADD_NEW_I8;
5572 #endif
5573 			if (opcode) {
5574 				if (!mono_arch_opcode_supported (opcode))
5575 					return NULL;
5576 				MONO_INST_NEW (cfg, ins, opcode);
5577 				ins->dreg = mono_alloc_ireg (cfg);
5578 				ins->inst_basereg = args [0]->dreg;
5579 				ins->inst_offset = 0;
5580 				ins->sreg2 = args [1]->dreg;
5581 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5582 				MONO_ADD_INS (cfg->cbb, ins);
5585 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5587 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5588 		if (strcmp (cmethod->name, "Exchange") == 0) {
5589 			guint32 opcode;
5590 			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5592 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5593 				opcode = OP_ATOMIC_EXCHANGE_I4;
5594 				cfg->has_atomic_exchange_i4 = TRUE;
5596 #if SIZEOF_REGISTER == 8
5597 			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5598 					(fsig->params [0]->type == MONO_TYPE_I))
5599 				opcode = OP_ATOMIC_EXCHANGE_I8;
5600 #else
5601 			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5602 				opcode = OP_ATOMIC_EXCHANGE_I4;
5603 				cfg->has_atomic_exchange_i4 = TRUE;
5605 #endif
5606 			else
5607 				return NULL;
5609 			if (!mono_arch_opcode_supported (opcode))
5610 				return NULL;
5612 			MONO_INST_NEW (cfg, ins, opcode);
5613 			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5614 			ins->inst_basereg = args [0]->dreg;
5615 			ins->inst_offset = 0;
5616 			ins->sreg2 = args [1]->dreg;
5617 			MONO_ADD_INS (cfg->cbb, ins);
5619 			switch (fsig->params [0]->type) {
5620 			case MONO_TYPE_I4:
5621 				ins->type = STACK_I4;
5622 				break;
5623 			case MONO_TYPE_I8:
5624 			case MONO_TYPE_I:
5625 				ins->type = STACK_I8;
5626 				break;
5627 			case MONO_TYPE_OBJECT:
5628 				ins->type = STACK_OBJ;
5629 				break;
5630 			default:
5631 				g_assert_not_reached ();
			/* A reference was stored into the heap: emit the GC write barrier. */
5634 			if (cfg->gen_write_barriers && is_ref)
5635 				emit_write_barrier (cfg, args [0], args [1]);
5637 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5639 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5640 		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5641 			int size = 0;
5642 			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5643 			if (fsig->params [1]->type == MONO_TYPE_I4)
5644 				size = 4;
5645 			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5646 				size = sizeof (gpointer);
5647 			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5648 				size = 8;
5649 			if (size == 4) {
5650 				if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5651 					return NULL;
5652 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5653 				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5654 				ins->sreg1 = args [0]->dreg;
5655 				ins->sreg2 = args [1]->dreg;
5656 				ins->sreg3 = args [2]->dreg;
5657 				ins->type = STACK_I4;
5658 				MONO_ADD_INS (cfg->cbb, ins);
5659 				cfg->has_atomic_cas_i4 = TRUE;
5660 			} else if (size == 8) {
5661 				if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5662 					return NULL;
5663 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5664 				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5665 				ins->sreg1 = args [0]->dreg;
5666 				ins->sreg2 = args [1]->dreg;
5667 				ins->sreg3 = args [2]->dreg;
5668 				ins->type = STACK_I8;
5669 				MONO_ADD_INS (cfg->cbb, ins);
5670 			} else {
5671 				/* g_assert_not_reached (); */
5673 			if (cfg->gen_write_barriers && is_ref)
5674 				emit_write_barrier (cfg, args [0], args [1]);
5676 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5678 		if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5679 			ins = emit_memory_barrier (cfg, FullBarrier);
5681 		if (ins)
5682 			return ins;
	/* --- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
5683 	} else if (cmethod->klass->image == mono_defaults.corlib) {
5684 		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5685 				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
5686 			if (should_insert_brekpoint (cfg->method)) {
5687 				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5688 			} else {
5689 				MONO_INST_NEW (cfg, ins, OP_NOP);
5690 				MONO_ADD_INS (cfg->cbb, ins);
5692 			return ins;
5694 		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5695 				&& strcmp (cmethod->klass->name, "Environment") == 0) {
5696 #ifdef TARGET_WIN32
5697 			EMIT_NEW_ICONST (cfg, ins, 1);
5698 #else
5699 			EMIT_NEW_ICONST (cfg, ins, 0);
5700 #endif
5701 			return ins;
5703 	} else if (cmethod->klass == mono_defaults.math_class) {
5705 		 * There is general branches code for Min/Max, but it does not work for 
5706 		 * all inputs:
5707 		 * http://everything2.com/?node_id=1051618
	/* --- MonoMac/monotouch Selector.GetHandle, folded at AOT time --- */
5709 	} else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5710 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5711 		MonoInst *pi;
5712 		MonoJumpInfoToken *ji;
5713 		MonoString *s;
5715 		cfg->disable_llvm = TRUE;
		/* Recover the LDSTR patch info from the argument's AOT constant. */
5717 		if (args [0]->opcode == OP_GOT_ENTRY) {
5718 			pi = args [0]->inst_p1;
5719 			g_assert (pi->opcode == OP_PATCH_INFO);
5720 			g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5721 			ji = pi->inst_p0;
5722 		} else {
5723 			g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5724 			ji = args [0]->inst_p0;
5727 		NULLIFY_INS (args [0]);
5729 		// FIXME: Ugly
5730 		s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5731 		MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5732 		ins->dreg = mono_alloc_ireg (cfg);
5733 		// FIXME: Leaks
5734 		ins->inst_p0 = mono_string_to_utf8 (s);
5735 		MONO_ADD_INS (cfg->cbb, ins);
5736 		return ins;
5737 #endif
	/* Fallbacks: SIMD, native-types, LLVM-only and arch-specific intrinsics. */
5740 #ifdef MONO_ARCH_SIMD_INTRINSICS
5741 	if (cfg->opt & MONO_OPT_SIMD) {
5742 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5743 		if (ins)
5744 			return ins;
5746 #endif
5748 	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5749 	if (ins)
5750 		return ins;
5752 	if (COMPILE_LLVM (cfg)) {
5753 		ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5754 		if (ins)
5755 			return ins;
5758 	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5762 * This entry point could be used later for arbitrary method
5763 * redirection.
5765 inline static MonoInst*
5766 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5767 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5769 if (method->klass == mono_defaults.string_class) {
5770 /* managed string allocation support */
5771 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5772 MonoInst *iargs [2];
5773 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5774 MonoMethod *managed_alloc = NULL;
5776 g_assert (vtable); /*Should not fail since it System.String*/
5777 #ifndef MONO_CROSS_COMPILE
5778 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5779 #endif
5780 if (!managed_alloc)
5781 return NULL;
5782 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5783 iargs [1] = args [0];
5784 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
5787 return NULL;
/*
 * mono_save_args:
 *
 *   Create a local variable for each argument of an inlined method and emit
 * stores copying the stack values SP into them, so the inlined body can refer
 * to cfg->args [] as usual.
 */
5790 static void
5791 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5793 	MonoInst *store, *temp;
5794 	int i;
5796 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For 'this' the static type is unknown here; derive it from the stack slot. */
5797 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5800 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5801 		 * would be different than the MonoInst's used to represent arguments, and
5802 		 * the ldelema implementation can't deal with that.
5803 		 * Solution: When ldelema is used on an inline argument, create a var for 
5804 		 * it, emit ldelema on that var, and emit the saving code below in
5805 		 * inline_method () if needed.
5807 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5808 		cfg->args [i] = temp;
5809 		/* This uses cfg->args [i] which is set by the preceeding line */
5810 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5811 		store->cil_code = sp [0]->cil_code;
5812 		sp++;
5816 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5817 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: returns TRUE if CALLED_METHOD may be inlined according to
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var, which, when set, restricts
 * inlining to methods whose full name starts with the given prefix.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	/* Cached prefix; "" means the env var was unset, i.e. no limit. */
	static const char *limit = NULL;
	char *full_name;
	int cmp;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	cmp = strncmp (full_name, limit, strlen (limit));
	g_free (full_name);

	//return (cmp <= 0);
	return (cmp == 0);
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: returns TRUE if inlining is allowed inside CALLER_METHOD
 * according to the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var, which, when
 * set, restricts inlining to callers whose full name starts with the prefix.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	/* Cached prefix; "" means the env var was unset, i.e. no limit. */
	static const char *limit = NULL;
	char *full_name;
	int cmp;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	cmp = strncmp (full_name, limit, strlen (limit));
	g_free (full_name);

	//return (cmp <= 0);
	return (cmp == 0);
}
#endif
/*
 * emit_init_rvar:
 *
 *   Emit IR into cfg->cbb that initializes vreg DREG to the zero value
 * appropriate for type RTYPE (NULL pointer, 0, 0L, 0.0, or a zeroed
 * valuetype).
 */
5879 static void
5880 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared 0.0 constant; OP_R8CONST references it by address. */
5882 static double r8_0 = 0.0;
5883 MonoInst *ins;
5884 int t;
5886 rtype = mini_replace_type (rtype);
5887 t = rtype->type;
5889 if (rtype->byref) {
5890 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5891 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5892 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5893 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5894 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5895 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5896 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5897 ins->type = STACK_R8;
5898 ins->inst_p0 = (void*)&r8_0;
5899 ins->dreg = dreg;
5900 MONO_ADD_INS (cfg->cbb, ins);
5901 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5902 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5903 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type vars that are known to be valuetypes under gsharedvt also get VZERO. */
5904 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5905 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5906 } else {
/* Everything else is a reference type: init with NULL. */
5907 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* instructions: placeholders
 * that keep the IR/SSA well formed without generating real initialization
 * code. Falls back to a real init for types with no dummy opcode.
 */
5911 static void
5912 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5914 int t;
5916 rtype = mini_replace_type (rtype);
5917 t = rtype->type;
5919 if (rtype->byref) {
5920 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5921 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5922 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5923 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5924 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5925 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5926 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5927 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5928 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5929 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5930 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5931 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5932 } else {
/* No dummy opcode for reference types here: emit a real NULL init instead. */
5933 emit_init_rvar (cfg, dreg, rtype);
5937 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. With INIT, a real zero
 * init is emitted; otherwise only dummy placeholder instructions.
 * Under soft-float the value is materialized in a fresh vreg and then
 * stored to the local, so the store can be decomposed later.
 */
5938 static void
5939 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5941 MonoInst *var = cfg->locals [local];
5942 if (COMPILE_SOFT_FLOAT (cfg)) {
5943 MonoInst *store;
5944 int reg = alloc_dreg (cfg, var->type);
5945 emit_init_rvar (cfg, reg, type);
/* cfg->cbb->last_ins is the init instruction just emitted above. */
5946 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5947 } else {
5948 if (init)
5949 emit_init_rvar (cfg, var->dreg, type);
5950 else
5951 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point (IP, with arguments in SP).
 * Saves the parts of CFG that mono_method_to_ir () mutates, compiles the
 * callee's IL into new bblocks between a fresh start/end pair, then either
 * links/merges those bblocks into the caller (success) or discards them
 * (abort). Returns the positive inline cost + 1 on success, 0 on failure.
 * With INLINE_ALWAYS the cost limit is bypassed and loader errors become
 * compile exceptions.
 */
5955 static int
5956 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5957 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5959 MonoInst *ins, *rvar = NULL;
5960 MonoMethodHeader *cheader;
5961 MonoBasicBlock *ebblock, *sbblock;
5962 int i, costs;
5963 MonoMethod *prev_inlined_method;
5964 MonoInst **prev_locals, **prev_args;
5965 MonoType **prev_arg_types;
5966 guint prev_real_offset;
5967 GHashTable *prev_cbb_hash;
5968 MonoBasicBlock **prev_cil_offset_to_bb;
5969 MonoBasicBlock *prev_cbb;
5970 unsigned char* prev_cil_start;
5971 guint32 prev_cil_offset_to_bb_len;
5972 MonoMethod *prev_current_method;
5973 MonoGenericContext *prev_generic_context;
5974 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5976 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based filters to narrow down inlining while debugging. */
5978 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5979 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5980 return 0;
5981 #endif
5982 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5983 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5984 return 0;
5985 #endif
5987 if (cfg->verbose_level > 2)
5988 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5990 if (!cmethod->inline_info) {
5991 cfg->stat_inlineable_methods++;
5992 cmethod->inline_info = 1;
5995 /* allocate local variables */
5996 cheader = mono_method_get_header (cmethod);
5998 if (cheader == NULL || mono_loader_get_last_error ()) {
5999 MonoLoaderError *error = mono_loader_get_last_error ();
6001 if (cheader)
6002 mono_metadata_free_mh (cheader);
/* With inline_always the caller cannot fall back to a normal call, so surface the loader error. */
6003 if (inline_always && error)
6004 mono_cfg_set_exception (cfg, error->exception_type);
6006 mono_loader_clear_error ();
6007 return 0;
6010 /*Must verify before creating locals as it can cause the JIT to assert.*/
6011 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6012 mono_metadata_free_mh (cheader);
6013 return 0;
6016 /* allocate space to store the return value */
6017 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6018 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
6021 prev_locals = cfg->locals;
6022 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6023 for (i = 0; i < cheader->num_locals; ++i)
6024 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6026 /* allocate start and end blocks */
6027 /* This is needed so if the inline is aborted, we can clean up */
6028 NEW_BBLOCK (cfg, sbblock);
6029 sbblock->real_offset = real_offset;
6031 NEW_BBLOCK (cfg, ebblock);
6032 ebblock->block_num = cfg->num_bblocks++;
6033 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir () repurposes for the callee. */
6035 prev_args = cfg->args;
6036 prev_arg_types = cfg->arg_types;
6037 prev_inlined_method = cfg->inlined_method;
6038 cfg->inlined_method = cmethod;
6039 cfg->ret_var_set = FALSE;
6040 cfg->inline_depth ++;
6041 prev_real_offset = cfg->real_offset;
6042 prev_cbb_hash = cfg->cbb_hash;
6043 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6044 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6045 prev_cil_start = cfg->cil_start;
6046 prev_cbb = cfg->cbb;
6047 prev_current_method = cfg->current_method;
6048 prev_generic_context = cfg->generic_context;
6049 prev_ret_var_set = cfg->ret_var_set;
6051 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6052 virtual = TRUE;
/* Compile the callee's IL into bblocks between sbblock and ebblock. */
6054 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6056 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state regardless of success or failure. */
6058 cfg->inlined_method = prev_inlined_method;
6059 cfg->real_offset = prev_real_offset;
6060 cfg->cbb_hash = prev_cbb_hash;
6061 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6062 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6063 cfg->cil_start = prev_cil_start;
6064 cfg->locals = prev_locals;
6065 cfg->args = prev_args;
6066 cfg->arg_types = prev_arg_types;
6067 cfg->current_method = prev_current_method;
6068 cfg->generic_context = prev_generic_context;
6069 cfg->ret_var_set = prev_ret_var_set;
6070 cfg->inline_depth --;
/* Accept: negative cost means abort; 60 is the cost limit unless inline_always. */
6072 if ((costs >= 0 && costs < 60) || inline_always) {
6073 if (cfg->verbose_level > 2)
6074 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6076 cfg->stat_inlined_methods++;
6078 /* always add some code to avoid block split failures */
6079 MONO_INST_NEW (cfg, ins, OP_NOP);
6080 MONO_ADD_INS (prev_cbb, ins);
6082 prev_cbb->next_bb = sbblock;
6083 link_bblock (cfg, prev_cbb, sbblock);
6086 * Get rid of the begin and end bblocks if possible to aid local
6087 * optimizations.
6089 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6091 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6092 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6094 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6095 MonoBasicBlock *prev = ebblock->in_bb [0];
6096 mono_merge_basic_blocks (cfg, prev, ebblock);
6097 cfg->cbb = prev;
6098 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6099 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6100 cfg->cbb = prev_cbb;
6102 } else {
6104 * Its possible that the rvar is set in some prev bblock, but not in others.
6105 * (#1835).
6107 if (rvar) {
6108 MonoBasicBlock *bb;
6110 for (i = 0; i < ebblock->in_count; ++i) {
6111 bb = ebblock->in_bb [i];
6113 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6114 cfg->cbb = bb;
6116 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6121 cfg->cbb = ebblock;
6124 if (rvar) {
6126 * If the inlined method contains only a throw, then the ret var is not
6127 * set, so set it to a dummy value.
6129 if (!ret_var_set)
6130 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value onto the caller's evaluation stack. */
6132 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6133 *sp++ = ins;
6135 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6136 return costs + 1;
6137 } else {
6138 if (cfg->verbose_level > 2)
6139 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6140 cfg->exception_type = MONO_EXCEPTION_NONE;
6141 mono_loader_clear_error ();
6143 /* This gets rid of the newly added bblocks */
6144 cfg->cbb = prev_cbb;
6146 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6147 return 0;
6151 * Some of these comments may well be out-of-date.
6152 * Design decisions: we do a single pass over the IL code (and we do bblock
6153 * splitting/merging in the few cases when it's required: a back jump to an IL
6154 * address that was not already seen as bblock starting point).
6155 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6156 * Complex operations are decomposed in simpler ones right away. We need to let the
6157 * arch-specific code peek and poke inside this process somehow (except when the
6158 * optimizations can take advantage of the full semantic info of coarse opcodes).
6159 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6160 * MonoInst->opcode initially is the IL opcode or some simplification of that
6161 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6162 * opcode with value bigger than OP_LAST.
6163 * At this point the IR can be handed over to an interpreter, a dumb code generator
6164 * or to the optimizing code generator that will translate it to SSA form.
6166 * Profiling directed optimizations.
6167 * We may compile by default with few or no optimizations and instrument the code
6168 * or the user may indicate what methods to optimize the most either in a config file
6169 * or through repeated runs where the compiler applies offline the optimizations to
6170 * each method and then decides if it was worth it.
6173 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6174 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6175 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6176 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6177 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6178 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6179 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6180 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6182 /* offset from br.s -> br like opcodes */
6183 #define BIG_BRANCH_OFFSET 13
6185 static gboolean
6186 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6188 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6190 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create a basic block (via
 * GET_BBLOCK) for every branch target and fall-through point, so later
 * passes know where bblocks begin. Also marks bblocks containing a throw
 * as out-of-line. Returns 0 on success, 1 on invalid IL (*POS then points
 * at the offending instruction).
 */
6193 static int
6194 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6196 unsigned char *ip = start;
6197 unsigned char *target;
6198 int i;
6199 guint cli_addr;
6200 MonoBasicBlock *bblock;
6201 const MonoOpcode *opcode;
6203 while (ip < end) {
6204 cli_addr = ip - start;
6205 i = mono_opcode_value ((const guint8 **)&ip, end);
6206 if (i < 0)
6207 UNVERIFIED;
6208 opcode = &mono_opcodes [i];
/* Advance IP past the operand; branch operands additionally create bblocks. */
6209 switch (opcode->argument) {
6210 case MonoInlineNone:
6211 ip++;
6212 break;
6213 case MonoInlineString:
6214 case MonoInlineType:
6215 case MonoInlineField:
6216 case MonoInlineMethod:
6217 case MonoInlineTok:
6218 case MonoInlineSig:
6219 case MonoShortInlineR:
6220 case MonoInlineI:
6221 ip += 5;
6222 break;
6223 case MonoInlineVar:
6224 ip += 3;
6225 break;
6226 case MonoShortInlineVar:
6227 case MonoShortInlineI:
6228 ip += 2;
6229 break;
6230 case MonoShortInlineBrTarget:
/* 1-byte signed branch: target and the fall-through both start bblocks. */
6231 target = start + cli_addr + 2 + (signed char)ip [1];
6232 GET_BBLOCK (cfg, bblock, target);
6233 ip += 2;
6234 if (ip < end)
6235 GET_BBLOCK (cfg, bblock, ip);
6236 break;
6237 case MonoInlineBrTarget:
/* 4-byte signed branch. */
6238 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6239 GET_BBLOCK (cfg, bblock, target);
6240 ip += 5;
6241 if (ip < end)
6242 GET_BBLOCK (cfg, bblock, ip);
6243 break;
6244 case MonoInlineSwitch: {
6245 guint32 n = read32 (ip + 1);
6246 guint32 j;
6247 ip += 5;
/* Switch targets are relative to the first instruction after the jump table. */
6248 cli_addr += 5 + 4 * n;
6249 target = start + cli_addr;
6250 GET_BBLOCK (cfg, bblock, target);
6252 for (j = 0; j < n; ++j) {
6253 target = start + cli_addr + (gint32)read32 (ip);
6254 GET_BBLOCK (cfg, bblock, target);
6255 ip += 4;
6257 break;
6259 case MonoInlineR:
6260 case MonoInlineI8:
6261 ip += 9;
6262 break;
6263 default:
6264 g_assert_not_reached ();
6267 if (i == CEE_THROW) {
6268 unsigned char *bb_start = ip - 1;
6270 /* Find the start of the bblock containing the throw */
6271 bblock = NULL;
6272 while ((bb_start >= start) && !bblock) {
6273 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6274 bb_start --;
6276 if (bblock)
6277 bblock->out_of_line = 1;
6280 return 0;
/* Jumped to by the UNVERIFIED / LOAD_ERROR macros above. */
6281 unverified:
6282 exception_exit:
6283 *pos = ip;
6284 return 1;
6287 static inline MonoMethod *
6288 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6290 MonoMethod *method;
6292 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6293 method = mono_method_get_wrapper_data (m, token);
6294 if (context)
6295 method = mono_class_inflate_generic_method (method, context);
6296 } else {
6297 method = mono_get_method_full (m->klass->image, token, klass, context);
6300 return method;
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when the compile is not using
 * generic sharing, methods on open constructed types are rejected (NULL).
 * CFG may be NULL, in which case no such filtering happens.
 */
6303 static inline MonoMethod *
6304 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6306 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6308 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
6309 return NULL;
6311 return method;
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass relative to METHOD (wrapper data table for
 * wrappers, image metadata otherwise), inflating with CONTEXT when given.
 * The class is initialized before being returned; NULL on failure.
 */
6314 static inline MonoClass*
6315 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6317 MonoClass *klass;
6319 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6320 klass = mono_method_get_wrapper_data (method, token);
6321 if (context)
6322 klass = mono_class_inflate_generic_class (klass, context);
6323 } else {
6324 klass = mono_class_get_full (method->klass->image, token, context);
6326 if (klass)
6327 mono_class_init (klass);
6328 return klass;
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature relative to METHOD (wrapper data
 * table for wrappers, image metadata otherwise), inflating with CONTEXT
 * when given.
 */
6331 static inline MonoMethodSignature*
6332 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6334 MonoMethodSignature *fsig;
6336 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6337 MonoError error;
6339 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6340 if (context) {
6341 fsig = mono_inflate_generic_signature (fsig, context, &error);
6342 // FIXME:
6343 g_assert (mono_error_ok (&error));
6345 } else {
6346 fsig = mono_metadata_parse_signature (method->klass->image, token);
6348 return fsig;
6352 * Returns TRUE if the JIT should abort inlining because "callee"
6353 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER->CALLEE call. Returns FALSE when
 * the call is allowed (possibly after emitting code that throws a
 * SecurityException for ECMA link demands), TRUE when the JIT must not
 * proceed (inlining aborted or a linkdemand compile exception was set).
 */
6355 static
6356 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6358 guint32 result;
/* When inlining (cfg->method != caller), any declarative security on the callee aborts the inline. */
6360 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6361 return TRUE;
6364 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6365 if (result == MONO_JIT_SECURITY_OK)
6366 return FALSE;
6368 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6369 /* Generate code to throw a SecurityException before the actual call/link */
6370 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6371 MonoInst *args [2];
6373 NEW_ICONST (cfg, args [0], 4);
6374 NEW_METHODCONST (cfg, args [1], caller);
6375 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6376 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6377 /* don't hide previous results */
6378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6379 cfg->exception_data = result;
6380 return TRUE;
6383 return FALSE;
/*
 * throw_exception:
 *
 *   Return the cached SecurityManager.ThrowException (exception) method.
 * NOTE(review): the lazy init of the static is not obviously protected
 * against concurrent JIT threads — presumably callers hold the JIT lock;
 * confirm before relying on it.
 */
6386 static MonoMethod*
6387 throw_exception (void)
6389 static MonoMethod *method = NULL;
6391 if (!method) {
6392 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6393 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
6395 g_assert (method);
6396 return method;
6399 static void
6400 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6402 MonoMethod *thrower = throw_exception ();
6403 MonoInst *args [1];
6405 EMIT_NEW_PCONST (cfg, args [0], ex);
6406 mono_emit_method_call (cfg, thrower, args, NULL);
6410 * Return the original method is a wrapper is specified. We can only access
6411 * the custom attributes from the original method.
6413 static MonoMethod*
6414 get_original_method (MonoMethod *method)
6416 if (method->wrapper_type == MONO_WRAPPER_NONE)
6417 return method;
6419 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6420 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6421 return NULL;
6423 /* in other cases we need to find the original method */
6424 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER may not access FIELD, emit code that throws
 * the corresponding security exception at runtime instead of the access.
 */
6427 static void
6428 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6429 MonoBasicBlock *bblock, unsigned char *ip)
6431 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6432 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6433 if (ex)
6434 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that throws
 * the corresponding security exception at runtime instead of the call.
 */
6437 static void
6438 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6439 MonoBasicBlock *bblock, unsigned char *ip)
6441 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6442 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6443 if (ex)
6444 emit_throw_exception (cfg, ex);
6448 * Check that the IL instructions at ip are the array initialization
6449 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "newarr; dup; ldtoken <field>; call
 * RuntimeHelpers.InitializeArray" sequence starting at IP. On a match,
 * return a pointer to the static field's raw data (or, for AOT, the RVA as
 * a fake pointer to be resolved at load time) and set *OUT_SIZE and
 * *OUT_FIELD_TOKEN. Returns NULL when the pattern or element type doesn't
 * qualify, so the caller emits the normal calls.
 */
6451 static const char*
6452 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6455 * newarr[System.Int32]
6456 * dup
6457 * ldtoken field valuetype ...
6458 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token). */
6460 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6461 guint32 token = read32 (ip + 7);
6462 guint32 field_token = read32 (ip + 2);
6463 guint32 field_index = field_token & 0xffffff;
6464 guint32 rva;
6465 const char *data_ptr;
6466 int size = 0;
6467 MonoMethod *cmethod;
6468 MonoClass *dummy_class;
6469 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6470 int dummy_align;
6472 if (!field)
6473 return NULL;
6475 *out_field_token = field_token;
/* The called method must really be corlib's RuntimeHelpers.InitializeArray. */
6477 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6478 if (!cmethod)
6479 return NULL;
6480 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6481 return NULL;
6482 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6483 case MONO_TYPE_BOOLEAN:
6484 case MONO_TYPE_I1:
6485 case MONO_TYPE_U1:
6486 size = 1; break;
6487 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6488 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6489 case MONO_TYPE_CHAR:
6490 case MONO_TYPE_I2:
6491 case MONO_TYPE_U2:
6492 size = 2; break;
6493 case MONO_TYPE_I4:
6494 case MONO_TYPE_U4:
6495 case MONO_TYPE_R4:
6496 size = 4; break;
6497 case MONO_TYPE_R8:
6498 case MONO_TYPE_I8:
6499 case MONO_TYPE_U8:
6500 size = 8; break;
6501 #endif
6502 default:
6503 return NULL;
/* The array data must fit inside the field's declared size. */
6505 size *= len;
6506 if (size > mono_type_size (field->type, &dummy_align))
6507 return NULL;
6508 *out_size = size;
6509 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6510 if (!method->klass->image->dynamic) {
6511 field_index = read32 (ip + 2) & 0xffffff;
6512 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6513 data_ptr = mono_image_rva_map (method->klass->image, rva);
6514 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6515 /* for aot code we do the lookup on load */
6516 if (aot && data_ptr)
6517 return GUINT_TO_POINTER (rva);
6518 } else {
6519 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6520 g_assert (!aot);
6521 data_ptr = mono_field_get_data (field);
6523 return data_ptr;
6525 return NULL;
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for the invalid IL at IP of
 * METHOD, with a message that includes the disassembled offending
 * instruction (or a note that the body is empty).
 */
6528 static void
6529 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6531 char *method_fname = mono_method_full_name (method, TRUE);
6532 char *method_code;
6533 MonoMethodHeader *header = mono_method_get_header (method);
6535 if (header->code_size == 0)
6536 method_code = g_strdup ("method body is empty.");
6537 else
6538 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6539 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6540 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6541 g_free (method_fname);
6542 g_free (method_code);
/* The header is freed later along with the rest of the compile. */
6543 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-created exception object; the object is
 * GC-registered since cfg->exception_ptr now keeps it alive.
 */
6546 static void
6547 set_exception_object (MonoCompile *cfg, MonoException *exception)
6549 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6550 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6551 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack slot *SP into local N. When the value is a
 * freshly emitted integer constant, the constant's dreg is retargeted to
 * the local's vreg instead of emitting a separate move.
 */
6554 static void
6555 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6557 MonoInst *ins;
6558 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6559 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6560 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6561 /* Optimize reg-reg moves away */
6563 * Can't optimize other opcodes, since sp[0] might point to
6564 * the last ins of a decomposed opcode.
6566 sp [0]->dreg = (cfg)->locals [n]->dreg;
6567 } else {
6568 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6573 * ldloca inhibits many optimizations so try to get rid of it in common
6574 * cases.
/*
 * emit_optimized_ldloca_ir:
 *
 *   IP points just past a ldloca/ldloca.s opcode (SIZE is its operand width,
 * 1 or 2). If the address is only consumed by an immediately following
 * initobj in the same bblock, emit a direct local init instead and return
 * the IP after the initobj; otherwise return NULL so the caller emits the
 * plain ldloca.
 */
6576 static inline unsigned char *
6577 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6579 int local, token;
6580 MonoClass *klass;
6581 MonoType *type;
6583 if (size == 1) {
6584 local = ip [1];
6585 ip += 2;
6586 } else {
6587 local = read16 (ip + 2);
6588 ip += 4;
6591 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6592 /* From the INITOBJ case */
6593 token = read32 (ip + 2);
6594 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6595 CHECK_TYPELOAD (klass);
6596 type = mini_replace_type (&klass->byval_arg);
6597 emit_init_local (cfg, local, type, TRUE);
6598 return ip + 6;
/* Jumped to by CHECK_TYPELOAD via LOAD_ERROR. */
6600 load_error:
6601 return NULL;
6604 static gboolean
6605 is_exception_class (MonoClass *class)
6607 while (class) {
6608 if (class == mono_defaults.exception_class)
6609 return TRUE;
6610 class = class->parent;
6612 return FALSE;
6616 * is_jit_optimizer_disabled:
6618 * Determine whenever M's assembly has a DebuggableAttribute with the
6619 * IsJITOptimizerDisabled flag set.
/*
 * The answer is cached per-assembly in ass->jit_optimizer_disabled, with a
 * memory barrier before setting the _inited flag so racing readers see a
 * fully written value.
 */
6621 static gboolean
6622 is_jit_optimizer_disabled (MonoMethod *m)
6624 MonoAssembly *ass = m->klass->image->assembly;
6625 MonoCustomAttrInfo* attrs;
6626 static MonoClass *klass;
6627 int i;
6628 gboolean val = FALSE;
6630 g_assert (ass);
6631 if (ass->jit_optimizer_disabled_inited)
6632 return ass->jit_optimizer_disabled;
6634 if (!klass)
6635 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6636 if (!klass) {
6637 /* Linked away */
6638 ass->jit_optimizer_disabled = FALSE;
6639 mono_memory_barrier ();
6640 ass->jit_optimizer_disabled_inited = TRUE;
6641 return FALSE;
6644 attrs = mono_custom_attrs_from_assembly (ass);
6645 if (attrs) {
6646 for (i = 0; i < attrs->num_attrs; ++i) {
6647 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6648 const gchar *p;
6649 int len;
6650 MonoMethodSignature *sig;
6652 if (!attr->ctor || attr->ctor->klass != klass)
6653 continue;
6654 /* Decode the attribute. See reflection.c */
6655 len = attr->data_size;
6656 p = (const char*)attr->data;
/* 0x0001 is the custom attribute blob prolog. */
6657 g_assert (read16 (p) == 0x0001);
6658 p += 2;
6660 // FIXME: Support named parameters
6661 sig = mono_method_signature (attr->ctor);
6662 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6663 continue;
6664 /* Two boolean arguments */
/* Skip the first bool (isJITTrackingEnabled); the second is IsJITOptimizerDisabled. */
6665 p ++;
6666 val = *p;
6668 mono_custom_attrs_free (attrs);
6671 ass->jit_optimizer_disabled = val;
6672 mono_memory_barrier ();
6673 ass->jit_optimizer_disabled_inited = TRUE;
6675 return val;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD with signature FSIG
 * can actually be compiled as a tail call. Starts from an arch-specific or
 * signature-equality check, then vetoes cases where the callee could end up
 * referencing the current frame's stack or where the calling convention
 * can't support it.
 */
6678 static gboolean
6679 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6681 gboolean supported_tail_call;
6682 int i;
6684 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6685 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6686 #else
6687 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6688 #endif
6690 for (i = 0; i < fsig->param_count; ++i) {
6691 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6692 /* These can point to the current method's stack */
6693 supported_tail_call = FALSE;
6695 if (fsig->hasthis && cmethod->klass->valuetype)
6696 /* this might point to the current method's stack */
6697 supported_tail_call = FALSE;
6698 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6699 supported_tail_call = FALSE;
/* An LMF frame must be popped by a normal return, not a tail call. */
6700 if (cfg->method->save_lmf)
6701 supported_tail_call = FALSE;
6702 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6703 supported_tail_call = FALSE;
/* Only plain call (not callvirt/calli) is supported. */
6704 if (call_opcode != CEE_CALL)
6705 supported_tail_call = FALSE;
6707 /* Debugging support */
6708 #if 0
6709 if (supported_tail_call) {
6710 if (!mono_debug_count ())
6711 supported_tail_call = FALSE;
6713 #endif
6715 return supported_tail_call;
6718 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6719 * it to the thread local value based on the tls_offset field. Every other kind of access to
6720 * the field causes an assert.
6722 static gboolean
6723 is_magic_tls_access (MonoClassField *field)
6725 if (strcmp (field->name, "tlsdata"))
6726 return FALSE;
6727 if (strcmp (field->parent->name, "ThreadLocal`1"))
6728 return FALSE;
6729 return field->parent->image == mono_defaults.corlib;
6732 /* emits the code needed to access a managed tls var (like ThreadStatic)
6733 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6734 * pointer for the current thread.
6735 * Returns the MonoInst* representing the address of the tls var.
6737 static MonoInst*
6738 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6740 MonoInst *addr;
6741 int static_data_reg, array_reg, dreg;
6742 int offset2_reg, idx_reg;
6743 // inlined access to the tls data
6744 // idx = (offset >> 24) - 1;
6745 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6746 static_data_reg = alloc_ireg (cfg);
6747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1 */
6748 idx_reg = alloc_ireg (cfg);
6749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale idx by pointer size to index the static_data pointer array. */
6751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6752 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6753 array_reg = alloc_ireg (cfg);
6754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff: the byte offset inside the chunk. */
6755 offset2_reg = alloc_ireg (cfg);
6756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6757 dreg = alloc_ireg (cfg);
6758 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6759 return addr;
6763 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6764 * this address is cached per-method in cached_tls_addr.
6766 static MonoInst*
6767 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6769 MonoInst *load, *addr, *temp, *store, *thread_ins;
6770 MonoClassField *offset_field;
/* Reuse the per-method cached address if we already computed it. */
6772 if (*cached_tls_addr) {
6773 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6774 return addr;
6776 thread_ins = mono_get_thread_intrinsic (cfg);
6777 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset value from the ThreadLocal`1 instance. */
6779 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6780 if (thread_ins) {
6781 MONO_ADD_INS (cfg->cbb, thread_ins);
6782 } else {
/* No arch intrinsic for the current thread: fall back to an icall. */
6783 MonoMethod *thread_method;
6784 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6785 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6787 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6788 addr->klass = mono_class_from_mono_type (tls_field->type);
6789 addr->type = STACK_MP;
/* Cache the computed address in a temp for subsequent accesses. */
6790 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6791 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6793 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6794 return addr;
/*
 * mono_method_to_ir:
 *
 *   Translate the .net IL into linear IR.
 */
6803 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6804 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6805 guint inline_offset, gboolean is_virtual_call)
6807 MonoError error;
6808 MonoInst *ins, **sp, **stack_start;
6809 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6810 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6811 MonoMethod *cmethod, *method_definition;
6812 MonoInst **arg_array;
6813 MonoMethodHeader *header;
6814 MonoImage *image;
6815 guint32 token, ins_flag;
6816 MonoClass *klass;
6817 MonoClass *constrained_call = NULL;
6818 unsigned char *ip, *end, *target, *err_pos;
6819 MonoMethodSignature *sig;
6820 MonoGenericContext *generic_context = NULL;
6821 MonoGenericContainer *generic_container = NULL;
6822 MonoType **param_types;
6823 int i, n, start_new_bblock, dreg;
6824 int num_calls = 0, inline_costs = 0;
6825 int breakpoint_id = 0;
6826 guint num_args;
6827 MonoBoolean security, pinvoke;
6828 MonoSecurityManager* secman = NULL;
6829 MonoDeclSecurityActions actions;
6830 GSList *class_inits = NULL;
6831 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6832 int context_used;
6833 gboolean init_locals, seq_points, skip_dead_blocks;
6834 gboolean disable_inline, sym_seq_points = FALSE;
6835 MonoInst *cached_tls_addr = NULL;
6836 MonoDebugMethodInfo *minfo;
6837 MonoBitSet *seq_point_locs = NULL;
6838 MonoBitSet *seq_point_set_locs = NULL;
6840 disable_inline = is_jit_optimizer_disabled (method);
6842 /* serialization and xdomain stuff may need access to private fields and methods */
6843 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6844 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6845 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6846 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6847 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6848 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6850 dont_verify |= mono_security_smcs_hack_enabled ();
6852 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6853 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6854 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6855 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6856 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6858 image = method->klass->image;
6859 header = mono_method_get_header (method);
6860 if (!header) {
6861 MonoLoaderError *error;
6863 if ((error = mono_loader_get_last_error ())) {
6864 mono_cfg_set_exception (cfg, error->exception_type);
6865 } else {
6866 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6867 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6869 goto exception_exit;
6871 generic_container = mono_method_get_generic_container (method);
6872 sig = mono_method_signature (method);
6873 num_args = sig->hasthis + sig->param_count;
6874 ip = (unsigned char*)header->code;
6875 cfg->cil_start = ip;
6876 end = ip + header->code_size;
6877 cfg->stat_cil_code_size += header->code_size;
6879 seq_points = cfg->gen_seq_points && cfg->method == method;
6880 #ifdef PLATFORM_ANDROID
6881 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6882 #endif
6884 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6885 /* We could hit a seq point before attaching to the JIT (#8338) */
6886 seq_points = FALSE;
6889 if (cfg->gen_seq_points && cfg->method == method) {
6890 minfo = mono_debug_lookup_method (method);
6891 if (minfo) {
6892 int i, n_il_offsets;
6893 int *il_offsets;
6894 int *line_numbers;
6896 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
6897 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6898 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6899 sym_seq_points = TRUE;
6900 for (i = 0; i < n_il_offsets; ++i) {
6901 if (il_offsets [i] < header->code_size)
6902 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6904 g_free (il_offsets);
6905 g_free (line_numbers);
6910 * Methods without init_locals set could cause asserts in various passes
6911 * (#497220). To work around this, we emit dummy initialization opcodes
6912 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6913 * on some platforms.
6915 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6916 init_locals = header->init_locals;
6917 else
6918 init_locals = TRUE;
6920 method_definition = method;
6921 while (method_definition->is_inflated) {
6922 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6923 method_definition = imethod->declaring;
6926 /* SkipVerification is not allowed if core-clr is enabled */
6927 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6928 dont_verify = TRUE;
6929 dont_verify_stloc = TRUE;
6932 if (sig->is_inflated)
6933 generic_context = mono_method_get_context (method);
6934 else if (generic_container)
6935 generic_context = &generic_container->context;
6936 cfg->generic_context = generic_context;
6938 if (!cfg->generic_sharing_context)
6939 g_assert (!sig->has_type_parameters);
6941 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6942 g_assert (method->is_inflated);
6943 g_assert (mono_method_get_context (method)->method_inst);
6945 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6946 g_assert (sig->generic_param_count);
6948 if (cfg->method == method) {
6949 cfg->real_offset = 0;
6950 } else {
6951 cfg->real_offset = inline_offset;
6954 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6955 cfg->cil_offset_to_bb_len = header->code_size;
6957 cfg->current_method = method;
6959 if (cfg->verbose_level > 2)
6960 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6962 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6963 if (sig->hasthis)
6964 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6965 for (n = 0; n < sig->param_count; ++n)
6966 param_types [n + sig->hasthis] = sig->params [n];
6967 cfg->arg_types = param_types;
6969 dont_inline = g_list_prepend (dont_inline, method);
6970 if (cfg->method == method) {
6972 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6973 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6975 /* ENTRY BLOCK */
6976 NEW_BBLOCK (cfg, start_bblock);
6977 cfg->bb_entry = start_bblock;
6978 start_bblock->cil_code = NULL;
6979 start_bblock->cil_length = 0;
6980 #if defined(__native_client_codegen__)
6981 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6982 ins->dreg = alloc_dreg (cfg, STACK_I4);
6983 MONO_ADD_INS (start_bblock, ins);
6984 #endif
6986 /* EXIT BLOCK */
6987 NEW_BBLOCK (cfg, end_bblock);
6988 cfg->bb_exit = end_bblock;
6989 end_bblock->cil_code = NULL;
6990 end_bblock->cil_length = 0;
6991 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6992 g_assert (cfg->num_bblocks == 2);
6994 arg_array = cfg->args;
6996 if (header->num_clauses) {
6997 cfg->spvars = g_hash_table_new (NULL, NULL);
6998 cfg->exvars = g_hash_table_new (NULL, NULL);
7000 /* handle exception clauses */
7001 for (i = 0; i < header->num_clauses; ++i) {
7002 MonoBasicBlock *try_bb;
7003 MonoExceptionClause *clause = &header->clauses [i];
7004 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7005 try_bb->real_offset = clause->try_offset;
7006 try_bb->try_start = TRUE;
7007 try_bb->region = ((i + 1) << 8) | clause->flags;
7008 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7009 tblock->real_offset = clause->handler_offset;
7010 tblock->flags |= BB_EXCEPTION_HANDLER;
7013 * Linking the try block with the EH block hinders inlining as we won't be able to
7014 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7016 if (COMPILE_LLVM (cfg))
7017 link_bblock (cfg, try_bb, tblock);
7019 if (*(ip + clause->handler_offset) == CEE_POP)
7020 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7022 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7023 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7024 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7025 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7026 MONO_ADD_INS (tblock, ins);
7028 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7029 /* finally clauses already have a seq point */
7030 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7031 MONO_ADD_INS (tblock, ins);
7034 /* todo: is a fault block unsafe to optimize? */
7035 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7036 tblock->flags |= BB_EXCEPTION_UNSAFE;
7040 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7041 while (p < end) {
7042 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7044 /* catch and filter blocks get the exception object on the stack */
7045 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7046 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7047 MonoInst *dummy_use;
7049 /* mostly like handle_stack_args (), but just sets the input args */
7050 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7051 tblock->in_scount = 1;
7052 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7053 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7056 * Add a dummy use for the exvar so its liveness info will be
7057 * correct.
7059 cfg->cbb = tblock;
7060 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7062 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7063 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7064 tblock->flags |= BB_EXCEPTION_HANDLER;
7065 tblock->real_offset = clause->data.filter_offset;
7066 tblock->in_scount = 1;
7067 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7068 /* The filter block shares the exvar with the handler block */
7069 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7070 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7071 MONO_ADD_INS (tblock, ins);
7075 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7076 clause->data.catch_class &&
7077 cfg->generic_sharing_context &&
7078 mono_class_check_context_used (clause->data.catch_class)) {
7080 * In shared generic code with catch
7081 * clauses containing type variables
7082 * the exception handling code has to
7083 * be able to get to the rgctx.
7084 * Therefore we have to make sure that
7085 * the vtable/mrgctx argument (for
7086 * static or generic methods) or the
7087 * "this" argument (for non-static
7088 * methods) are live.
7090 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7091 mini_method_get_context (method)->method_inst ||
7092 method->klass->valuetype) {
7093 mono_get_vtable_var (cfg);
7094 } else {
7095 MonoInst *dummy_use;
7097 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7101 } else {
7102 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7103 cfg->cbb = start_bblock;
7104 cfg->args = arg_array;
7105 mono_save_args (cfg, sig, inline_args);
7108 /* FIRST CODE BLOCK */
7109 NEW_BBLOCK (cfg, bblock);
7110 bblock->cil_code = ip;
7111 cfg->cbb = bblock;
7112 cfg->ip = ip;
7114 ADD_BBLOCK (cfg, bblock);
7116 if (cfg->method == method) {
7117 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7118 if (breakpoint_id) {
7119 MONO_INST_NEW (cfg, ins, OP_BREAK);
7120 MONO_ADD_INS (bblock, ins);
7124 if (mono_security_cas_enabled ())
7125 secman = mono_security_manager_get_methods ();
7127 security = (secman && mono_security_method_has_declsec (method));
7128 /* at this point having security doesn't mean we have any code to generate */
7129 if (security && (cfg->method == method)) {
7130 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7131 * And we do not want to enter the next section (with allocation) if we
7132 * have nothing to generate */
7133 security = mono_declsec_get_demands (method, &actions);
7136 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7137 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7138 if (pinvoke) {
7139 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7140 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7141 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7143 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7144 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7145 pinvoke = FALSE;
7147 if (custom)
7148 mono_custom_attrs_free (custom);
7150 if (pinvoke) {
7151 custom = mono_custom_attrs_from_class (wrapped->klass);
7152 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7153 pinvoke = FALSE;
7155 if (custom)
7156 mono_custom_attrs_free (custom);
7158 } else {
7159 /* not a P/Invoke after all */
7160 pinvoke = FALSE;
7164 /* we use a separate basic block for the initialization code */
7165 NEW_BBLOCK (cfg, init_localsbb);
7166 cfg->bb_init = init_localsbb;
7167 init_localsbb->real_offset = cfg->real_offset;
7168 start_bblock->next_bb = init_localsbb;
7169 init_localsbb->next_bb = bblock;
7170 link_bblock (cfg, start_bblock, init_localsbb);
7171 link_bblock (cfg, init_localsbb, bblock);
7173 cfg->cbb = init_localsbb;
7175 if (cfg->gsharedvt && cfg->method == method) {
7176 MonoGSharedVtMethodInfo *info;
7177 MonoInst *var, *locals_var;
7178 int dreg;
7180 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7181 info->method = cfg->method;
7182 info->count_entries = 16;
7183 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7184 cfg->gsharedvt_info = info;
7186 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7187 /* prevent it from being register allocated */
7188 //var->flags |= MONO_INST_VOLATILE;
7189 cfg->gsharedvt_info_var = var;
7191 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7192 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7194 /* Allocate locals */
7195 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7196 /* prevent it from being register allocated */
7197 //locals_var->flags |= MONO_INST_VOLATILE;
7198 cfg->gsharedvt_locals_var = locals_var;
7200 dreg = alloc_ireg (cfg);
7201 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7203 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7204 ins->dreg = locals_var->dreg;
7205 ins->sreg1 = dreg;
7206 MONO_ADD_INS (cfg->cbb, ins);
7207 cfg->gsharedvt_locals_var_ins = ins;
7209 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7211 if (init_locals)
7212 ins->flags |= MONO_INST_INIT;
7216 /* at this point we know, if security is TRUE, that some code needs to be generated */
7217 if (security && (cfg->method == method)) {
7218 MonoInst *args [2];
7220 cfg->stat_cas_demand_generation++;
7222 if (actions.demand.blob) {
7223 /* Add code for SecurityAction.Demand */
7224 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7225 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7226 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7227 mono_emit_method_call (cfg, secman->demand, args, NULL);
7229 if (actions.noncasdemand.blob) {
7230 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7231 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7232 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7233 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7234 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7235 mono_emit_method_call (cfg, secman->demand, args, NULL);
7237 if (actions.demandchoice.blob) {
7238 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7239 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7240 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7241 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7242 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7246 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7247 if (pinvoke) {
7248 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7251 if (mono_security_core_clr_enabled ()) {
7252 /* check if this is native code, e.g. an icall or a p/invoke */
7253 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7254 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7255 if (wrapped) {
7256 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7257 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7259 /* if this ia a native call then it can only be JITted from platform code */
7260 if ((icall || pinvk) && method->klass && method->klass->image) {
7261 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7262 MonoException *ex = icall ? mono_get_exception_security () :
7263 mono_get_exception_method_access ();
7264 emit_throw_exception (cfg, ex);
7271 CHECK_CFG_EXCEPTION;
7273 if (header->code_size == 0)
7274 UNVERIFIED;
7276 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7277 ip = err_pos;
7278 UNVERIFIED;
7281 if (cfg->method == method)
7282 mono_debug_init_method (cfg, bblock, breakpoint_id);
7284 for (n = 0; n < header->num_locals; ++n) {
7285 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7286 UNVERIFIED;
7288 class_inits = NULL;
7290 /* We force the vtable variable here for all shared methods
7291 for the possibility that they might show up in a stack
7292 trace where their exact instantiation is needed. */
7293 if (cfg->generic_sharing_context && method == cfg->method) {
7294 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7295 mini_method_get_context (method)->method_inst ||
7296 method->klass->valuetype) {
7297 mono_get_vtable_var (cfg);
7298 } else {
7299 /* FIXME: Is there a better way to do this?
7300 We need the variable live for the duration
7301 of the whole method. */
7302 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7306 /* add a check for this != NULL to inlined methods */
7307 if (is_virtual_call) {
7308 MonoInst *arg_ins;
7310 NEW_ARGLOAD (cfg, arg_ins, 0);
7311 MONO_ADD_INS (cfg->cbb, arg_ins);
7312 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7315 skip_dead_blocks = !dont_verify;
7316 if (skip_dead_blocks) {
7317 original_bb = bb = mono_basic_block_split (method, &error);
7318 if (!mono_error_ok (&error)) {
7319 mono_error_cleanup (&error);
7320 UNVERIFIED;
7322 g_assert (bb);
7325 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7326 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7328 ins_flag = 0;
7329 start_new_bblock = 0;
7330 cfg->cbb = bblock;
7331 while (ip < end) {
7332 if (cfg->method == method)
7333 cfg->real_offset = ip - header->code;
7334 else
7335 cfg->real_offset = inline_offset;
7336 cfg->ip = ip;
7338 context_used = 0;
7340 if (start_new_bblock) {
7341 bblock->cil_length = ip - bblock->cil_code;
7342 if (start_new_bblock == 2) {
7343 g_assert (ip == tblock->cil_code);
7344 } else {
7345 GET_BBLOCK (cfg, tblock, ip);
7347 bblock->next_bb = tblock;
7348 bblock = tblock;
7349 cfg->cbb = bblock;
7350 start_new_bblock = 0;
7351 for (i = 0; i < bblock->in_scount; ++i) {
7352 if (cfg->verbose_level > 3)
7353 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7354 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7355 *sp++ = ins;
7357 if (class_inits)
7358 g_slist_free (class_inits);
7359 class_inits = NULL;
7360 } else {
7361 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7362 link_bblock (cfg, bblock, tblock);
7363 if (sp != stack_start) {
7364 handle_stack_args (cfg, stack_start, sp - stack_start);
7365 sp = stack_start;
7366 CHECK_UNVERIFIABLE (cfg);
7368 bblock->next_bb = tblock;
7369 bblock = tblock;
7370 cfg->cbb = bblock;
7371 for (i = 0; i < bblock->in_scount; ++i) {
7372 if (cfg->verbose_level > 3)
7373 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7374 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7375 *sp++ = ins;
7377 g_slist_free (class_inits);
7378 class_inits = NULL;
7382 if (skip_dead_blocks) {
7383 int ip_offset = ip - header->code;
7385 if (ip_offset == bb->end)
7386 bb = bb->next;
7388 if (bb->dead) {
7389 int op_size = mono_opcode_size (ip, end);
7390 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7392 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7394 if (ip_offset + op_size == bb->end) {
7395 MONO_INST_NEW (cfg, ins, OP_NOP);
7396 MONO_ADD_INS (bblock, ins);
7397 start_new_bblock = 1;
7400 ip += op_size;
7401 continue;
7405 * Sequence points are points where the debugger can place a breakpoint.
7406 * Currently, we generate these automatically at points where the IL
7407 * stack is empty.
7409 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7411 * Make methods interruptable at the beginning, and at the targets of
7412 * backward branches.
7413 * Also, do this at the start of every bblock in methods with clauses too,
7414 * to be able to handle instructions with inprecise control flow like
7415 * throw/endfinally.
7416 * Backward branches are handled at the end of method-to-ir ().
7418 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7420 /* Avoid sequence points on empty IL like .volatile */
7421 // FIXME: Enable this
7422 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7423 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7424 if (sp != stack_start)
7425 ins->flags |= MONO_INST_NONEMPTY_STACK;
7426 MONO_ADD_INS (cfg->cbb, ins);
7428 if (sym_seq_points)
7429 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7432 bblock->real_offset = cfg->real_offset;
7434 if ((cfg->method == method) && cfg->coverage_info) {
7435 guint32 cil_offset = ip - header->code;
7436 cfg->coverage_info->data [cil_offset].cil_code = ip;
7438 /* TODO: Use an increment here */
7439 #if defined(TARGET_X86)
7440 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7441 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7442 ins->inst_imm = 1;
7443 MONO_ADD_INS (cfg->cbb, ins);
7444 #else
7445 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7446 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7447 #endif
7450 if (cfg->verbose_level > 3)
7451 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7453 switch (*ip) {
7454 case CEE_NOP:
7455 if (seq_points && !sym_seq_points && sp != stack_start) {
7457 * The C# compiler uses these nops to notify the JIT that it should
7458 * insert seq points.
7460 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7461 MONO_ADD_INS (cfg->cbb, ins);
7463 if (cfg->keep_cil_nops)
7464 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7465 else
7466 MONO_INST_NEW (cfg, ins, OP_NOP);
7467 ip++;
7468 MONO_ADD_INS (bblock, ins);
7469 break;
7470 case CEE_BREAK:
7471 if (should_insert_brekpoint (cfg->method)) {
7472 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7473 } else {
7474 MONO_INST_NEW (cfg, ins, OP_NOP);
7476 ip++;
7477 MONO_ADD_INS (bblock, ins);
7478 break;
7479 case CEE_LDARG_0:
7480 case CEE_LDARG_1:
7481 case CEE_LDARG_2:
7482 case CEE_LDARG_3:
7483 CHECK_STACK_OVF (1);
7484 n = (*ip)-CEE_LDARG_0;
7485 CHECK_ARG (n);
7486 EMIT_NEW_ARGLOAD (cfg, ins, n);
7487 ip++;
7488 *sp++ = ins;
7489 break;
7490 case CEE_LDLOC_0:
7491 case CEE_LDLOC_1:
7492 case CEE_LDLOC_2:
7493 case CEE_LDLOC_3:
7494 CHECK_STACK_OVF (1);
7495 n = (*ip)-CEE_LDLOC_0;
7496 CHECK_LOCAL (n);
7497 EMIT_NEW_LOCLOAD (cfg, ins, n);
7498 ip++;
7499 *sp++ = ins;
7500 break;
7501 case CEE_STLOC_0:
7502 case CEE_STLOC_1:
7503 case CEE_STLOC_2:
7504 case CEE_STLOC_3: {
7505 CHECK_STACK (1);
7506 n = (*ip)-CEE_STLOC_0;
7507 CHECK_LOCAL (n);
7508 --sp;
7509 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7510 UNVERIFIED;
7511 emit_stloc_ir (cfg, sp, header, n);
7512 ++ip;
7513 inline_costs += 1;
7514 break;
7516 case CEE_LDARG_S:
7517 CHECK_OPSIZE (2);
7518 CHECK_STACK_OVF (1);
7519 n = ip [1];
7520 CHECK_ARG (n);
7521 EMIT_NEW_ARGLOAD (cfg, ins, n);
7522 *sp++ = ins;
7523 ip += 2;
7524 break;
7525 case CEE_LDARGA_S:
7526 CHECK_OPSIZE (2);
7527 CHECK_STACK_OVF (1);
7528 n = ip [1];
7529 CHECK_ARG (n);
7530 NEW_ARGLOADA (cfg, ins, n);
7531 MONO_ADD_INS (cfg->cbb, ins);
7532 *sp++ = ins;
7533 ip += 2;
7534 break;
7535 case CEE_STARG_S:
7536 CHECK_OPSIZE (2);
7537 CHECK_STACK (1);
7538 --sp;
7539 n = ip [1];
7540 CHECK_ARG (n);
7541 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7542 UNVERIFIED;
7543 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7544 ip += 2;
7545 break;
7546 case CEE_LDLOC_S:
7547 CHECK_OPSIZE (2);
7548 CHECK_STACK_OVF (1);
7549 n = ip [1];
7550 CHECK_LOCAL (n);
7551 EMIT_NEW_LOCLOAD (cfg, ins, n);
7552 *sp++ = ins;
7553 ip += 2;
7554 break;
7555 case CEE_LDLOCA_S: {
7556 unsigned char *tmp_ip;
7557 CHECK_OPSIZE (2);
7558 CHECK_STACK_OVF (1);
7559 CHECK_LOCAL (ip [1]);
7561 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7562 ip = tmp_ip;
7563 inline_costs += 1;
7564 break;
7567 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7568 *sp++ = ins;
7569 ip += 2;
7570 break;
7572 case CEE_STLOC_S:
7573 CHECK_OPSIZE (2);
7574 CHECK_STACK (1);
7575 --sp;
7576 CHECK_LOCAL (ip [1]);
7577 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7578 UNVERIFIED;
7579 emit_stloc_ir (cfg, sp, header, ip [1]);
7580 ip += 2;
7581 inline_costs += 1;
7582 break;
7583 case CEE_LDNULL:
7584 CHECK_STACK_OVF (1);
7585 EMIT_NEW_PCONST (cfg, ins, NULL);
7586 ins->type = STACK_OBJ;
7587 ++ip;
7588 *sp++ = ins;
7589 break;
7590 case CEE_LDC_I4_M1:
7591 CHECK_STACK_OVF (1);
7592 EMIT_NEW_ICONST (cfg, ins, -1);
7593 ++ip;
7594 *sp++ = ins;
7595 break;
7596 case CEE_LDC_I4_0:
7597 case CEE_LDC_I4_1:
7598 case CEE_LDC_I4_2:
7599 case CEE_LDC_I4_3:
7600 case CEE_LDC_I4_4:
7601 case CEE_LDC_I4_5:
7602 case CEE_LDC_I4_6:
7603 case CEE_LDC_I4_7:
7604 case CEE_LDC_I4_8:
7605 CHECK_STACK_OVF (1);
7606 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7607 ++ip;
7608 *sp++ = ins;
7609 break;
7610 case CEE_LDC_I4_S:
7611 CHECK_OPSIZE (2);
7612 CHECK_STACK_OVF (1);
7613 ++ip;
7614 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7615 ++ip;
7616 *sp++ = ins;
7617 break;
7618 case CEE_LDC_I4:
7619 CHECK_OPSIZE (5);
7620 CHECK_STACK_OVF (1);
7621 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7622 ip += 5;
7623 *sp++ = ins;
7624 break;
7625 case CEE_LDC_I8:
7626 CHECK_OPSIZE (9);
7627 CHECK_STACK_OVF (1);
7628 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7629 ins->type = STACK_I8;
7630 ins->dreg = alloc_dreg (cfg, STACK_I8);
7631 ++ip;
7632 ins->inst_l = (gint64)read64 (ip);
7633 MONO_ADD_INS (bblock, ins);
7634 ip += 8;
7635 *sp++ = ins;
7636 break;
7637 case CEE_LDC_R4: {
7638 float *f;
7639 gboolean use_aotconst = FALSE;
7641 #ifdef TARGET_POWERPC
7642 /* FIXME: Clean this up */
7643 if (cfg->compile_aot)
7644 use_aotconst = TRUE;
7645 #endif
7647 /* FIXME: we should really allocate this only late in the compilation process */
7648 f = mono_domain_alloc (cfg->domain, sizeof (float));
7649 CHECK_OPSIZE (5);
7650 CHECK_STACK_OVF (1);
7652 if (use_aotconst) {
7653 MonoInst *cons;
7654 int dreg;
7656 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7658 dreg = alloc_freg (cfg);
7659 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7660 ins->type = STACK_R8;
7661 } else {
7662 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7663 ins->type = STACK_R8;
7664 ins->dreg = alloc_dreg (cfg, STACK_R8);
7665 ins->inst_p0 = f;
7666 MONO_ADD_INS (bblock, ins);
7668 ++ip;
7669 readr4 (ip, f);
7670 ip += 4;
7671 *sp++ = ins;
7672 break;
7674 case CEE_LDC_R8: {
7675 double *d;
7676 gboolean use_aotconst = FALSE;
7678 #ifdef TARGET_POWERPC
7679 /* FIXME: Clean this up */
7680 if (cfg->compile_aot)
7681 use_aotconst = TRUE;
7682 #endif
7684 /* FIXME: we should really allocate this only late in the compilation process */
7685 d = mono_domain_alloc (cfg->domain, sizeof (double));
7686 CHECK_OPSIZE (9);
7687 CHECK_STACK_OVF (1);
7689 if (use_aotconst) {
7690 MonoInst *cons;
7691 int dreg;
7693 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7695 dreg = alloc_freg (cfg);
7696 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7697 ins->type = STACK_R8;
7698 } else {
7699 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7700 ins->type = STACK_R8;
7701 ins->dreg = alloc_dreg (cfg, STACK_R8);
7702 ins->inst_p0 = d;
7703 MONO_ADD_INS (bblock, ins);
7705 ++ip;
7706 readr8 (ip, d);
7707 ip += 8;
7708 *sp++ = ins;
7709 break;
7711 case CEE_DUP: {
7712 MonoInst *temp, *store;
7713 CHECK_STACK (1);
7714 CHECK_STACK_OVF (1);
7715 sp--;
7716 ins = *sp;
7718 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7719 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7721 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7722 *sp++ = ins;
7724 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7725 *sp++ = ins;
7727 ++ip;
7728 inline_costs += 2;
7729 break;
7731 case CEE_POP:
7732 CHECK_STACK (1);
7733 ip++;
7734 --sp;
7736 #ifdef TARGET_X86
7737 if (sp [0]->type == STACK_R8)
7738 /* we need to pop the value from the x86 FP stack */
7739 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7740 #endif
7741 break;
7742 case CEE_JMP: {
7743 MonoCallInst *call;
7745 INLINE_FAILURE ("jmp");
7746 GSHAREDVT_FAILURE (*ip);
7748 CHECK_OPSIZE (5);
7749 if (stack_start != sp)
7750 UNVERIFIED;
7751 token = read32 (ip + 1);
7752 /* FIXME: check the signature matches */
7753 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7755 if (!cmethod || mono_loader_get_last_error ())
7756 LOAD_ERROR;
7758 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7759 GENERIC_SHARING_FAILURE (CEE_JMP);
7761 if (mono_security_cas_enabled ())
7762 CHECK_CFG_EXCEPTION;
7764 emit_instrumentation_call (cfg, mono_profiler_method_leave);
7766 if (ARCH_HAVE_OP_TAIL_CALL) {
7767 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7768 int i, n;
7770 /* Handle tail calls similarly to calls */
7771 n = fsig->param_count + fsig->hasthis;
7773 DISABLE_AOT (cfg);
7775 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7776 call->method = cmethod;
7777 call->tail_call = TRUE;
7778 call->signature = mono_method_signature (cmethod);
7779 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7780 call->inst.inst_p0 = cmethod;
7781 for (i = 0; i < n; ++i)
7782 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7784 mono_arch_emit_call (cfg, call);
7785 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
7786 MONO_ADD_INS (bblock, (MonoInst*)call);
7787 } else {
7788 for (i = 0; i < num_args; ++i)
7789 /* Prevent arguments from being optimized away */
7790 arg_array [i]->flags |= MONO_INST_VOLATILE;
7792 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7793 ins = (MonoInst*)call;
7794 ins->inst_p0 = cmethod;
7795 MONO_ADD_INS (bblock, ins);
7798 ip += 5;
7799 start_new_bblock = 1;
7800 break;
7802 case CEE_CALLI:
7803 case CEE_CALL:
7804 case CEE_CALLVIRT: {
7805 MonoInst *addr = NULL;
7806 MonoMethodSignature *fsig = NULL;
7807 int array_rank = 0;
7808 int virtual = *ip == CEE_CALLVIRT;
7809 int calli = *ip == CEE_CALLI;
7810 gboolean pass_imt_from_rgctx = FALSE;
7811 MonoInst *imt_arg = NULL;
7812 MonoInst *keep_this_alive = NULL;
7813 gboolean pass_vtable = FALSE;
7814 gboolean pass_mrgctx = FALSE;
7815 MonoInst *vtable_arg = NULL;
7816 gboolean check_this = FALSE;
7817 gboolean supported_tail_call = FALSE;
7818 gboolean tail_call = FALSE;
7819 gboolean need_seq_point = FALSE;
7820 guint32 call_opcode = *ip;
7821 gboolean emit_widen = TRUE;
7822 gboolean push_res = TRUE;
7823 gboolean skip_ret = FALSE;
7824 gboolean delegate_invoke = FALSE;
7826 CHECK_OPSIZE (5);
7827 token = read32 (ip + 1);
7829 ins = NULL;
7831 if (calli) {
7832 //GSHAREDVT_FAILURE (*ip);
7833 cmethod = NULL;
7834 CHECK_STACK (1);
7835 --sp;
7836 addr = *sp;
7837 fsig = mini_get_signature (method, token, generic_context);
7838 n = fsig->param_count + fsig->hasthis;
7840 if (method->dynamic && fsig->pinvoke) {
7841 MonoInst *args [3];
7844 * This is a call through a function pointer using a pinvoke
7845 * signature. Have to create a wrapper and call that instead.
7846 * FIXME: This is very slow, need to create a wrapper at JIT time
7847 * instead based on the signature.
7849 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7850 EMIT_NEW_PCONST (cfg, args [1], fsig);
7851 args [2] = addr;
7852 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7854 } else {
7855 MonoMethod *cil_method;
7857 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7858 cil_method = cmethod;
7860 if (constrained_call) {
7861 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7862 if (cfg->verbose_level > 2)
7863 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7864 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7865 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7866 cfg->generic_sharing_context)) {
7867 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7869 } else {
7870 if (cfg->verbose_level > 2)
7871 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7873 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7875 * This is needed since get_method_constrained can't find
7876 * the method in klass representing a type var.
7877 * The type var is guaranteed to be a reference type in this
7878 * case.
7880 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7881 g_assert (!cmethod->klass->valuetype);
7882 } else {
7883 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7888 if (!cmethod || mono_loader_get_last_error ())
7889 LOAD_ERROR;
7890 if (!dont_verify && !cfg->skip_visibility) {
7891 MonoMethod *target_method = cil_method;
7892 if (method->is_inflated) {
7893 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7895 if (!mono_method_can_access_method (method_definition, target_method) &&
7896 !mono_method_can_access_method (method, cil_method))
7897 METHOD_ACCESS_FAILURE;
7900 if (mono_security_core_clr_enabled ())
7901 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7903 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7904 /* MS.NET seems to silently convert this to a callvirt */
7905 virtual = 1;
7909 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7910 * converts to a callvirt.
7912 * tests/bug-515884.il is an example of this behavior
7914 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7915 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7916 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7917 virtual = 1;
7920 if (!cmethod->klass->inited)
7921 if (!mono_class_init (cmethod->klass))
7922 TYPE_LOAD_ERROR (cmethod->klass);
7924 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7925 mini_class_is_system_array (cmethod->klass)) {
7926 array_rank = cmethod->klass->rank;
7927 fsig = mono_method_signature (cmethod);
7928 } else {
7929 fsig = mono_method_signature (cmethod);
7931 if (!fsig)
7932 LOAD_ERROR;
7934 if (fsig->pinvoke) {
7935 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7936 check_for_pending_exc, cfg->compile_aot);
7937 fsig = mono_method_signature (wrapper);
7938 } else if (constrained_call) {
7939 fsig = mono_method_signature (cmethod);
7940 } else {
7941 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7945 mono_save_token_info (cfg, image, token, cil_method);
7947 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7949 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7950 * foo (bar (), baz ())
7951 * works correctly. MS does this also:
7952 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7953 * The problem with this approach is that the debugger will stop after all calls returning a value,
7954 * even for simple cases, like:
7955 * int i = foo ();
7957 /* Special case a few common successor opcodes */
7958 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7959 need_seq_point = TRUE;
7962 n = fsig->param_count + fsig->hasthis;
7964 /* Don't support calls made using type arguments for now */
7966 if (cfg->gsharedvt) {
7967 if (mini_is_gsharedvt_signature (cfg, fsig))
7968 GSHAREDVT_FAILURE (*ip);
7972 if (mono_security_cas_enabled ()) {
7973 if (check_linkdemand (cfg, method, cmethod))
7974 INLINE_FAILURE ("linkdemand");
7975 CHECK_CFG_EXCEPTION;
7978 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7979 g_assert_not_reached ();
7982 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7983 UNVERIFIED;
7985 if (!cfg->generic_sharing_context && cmethod)
7986 g_assert (!mono_method_check_context_used (cmethod));
7988 CHECK_STACK (n);
7990 //g_assert (!virtual || fsig->hasthis);
7992 sp -= n;
7994 if (constrained_call) {
7995 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7997 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7999 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8000 /* The 'Own method' case below */
8001 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8002 /* 'The type parameter is instantiated as a reference type' case below. */
8003 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8004 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8005 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8006 MonoInst *args [16];
8009 * This case handles calls to
8010 * - object:ToString()/Equals()/GetHashCode(),
8011 * - System.IComparable<T>:CompareTo()
8012 * - System.IEquatable<T>:Equals ()
8013 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8016 args [0] = sp [0];
8017 if (mono_method_check_context_used (cmethod))
8018 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8019 else
8020 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8021 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8023 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8024 if (fsig->hasthis && fsig->param_count) {
8025 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8026 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8027 ins->dreg = alloc_preg (cfg);
8028 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8029 MONO_ADD_INS (cfg->cbb, ins);
8030 args [4] = ins;
8032 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8033 int addr_reg;
8035 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8037 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8038 addr_reg = ins->dreg;
8039 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8040 } else {
8041 EMIT_NEW_ICONST (cfg, args [3], 0);
8042 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8044 } else {
8045 EMIT_NEW_ICONST (cfg, args [3], 0);
8046 EMIT_NEW_ICONST (cfg, args [4], 0);
8048 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8049 emit_widen = FALSE;
8051 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8052 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8053 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8054 MonoInst *add;
8056 /* Unbox */
8057 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8058 MONO_ADD_INS (cfg->cbb, add);
8059 /* Load value */
8060 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8061 MONO_ADD_INS (cfg->cbb, ins);
8062 /* ins represents the call result */
8065 goto call_end;
8066 } else {
8067 GSHAREDVT_FAILURE (*ip);
8071 * We have the `constrained.' prefix opcode.
8073 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8075 * The type parameter is instantiated as a valuetype,
8076 * but that type doesn't override the method we're
8077 * calling, so we need to box `this'.
8079 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8080 ins->klass = constrained_call;
8081 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8082 CHECK_CFG_EXCEPTION;
8083 } else if (!constrained_call->valuetype) {
8084 int dreg = alloc_ireg_ref (cfg);
8087 * The type parameter is instantiated as a reference
8088 * type. We have a managed pointer on the stack, so
8089 * we need to dereference it here.
8091 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8092 ins->type = STACK_OBJ;
8093 sp [0] = ins;
8094 } else {
8095 if (cmethod->klass->valuetype) {
8096 /* Own method */
8097 } else {
8098 /* Interface method */
8099 int ioffset, slot;
8101 mono_class_setup_vtable (constrained_call);
8102 CHECK_TYPELOAD (constrained_call);
8103 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8104 if (ioffset == -1)
8105 TYPE_LOAD_ERROR (constrained_call);
8106 slot = mono_method_get_vtable_slot (cmethod);
8107 if (slot == -1)
8108 TYPE_LOAD_ERROR (cmethod->klass);
8109 cmethod = constrained_call->vtable [ioffset + slot];
8111 if (cmethod->klass == mono_defaults.enum_class) {
8112 /* Enum implements some interfaces, so treat this as the first case */
8113 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8114 ins->klass = constrained_call;
8115 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8116 CHECK_CFG_EXCEPTION;
8119 virtual = 0;
8121 constrained_call = NULL;
8124 if (!calli && check_call_signature (cfg, fsig, sp))
8125 UNVERIFIED;
8127 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8128 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8129 delegate_invoke = TRUE;
8130 #endif
8132 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8133 bblock = cfg->cbb;
8134 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8135 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8136 emit_widen = FALSE;
8139 goto call_end;
8143 * If the callee is a shared method, then its static cctor
8144 * might not get called after the call was patched.
8146 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8147 emit_generic_class_init (cfg, cmethod->klass);
8148 CHECK_TYPELOAD (cmethod->klass);
8151 if (cmethod)
8152 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8154 if (cfg->generic_sharing_context && cmethod) {
8155 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8157 context_used = mini_method_check_context_used (cfg, cmethod);
8159 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8160 /* Generic method interface
8161 calls are resolved via a
8162 helper function and don't
8163 need an imt. */
8164 if (!cmethod_context || !cmethod_context->method_inst)
8165 pass_imt_from_rgctx = TRUE;
8169 * If a shared method calls another
8170 * shared method then the caller must
8171 * have a generic sharing context
8172 * because the magic trampoline
8173 * requires it. FIXME: We shouldn't
8174 * have to force the vtable/mrgctx
8175 * variable here. Instead there
8176 * should be a flag in the cfg to
8177 * request a generic sharing context.
8179 if (context_used &&
8180 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8181 mono_get_vtable_var (cfg);
8184 if (pass_vtable) {
8185 if (context_used) {
8186 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8187 } else {
8188 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8190 CHECK_TYPELOAD (cmethod->klass);
8191 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8195 if (pass_mrgctx) {
8196 g_assert (!vtable_arg);
8198 if (!cfg->compile_aot) {
8200 * emit_get_rgctx_method () calls mono_class_vtable () so check
8201 * for type load errors before.
8203 mono_class_setup_vtable (cmethod->klass);
8204 CHECK_TYPELOAD (cmethod->klass);
8207 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8209 /* !marshalbyref is needed to properly handle generic methods + remoting */
8210 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8211 MONO_METHOD_IS_FINAL (cmethod)) &&
8212 !mono_class_is_marshalbyref (cmethod->klass)) {
8213 if (virtual)
8214 check_this = TRUE;
8215 virtual = 0;
8219 if (pass_imt_from_rgctx) {
8220 g_assert (!pass_vtable);
8221 g_assert (cmethod);
8223 imt_arg = emit_get_rgctx_method (cfg, context_used,
8224 cmethod, MONO_RGCTX_INFO_METHOD);
8227 if (check_this)
8228 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8230 /* Calling virtual generic methods */
8231 if (cmethod && virtual &&
8232 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8233 !(MONO_METHOD_IS_FINAL (cmethod) &&
8234 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8235 fsig->generic_param_count &&
8236 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8237 MonoInst *this_temp, *this_arg_temp, *store;
8238 MonoInst *iargs [4];
8239 gboolean use_imt = FALSE;
8241 g_assert (fsig->is_inflated);
8243 /* Prevent inlining of methods that contain indirect calls */
8244 INLINE_FAILURE ("virtual generic call");
8246 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8247 GSHAREDVT_FAILURE (*ip);
8249 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8250 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8251 use_imt = TRUE;
8252 #endif
8254 if (use_imt) {
8255 g_assert (!imt_arg);
8256 if (!context_used)
8257 g_assert (cmethod->is_inflated);
8258 imt_arg = emit_get_rgctx_method (cfg, context_used,
8259 cmethod, MONO_RGCTX_INFO_METHOD);
8260 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8261 } else {
8262 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8263 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8264 MONO_ADD_INS (bblock, store);
8266 /* FIXME: This should be a managed pointer */
8267 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8269 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8270 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8271 cmethod, MONO_RGCTX_INFO_METHOD);
8272 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8273 addr = mono_emit_jit_icall (cfg,
8274 mono_helper_compile_generic_method, iargs);
8276 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8278 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8281 goto call_end;
8285 * Implement a workaround for the inherent races involved in locking:
8286 * Monitor.Enter ()
8287 * try {
8288 * } finally {
8289 * Monitor.Exit ()
8291 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8292 * try block, the Exit () won't be executed, see:
8293 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8294 * To work around this, we extend such try blocks to include the last x bytes
8295 * of the Monitor.Enter () call.
8297 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8298 MonoBasicBlock *tbb;
8300 GET_BBLOCK (cfg, tbb, ip + 5);
8302 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8303 * from Monitor.Enter like ArgumentNullException.
8305 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8306 /* Mark this bblock as needing to be extended */
8307 tbb->extend_try_block = TRUE;
8311 /* Conversion to a JIT intrinsic */
8312 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8313 bblock = cfg->cbb;
8314 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8315 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8316 emit_widen = FALSE;
8318 goto call_end;
8321 /* Inlining */
8322 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8323 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8324 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8325 !g_list_find (dont_inline, cmethod)) {
8326 int costs;
8327 gboolean always = FALSE;
8329 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8330 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8331 /* Prevent inlining of methods that call wrappers */
8332 INLINE_FAILURE ("wrapper call");
8333 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8334 always = TRUE;
8337 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8338 if (costs) {
8339 cfg->real_offset += 5;
8340 bblock = cfg->cbb;
8342 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8343 /* *sp is already set by inline_method */
8344 sp++;
8345 push_res = FALSE;
8348 inline_costs += costs;
8350 goto call_end;
8354 /* Tail recursion elimination */
8355 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8356 gboolean has_vtargs = FALSE;
8357 int i;
8359 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8360 INLINE_FAILURE ("tail call");
8362 /* keep it simple */
8363 for (i = fsig->param_count - 1; i >= 0; i--) {
8364 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8365 has_vtargs = TRUE;
8368 if (!has_vtargs) {
8369 for (i = 0; i < n; ++i)
8370 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8371 MONO_INST_NEW (cfg, ins, OP_BR);
8372 MONO_ADD_INS (bblock, ins);
8373 tblock = start_bblock->out_bb [0];
8374 link_bblock (cfg, bblock, tblock);
8375 ins->inst_target_bb = tblock;
8376 start_new_bblock = 1;
8378 /* skip the CEE_RET, too */
8379 if (ip_in_bb (cfg, bblock, ip + 5))
8380 skip_ret = TRUE;
8381 push_res = FALSE;
8382 goto call_end;
8386 inline_costs += 10 * num_calls++;
8389 * Making generic calls out of gsharedvt methods.
8391 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8392 MonoRgctxInfoType info_type;
8394 if (virtual) {
8395 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8396 //GSHAREDVT_FAILURE (*ip);
8397 // disable for possible remoting calls
8398 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8399 GSHAREDVT_FAILURE (*ip);
8400 if (fsig->generic_param_count) {
8401 /* virtual generic call */
8402 g_assert (mono_use_imt);
8403 g_assert (!imt_arg);
8404 /* Same as the virtual generic case above */
8405 imt_arg = emit_get_rgctx_method (cfg, context_used,
8406 cmethod, MONO_RGCTX_INFO_METHOD);
8407 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8408 vtable_arg = NULL;
8412 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8413 /* test_0_multi_dim_arrays () in gshared.cs */
8414 GSHAREDVT_FAILURE (*ip);
8416 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8417 keep_this_alive = sp [0];
8419 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8420 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8421 else
8422 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8423 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8425 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8426 goto call_end;
8427 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8429 * We pass the address to the gsharedvt trampoline in the rgctx reg
8431 MonoInst *callee = addr;
8433 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8434 /* Not tested */
8435 GSHAREDVT_FAILURE (*ip);
8437 addr = emit_get_rgctx_sig (cfg, context_used,
8438 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8439 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8440 goto call_end;
8443 /* Generic sharing */
8444 /* FIXME: only do this for generic methods if
8445 they are not shared! */
8446 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8447 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8448 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8449 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8450 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8451 INLINE_FAILURE ("gshared");
8453 g_assert (cfg->generic_sharing_context && cmethod);
8454 g_assert (!addr);
8457 * We are compiling a call to a
8458 * generic method from shared code,
8459 * which means that we have to look up
8460 * the method in the rgctx and do an
8461 * indirect call.
8463 if (fsig->hasthis)
8464 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8466 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8467 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8468 goto call_end;
8471 /* Indirect calls */
8472 if (addr) {
8473 if (call_opcode == CEE_CALL)
8474 g_assert (context_used);
8475 else if (call_opcode == CEE_CALLI)
8476 g_assert (!vtable_arg);
8477 else
8478 /* FIXME: what the hell is this??? */
8479 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8480 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8482 /* Prevent inlining of methods with indirect calls */
8483 INLINE_FAILURE ("indirect call");
8485 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8486 int info_type;
8487 gpointer info_data;
8490 * Instead of emitting an indirect call, emit a direct call
8491 * with the contents of the aotconst as the patch info.
8493 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8494 info_type = addr->inst_c1;
8495 info_data = addr->inst_p0;
8496 } else {
8497 info_type = addr->inst_right->inst_c1;
8498 info_data = addr->inst_right->inst_left;
8501 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8502 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8503 NULLIFY_INS (addr);
8504 goto call_end;
8507 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8508 goto call_end;
8511 /* Array methods */
8512 if (array_rank) {
8513 MonoInst *addr;
8515 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8516 MonoInst *val = sp [fsig->param_count];
8518 if (val->type == STACK_OBJ) {
8519 MonoInst *iargs [2];
8521 iargs [0] = sp [0];
8522 iargs [1] = val;
8524 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8527 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8528 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8529 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8530 emit_write_barrier (cfg, addr, val);
8531 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8532 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8534 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8535 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8536 if (!cmethod->klass->element_class->valuetype && !readonly)
8537 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8538 CHECK_TYPELOAD (cmethod->klass);
8540 readonly = FALSE;
8541 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8542 ins = addr;
8543 } else {
8544 g_assert_not_reached ();
8547 emit_widen = FALSE;
8548 goto call_end;
8551 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8552 if (ins)
8553 goto call_end;
8555 /* Tail prefix / tail call optimization */
8557 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8558 /* FIXME: runtime generic context pointer for jumps? */
8559 /* FIXME: handle this for generic sharing eventually */
8560 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8561 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8562 supported_tail_call = TRUE;
8564 if (supported_tail_call) {
8565 MonoCallInst *call;
8567 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8568 INLINE_FAILURE ("tail call");
8570 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8572 if (ARCH_HAVE_OP_TAIL_CALL) {
8573 /* Handle tail calls similarly to normal calls */
8574 tail_call = TRUE;
8575 } else {
8576 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8578 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8579 call->tail_call = TRUE;
8580 call->method = cmethod;
8581 call->signature = mono_method_signature (cmethod);
8584 * We implement tail calls by storing the actual arguments into the
8585 * argument variables, then emitting a CEE_JMP.
8587 for (i = 0; i < n; ++i) {
8588 /* Prevent argument from being register allocated */
8589 arg_array [i]->flags |= MONO_INST_VOLATILE;
8590 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8592 ins = (MonoInst*)call;
8593 ins->inst_p0 = cmethod;
8594 ins->inst_p1 = arg_array [0];
8595 MONO_ADD_INS (bblock, ins);
8596 link_bblock (cfg, bblock, end_bblock);
8597 start_new_bblock = 1;
8599 // FIXME: Eliminate unreachable epilogs
8602 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8603 * only reachable from this call.
8605 GET_BBLOCK (cfg, tblock, ip + 5);
8606 if (tblock == bblock || tblock->in_count == 0)
8607 skip_ret = TRUE;
8608 push_res = FALSE;
8610 goto call_end;
8615 * Synchronized wrappers.
8616 * Its hard to determine where to replace a method with its synchronized
8617 * wrapper without causing an infinite recursion. The current solution is
8618 * to add the synchronized wrapper in the trampolines, and to
8619 * change the called method to a dummy wrapper, and resolve that wrapper
8620 * to the real method in mono_jit_compile_method ().
8622 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8623 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8624 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8625 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8628 /* Common call */
8629 INLINE_FAILURE ("call");
8630 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8631 imt_arg, vtable_arg);
8633 if (tail_call) {
8634 link_bblock (cfg, bblock, end_bblock);
8635 start_new_bblock = 1;
8637 // FIXME: Eliminate unreachable epilogs
8640 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8641 * only reachable from this call.
8643 GET_BBLOCK (cfg, tblock, ip + 5);
8644 if (tblock == bblock || tblock->in_count == 0)
8645 skip_ret = TRUE;
8646 push_res = FALSE;
8649 call_end:
8651 /* End of call, INS should contain the result of the call, if any */
8653 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8654 g_assert (ins);
8655 if (emit_widen)
8656 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8657 else
8658 *sp++ = ins;
8661 if (keep_this_alive) {
8662 MonoInst *dummy_use;
8664 /* See mono_emit_method_call_full () */
8665 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8668 CHECK_CFG_EXCEPTION;
8670 ip += 5;
8671 if (skip_ret) {
8672 g_assert (*ip == CEE_RET);
8673 ip += 1;
8675 ins_flag = 0;
8676 constrained_call = NULL;
8677 if (need_seq_point)
8678 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8679 break;
8681 case CEE_RET:
8682 if (cfg->method != method) {
8683 /* return from inlined method */
8685 * If in_count == 0, that means the ret is unreachable due to
8686 * being preceeded by a throw. In that case, inline_method () will
8687 * handle setting the return value
8688 * (test case: test_0_inline_throw ()).
8690 if (return_var && cfg->cbb->in_count) {
8691 MonoType *ret_type = mono_method_signature (method)->ret;
8693 MonoInst *store;
8694 CHECK_STACK (1);
8695 --sp;
8697 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8698 UNVERIFIED;
8700 //g_assert (returnvar != -1);
8701 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8702 cfg->ret_var_set = TRUE;
8704 } else {
8705 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8707 if (cfg->lmf_var && cfg->cbb->in_count)
8708 emit_pop_lmf (cfg);
8710 if (cfg->ret) {
8711 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8713 if (seq_points && !sym_seq_points) {
8715 * Place a seq point here too even through the IL stack is not
8716 * empty, so a step over on
8717 * call <FOO>
8718 * ret
8719 * will work correctly.
8721 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8722 MONO_ADD_INS (cfg->cbb, ins);
8725 g_assert (!return_var);
8726 CHECK_STACK (1);
8727 --sp;
8729 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8730 UNVERIFIED;
8732 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8733 MonoInst *ret_addr;
8735 if (!cfg->vret_addr) {
8736 MonoInst *ins;
8738 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8739 } else {
8740 EMIT_NEW_RETLOADA (cfg, ret_addr);
8742 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8743 ins->klass = mono_class_from_mono_type (ret_type);
8745 } else {
8746 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8747 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8748 MonoInst *iargs [1];
8749 MonoInst *conv;
8751 iargs [0] = *sp;
8752 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8753 mono_arch_emit_setret (cfg, method, conv);
8754 } else {
8755 mono_arch_emit_setret (cfg, method, *sp);
8757 #else
8758 mono_arch_emit_setret (cfg, method, *sp);
8759 #endif
8763 if (sp != stack_start)
8764 UNVERIFIED;
8765 MONO_INST_NEW (cfg, ins, OP_BR);
8766 ip++;
8767 ins->inst_target_bb = end_bblock;
8768 MONO_ADD_INS (bblock, ins);
8769 link_bblock (cfg, bblock, end_bblock);
8770 start_new_bblock = 1;
8771 break;
8772 case CEE_BR_S:
8773 CHECK_OPSIZE (2);
8774 MONO_INST_NEW (cfg, ins, OP_BR);
8775 ip++;
8776 target = ip + 1 + (signed char)(*ip);
8777 ++ip;
8778 GET_BBLOCK (cfg, tblock, target);
8779 link_bblock (cfg, bblock, tblock);
8780 ins->inst_target_bb = tblock;
8781 if (sp != stack_start) {
8782 handle_stack_args (cfg, stack_start, sp - stack_start);
8783 sp = stack_start;
8784 CHECK_UNVERIFIABLE (cfg);
8786 MONO_ADD_INS (bblock, ins);
8787 start_new_bblock = 1;
8788 inline_costs += BRANCH_COST;
8789 break;
8790 case CEE_BEQ_S:
8791 case CEE_BGE_S:
8792 case CEE_BGT_S:
8793 case CEE_BLE_S:
8794 case CEE_BLT_S:
8795 case CEE_BNE_UN_S:
8796 case CEE_BGE_UN_S:
8797 case CEE_BGT_UN_S:
8798 case CEE_BLE_UN_S:
8799 case CEE_BLT_UN_S:
8800 CHECK_OPSIZE (2);
8801 CHECK_STACK (2);
8802 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8803 ip++;
8804 target = ip + 1 + *(signed char*)ip;
8805 ip++;
8807 ADD_BINCOND (NULL);
8809 sp = stack_start;
8810 inline_costs += BRANCH_COST;
8811 break;
8812 case CEE_BR:
8813 CHECK_OPSIZE (5);
8814 MONO_INST_NEW (cfg, ins, OP_BR);
8815 ip++;
8817 target = ip + 4 + (gint32)read32(ip);
8818 ip += 4;
8819 GET_BBLOCK (cfg, tblock, target);
8820 link_bblock (cfg, bblock, tblock);
8821 ins->inst_target_bb = tblock;
8822 if (sp != stack_start) {
8823 handle_stack_args (cfg, stack_start, sp - stack_start);
8824 sp = stack_start;
8825 CHECK_UNVERIFIABLE (cfg);
8828 MONO_ADD_INS (bblock, ins);
8830 start_new_bblock = 1;
8831 inline_costs += BRANCH_COST;
8832 break;
8833 case CEE_BRFALSE_S:
8834 case CEE_BRTRUE_S:
8835 case CEE_BRFALSE:
8836 case CEE_BRTRUE: {
8837 MonoInst *cmp;
8838 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8839 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8840 guint32 opsize = is_short ? 1 : 4;
8842 CHECK_OPSIZE (opsize);
8843 CHECK_STACK (1);
8844 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8845 UNVERIFIED;
8846 ip ++;
8847 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8848 ip += opsize;
8850 sp--;
8852 GET_BBLOCK (cfg, tblock, target);
8853 link_bblock (cfg, bblock, tblock);
8854 GET_BBLOCK (cfg, tblock, ip);
8855 link_bblock (cfg, bblock, tblock);
8857 if (sp != stack_start) {
8858 handle_stack_args (cfg, stack_start, sp - stack_start);
8859 CHECK_UNVERIFIABLE (cfg);
8862 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8863 cmp->sreg1 = sp [0]->dreg;
8864 type_from_op (cmp, sp [0], NULL);
8865 CHECK_TYPE (cmp);
8867 #if SIZEOF_REGISTER == 4
8868 if (cmp->opcode == OP_LCOMPARE_IMM) {
8869 /* Convert it to OP_LCOMPARE */
8870 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8871 ins->type = STACK_I8;
8872 ins->dreg = alloc_dreg (cfg, STACK_I8);
8873 ins->inst_l = 0;
8874 MONO_ADD_INS (bblock, ins);
8875 cmp->opcode = OP_LCOMPARE;
8876 cmp->sreg2 = ins->dreg;
8878 #endif
8879 MONO_ADD_INS (bblock, cmp);
8881 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8882 type_from_op (ins, sp [0], NULL);
8883 MONO_ADD_INS (bblock, ins);
8884 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8885 GET_BBLOCK (cfg, tblock, target);
8886 ins->inst_true_bb = tblock;
8887 GET_BBLOCK (cfg, tblock, ip);
8888 ins->inst_false_bb = tblock;
8889 start_new_bblock = 2;
8891 sp = stack_start;
8892 inline_costs += BRANCH_COST;
8893 break;
8895 case CEE_BEQ:
8896 case CEE_BGE:
8897 case CEE_BGT:
8898 case CEE_BLE:
8899 case CEE_BLT:
8900 case CEE_BNE_UN:
8901 case CEE_BGE_UN:
8902 case CEE_BGT_UN:
8903 case CEE_BLE_UN:
8904 case CEE_BLT_UN:
8905 CHECK_OPSIZE (5);
8906 CHECK_STACK (2);
8907 MONO_INST_NEW (cfg, ins, *ip);
8908 ip++;
8909 target = ip + 4 + (gint32)read32(ip);
8910 ip += 4;
8912 ADD_BINCOND (NULL);
8914 sp = stack_start;
8915 inline_costs += BRANCH_COST;
8916 break;
8917 case CEE_SWITCH: {
8918 MonoInst *src1;
8919 MonoBasicBlock **targets;
8920 MonoBasicBlock *default_bblock;
8921 MonoJumpInfoBBTable *table;
8922 int offset_reg = alloc_preg (cfg);
8923 int target_reg = alloc_preg (cfg);
8924 int table_reg = alloc_preg (cfg);
8925 int sum_reg = alloc_preg (cfg);
8926 gboolean use_op_switch;
8928 CHECK_OPSIZE (5);
8929 CHECK_STACK (1);
8930 n = read32 (ip + 1);
8931 --sp;
8932 src1 = sp [0];
8933 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8934 UNVERIFIED;
8936 ip += 5;
8937 CHECK_OPSIZE (n * sizeof (guint32));
8938 target = ip + n * sizeof (guint32);
8940 GET_BBLOCK (cfg, default_bblock, target);
8941 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8943 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8944 for (i = 0; i < n; ++i) {
8945 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8946 targets [i] = tblock;
8947 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8948 ip += 4;
8951 if (sp != stack_start) {
8953 * Link the current bb with the targets as well, so handle_stack_args
8954 * will set their in_stack correctly.
8956 link_bblock (cfg, bblock, default_bblock);
8957 for (i = 0; i < n; ++i)
8958 link_bblock (cfg, bblock, targets [i]);
8960 handle_stack_args (cfg, stack_start, sp - stack_start);
8961 sp = stack_start;
8962 CHECK_UNVERIFIABLE (cfg);
8965 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8967 bblock = cfg->cbb;
8969 for (i = 0; i < n; ++i)
8970 link_bblock (cfg, bblock, targets [i]);
8972 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8973 table->table = targets;
8974 table->table_size = n;
8976 use_op_switch = FALSE;
8977 #ifdef TARGET_ARM
8978 /* ARM implements SWITCH statements differently */
8979 /* FIXME: Make it use the generic implementation */
8980 if (!cfg->compile_aot)
8981 use_op_switch = TRUE;
8982 #endif
8984 if (COMPILE_LLVM (cfg))
8985 use_op_switch = TRUE;
8987 cfg->cbb->has_jump_table = 1;
8989 if (use_op_switch) {
8990 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8991 ins->sreg1 = src1->dreg;
8992 ins->inst_p0 = table;
8993 ins->inst_many_bb = targets;
8994 ins->klass = GUINT_TO_POINTER (n);
8995 MONO_ADD_INS (cfg->cbb, ins);
8996 } else {
8997 if (sizeof (gpointer) == 8)
8998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8999 else
9000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9002 #if SIZEOF_REGISTER == 8
9003 /* The upper word might not be zero, and we add it to a 64 bit address later */
9004 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9005 #endif
9007 if (cfg->compile_aot) {
9008 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9009 } else {
9010 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9011 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9012 ins->inst_p0 = table;
9013 ins->dreg = table_reg;
9014 MONO_ADD_INS (cfg->cbb, ins);
9017 /* FIXME: Use load_memindex */
9018 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9020 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9022 start_new_bblock = 1;
9023 inline_costs += (BRANCH_COST * 2);
9024 break;
9026 case CEE_LDIND_I1:
9027 case CEE_LDIND_U1:
9028 case CEE_LDIND_I2:
9029 case CEE_LDIND_U2:
9030 case CEE_LDIND_I4:
9031 case CEE_LDIND_U4:
9032 case CEE_LDIND_I8:
9033 case CEE_LDIND_I:
9034 case CEE_LDIND_R4:
9035 case CEE_LDIND_R8:
9036 case CEE_LDIND_REF:
9037 CHECK_STACK (1);
9038 --sp;
9040 switch (*ip) {
9041 case CEE_LDIND_R4:
9042 case CEE_LDIND_R8:
9043 dreg = alloc_freg (cfg);
9044 break;
9045 case CEE_LDIND_I8:
9046 dreg = alloc_lreg (cfg);
9047 break;
9048 case CEE_LDIND_REF:
9049 dreg = alloc_ireg_ref (cfg);
9050 break;
9051 default:
9052 dreg = alloc_preg (cfg);
9055 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9056 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9057 ins->flags |= ins_flag;
9058 MONO_ADD_INS (bblock, ins);
9059 *sp++ = ins;
9060 if (ins_flag & MONO_INST_VOLATILE) {
9061 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9062 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9063 emit_memory_barrier (cfg, FullBarrier);
9065 ins_flag = 0;
9066 ++ip;
9067 break;
9068 case CEE_STIND_REF:
9069 case CEE_STIND_I1:
9070 case CEE_STIND_I2:
9071 case CEE_STIND_I4:
9072 case CEE_STIND_I8:
9073 case CEE_STIND_R4:
9074 case CEE_STIND_R8:
9075 case CEE_STIND_I:
9076 CHECK_STACK (2);
9077 sp -= 2;
9079 if (ins_flag & MONO_INST_VOLATILE) {
9080 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9081 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9082 emit_memory_barrier (cfg, FullBarrier);
9085 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9086 ins->flags |= ins_flag;
9087 ins_flag = 0;
9089 MONO_ADD_INS (bblock, ins);
9091 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9092 emit_write_barrier (cfg, sp [0], sp [1]);
9094 inline_costs += 1;
9095 ++ip;
9096 break;
9098 case CEE_MUL:
9099 CHECK_STACK (2);
9101 MONO_INST_NEW (cfg, ins, (*ip));
9102 sp -= 2;
9103 ins->sreg1 = sp [0]->dreg;
9104 ins->sreg2 = sp [1]->dreg;
9105 type_from_op (ins, sp [0], sp [1]);
9106 CHECK_TYPE (ins);
9107 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9109 /* Use the immediate opcodes if possible */
9110 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9111 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9112 if (imm_opcode != -1) {
9113 ins->opcode = imm_opcode;
9114 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9115 ins->sreg2 = -1;
9117 sp [1]->opcode = OP_NOP;
9121 MONO_ADD_INS ((cfg)->cbb, (ins));
9123 *sp++ = mono_decompose_opcode (cfg, ins);
9124 ip++;
9125 break;
9126 case CEE_ADD:
9127 case CEE_SUB:
9128 case CEE_DIV:
9129 case CEE_DIV_UN:
9130 case CEE_REM:
9131 case CEE_REM_UN:
9132 case CEE_AND:
9133 case CEE_OR:
9134 case CEE_XOR:
9135 case CEE_SHL:
9136 case CEE_SHR:
9137 case CEE_SHR_UN:
9138 CHECK_STACK (2);
9140 MONO_INST_NEW (cfg, ins, (*ip));
9141 sp -= 2;
9142 ins->sreg1 = sp [0]->dreg;
9143 ins->sreg2 = sp [1]->dreg;
9144 type_from_op (ins, sp [0], sp [1]);
9145 CHECK_TYPE (ins);
9146 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9147 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9149 /* FIXME: Pass opcode to is_inst_imm */
9151 /* Use the immediate opcodes if possible */
9152 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9153 int imm_opcode;
9155 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9156 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9157 /* Keep emulated opcodes which are optimized away later */
9158 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9159 imm_opcode = mono_op_to_op_imm (ins->opcode);
9161 #endif
9162 if (imm_opcode != -1) {
9163 ins->opcode = imm_opcode;
9164 if (sp [1]->opcode == OP_I8CONST) {
9165 #if SIZEOF_REGISTER == 8
9166 ins->inst_imm = sp [1]->inst_l;
9167 #else
9168 ins->inst_ls_word = sp [1]->inst_ls_word;
9169 ins->inst_ms_word = sp [1]->inst_ms_word;
9170 #endif
9172 else
9173 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9174 ins->sreg2 = -1;
9176 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9177 if (sp [1]->next == NULL)
9178 sp [1]->opcode = OP_NOP;
9181 MONO_ADD_INS ((cfg)->cbb, (ins));
9183 *sp++ = mono_decompose_opcode (cfg, ins);
9184 ip++;
9185 break;
9186 case CEE_NEG:
9187 case CEE_NOT:
9188 case CEE_CONV_I1:
9189 case CEE_CONV_I2:
9190 case CEE_CONV_I4:
9191 case CEE_CONV_R4:
9192 case CEE_CONV_R8:
9193 case CEE_CONV_U4:
9194 case CEE_CONV_I8:
9195 case CEE_CONV_U8:
9196 case CEE_CONV_OVF_I8:
9197 case CEE_CONV_OVF_U8:
9198 case CEE_CONV_R_UN:
9199 CHECK_STACK (1);
9201 /* Special case this earlier so we have long constants in the IR */
9202 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9203 int data = sp [-1]->inst_c0;
9204 sp [-1]->opcode = OP_I8CONST;
9205 sp [-1]->type = STACK_I8;
9206 #if SIZEOF_REGISTER == 8
9207 if ((*ip) == CEE_CONV_U8)
9208 sp [-1]->inst_c0 = (guint32)data;
9209 else
9210 sp [-1]->inst_c0 = data;
9211 #else
9212 sp [-1]->inst_ls_word = data;
9213 if ((*ip) == CEE_CONV_U8)
9214 sp [-1]->inst_ms_word = 0;
9215 else
9216 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9217 #endif
9218 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9220 else {
9221 ADD_UNOP (*ip);
9223 ip++;
9224 break;
9225 case CEE_CONV_OVF_I4:
9226 case CEE_CONV_OVF_I1:
9227 case CEE_CONV_OVF_I2:
9228 case CEE_CONV_OVF_I:
9229 case CEE_CONV_OVF_U:
9230 CHECK_STACK (1);
9232 if (sp [-1]->type == STACK_R8) {
9233 ADD_UNOP (CEE_CONV_OVF_I8);
9234 ADD_UNOP (*ip);
9235 } else {
9236 ADD_UNOP (*ip);
9238 ip++;
9239 break;
9240 case CEE_CONV_OVF_U1:
9241 case CEE_CONV_OVF_U2:
9242 case CEE_CONV_OVF_U4:
9243 CHECK_STACK (1);
9245 if (sp [-1]->type == STACK_R8) {
9246 ADD_UNOP (CEE_CONV_OVF_U8);
9247 ADD_UNOP (*ip);
9248 } else {
9249 ADD_UNOP (*ip);
9251 ip++;
9252 break;
9253 case CEE_CONV_OVF_I1_UN:
9254 case CEE_CONV_OVF_I2_UN:
9255 case CEE_CONV_OVF_I4_UN:
9256 case CEE_CONV_OVF_I8_UN:
9257 case CEE_CONV_OVF_U1_UN:
9258 case CEE_CONV_OVF_U2_UN:
9259 case CEE_CONV_OVF_U4_UN:
9260 case CEE_CONV_OVF_U8_UN:
9261 case CEE_CONV_OVF_I_UN:
9262 case CEE_CONV_OVF_U_UN:
9263 case CEE_CONV_U2:
9264 case CEE_CONV_U1:
9265 case CEE_CONV_I:
9266 case CEE_CONV_U:
9267 CHECK_STACK (1);
9268 ADD_UNOP (*ip);
9269 CHECK_CFG_EXCEPTION;
9270 ip++;
9271 break;
9272 case CEE_ADD_OVF:
9273 case CEE_ADD_OVF_UN:
9274 case CEE_MUL_OVF:
9275 case CEE_MUL_OVF_UN:
9276 case CEE_SUB_OVF:
9277 case CEE_SUB_OVF_UN:
9278 CHECK_STACK (2);
9279 ADD_BINOP (*ip);
9280 ip++;
9281 break;
9282 case CEE_CPOBJ:
9283 GSHAREDVT_FAILURE (*ip);
9284 CHECK_OPSIZE (5);
9285 CHECK_STACK (2);
9286 token = read32 (ip + 1);
9287 klass = mini_get_class (method, token, generic_context);
9288 CHECK_TYPELOAD (klass);
9289 sp -= 2;
9290 if (generic_class_is_reference_type (cfg, klass)) {
9291 MonoInst *store, *load;
9292 int dreg = alloc_ireg_ref (cfg);
9294 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9295 load->flags |= ins_flag;
9296 MONO_ADD_INS (cfg->cbb, load);
9298 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9299 store->flags |= ins_flag;
9300 MONO_ADD_INS (cfg->cbb, store);
9302 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9303 emit_write_barrier (cfg, sp [0], sp [1]);
9304 } else {
9305 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9307 ins_flag = 0;
9308 ip += 5;
9309 break;
9310 case CEE_LDOBJ: {
9311 int loc_index = -1;
9312 int stloc_len = 0;
9314 CHECK_OPSIZE (5);
9315 CHECK_STACK (1);
9316 --sp;
9317 token = read32 (ip + 1);
9318 klass = mini_get_class (method, token, generic_context);
9319 CHECK_TYPELOAD (klass);
9321 /* Optimize the common ldobj+stloc combination */
9322 switch (ip [5]) {
9323 case CEE_STLOC_S:
9324 loc_index = ip [6];
9325 stloc_len = 2;
9326 break;
9327 case CEE_STLOC_0:
9328 case CEE_STLOC_1:
9329 case CEE_STLOC_2:
9330 case CEE_STLOC_3:
9331 loc_index = ip [5] - CEE_STLOC_0;
9332 stloc_len = 1;
9333 break;
9334 default:
9335 break;
9338 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9339 CHECK_LOCAL (loc_index);
9341 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9342 ins->dreg = cfg->locals [loc_index]->dreg;
9343 ins->flags |= ins_flag;
9344 ip += 5;
9345 ip += stloc_len;
9346 if (ins_flag & MONO_INST_VOLATILE) {
9347 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9348 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9349 emit_memory_barrier (cfg, FullBarrier);
9351 ins_flag = 0;
9352 break;
9355 /* Optimize the ldobj+stobj combination */
9356 /* The reference case ends up being a load+store anyway */
9357 /* Skip this if the operation is volatile. */
9358 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9359 CHECK_STACK (1);
9361 sp --;
9363 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9365 ip += 5 + 5;
9366 ins_flag = 0;
9367 break;
9370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9371 ins->flags |= ins_flag;
9372 *sp++ = ins;
9374 if (ins_flag & MONO_INST_VOLATILE) {
9375 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9376 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9377 emit_memory_barrier (cfg, FullBarrier);
9380 ip += 5;
9381 ins_flag = 0;
9382 inline_costs += 1;
9383 break;
9385 case CEE_LDSTR:
9386 CHECK_STACK_OVF (1);
9387 CHECK_OPSIZE (5);
9388 n = read32 (ip + 1);
9390 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9391 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9392 ins->type = STACK_OBJ;
9393 *sp = ins;
9395 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9396 MonoInst *iargs [1];
9398 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9399 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9400 } else {
9401 if (cfg->opt & MONO_OPT_SHARED) {
9402 MonoInst *iargs [3];
9404 if (cfg->compile_aot) {
9405 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9407 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9408 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9409 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9410 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9411 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9412 } else {
9413 if (bblock->out_of_line) {
9414 MonoInst *iargs [2];
9416 if (image == mono_defaults.corlib) {
9418 * Avoid relocations in AOT and save some space by using a
9419 * version of helper_ldstr specialized to mscorlib.
9421 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9422 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9423 } else {
9424 /* Avoid creating the string object */
9425 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9426 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9427 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9430 else
9431 if (cfg->compile_aot) {
9432 NEW_LDSTRCONST (cfg, ins, image, n);
9433 *sp = ins;
9434 MONO_ADD_INS (bblock, ins);
9436 else {
9437 NEW_PCONST (cfg, ins, NULL);
9438 ins->type = STACK_OBJ;
9439 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9440 if (!ins->inst_p0)
9441 OUT_OF_MEMORY_FAILURE;
9443 *sp = ins;
9444 MONO_ADD_INS (bblock, ins);
9449 sp++;
9450 ip += 5;
9451 break;
9452 case CEE_NEWOBJ: {
9453 MonoInst *iargs [2];
9454 MonoMethodSignature *fsig;
9455 MonoInst this_ins;
9456 MonoInst *alloc;
9457 MonoInst *vtable_arg = NULL;
9459 CHECK_OPSIZE (5);
9460 token = read32 (ip + 1);
9461 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9462 if (!cmethod || mono_loader_get_last_error ())
9463 LOAD_ERROR;
9464 fsig = mono_method_get_signature (cmethod, image, token);
9465 if (!fsig)
9466 LOAD_ERROR;
9468 mono_save_token_info (cfg, image, token, cmethod);
9470 if (!mono_class_init (cmethod->klass))
9471 TYPE_LOAD_ERROR (cmethod->klass);
9473 context_used = mini_method_check_context_used (cfg, cmethod);
9475 if (mono_security_cas_enabled ()) {
9476 if (check_linkdemand (cfg, method, cmethod))
9477 INLINE_FAILURE ("linkdemand");
9478 CHECK_CFG_EXCEPTION;
9479 } else if (mono_security_core_clr_enabled ()) {
9480 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9483 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9484 emit_generic_class_init (cfg, cmethod->klass);
9485 CHECK_TYPELOAD (cmethod->klass);
9489 if (cfg->gsharedvt) {
9490 if (mini_is_gsharedvt_variable_signature (sig))
9491 GSHAREDVT_FAILURE (*ip);
9495 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9496 mono_method_is_generic_sharable (cmethod, TRUE)) {
9497 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9498 mono_class_vtable (cfg->domain, cmethod->klass);
9499 CHECK_TYPELOAD (cmethod->klass);
9501 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9502 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9503 } else {
9504 if (context_used) {
9505 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9506 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9507 } else {
9508 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9510 CHECK_TYPELOAD (cmethod->klass);
9511 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9516 n = fsig->param_count;
9517 CHECK_STACK (n);
9520 * Generate smaller code for the common newobj <exception> instruction in
9521 * argument checking code.
9523 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9524 is_exception_class (cmethod->klass) && n <= 2 &&
9525 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9526 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9527 MonoInst *iargs [3];
9529 g_assert (!vtable_arg);
9531 sp -= n;
9533 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9534 switch (n) {
9535 case 0:
9536 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9537 break;
9538 case 1:
9539 iargs [1] = sp [0];
9540 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9541 break;
9542 case 2:
9543 iargs [1] = sp [0];
9544 iargs [2] = sp [1];
9545 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9546 break;
9547 default:
9548 g_assert_not_reached ();
9551 ip += 5;
9552 inline_costs += 5;
9553 break;
9556 /* move the args to allow room for 'this' in the first position */
9557 while (n--) {
9558 --sp;
9559 sp [1] = sp [0];
9562 /* check_call_signature () requires sp[0] to be set */
9563 this_ins.type = STACK_OBJ;
9564 sp [0] = &this_ins;
9565 if (check_call_signature (cfg, fsig, sp))
9566 UNVERIFIED;
9568 iargs [0] = NULL;
9570 if (mini_class_is_system_array (cmethod->klass)) {
9571 g_assert (!vtable_arg);
9573 *sp = emit_get_rgctx_method (cfg, context_used,
9574 cmethod, MONO_RGCTX_INFO_METHOD);
9576 /* Avoid varargs in the common case */
9577 if (fsig->param_count == 1)
9578 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9579 else if (fsig->param_count == 2)
9580 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9581 else if (fsig->param_count == 3)
9582 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9583 else if (fsig->param_count == 4)
9584 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9585 else
9586 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9587 } else if (cmethod->string_ctor) {
9588 g_assert (!context_used);
9589 g_assert (!vtable_arg);
9590 /* we simply pass a null pointer */
9591 EMIT_NEW_PCONST (cfg, *sp, NULL);
9592 /* now call the string ctor */
9593 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9594 } else {
9595 MonoInst* callvirt_this_arg = NULL;
9597 if (cmethod->klass->valuetype) {
9598 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9599 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9600 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9602 alloc = NULL;
9605 * The code generated by mini_emit_virtual_call () expects
9606 * iargs [0] to be a boxed instance, but luckily the vcall
9607 * will be transformed into a normal call there.
9609 } else if (context_used) {
9610 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9611 *sp = alloc;
9612 } else {
9613 MonoVTable *vtable = NULL;
9615 if (!cfg->compile_aot)
9616 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9617 CHECK_TYPELOAD (cmethod->klass);
9620 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9621 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9622 * As a workaround, we call class cctors before allocating objects.
9624 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9625 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9626 if (cfg->verbose_level > 2)
9627 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9628 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9631 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9632 *sp = alloc;
9634 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9636 if (alloc)
9637 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9639 /* Now call the actual ctor */
9640 /* Avoid virtual calls to ctors if possible */
9641 if (mono_class_is_marshalbyref (cmethod->klass))
9642 callvirt_this_arg = sp [0];
9645 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9646 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9647 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9648 *sp = ins;
9649 sp++;
9652 CHECK_CFG_EXCEPTION;
9653 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9654 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9655 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9656 !g_list_find (dont_inline, cmethod)) {
9657 int costs;
9659 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9660 cfg->real_offset += 5;
9661 bblock = cfg->cbb;
9663 inline_costs += costs - 5;
9664 } else {
9665 INLINE_FAILURE ("inline failure");
9666 // FIXME-VT: Clean this up
9667 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9668 GSHAREDVT_FAILURE(*ip);
9669 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9671 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9672 MonoInst *addr;
9674 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9675 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9676 } else if (context_used &&
9677 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9678 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9679 MonoInst *cmethod_addr;
9681 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9683 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9684 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9686 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9687 } else {
9688 INLINE_FAILURE ("ctor call");
9689 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9690 callvirt_this_arg, NULL, vtable_arg);
9694 if (alloc == NULL) {
9695 /* Valuetype */
9696 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9697 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9698 *sp++= ins;
9700 else
9701 *sp++ = alloc;
9703 ip += 5;
9704 inline_costs += 5;
9705 break;
9707 case CEE_CASTCLASS:
9708 CHECK_STACK (1);
9709 --sp;
9710 CHECK_OPSIZE (5);
9711 token = read32 (ip + 1);
9712 klass = mini_get_class (method, token, generic_context);
9713 CHECK_TYPELOAD (klass);
9714 if (sp [0]->type != STACK_OBJ)
9715 UNVERIFIED;
9717 context_used = mini_class_check_context_used (cfg, klass);
9719 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9720 MonoInst *args [3];
9722 /* obj */
9723 args [0] = *sp;
9725 /* klass */
9726 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9728 /* inline cache*/
9729 if (cfg->compile_aot)
9730 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9731 else
9732 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9734 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9736 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9737 ip += 5;
9738 inline_costs += 2;
9739 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9740 MonoMethod *mono_castclass;
9741 MonoInst *iargs [1];
9742 int costs;
9744 mono_castclass = mono_marshal_get_castclass (klass);
9745 iargs [0] = sp [0];
9747 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9748 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9749 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9750 reset_cast_details (cfg);
9751 CHECK_CFG_EXCEPTION;
9752 g_assert (costs > 0);
9754 ip += 5;
9755 cfg->real_offset += 5;
9756 bblock = cfg->cbb;
9758 *sp++ = iargs [0];
9760 inline_costs += costs;
9762 else {
9763 ins = handle_castclass (cfg, klass, *sp, context_used);
9764 CHECK_CFG_EXCEPTION;
9765 bblock = cfg->cbb;
9766 *sp ++ = ins;
9767 ip += 5;
9769 break;
9770 case CEE_ISINST: {
9771 CHECK_STACK (1);
9772 --sp;
9773 CHECK_OPSIZE (5);
9774 token = read32 (ip + 1);
9775 klass = mini_get_class (method, token, generic_context);
9776 CHECK_TYPELOAD (klass);
9777 if (sp [0]->type != STACK_OBJ)
9778 UNVERIFIED;
9780 context_used = mini_class_check_context_used (cfg, klass);
9782 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9783 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9784 MonoInst *args [3];
9786 /* obj */
9787 args [0] = *sp;
9789 /* klass */
9790 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9792 /* inline cache*/
9793 if (cfg->compile_aot)
9794 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9795 else
9796 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9798 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9799 ip += 5;
9800 inline_costs += 2;
9801 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9802 MonoMethod *mono_isinst;
9803 MonoInst *iargs [1];
9804 int costs;
9806 mono_isinst = mono_marshal_get_isinst (klass);
9807 iargs [0] = sp [0];
9809 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9810 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9811 CHECK_CFG_EXCEPTION;
9812 g_assert (costs > 0);
9814 ip += 5;
9815 cfg->real_offset += 5;
9816 bblock = cfg->cbb;
9818 *sp++= iargs [0];
9820 inline_costs += costs;
9822 else {
9823 ins = handle_isinst (cfg, klass, *sp, context_used);
9824 CHECK_CFG_EXCEPTION;
9825 bblock = cfg->cbb;
9826 *sp ++ = ins;
9827 ip += 5;
9829 break;
9831 case CEE_UNBOX_ANY: {
9832 CHECK_STACK (1);
9833 --sp;
9834 CHECK_OPSIZE (5);
9835 token = read32 (ip + 1);
9836 klass = mini_get_class (method, token, generic_context);
9837 CHECK_TYPELOAD (klass);
9839 mono_save_token_info (cfg, image, token, klass);
9841 context_used = mini_class_check_context_used (cfg, klass);
9843 if (mini_is_gsharedvt_klass (cfg, klass)) {
9844 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9845 sp ++;
9847 ip += 5;
9848 inline_costs += 2;
9849 break;
9852 if (generic_class_is_reference_type (cfg, klass)) {
9853 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9854 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9855 MonoInst *args [3];
9857 /* obj */
9858 args [0] = *sp;
9860 /* klass */
9861 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9863 /* inline cache*/
9864 /*FIXME AOT support*/
9865 if (cfg->compile_aot)
9866 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9867 else
9868 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9870 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9871 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9872 ip += 5;
9873 inline_costs += 2;
9874 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9875 MonoMethod *mono_castclass;
9876 MonoInst *iargs [1];
9877 int costs;
9879 mono_castclass = mono_marshal_get_castclass (klass);
9880 iargs [0] = sp [0];
9882 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9883 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9884 CHECK_CFG_EXCEPTION;
9885 g_assert (costs > 0);
9887 ip += 5;
9888 cfg->real_offset += 5;
9889 bblock = cfg->cbb;
9891 *sp++ = iargs [0];
9892 inline_costs += costs;
9893 } else {
9894 ins = handle_castclass (cfg, klass, *sp, context_used);
9895 CHECK_CFG_EXCEPTION;
9896 bblock = cfg->cbb;
9897 *sp ++ = ins;
9898 ip += 5;
9900 break;
9903 if (mono_class_is_nullable (klass)) {
9904 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9905 *sp++= ins;
9906 ip += 5;
9907 break;
9910 /* UNBOX */
9911 ins = handle_unbox (cfg, klass, sp, context_used);
9912 *sp = ins;
9914 ip += 5;
9916 /* LDOBJ */
9917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9918 *sp++ = ins;
9920 inline_costs += 2;
9921 break;
9923 case CEE_BOX: {
9924 MonoInst *val;
9926 CHECK_STACK (1);
9927 --sp;
9928 val = *sp;
9929 CHECK_OPSIZE (5);
9930 token = read32 (ip + 1);
9931 klass = mini_get_class (method, token, generic_context);
9932 CHECK_TYPELOAD (klass);
9934 mono_save_token_info (cfg, image, token, klass);
9936 context_used = mini_class_check_context_used (cfg, klass);
9938 if (generic_class_is_reference_type (cfg, klass)) {
9939 *sp++ = val;
9940 ip += 5;
9941 break;
9944 if (klass == mono_defaults.void_class)
9945 UNVERIFIED;
9946 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9947 UNVERIFIED;
9948 /* frequent check in generic code: box (struct), brtrue */
9950 // FIXME: LLVM can't handle the inconsistent bb linking
9951 if (!mono_class_is_nullable (klass) &&
9952 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9953 (ip [5] == CEE_BRTRUE ||
9954 ip [5] == CEE_BRTRUE_S ||
9955 ip [5] == CEE_BRFALSE ||
9956 ip [5] == CEE_BRFALSE_S)) {
9957 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9958 int dreg;
9959 MonoBasicBlock *true_bb, *false_bb;
9961 ip += 5;
9963 if (cfg->verbose_level > 3) {
9964 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9965 printf ("<box+brtrue opt>\n");
9968 switch (*ip) {
9969 case CEE_BRTRUE_S:
9970 case CEE_BRFALSE_S:
9971 CHECK_OPSIZE (2);
9972 ip++;
9973 target = ip + 1 + (signed char)(*ip);
9974 ip++;
9975 break;
9976 case CEE_BRTRUE:
9977 case CEE_BRFALSE:
9978 CHECK_OPSIZE (5);
9979 ip++;
9980 target = ip + 4 + (gint)(read32 (ip));
9981 ip += 4;
9982 break;
9983 default:
9984 g_assert_not_reached ();
9988 * We need to link both bblocks, since it is needed for handling stack
9989 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9990 * Branching to only one of them would lead to inconsistencies, so
9991 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9993 GET_BBLOCK (cfg, true_bb, target);
9994 GET_BBLOCK (cfg, false_bb, ip);
9996 mono_link_bblock (cfg, cfg->cbb, true_bb);
9997 mono_link_bblock (cfg, cfg->cbb, false_bb);
9999 if (sp != stack_start) {
10000 handle_stack_args (cfg, stack_start, sp - stack_start);
10001 sp = stack_start;
10002 CHECK_UNVERIFIABLE (cfg);
10005 if (COMPILE_LLVM (cfg)) {
10006 dreg = alloc_ireg (cfg);
10007 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10010 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10011 } else {
10012 /* The JIT can't eliminate the iconst+compare */
10013 MONO_INST_NEW (cfg, ins, OP_BR);
10014 ins->inst_target_bb = is_true ? true_bb : false_bb;
10015 MONO_ADD_INS (cfg->cbb, ins);
10018 start_new_bblock = 1;
10019 break;
10022 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10024 CHECK_CFG_EXCEPTION;
10025 ip += 5;
10026 inline_costs += 1;
10027 break;
10029 case CEE_UNBOX: {
10030 CHECK_STACK (1);
10031 --sp;
10032 CHECK_OPSIZE (5);
10033 token = read32 (ip + 1);
10034 klass = mini_get_class (method, token, generic_context);
10035 CHECK_TYPELOAD (klass);
10037 mono_save_token_info (cfg, image, token, klass);
10039 context_used = mini_class_check_context_used (cfg, klass);
10041 if (mono_class_is_nullable (klass)) {
10042 MonoInst *val;
10044 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10045 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10047 *sp++= ins;
10048 } else {
10049 ins = handle_unbox (cfg, klass, sp, context_used);
10050 *sp++ = ins;
10052 ip += 5;
10053 inline_costs += 2;
10054 break;
10056 case CEE_LDFLD:
10057 case CEE_LDFLDA:
10058 case CEE_STFLD:
10059 case CEE_LDSFLD:
10060 case CEE_LDSFLDA:
10061 case CEE_STSFLD: {
10062 MonoClassField *field;
10063 #ifndef DISABLE_REMOTING
10064 int costs;
10065 #endif
10066 guint foffset;
10067 gboolean is_instance;
10068 int op;
10069 gpointer addr = NULL;
10070 gboolean is_special_static;
10071 MonoType *ftype;
10072 MonoInst *store_val = NULL;
10073 MonoInst *thread_ins;
10075 op = *ip;
10076 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10077 if (is_instance) {
10078 if (op == CEE_STFLD) {
10079 CHECK_STACK (2);
10080 sp -= 2;
10081 store_val = sp [1];
10082 } else {
10083 CHECK_STACK (1);
10084 --sp;
10086 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10087 UNVERIFIED;
10088 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10089 UNVERIFIED;
10090 } else {
10091 if (op == CEE_STSFLD) {
10092 CHECK_STACK (1);
10093 sp--;
10094 store_val = sp [0];
10098 CHECK_OPSIZE (5);
10099 token = read32 (ip + 1);
10100 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10101 field = mono_method_get_wrapper_data (method, token);
10102 klass = field->parent;
10104 else {
10105 field = mono_field_from_token (image, token, &klass, generic_context);
10107 if (!field)
10108 LOAD_ERROR;
10109 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10110 FIELD_ACCESS_FAILURE;
10111 mono_class_init (klass);
10113 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10114 UNVERIFIED;
10116 /* if the class is Critical then transparent code cannot access it's fields */
10117 if (!is_instance && mono_security_core_clr_enabled ())
10118 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10120 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10121 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10122 if (mono_security_core_clr_enabled ())
10123 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10127 * LDFLD etc. is usable on static fields as well, so convert those cases to
10128 * the static case.
10130 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10131 switch (op) {
10132 case CEE_LDFLD:
10133 op = CEE_LDSFLD;
10134 break;
10135 case CEE_STFLD:
10136 op = CEE_STSFLD;
10137 break;
10138 case CEE_LDFLDA:
10139 op = CEE_LDSFLDA;
10140 break;
10141 default:
10142 g_assert_not_reached ();
10144 is_instance = FALSE;
10147 context_used = mini_class_check_context_used (cfg, klass);
10149 /* INSTANCE CASE */
10151 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10152 if (op == CEE_STFLD) {
10153 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10154 UNVERIFIED;
10155 #ifndef DISABLE_REMOTING
10156 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10157 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10158 MonoInst *iargs [5];
10160 GSHAREDVT_FAILURE (op);
10162 iargs [0] = sp [0];
10163 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10164 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10165 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10166 field->offset);
10167 iargs [4] = sp [1];
10169 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10170 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10171 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10172 CHECK_CFG_EXCEPTION;
10173 g_assert (costs > 0);
10175 cfg->real_offset += 5;
10176 bblock = cfg->cbb;
10178 inline_costs += costs;
10179 } else {
10180 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10182 } else
10183 #endif
10185 MonoInst *store;
10187 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10189 if (mini_is_gsharedvt_klass (cfg, klass)) {
10190 MonoInst *offset_ins;
10192 context_used = mini_class_check_context_used (cfg, klass);
10194 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10195 dreg = alloc_ireg_mp (cfg);
10196 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10197 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10198 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10199 } else {
10200 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10202 if (sp [0]->opcode != OP_LDADDR)
10203 store->flags |= MONO_INST_FAULT;
10205 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10206 /* insert call to write barrier */
10207 MonoInst *ptr;
10208 int dreg;
10210 dreg = alloc_ireg_mp (cfg);
10211 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10212 emit_write_barrier (cfg, ptr, sp [1]);
10215 store->flags |= ins_flag;
10217 ins_flag = 0;
10218 ip += 5;
10219 break;
10222 #ifndef DISABLE_REMOTING
10223 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10224 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10225 MonoInst *iargs [4];
10227 GSHAREDVT_FAILURE (op);
10229 iargs [0] = sp [0];
10230 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10231 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10232 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10233 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10234 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10235 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10236 CHECK_CFG_EXCEPTION;
10237 bblock = cfg->cbb;
10238 g_assert (costs > 0);
10240 cfg->real_offset += 5;
10242 *sp++ = iargs [0];
10244 inline_costs += costs;
10245 } else {
10246 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10247 *sp++ = ins;
10249 } else
10250 #endif
10251 if (is_instance) {
10252 if (sp [0]->type == STACK_VTYPE) {
10253 MonoInst *var;
10255 /* Have to compute the address of the variable */
10257 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10258 if (!var)
10259 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10260 else
10261 g_assert (var->klass == klass);
10263 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10264 sp [0] = ins;
10267 if (op == CEE_LDFLDA) {
10268 if (is_magic_tls_access (field)) {
10269 GSHAREDVT_FAILURE (*ip);
10270 ins = sp [0];
10271 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10272 } else {
10273 if (sp [0]->type == STACK_OBJ) {
10274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10275 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10278 dreg = alloc_ireg_mp (cfg);
10280 if (mini_is_gsharedvt_klass (cfg, klass)) {
10281 MonoInst *offset_ins;
10283 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10284 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10285 } else {
10286 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10288 ins->klass = mono_class_from_mono_type (field->type);
10289 ins->type = STACK_MP;
10290 *sp++ = ins;
10292 } else {
10293 MonoInst *load;
10295 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10297 if (mini_is_gsharedvt_klass (cfg, klass)) {
10298 MonoInst *offset_ins;
10300 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10301 dreg = alloc_ireg_mp (cfg);
10302 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10303 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10304 } else {
10305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10307 load->flags |= ins_flag;
10308 if (sp [0]->opcode != OP_LDADDR)
10309 load->flags |= MONO_INST_FAULT;
10310 *sp++ = load;
10314 if (is_instance) {
10315 ins_flag = 0;
10316 ip += 5;
10317 break;
10320 /* STATIC CASE */
10323 * We can only support shared generic static
10324 * field access on architectures where the
10325 * trampoline code has been extended to handle
10326 * the generic class init.
10328 #ifndef MONO_ARCH_VTABLE_REG
10329 GENERIC_SHARING_FAILURE (op);
10330 #endif
10332 context_used = mini_class_check_context_used (cfg, klass);
10334 ftype = mono_field_get_type (field);
10336 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10337 UNVERIFIED;
10339 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10340 * to be called here.
10342 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10343 mono_class_vtable (cfg->domain, klass);
10344 CHECK_TYPELOAD (klass);
10346 mono_domain_lock (cfg->domain);
10347 if (cfg->domain->special_static_fields)
10348 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10349 mono_domain_unlock (cfg->domain);
10351 is_special_static = mono_class_field_is_special_static (field);
10353 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10354 thread_ins = mono_get_thread_intrinsic (cfg);
10355 else
10356 thread_ins = NULL;
10358 /* Generate IR to compute the field address */
10359 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10361 * Fast access to TLS data
10362 * Inline version of get_thread_static_data () in
10363 * threads.c.
10365 guint32 offset;
10366 int idx, static_data_reg, array_reg, dreg;
10368 GSHAREDVT_FAILURE (op);
10370 // offset &= 0x7fffffff;
10371 // idx = (offset >> 24) - 1;
10372 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10373 MONO_ADD_INS (cfg->cbb, thread_ins);
10374 static_data_reg = alloc_ireg (cfg);
10375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10377 if (cfg->compile_aot) {
10378 int offset_reg, offset2_reg, idx_reg;
10380 /* For TLS variables, this will return the TLS offset */
10381 EMIT_NEW_SFLDACONST (cfg, ins, field);
10382 offset_reg = ins->dreg;
10383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10384 idx_reg = alloc_ireg (cfg);
10385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10386 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10388 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10389 array_reg = alloc_ireg (cfg);
10390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10391 offset2_reg = alloc_ireg (cfg);
10392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10393 dreg = alloc_ireg (cfg);
10394 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10395 } else {
10396 offset = (gsize)addr & 0x7fffffff;
10397 idx = (offset >> 24) - 1;
10399 array_reg = alloc_ireg (cfg);
10400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10401 dreg = alloc_ireg (cfg);
10402 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10404 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10405 (cfg->compile_aot && is_special_static) ||
10406 (context_used && is_special_static)) {
10407 MonoInst *iargs [2];
10409 g_assert (field->parent);
10410 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10411 if (context_used) {
10412 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10413 field, MONO_RGCTX_INFO_CLASS_FIELD);
10414 } else {
10415 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10417 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10418 } else if (context_used) {
10419 MonoInst *static_data;
10422 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10423 method->klass->name_space, method->klass->name, method->name,
10424 depth, field->offset);
10427 if (mono_class_needs_cctor_run (klass, method))
10428 emit_generic_class_init (cfg, klass);
10431 * The pointer we're computing here is
10433 * super_info.static_data + field->offset
10435 static_data = emit_get_rgctx_klass (cfg, context_used,
10436 klass, MONO_RGCTX_INFO_STATIC_DATA);
10438 if (mini_is_gsharedvt_klass (cfg, klass)) {
10439 MonoInst *offset_ins;
10441 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10442 dreg = alloc_ireg_mp (cfg);
10443 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10444 } else if (field->offset == 0) {
10445 ins = static_data;
10446 } else {
10447 int addr_reg = mono_alloc_preg (cfg);
10448 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10450 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10451 MonoInst *iargs [2];
10453 g_assert (field->parent);
10454 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10455 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10456 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10457 } else {
10458 MonoVTable *vtable = NULL;
10460 if (!cfg->compile_aot)
10461 vtable = mono_class_vtable (cfg->domain, klass);
10462 CHECK_TYPELOAD (klass);
10464 if (!addr) {
10465 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10466 if (!(g_slist_find (class_inits, klass))) {
10467 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10468 if (cfg->verbose_level > 2)
10469 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10470 class_inits = g_slist_prepend (class_inits, klass);
10472 } else {
10473 if (cfg->run_cctors) {
10474 MonoException *ex;
10475 /* This makes so that inline cannot trigger */
10476 /* .cctors: too many apps depend on them */
10477 /* running with a specific order... */
10478 g_assert (vtable);
10479 if (! vtable->initialized)
10480 INLINE_FAILURE ("class init");
10481 ex = mono_runtime_class_init_full (vtable, FALSE);
10482 if (ex) {
10483 set_exception_object (cfg, ex);
10484 goto exception_exit;
10488 if (cfg->compile_aot)
10489 EMIT_NEW_SFLDACONST (cfg, ins, field);
10490 else {
10491 g_assert (vtable);
10492 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10493 g_assert (addr);
10494 EMIT_NEW_PCONST (cfg, ins, addr);
10496 } else {
10497 MonoInst *iargs [1];
10498 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10499 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10503 /* Generate IR to do the actual load/store operation */
10505 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10506 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10507 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10508 emit_memory_barrier (cfg, FullBarrier);
10511 if (op == CEE_LDSFLDA) {
10512 ins->klass = mono_class_from_mono_type (ftype);
10513 ins->type = STACK_PTR;
10514 *sp++ = ins;
10515 } else if (op == CEE_STSFLD) {
10516 MonoInst *store;
10518 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10519 store->flags |= ins_flag;
10520 } else {
10521 gboolean is_const = FALSE;
10522 MonoVTable *vtable = NULL;
10523 gpointer addr = NULL;
10525 if (!context_used) {
10526 vtable = mono_class_vtable (cfg->domain, klass);
10527 CHECK_TYPELOAD (klass);
10529 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10530 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10531 int ro_type = ftype->type;
10532 if (!addr)
10533 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10534 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10535 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10538 GSHAREDVT_FAILURE (op);
10540 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10541 is_const = TRUE;
10542 switch (ro_type) {
10543 case MONO_TYPE_BOOLEAN:
10544 case MONO_TYPE_U1:
10545 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10546 sp++;
10547 break;
10548 case MONO_TYPE_I1:
10549 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10550 sp++;
10551 break;
10552 case MONO_TYPE_CHAR:
10553 case MONO_TYPE_U2:
10554 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10555 sp++;
10556 break;
10557 case MONO_TYPE_I2:
10558 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10559 sp++;
10560 break;
10561 break;
10562 case MONO_TYPE_I4:
10563 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10564 sp++;
10565 break;
10566 case MONO_TYPE_U4:
10567 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10568 sp++;
10569 break;
10570 case MONO_TYPE_I:
10571 case MONO_TYPE_U:
10572 case MONO_TYPE_PTR:
10573 case MONO_TYPE_FNPTR:
10574 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10575 type_to_eval_stack_type ((cfg), field->type, *sp);
10576 sp++;
10577 break;
10578 case MONO_TYPE_STRING:
10579 case MONO_TYPE_OBJECT:
10580 case MONO_TYPE_CLASS:
10581 case MONO_TYPE_SZARRAY:
10582 case MONO_TYPE_ARRAY:
10583 if (!mono_gc_is_moving ()) {
10584 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10585 type_to_eval_stack_type ((cfg), field->type, *sp);
10586 sp++;
10587 } else {
10588 is_const = FALSE;
10590 break;
10591 case MONO_TYPE_I8:
10592 case MONO_TYPE_U8:
10593 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10594 sp++;
10595 break;
10596 case MONO_TYPE_R4:
10597 case MONO_TYPE_R8:
10598 case MONO_TYPE_VALUETYPE:
10599 default:
10600 is_const = FALSE;
10601 break;
10605 if (!is_const) {
10606 MonoInst *load;
10608 CHECK_STACK_OVF (1);
10610 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10611 load->flags |= ins_flag;
10612 ins_flag = 0;
10613 *sp++ = load;
10617 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10618 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10619 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10620 emit_memory_barrier (cfg, FullBarrier);
10623 ins_flag = 0;
10624 ip += 5;
10625 break;
10627 case CEE_STOBJ:
10628 CHECK_STACK (2);
10629 sp -= 2;
10630 CHECK_OPSIZE (5);
10631 token = read32 (ip + 1);
10632 klass = mini_get_class (method, token, generic_context);
10633 CHECK_TYPELOAD (klass);
10634 if (ins_flag & MONO_INST_VOLATILE) {
10635 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10636 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10637 emit_memory_barrier (cfg, FullBarrier);
10639 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10640 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10641 ins->flags |= ins_flag;
10642 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10643 generic_class_is_reference_type (cfg, klass)) {
10644 /* insert call to write barrier */
10645 emit_write_barrier (cfg, sp [0], sp [1]);
10647 ins_flag = 0;
10648 ip += 5;
10649 inline_costs += 1;
10650 break;
10653 * Array opcodes
10655 case CEE_NEWARR: {
10656 MonoInst *len_ins;
10657 const char *data_ptr;
10658 int data_size = 0;
10659 guint32 field_token;
10661 CHECK_STACK (1);
10662 --sp;
10664 CHECK_OPSIZE (5);
10665 token = read32 (ip + 1);
10667 klass = mini_get_class (method, token, generic_context);
10668 CHECK_TYPELOAD (klass);
10670 context_used = mini_class_check_context_used (cfg, klass);
10672 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10673 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10674 ins->sreg1 = sp [0]->dreg;
10675 ins->type = STACK_I4;
10676 ins->dreg = alloc_ireg (cfg);
10677 MONO_ADD_INS (cfg->cbb, ins);
10678 *sp = mono_decompose_opcode (cfg, ins);
10681 if (context_used) {
10682 MonoInst *args [3];
10683 MonoClass *array_class = mono_array_class_get (klass, 1);
10684 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10686 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10688 /* vtable */
10689 args [0] = emit_get_rgctx_klass (cfg, context_used,
10690 array_class, MONO_RGCTX_INFO_VTABLE);
10691 /* array len */
10692 args [1] = sp [0];
10694 if (managed_alloc)
10695 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10696 else
10697 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10698 } else {
10699 if (cfg->opt & MONO_OPT_SHARED) {
10700 /* Decompose now to avoid problems with references to the domainvar */
10701 MonoInst *iargs [3];
10703 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10704 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10705 iargs [2] = sp [0];
10707 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10708 } else {
10709 /* Decompose later since it is needed by abcrem */
10710 MonoClass *array_type = mono_array_class_get (klass, 1);
10711 mono_class_vtable (cfg->domain, array_type);
10712 CHECK_TYPELOAD (array_type);
10714 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10715 ins->dreg = alloc_ireg_ref (cfg);
10716 ins->sreg1 = sp [0]->dreg;
10717 ins->inst_newa_class = klass;
10718 ins->type = STACK_OBJ;
10719 ins->klass = array_type;
10720 MONO_ADD_INS (cfg->cbb, ins);
10721 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10722 cfg->cbb->has_array_access = TRUE;
10724 /* Needed so mono_emit_load_get_addr () gets called */
10725 mono_get_got_var (cfg);
10729 len_ins = sp [0];
10730 ip += 5;
10731 *sp++ = ins;
10732 inline_costs += 1;
10735 * we inline/optimize the initialization sequence if possible.
10736 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10737 * for small sizes open code the memcpy
10738 * ensure the rva field is big enough
10740 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10741 MonoMethod *memcpy_method = get_memcpy_method ();
10742 MonoInst *iargs [3];
10743 int add_reg = alloc_ireg_mp (cfg);
10745 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10746 if (cfg->compile_aot) {
10747 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10748 } else {
10749 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10751 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10752 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10753 ip += 11;
10756 break;
10758 case CEE_LDLEN:
10759 CHECK_STACK (1);
10760 --sp;
10761 if (sp [0]->type != STACK_OBJ)
10762 UNVERIFIED;
10764 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10765 ins->dreg = alloc_preg (cfg);
10766 ins->sreg1 = sp [0]->dreg;
10767 ins->type = STACK_I4;
10768 /* This flag will be inherited by the decomposition */
10769 ins->flags |= MONO_INST_FAULT;
10770 MONO_ADD_INS (cfg->cbb, ins);
10771 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10772 cfg->cbb->has_array_access = TRUE;
10773 ip ++;
10774 *sp++ = ins;
10775 break;
10776 case CEE_LDELEMA:
10777 CHECK_STACK (2);
10778 sp -= 2;
10779 CHECK_OPSIZE (5);
10780 if (sp [0]->type != STACK_OBJ)
10781 UNVERIFIED;
10783 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10785 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10786 CHECK_TYPELOAD (klass);
10787 /* we need to make sure that this array is exactly the type it needs
10788 * to be for correctness. the wrappers are lax with their usage
10789 * so we need to ignore them here
10791 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10792 MonoClass *array_class = mono_array_class_get (klass, 1);
10793 mini_emit_check_array_type (cfg, sp [0], array_class);
10794 CHECK_TYPELOAD (array_class);
10797 readonly = FALSE;
10798 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10799 *sp++ = ins;
10800 ip += 5;
10801 break;
10802 case CEE_LDELEM:
10803 case CEE_LDELEM_I1:
10804 case CEE_LDELEM_U1:
10805 case CEE_LDELEM_I2:
10806 case CEE_LDELEM_U2:
10807 case CEE_LDELEM_I4:
10808 case CEE_LDELEM_U4:
10809 case CEE_LDELEM_I8:
10810 case CEE_LDELEM_I:
10811 case CEE_LDELEM_R4:
10812 case CEE_LDELEM_R8:
10813 case CEE_LDELEM_REF: {
10814 MonoInst *addr;
10816 CHECK_STACK (2);
10817 sp -= 2;
10819 if (*ip == CEE_LDELEM) {
10820 CHECK_OPSIZE (5);
10821 token = read32 (ip + 1);
10822 klass = mini_get_class (method, token, generic_context);
10823 CHECK_TYPELOAD (klass);
10824 mono_class_init (klass);
10826 else
10827 klass = array_access_to_klass (*ip);
10829 if (sp [0]->type != STACK_OBJ)
10830 UNVERIFIED;
10832 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10834 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10835 // FIXME-VT: OP_ICONST optimization
10836 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10837 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10838 ins->opcode = OP_LOADV_MEMBASE;
10839 } else if (sp [1]->opcode == OP_ICONST) {
10840 int array_reg = sp [0]->dreg;
10841 int index_reg = sp [1]->dreg;
10842 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10844 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10845 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10846 } else {
10847 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10848 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10850 *sp++ = ins;
10851 if (*ip == CEE_LDELEM)
10852 ip += 5;
10853 else
10854 ++ip;
10855 break;
10857 case CEE_STELEM_I:
10858 case CEE_STELEM_I1:
10859 case CEE_STELEM_I2:
10860 case CEE_STELEM_I4:
10861 case CEE_STELEM_I8:
10862 case CEE_STELEM_R4:
10863 case CEE_STELEM_R8:
10864 case CEE_STELEM_REF:
10865 case CEE_STELEM: {
10866 CHECK_STACK (3);
10867 sp -= 3;
10869 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10871 if (*ip == CEE_STELEM) {
10872 CHECK_OPSIZE (5);
10873 token = read32 (ip + 1);
10874 klass = mini_get_class (method, token, generic_context);
10875 CHECK_TYPELOAD (klass);
10876 mono_class_init (klass);
10878 else
10879 klass = array_access_to_klass (*ip);
10881 if (sp [0]->type != STACK_OBJ)
10882 UNVERIFIED;
10884 emit_array_store (cfg, klass, sp, TRUE);
10886 if (*ip == CEE_STELEM)
10887 ip += 5;
10888 else
10889 ++ip;
10890 inline_costs += 1;
10891 break;
10893 case CEE_CKFINITE: {
10894 CHECK_STACK (1);
10895 --sp;
10897 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10898 ins->sreg1 = sp [0]->dreg;
10899 ins->dreg = alloc_freg (cfg);
10900 ins->type = STACK_R8;
10901 MONO_ADD_INS (bblock, ins);
10903 *sp++ = mono_decompose_opcode (cfg, ins);
10905 ++ip;
10906 break;
10908 case CEE_REFANYVAL: {
10909 MonoInst *src_var, *src;
10911 int klass_reg = alloc_preg (cfg);
10912 int dreg = alloc_preg (cfg);
10914 GSHAREDVT_FAILURE (*ip);
10916 CHECK_STACK (1);
10917 MONO_INST_NEW (cfg, ins, *ip);
10918 --sp;
10919 CHECK_OPSIZE (5);
10920 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10921 CHECK_TYPELOAD (klass);
10922 mono_class_init (klass);
10924 context_used = mini_class_check_context_used (cfg, klass);
10926 // FIXME:
10927 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10928 if (!src_var)
10929 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10930 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10933 if (context_used) {
10934 MonoInst *klass_ins;
10936 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10937 klass, MONO_RGCTX_INFO_KLASS);
10939 // FIXME:
10940 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10941 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10942 } else {
10943 mini_emit_class_check (cfg, klass_reg, klass);
10945 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10946 ins->type = STACK_MP;
10947 *sp++ = ins;
10948 ip += 5;
10949 break;
10951 case CEE_MKREFANY: {
10952 MonoInst *loc, *addr;
10954 GSHAREDVT_FAILURE (*ip);
10956 CHECK_STACK (1);
10957 MONO_INST_NEW (cfg, ins, *ip);
10958 --sp;
10959 CHECK_OPSIZE (5);
10960 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10961 CHECK_TYPELOAD (klass);
10962 mono_class_init (klass);
10964 context_used = mini_class_check_context_used (cfg, klass);
10966 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10967 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10969 if (context_used) {
10970 MonoInst *const_ins;
10971 int type_reg = alloc_preg (cfg);
10973 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10974 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10975 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10976 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10977 } else if (cfg->compile_aot) {
10978 int const_reg = alloc_preg (cfg);
10979 int type_reg = alloc_preg (cfg);
10981 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10982 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10983 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10984 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10985 } else {
10986 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10987 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10989 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10991 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10992 ins->type = STACK_VTYPE;
10993 ins->klass = mono_defaults.typed_reference_class;
10994 *sp++ = ins;
10995 ip += 5;
10996 break;
10998 case CEE_LDTOKEN: {
10999 gpointer handle;
11000 MonoClass *handle_class;
11002 CHECK_STACK_OVF (1);
11004 CHECK_OPSIZE (5);
11005 n = read32 (ip + 1);
11007 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11008 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11009 handle = mono_method_get_wrapper_data (method, n);
11010 handle_class = mono_method_get_wrapper_data (method, n + 1);
11011 if (handle_class == mono_defaults.typehandle_class)
11012 handle = &((MonoClass*)handle)->byval_arg;
11014 else {
11015 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11017 if (!handle)
11018 LOAD_ERROR;
11019 mono_class_init (handle_class);
11020 if (cfg->generic_sharing_context) {
11021 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11022 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11023 /* This case handles ldtoken
11024 of an open type, like for
11025 typeof(Gen<>). */
11026 context_used = 0;
11027 } else if (handle_class == mono_defaults.typehandle_class) {
11028 /* If we get a MONO_TYPE_CLASS
11029 then we need to provide the
11030 open type, not an
11031 instantiation of it. */
11032 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
11033 context_used = 0;
11034 else
11035 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11036 } else if (handle_class == mono_defaults.fieldhandle_class)
11037 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11038 else if (handle_class == mono_defaults.methodhandle_class)
11039 context_used = mini_method_check_context_used (cfg, handle);
11040 else
11041 g_assert_not_reached ();
11044 if ((cfg->opt & MONO_OPT_SHARED) &&
11045 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11046 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11047 MonoInst *addr, *vtvar, *iargs [3];
11048 int method_context_used;
11050 method_context_used = mini_method_check_context_used (cfg, method);
11052 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11054 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11055 EMIT_NEW_ICONST (cfg, iargs [1], n);
11056 if (method_context_used) {
11057 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11058 method, MONO_RGCTX_INFO_METHOD);
11059 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11060 } else {
11061 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11062 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11064 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11066 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11068 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11069 } else {
11070 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11071 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11072 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11073 (cmethod->klass == mono_defaults.systemtype_class) &&
11074 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11075 MonoClass *tclass = mono_class_from_mono_type (handle);
11077 mono_class_init (tclass);
11078 if (context_used) {
11079 ins = emit_get_rgctx_klass (cfg, context_used,
11080 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11081 } else if (cfg->compile_aot) {
11082 if (method->wrapper_type) {
11083 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11084 /* Special case for static synchronized wrappers */
11085 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11086 } else {
11087 /* FIXME: n is not a normal token */
11088 DISABLE_AOT (cfg);
11089 EMIT_NEW_PCONST (cfg, ins, NULL);
11091 } else {
11092 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11094 } else {
11095 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11097 ins->type = STACK_OBJ;
11098 ins->klass = cmethod->klass;
11099 ip += 5;
11100 } else {
11101 MonoInst *addr, *vtvar;
11103 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11105 if (context_used) {
11106 if (handle_class == mono_defaults.typehandle_class) {
11107 ins = emit_get_rgctx_klass (cfg, context_used,
11108 mono_class_from_mono_type (handle),
11109 MONO_RGCTX_INFO_TYPE);
11110 } else if (handle_class == mono_defaults.methodhandle_class) {
11111 ins = emit_get_rgctx_method (cfg, context_used,
11112 handle, MONO_RGCTX_INFO_METHOD);
11113 } else if (handle_class == mono_defaults.fieldhandle_class) {
11114 ins = emit_get_rgctx_field (cfg, context_used,
11115 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11116 } else {
11117 g_assert_not_reached ();
11119 } else if (cfg->compile_aot) {
11120 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11121 } else {
11122 EMIT_NEW_PCONST (cfg, ins, handle);
11124 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11125 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11126 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11130 *sp++ = ins;
11131 ip += 5;
11132 break;
11134 case CEE_THROW:
11135 CHECK_STACK (1);
11136 MONO_INST_NEW (cfg, ins, OP_THROW);
11137 --sp;
11138 ins->sreg1 = sp [0]->dreg;
11139 ip++;
11140 bblock->out_of_line = TRUE;
11141 MONO_ADD_INS (bblock, ins);
11142 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11143 MONO_ADD_INS (bblock, ins);
11144 sp = stack_start;
11146 link_bblock (cfg, bblock, end_bblock);
11147 start_new_bblock = 1;
11148 break;
11149 case CEE_ENDFINALLY:
11150 /* mono_save_seq_point_info () depends on this */
11151 if (sp != stack_start)
11152 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11153 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11154 MONO_ADD_INS (bblock, ins);
11155 ip++;
11156 start_new_bblock = 1;
11159 * Control will leave the method so empty the stack, otherwise
11160 * the next basic block will start with a nonempty stack.
11162 while (sp != stack_start) {
11163 sp--;
11165 break;
11166 case CEE_LEAVE:
11167 case CEE_LEAVE_S: {
11168 GList *handlers;
11170 if (*ip == CEE_LEAVE) {
11171 CHECK_OPSIZE (5);
11172 target = ip + 5 + (gint32)read32(ip + 1);
11173 } else {
11174 CHECK_OPSIZE (2);
11175 target = ip + 2 + (signed char)(ip [1]);
11178 /* empty the stack */
11179 while (sp != stack_start) {
11180 sp--;
11184 * If this leave statement is in a catch block, check for a
11185 * pending exception, and rethrow it if necessary.
11186 * We avoid doing this in runtime invoke wrappers, since those are called
11187 * by native code which expects the wrapper to catch all exceptions.
11189 for (i = 0; i < header->num_clauses; ++i) {
11190 MonoExceptionClause *clause = &header->clauses [i];
11193 * Use <= in the final comparison to handle clauses with multiple
11194 * leave statements, like in bug #78024.
11195 * The ordering of the exception clauses guarantees that we find the
11196 * innermost clause.
11198 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11199 MonoInst *exc_ins;
11200 MonoBasicBlock *dont_throw;
11203 MonoInst *load;
11205 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11208 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11210 NEW_BBLOCK (cfg, dont_throw);
11213 * Currently, we always rethrow the abort exception, despite the
11214 * fact that this is not correct. See thread6.cs for an example.
11215 * But propagating the abort exception is more important than
11216 * getting the semantics right.
11218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11219 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11220 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11222 MONO_START_BB (cfg, dont_throw);
11223 bblock = cfg->cbb;
11227 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11228 GList *tmp;
11229 MonoExceptionClause *clause;
11231 for (tmp = handlers; tmp; tmp = tmp->next) {
11232 clause = tmp->data;
11233 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11234 g_assert (tblock);
11235 link_bblock (cfg, bblock, tblock);
11236 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11237 ins->inst_target_bb = tblock;
11238 ins->inst_eh_block = clause;
11239 MONO_ADD_INS (bblock, ins);
11240 bblock->has_call_handler = 1;
11241 if (COMPILE_LLVM (cfg)) {
11242 MonoBasicBlock *target_bb;
11245 * Link the finally bblock with the target, since it will
11246 * conceptually branch there.
11247 * FIXME: Have to link the bblock containing the endfinally.
11249 GET_BBLOCK (cfg, target_bb, target);
11250 link_bblock (cfg, tblock, target_bb);
11253 g_list_free (handlers);
11256 MONO_INST_NEW (cfg, ins, OP_BR);
11257 MONO_ADD_INS (bblock, ins);
11258 GET_BBLOCK (cfg, tblock, target);
11259 link_bblock (cfg, bblock, tblock);
11260 ins->inst_target_bb = tblock;
11261 start_new_bblock = 1;
11263 if (*ip == CEE_LEAVE)
11264 ip += 5;
11265 else
11266 ip += 2;
11268 break;
11272 * Mono specific opcodes
11274 case MONO_CUSTOM_PREFIX: {
11276 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11278 CHECK_OPSIZE (2);
11279 switch (ip [1]) {
11280 case CEE_MONO_ICALL: {
11281 gpointer func;
11282 MonoJitICallInfo *info;
11284 token = read32 (ip + 2);
11285 func = mono_method_get_wrapper_data (method, token);
11286 info = mono_find_jit_icall_by_addr (func);
11287 if (!info)
11288 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11289 g_assert (info);
11291 CHECK_STACK (info->sig->param_count);
11292 sp -= info->sig->param_count;
11294 ins = mono_emit_jit_icall (cfg, info->func, sp);
11295 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11296 *sp++ = ins;
11298 ip += 6;
11299 inline_costs += 10 * num_calls++;
11301 break;
11303 case CEE_MONO_LDPTR: {
11304 gpointer ptr;
11306 CHECK_STACK_OVF (1);
11307 CHECK_OPSIZE (6);
11308 token = read32 (ip + 2);
11310 ptr = mono_method_get_wrapper_data (method, token);
11311 /* FIXME: Generalize this */
11312 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11313 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11314 *sp++ = ins;
11315 ip += 6;
11316 break;
11318 EMIT_NEW_PCONST (cfg, ins, ptr);
11319 *sp++ = ins;
11320 ip += 6;
11321 inline_costs += 10 * num_calls++;
11322 /* Can't embed random pointers into AOT code */
11323 DISABLE_AOT (cfg);
11324 break;
11326 case CEE_MONO_JIT_ICALL_ADDR: {
11327 MonoJitICallInfo *callinfo;
11328 gpointer ptr;
11330 CHECK_STACK_OVF (1);
11331 CHECK_OPSIZE (6);
11332 token = read32 (ip + 2);
11334 ptr = mono_method_get_wrapper_data (method, token);
11335 callinfo = mono_find_jit_icall_by_addr (ptr);
11336 g_assert (callinfo);
11337 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11338 *sp++ = ins;
11339 ip += 6;
11340 inline_costs += 10 * num_calls++;
11341 break;
11343 case CEE_MONO_ICALL_ADDR: {
11344 MonoMethod *cmethod;
11345 gpointer ptr;
11347 CHECK_STACK_OVF (1);
11348 CHECK_OPSIZE (6);
11349 token = read32 (ip + 2);
11351 cmethod = mono_method_get_wrapper_data (method, token);
11353 if (cfg->compile_aot) {
11354 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11355 } else {
11356 ptr = mono_lookup_internal_call (cmethod);
11357 g_assert (ptr);
11358 EMIT_NEW_PCONST (cfg, ins, ptr);
11360 *sp++ = ins;
11361 ip += 6;
11362 break;
11364 case CEE_MONO_VTADDR: {
11365 MonoInst *src_var, *src;
11367 CHECK_STACK (1);
11368 --sp;
11370 // FIXME:
11371 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11372 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11373 *sp++ = src;
11374 ip += 2;
11375 break;
11377 case CEE_MONO_NEWOBJ: {
11378 MonoInst *iargs [2];
11380 CHECK_STACK_OVF (1);
11381 CHECK_OPSIZE (6);
11382 token = read32 (ip + 2);
11383 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11384 mono_class_init (klass);
11385 NEW_DOMAINCONST (cfg, iargs [0]);
11386 MONO_ADD_INS (cfg->cbb, iargs [0]);
11387 NEW_CLASSCONST (cfg, iargs [1], klass);
11388 MONO_ADD_INS (cfg->cbb, iargs [1]);
11389 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11390 ip += 6;
11391 inline_costs += 10 * num_calls++;
11392 break;
11394 case CEE_MONO_OBJADDR:
11395 CHECK_STACK (1);
11396 --sp;
11397 MONO_INST_NEW (cfg, ins, OP_MOVE);
11398 ins->dreg = alloc_ireg_mp (cfg);
11399 ins->sreg1 = sp [0]->dreg;
11400 ins->type = STACK_MP;
11401 MONO_ADD_INS (cfg->cbb, ins);
11402 *sp++ = ins;
11403 ip += 2;
11404 break;
11405 case CEE_MONO_LDNATIVEOBJ:
11407 * Similar to LDOBJ, but instead load the unmanaged
11408 * representation of the vtype to the stack.
11410 CHECK_STACK (1);
11411 CHECK_OPSIZE (6);
11412 --sp;
11413 token = read32 (ip + 2);
11414 klass = mono_method_get_wrapper_data (method, token);
11415 g_assert (klass->valuetype);
11416 mono_class_init (klass);
11419 MonoInst *src, *dest, *temp;
11421 src = sp [0];
11422 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11423 temp->backend.is_pinvoke = 1;
11424 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11425 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11427 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11428 dest->type = STACK_VTYPE;
11429 dest->klass = klass;
11431 *sp ++ = dest;
11432 ip += 6;
11434 break;
11435 case CEE_MONO_RETOBJ: {
11437 * Same as RET, but return the native representation of a vtype
11438 * to the caller.
11440 g_assert (cfg->ret);
11441 g_assert (mono_method_signature (method)->pinvoke);
11442 CHECK_STACK (1);
11443 --sp;
11445 CHECK_OPSIZE (6);
11446 token = read32 (ip + 2);
11447 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11449 if (!cfg->vret_addr) {
11450 g_assert (cfg->ret_var_is_local);
11452 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11453 } else {
11454 EMIT_NEW_RETLOADA (cfg, ins);
11456 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11458 if (sp != stack_start)
11459 UNVERIFIED;
11461 MONO_INST_NEW (cfg, ins, OP_BR);
11462 ins->inst_target_bb = end_bblock;
11463 MONO_ADD_INS (bblock, ins);
11464 link_bblock (cfg, bblock, end_bblock);
11465 start_new_bblock = 1;
11466 ip += 6;
11467 break;
11469 case CEE_MONO_CISINST:
11470 case CEE_MONO_CCASTCLASS: {
11471 int token;
11472 CHECK_STACK (1);
11473 --sp;
11474 CHECK_OPSIZE (6);
11475 token = read32 (ip + 2);
11476 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11477 if (ip [1] == CEE_MONO_CISINST)
11478 ins = handle_cisinst (cfg, klass, sp [0]);
11479 else
11480 ins = handle_ccastclass (cfg, klass, sp [0]);
11481 bblock = cfg->cbb;
11482 *sp++ = ins;
11483 ip += 6;
11484 break;
11486 case CEE_MONO_SAVE_LMF:
11487 case CEE_MONO_RESTORE_LMF:
11488 #ifdef MONO_ARCH_HAVE_LMF_OPS
11489 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11490 MONO_ADD_INS (bblock, ins);
11491 cfg->need_lmf_area = TRUE;
11492 #endif
11493 ip += 2;
11494 break;
11495 case CEE_MONO_CLASSCONST:
11496 CHECK_STACK_OVF (1);
11497 CHECK_OPSIZE (6);
11498 token = read32 (ip + 2);
11499 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11500 *sp++ = ins;
11501 ip += 6;
11502 inline_costs += 10 * num_calls++;
11503 break;
11504 case CEE_MONO_NOT_TAKEN:
11505 bblock->out_of_line = TRUE;
11506 ip += 2;
11507 break;
11508 case CEE_MONO_TLS: {
11509 int key;
11511 CHECK_STACK_OVF (1);
11512 CHECK_OPSIZE (6);
11513 key = (gint32)read32 (ip + 2);
11514 g_assert (key < TLS_KEY_NUM);
11516 ins = mono_create_tls_get (cfg, key);
11517 if (!ins) {
11518 if (cfg->compile_aot) {
11519 DISABLE_AOT (cfg);
11520 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11521 ins->dreg = alloc_preg (cfg);
11522 ins->type = STACK_PTR;
11523 } else {
11524 g_assert_not_reached ();
11527 ins->type = STACK_PTR;
11528 MONO_ADD_INS (bblock, ins);
11529 *sp++ = ins;
11530 ip += 6;
11531 break;
11533 case CEE_MONO_DYN_CALL: {
11534 MonoCallInst *call;
11536 /* It would be easier to call a trampoline, but that would put an
11537 * extra frame on the stack, confusing exception handling. So
11538 * implement it inline using an opcode for now.
11541 if (!cfg->dyn_call_var) {
11542 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11543 /* prevent it from being register allocated */
11544 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11547 /* Has to use a call inst since it local regalloc expects it */
11548 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11549 ins = (MonoInst*)call;
11550 sp -= 2;
11551 ins->sreg1 = sp [0]->dreg;
11552 ins->sreg2 = sp [1]->dreg;
11553 MONO_ADD_INS (bblock, ins);
11555 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11557 ip += 2;
11558 inline_costs += 10 * num_calls++;
11560 break;
11562 case CEE_MONO_MEMORY_BARRIER: {
11563 CHECK_OPSIZE (5);
11564 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11565 ip += 5;
11566 break;
11568 case CEE_MONO_JIT_ATTACH: {
11569 MonoInst *args [16];
11570 MonoInst *ad_ins, *lmf_ins;
11571 MonoBasicBlock *next_bb = NULL;
11573 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11575 EMIT_NEW_PCONST (cfg, ins, NULL);
11576 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11578 #if TARGET_WIN32
11579 ad_ins = NULL;
11580 lmf_ins = NULL;
11581 #else
11582 ad_ins = mono_get_domain_intrinsic (cfg);
11583 lmf_ins = mono_get_lmf_intrinsic (cfg);
11584 #endif
11586 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11587 NEW_BBLOCK (cfg, next_bb);
11589 MONO_ADD_INS (cfg->cbb, ad_ins);
11590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11591 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11593 MONO_ADD_INS (cfg->cbb, lmf_ins);
11594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11598 if (cfg->compile_aot) {
11599 /* AOT code is only used in the root domain */
11600 EMIT_NEW_PCONST (cfg, args [0], NULL);
11601 } else {
11602 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11604 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11605 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11607 if (next_bb) {
11608 MONO_START_BB (cfg, next_bb);
11609 bblock = cfg->cbb;
11611 ip += 2;
11612 break;
11614 case CEE_MONO_JIT_DETACH: {
11615 MonoInst *args [16];
11617 /* Restore the original domain */
11618 dreg = alloc_ireg (cfg);
11619 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11620 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11621 ip += 2;
11622 break;
11624 default:
11625 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11626 break;
11628 break;
11631 case CEE_PREFIX1: {
11632 CHECK_OPSIZE (2);
11633 switch (ip [1]) {
11634 case CEE_ARGLIST: {
11635 /* somewhat similar to LDTOKEN */
11636 MonoInst *addr, *vtvar;
11637 CHECK_STACK_OVF (1);
11638 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11640 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11641 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11643 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11644 ins->type = STACK_VTYPE;
11645 ins->klass = mono_defaults.argumenthandle_class;
11646 *sp++ = ins;
11647 ip += 2;
11648 break;
11650 case CEE_CEQ:
11651 case CEE_CGT:
11652 case CEE_CGT_UN:
11653 case CEE_CLT:
11654 case CEE_CLT_UN: {
11655 MonoInst *cmp;
11656 CHECK_STACK (2);
11658 * The following transforms:
11659 * CEE_CEQ into OP_CEQ
11660 * CEE_CGT into OP_CGT
11661 * CEE_CGT_UN into OP_CGT_UN
11662 * CEE_CLT into OP_CLT
11663 * CEE_CLT_UN into OP_CLT_UN
11665 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11667 MONO_INST_NEW (cfg, ins, cmp->opcode);
11668 sp -= 2;
11669 cmp->sreg1 = sp [0]->dreg;
11670 cmp->sreg2 = sp [1]->dreg;
11671 type_from_op (cmp, sp [0], sp [1]);
11672 CHECK_TYPE (cmp);
11673 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11674 cmp->opcode = OP_LCOMPARE;
11675 else if (sp [0]->type == STACK_R8)
11676 cmp->opcode = OP_FCOMPARE;
11677 else
11678 cmp->opcode = OP_ICOMPARE;
11679 MONO_ADD_INS (bblock, cmp);
11680 ins->type = STACK_I4;
11681 ins->dreg = alloc_dreg (cfg, ins->type);
11682 type_from_op (ins, sp [0], sp [1]);
11684 if (cmp->opcode == OP_FCOMPARE) {
11686 * The backends expect the fceq opcodes to do the
11687 * comparison too.
11689 cmp->opcode = OP_NOP;
11690 ins->sreg1 = cmp->sreg1;
11691 ins->sreg2 = cmp->sreg2;
11693 MONO_ADD_INS (bblock, ins);
11694 *sp++ = ins;
11695 ip += 2;
11696 break;
11698 case CEE_LDFTN: {
11699 MonoInst *argconst;
11700 MonoMethod *cil_method;
11702 CHECK_STACK_OVF (1);
11703 CHECK_OPSIZE (6);
11704 n = read32 (ip + 2);
11705 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11706 if (!cmethod || mono_loader_get_last_error ())
11707 LOAD_ERROR;
11708 mono_class_init (cmethod->klass);
11710 mono_save_token_info (cfg, image, n, cmethod);
11712 context_used = mini_method_check_context_used (cfg, cmethod);
11714 cil_method = cmethod;
11715 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11716 METHOD_ACCESS_FAILURE;
11718 if (mono_security_cas_enabled ()) {
11719 if (check_linkdemand (cfg, method, cmethod))
11720 INLINE_FAILURE ("linkdemand");
11721 CHECK_CFG_EXCEPTION;
11722 } else if (mono_security_core_clr_enabled ()) {
11723 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11727 * Optimize the common case of ldftn+delegate creation
11729 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11730 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11731 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11732 MonoInst *target_ins;
11733 MonoMethod *invoke;
11734 int invoke_context_used;
11736 invoke = mono_get_delegate_invoke (ctor_method->klass);
11737 if (!invoke || !mono_method_signature (invoke))
11738 LOAD_ERROR;
11740 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11742 target_ins = sp [-1];
11744 if (mono_security_core_clr_enabled ())
11745 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11747 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11748 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11749 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11750 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11751 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11755 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11756 /* FIXME: SGEN support */
11757 if (invoke_context_used == 0) {
11758 ip += 6;
11759 if (cfg->verbose_level > 3)
11760 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11761 sp --;
11762 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11763 CHECK_CFG_EXCEPTION;
11764 ip += 5;
11765 sp ++;
11766 break;
11768 #endif
11772 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11773 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11774 *sp++ = ins;
11776 ip += 6;
11777 inline_costs += 10 * num_calls++;
11778 break;
11780 case CEE_LDVIRTFTN: {
11781 MonoInst *args [2];
11783 CHECK_STACK (1);
11784 CHECK_OPSIZE (6);
11785 n = read32 (ip + 2);
11786 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11787 if (!cmethod || mono_loader_get_last_error ())
11788 LOAD_ERROR;
11789 mono_class_init (cmethod->klass);
11791 context_used = mini_method_check_context_used (cfg, cmethod);
11793 if (mono_security_cas_enabled ()) {
11794 if (check_linkdemand (cfg, method, cmethod))
11795 INLINE_FAILURE ("linkdemand");
11796 CHECK_CFG_EXCEPTION;
11797 } else if (mono_security_core_clr_enabled ()) {
11798 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11801 --sp;
11802 args [0] = *sp;
11804 args [1] = emit_get_rgctx_method (cfg, context_used,
11805 cmethod, MONO_RGCTX_INFO_METHOD);
11807 if (context_used)
11808 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11809 else
11810 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11812 ip += 6;
11813 inline_costs += 10 * num_calls++;
11814 break;
11816 case CEE_LDARG:
11817 CHECK_STACK_OVF (1);
11818 CHECK_OPSIZE (4);
11819 n = read16 (ip + 2);
11820 CHECK_ARG (n);
11821 EMIT_NEW_ARGLOAD (cfg, ins, n);
11822 *sp++ = ins;
11823 ip += 4;
11824 break;
11825 case CEE_LDARGA:
11826 CHECK_STACK_OVF (1);
11827 CHECK_OPSIZE (4);
11828 n = read16 (ip + 2);
11829 CHECK_ARG (n);
11830 NEW_ARGLOADA (cfg, ins, n);
11831 MONO_ADD_INS (cfg->cbb, ins);
11832 *sp++ = ins;
11833 ip += 4;
11834 break;
11835 case CEE_STARG:
11836 CHECK_STACK (1);
11837 --sp;
11838 CHECK_OPSIZE (4);
11839 n = read16 (ip + 2);
11840 CHECK_ARG (n);
11841 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11842 UNVERIFIED;
11843 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11844 ip += 4;
11845 break;
11846 case CEE_LDLOC:
11847 CHECK_STACK_OVF (1);
11848 CHECK_OPSIZE (4);
11849 n = read16 (ip + 2);
11850 CHECK_LOCAL (n);
11851 EMIT_NEW_LOCLOAD (cfg, ins, n);
11852 *sp++ = ins;
11853 ip += 4;
11854 break;
11855 case CEE_LDLOCA: {
11856 unsigned char *tmp_ip;
11857 CHECK_STACK_OVF (1);
11858 CHECK_OPSIZE (4);
11859 n = read16 (ip + 2);
11860 CHECK_LOCAL (n);
11862 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11863 ip = tmp_ip;
11864 inline_costs += 1;
11865 break;
11868 EMIT_NEW_LOCLOADA (cfg, ins, n);
11869 *sp++ = ins;
11870 ip += 4;
11871 break;
11873 case CEE_STLOC:
11874 CHECK_STACK (1);
11875 --sp;
11876 CHECK_OPSIZE (4);
11877 n = read16 (ip + 2);
11878 CHECK_LOCAL (n);
11879 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11880 UNVERIFIED;
11881 emit_stloc_ir (cfg, sp, header, n);
11882 ip += 4;
11883 inline_costs += 1;
11884 break;
11885 case CEE_LOCALLOC:
11886 CHECK_STACK (1);
11887 --sp;
11888 if (sp != stack_start)
11889 UNVERIFIED;
11890 if (cfg->method != method)
11892 * Inlining this into a loop in a parent could lead to
11893 * stack overflows which is different behavior than the
11894 * non-inlined case, thus disable inlining in this case.
11896 goto inline_failure;
11898 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11899 ins->dreg = alloc_preg (cfg);
11900 ins->sreg1 = sp [0]->dreg;
11901 ins->type = STACK_PTR;
11902 MONO_ADD_INS (cfg->cbb, ins);
11904 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11905 if (init_locals)
11906 ins->flags |= MONO_INST_INIT;
11908 *sp++ = ins;
11909 ip += 2;
11910 break;
11911 case CEE_ENDFILTER: {
11912 MonoExceptionClause *clause, *nearest;
11913 int cc, nearest_num;
11915 CHECK_STACK (1);
11916 --sp;
11917 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11918 UNVERIFIED;
11919 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11920 ins->sreg1 = (*sp)->dreg;
11921 MONO_ADD_INS (bblock, ins);
11922 start_new_bblock = 1;
11923 ip += 2;
11925 nearest = NULL;
11926 nearest_num = 0;
11927 for (cc = 0; cc < header->num_clauses; ++cc) {
11928 clause = &header->clauses [cc];
11929 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11930 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11931 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11932 nearest = clause;
11933 nearest_num = cc;
11936 g_assert (nearest);
11937 if ((ip - header->code) != nearest->handler_offset)
11938 UNVERIFIED;
11940 break;
11942 case CEE_UNALIGNED_:
11943 ins_flag |= MONO_INST_UNALIGNED;
11944 /* FIXME: record alignment? we can assume 1 for now */
11945 CHECK_OPSIZE (3);
11946 ip += 3;
11947 break;
11948 case CEE_VOLATILE_:
11949 ins_flag |= MONO_INST_VOLATILE;
11950 ip += 2;
11951 break;
11952 case CEE_TAIL_:
11953 ins_flag |= MONO_INST_TAILCALL;
11954 cfg->flags |= MONO_CFG_HAS_TAIL;
11955 /* Can't inline tail calls at this time */
11956 inline_costs += 100000;
11957 ip += 2;
11958 break;
11959 case CEE_INITOBJ:
11960 CHECK_STACK (1);
11961 --sp;
11962 CHECK_OPSIZE (6);
11963 token = read32 (ip + 2);
11964 klass = mini_get_class (method, token, generic_context);
11965 CHECK_TYPELOAD (klass);
11966 if (generic_class_is_reference_type (cfg, klass))
11967 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11968 else
11969 mini_emit_initobj (cfg, *sp, NULL, klass);
11970 ip += 6;
11971 inline_costs += 1;
11972 break;
11973 case CEE_CONSTRAINED_:
11974 CHECK_OPSIZE (6);
11975 token = read32 (ip + 2);
11976 constrained_call = mini_get_class (method, token, generic_context);
11977 CHECK_TYPELOAD (constrained_call);
11978 ip += 6;
11979 break;
11980 case CEE_CPBLK:
11981 case CEE_INITBLK: {
11982 MonoInst *iargs [3];
11983 CHECK_STACK (3);
11984 sp -= 3;
11986 /* Skip optimized paths for volatile operations. */
11987 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11988 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11989 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11990 /* emit_memset only works when val == 0 */
11991 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11992 } else {
11993 MonoInst *call;
11994 iargs [0] = sp [0];
11995 iargs [1] = sp [1];
11996 iargs [2] = sp [2];
11997 if (ip [1] == CEE_CPBLK) {
11999 * FIXME: It's unclear whether we should be emitting both the acquire
12000 * and release barriers for cpblk. It is technically both a load and
12001 * store operation, so it seems like that's the sensible thing to do.
12003 MonoMethod *memcpy_method = get_memcpy_method ();
12004 if (ins_flag & MONO_INST_VOLATILE) {
12005 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12006 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12007 emit_memory_barrier (cfg, FullBarrier);
12009 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12010 call->flags |= ins_flag;
12011 if (ins_flag & MONO_INST_VOLATILE) {
12012 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12013 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12014 emit_memory_barrier (cfg, FullBarrier);
12016 } else {
12017 MonoMethod *memset_method = get_memset_method ();
12018 if (ins_flag & MONO_INST_VOLATILE) {
12019 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12020 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12021 emit_memory_barrier (cfg, FullBarrier);
12023 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12024 call->flags |= ins_flag;
12027 ip += 2;
12028 ins_flag = 0;
12029 inline_costs += 1;
12030 break;
12032 case CEE_NO_:
12033 CHECK_OPSIZE (3);
12034 if (ip [2] & 0x1)
12035 ins_flag |= MONO_INST_NOTYPECHECK;
12036 if (ip [2] & 0x2)
12037 ins_flag |= MONO_INST_NORANGECHECK;
12038 /* we ignore the no-nullcheck for now since we
12039 * really do it explicitly only when doing callvirt->call
12041 ip += 3;
12042 break;
12043 case CEE_RETHROW: {
12044 MonoInst *load;
12045 int handler_offset = -1;
12047 for (i = 0; i < header->num_clauses; ++i) {
12048 MonoExceptionClause *clause = &header->clauses [i];
12049 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12050 handler_offset = clause->handler_offset;
12051 break;
12055 bblock->flags |= BB_EXCEPTION_UNSAFE;
12057 g_assert (handler_offset != -1);
12059 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12060 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12061 ins->sreg1 = load->dreg;
12062 MONO_ADD_INS (bblock, ins);
12064 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12065 MONO_ADD_INS (bblock, ins);
12067 sp = stack_start;
12068 link_bblock (cfg, bblock, end_bblock);
12069 start_new_bblock = 1;
12070 ip += 2;
12071 break;
12073 case CEE_SIZEOF: {
12074 guint32 val;
12075 int ialign;
12077 GSHAREDVT_FAILURE (*ip);
12079 CHECK_STACK_OVF (1);
12080 CHECK_OPSIZE (6);
12081 token = read32 (ip + 2);
12082 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
12083 MonoType *type = mono_type_create_from_typespec (image, token);
12084 val = mono_type_size (type, &ialign);
12085 } else {
12086 MonoClass *klass = mono_class_get_full (image, token, generic_context);
12087 CHECK_TYPELOAD (klass);
12088 mono_class_init (klass);
12089 val = mono_type_size (&klass->byval_arg, &ialign);
12091 EMIT_NEW_ICONST (cfg, ins, val);
12092 *sp++= ins;
12093 ip += 6;
12094 break;
12096 case CEE_REFANYTYPE: {
12097 MonoInst *src_var, *src;
12099 GSHAREDVT_FAILURE (*ip);
12101 CHECK_STACK (1);
12102 --sp;
12104 // FIXME:
12105 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12106 if (!src_var)
12107 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12108 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12109 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
12110 *sp++ = ins;
12111 ip += 2;
12112 break;
12114 case CEE_READONLY_:
12115 readonly = TRUE;
12116 ip += 2;
12117 break;
12119 case CEE_UNUSED56:
12120 case CEE_UNUSED57:
12121 case CEE_UNUSED70:
12122 case CEE_UNUSED:
12123 case CEE_UNUSED99:
12124 UNVERIFIED;
12126 default:
12127 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12128 UNVERIFIED;
12130 break;
12132 case CEE_UNUSED58:
12133 case CEE_UNUSED1:
12134 UNVERIFIED;
12136 default:
12137 g_warning ("opcode 0x%02x not handled", *ip);
12138 UNVERIFIED;
12141 if (start_new_bblock != 1)
12142 UNVERIFIED;
12144 bblock->cil_length = ip - bblock->cil_code;
12145 if (bblock->next_bb) {
12146 /* This could already be set because of inlining, #693905 */
12147 MonoBasicBlock *bb = bblock;
12149 while (bb->next_bb)
12150 bb = bb->next_bb;
12151 bb->next_bb = end_bblock;
12152 } else {
12153 bblock->next_bb = end_bblock;
12156 if (cfg->method == method && cfg->domainvar) {
12157 MonoInst *store;
12158 MonoInst *get_domain;
12160 cfg->cbb = init_localsbb;
12162 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12163 MONO_ADD_INS (cfg->cbb, get_domain);
12164 } else {
12165 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12167 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12168 MONO_ADD_INS (cfg->cbb, store);
12171 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12172 if (cfg->compile_aot)
12173 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12174 mono_get_got_var (cfg);
12175 #endif
12177 if (cfg->method == method && cfg->got_var)
12178 mono_emit_load_got_addr (cfg);
12180 if (init_localsbb) {
12181 cfg->cbb = init_localsbb;
12182 cfg->ip = NULL;
12183 for (i = 0; i < header->num_locals; ++i) {
12184 emit_init_local (cfg, i, header->locals [i], init_locals);
12188 if (cfg->init_ref_vars && cfg->method == method) {
12189 /* Emit initialization for ref vars */
12190 // FIXME: Avoid duplication initialization for IL locals.
12191 for (i = 0; i < cfg->num_varinfo; ++i) {
12192 MonoInst *ins = cfg->varinfo [i];
12194 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12195 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12199 if (cfg->lmf_var && cfg->method == method) {
12200 cfg->cbb = init_localsbb;
12201 emit_push_lmf (cfg);
12204 cfg->cbb = init_localsbb;
12205 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12207 if (seq_points) {
12208 MonoBasicBlock *bb;
12211 * Make seq points at backward branch targets interruptable.
12213 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12214 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12215 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12218 /* Add a sequence point for method entry/exit events */
12219 if (seq_points) {
12220 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12221 MONO_ADD_INS (init_localsbb, ins);
12222 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12223 MONO_ADD_INS (cfg->bb_exit, ins);
12227 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12228 * the code they refer to was dead (#11880).
12230 if (sym_seq_points) {
12231 for (i = 0; i < header->code_size; ++i) {
12232 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12233 MonoInst *ins;
12235 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12236 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12241 cfg->ip = NULL;
12243 if (cfg->method == method) {
12244 MonoBasicBlock *bb;
12245 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12246 bb->region = mono_find_block_region (cfg, bb->real_offset);
12247 if (cfg->spvars)
12248 mono_create_spvar_for_region (cfg, bb->region);
12249 if (cfg->verbose_level > 2)
12250 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12254 g_slist_free (class_inits);
12255 dont_inline = g_list_remove (dont_inline, method);
12257 if (inline_costs < 0) {
12258 char *mname;
12260 /* Method is too large */
12261 mname = mono_method_full_name (method, TRUE);
12262 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12263 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12264 g_free (mname);
12265 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12266 mono_basic_block_free (original_bb);
12267 return -1;
12270 if ((cfg->verbose_level > 2) && (cfg->method == method))
12271 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12273 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12274 mono_basic_block_free (original_bb);
12275 return inline_costs;
12277 exception_exit:
12278 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12279 goto cleanup;
12281 inline_failure:
12282 goto cleanup;
12284 load_error:
12285 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12286 goto cleanup;
12288 unverified:
12289 set_exception_type_from_invalid_il (cfg, method, ip);
12290 goto cleanup;
12292 cleanup:
12293 g_slist_free (class_inits);
12294 mono_basic_block_free (original_bb);
12295 dont_inline = g_list_remove (dont_inline, method);
12296 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12297 return -1;
12300 static int
12301 store_membase_reg_to_store_membase_imm (int opcode)
12303 switch (opcode) {
12304 case OP_STORE_MEMBASE_REG:
12305 return OP_STORE_MEMBASE_IMM;
12306 case OP_STOREI1_MEMBASE_REG:
12307 return OP_STOREI1_MEMBASE_IMM;
12308 case OP_STOREI2_MEMBASE_REG:
12309 return OP_STOREI2_MEMBASE_IMM;
12310 case OP_STOREI4_MEMBASE_REG:
12311 return OP_STOREI4_MEMBASE_IMM;
12312 case OP_STOREI8_MEMBASE_REG:
12313 return OP_STOREI8_MEMBASE_IMM;
12314 default:
12315 g_assert_not_reached ();
12318 return -1;
12322 mono_op_to_op_imm (int opcode)
12324 switch (opcode) {
12325 case OP_IADD:
12326 return OP_IADD_IMM;
12327 case OP_ISUB:
12328 return OP_ISUB_IMM;
12329 case OP_IDIV:
12330 return OP_IDIV_IMM;
12331 case OP_IDIV_UN:
12332 return OP_IDIV_UN_IMM;
12333 case OP_IREM:
12334 return OP_IREM_IMM;
12335 case OP_IREM_UN:
12336 return OP_IREM_UN_IMM;
12337 case OP_IMUL:
12338 return OP_IMUL_IMM;
12339 case OP_IAND:
12340 return OP_IAND_IMM;
12341 case OP_IOR:
12342 return OP_IOR_IMM;
12343 case OP_IXOR:
12344 return OP_IXOR_IMM;
12345 case OP_ISHL:
12346 return OP_ISHL_IMM;
12347 case OP_ISHR:
12348 return OP_ISHR_IMM;
12349 case OP_ISHR_UN:
12350 return OP_ISHR_UN_IMM;
12352 case OP_LADD:
12353 return OP_LADD_IMM;
12354 case OP_LSUB:
12355 return OP_LSUB_IMM;
12356 case OP_LAND:
12357 return OP_LAND_IMM;
12358 case OP_LOR:
12359 return OP_LOR_IMM;
12360 case OP_LXOR:
12361 return OP_LXOR_IMM;
12362 case OP_LSHL:
12363 return OP_LSHL_IMM;
12364 case OP_LSHR:
12365 return OP_LSHR_IMM;
12366 case OP_LSHR_UN:
12367 return OP_LSHR_UN_IMM;
12368 #if SIZEOF_REGISTER == 8
12369 case OP_LREM:
12370 return OP_LREM_IMM;
12371 #endif
12373 case OP_COMPARE:
12374 return OP_COMPARE_IMM;
12375 case OP_ICOMPARE:
12376 return OP_ICOMPARE_IMM;
12377 case OP_LCOMPARE:
12378 return OP_LCOMPARE_IMM;
12380 case OP_STORE_MEMBASE_REG:
12381 return OP_STORE_MEMBASE_IMM;
12382 case OP_STOREI1_MEMBASE_REG:
12383 return OP_STOREI1_MEMBASE_IMM;
12384 case OP_STOREI2_MEMBASE_REG:
12385 return OP_STOREI2_MEMBASE_IMM;
12386 case OP_STOREI4_MEMBASE_REG:
12387 return OP_STOREI4_MEMBASE_IMM;
12389 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12390 case OP_X86_PUSH:
12391 return OP_X86_PUSH_IMM;
12392 case OP_X86_COMPARE_MEMBASE_REG:
12393 return OP_X86_COMPARE_MEMBASE_IMM;
12394 #endif
12395 #if defined(TARGET_AMD64)
12396 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12397 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12398 #endif
12399 case OP_VOIDCALL_REG:
12400 return OP_VOIDCALL;
12401 case OP_CALL_REG:
12402 return OP_CALL;
12403 case OP_LCALL_REG:
12404 return OP_LCALL;
12405 case OP_FCALL_REG:
12406 return OP_FCALL;
12407 case OP_LOCALLOC:
12408 return OP_LOCALLOC_IMM;
12411 return -1;
12414 static int
12415 ldind_to_load_membase (int opcode)
12417 switch (opcode) {
12418 case CEE_LDIND_I1:
12419 return OP_LOADI1_MEMBASE;
12420 case CEE_LDIND_U1:
12421 return OP_LOADU1_MEMBASE;
12422 case CEE_LDIND_I2:
12423 return OP_LOADI2_MEMBASE;
12424 case CEE_LDIND_U2:
12425 return OP_LOADU2_MEMBASE;
12426 case CEE_LDIND_I4:
12427 return OP_LOADI4_MEMBASE;
12428 case CEE_LDIND_U4:
12429 return OP_LOADU4_MEMBASE;
12430 case CEE_LDIND_I:
12431 return OP_LOAD_MEMBASE;
12432 case CEE_LDIND_REF:
12433 return OP_LOAD_MEMBASE;
12434 case CEE_LDIND_I8:
12435 return OP_LOADI8_MEMBASE;
12436 case CEE_LDIND_R4:
12437 return OP_LOADR4_MEMBASE;
12438 case CEE_LDIND_R8:
12439 return OP_LOADR8_MEMBASE;
12440 default:
12441 g_assert_not_reached ();
12444 return -1;
12447 static int
12448 stind_to_store_membase (int opcode)
12450 switch (opcode) {
12451 case CEE_STIND_I1:
12452 return OP_STOREI1_MEMBASE_REG;
12453 case CEE_STIND_I2:
12454 return OP_STOREI2_MEMBASE_REG;
12455 case CEE_STIND_I4:
12456 return OP_STOREI4_MEMBASE_REG;
12457 case CEE_STIND_I:
12458 case CEE_STIND_REF:
12459 return OP_STORE_MEMBASE_REG;
12460 case CEE_STIND_I8:
12461 return OP_STOREI8_MEMBASE_REG;
12462 case CEE_STIND_R4:
12463 return OP_STORER4_MEMBASE_REG;
12464 case CEE_STIND_R8:
12465 return OP_STORER8_MEMBASE_REG;
12466 default:
12467 g_assert_not_reached ();
12470 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the variant which loads from an
 * absolute address (OP_LOAD*_MEM). Returns -1 when the current target
 * has no such opcodes.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (opcode == OP_LOAD_MEMBASE)
		return OP_LOAD_MEM;
	if (opcode == OP_LOADU1_MEMBASE)
		return OP_LOADU1_MEM;
	if (opcode == OP_LOADU2_MEMBASE)
		return OP_LOADU2_MEM;
	if (opcode == OP_LOADI4_MEMBASE)
		return OP_LOADI4_MEM;
	if (opcode == OP_LOADU4_MEMBASE)
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	if (opcode == OP_LOADI8_MEMBASE)
		return OP_LOADI8_MEM;
#endif
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return a target specific opcode which applies OPCODE directly to the
 * memory destination written by STORE_OPCODE (read-modify-write folding),
 * or -1 if no such opcode exists on the current target.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer sized/32 bit stores can be folded. */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG)
		return -1;

	if (opcode == OP_IADD)
		return OP_X86_ADD_MEMBASE_REG;
	if (opcode == OP_ISUB)
		return OP_X86_SUB_MEMBASE_REG;
	if (opcode == OP_IAND)
		return OP_X86_AND_MEMBASE_REG;
	if (opcode == OP_IOR)
		return OP_X86_OR_MEMBASE_REG;
	if (opcode == OP_IXOR)
		return OP_X86_XOR_MEMBASE_REG;
	if (opcode == OP_ADD_IMM || opcode == OP_IADD_IMM)
		return OP_X86_ADD_MEMBASE_IMM;
	if (opcode == OP_SUB_IMM || opcode == OP_ISUB_IMM)
		return OP_X86_SUB_MEMBASE_IMM;
	if (opcode == OP_AND_IMM || opcode == OP_IAND_IMM)
		return OP_X86_AND_MEMBASE_IMM;
	if (opcode == OP_OR_IMM || opcode == OP_IOR_IMM)
		return OP_X86_OR_MEMBASE_IMM;
	if (opcode == OP_XOR_IMM || opcode == OP_IXOR_IMM)
		return OP_X86_XOR_MEMBASE_IMM;
	/* A move whose result is immediately stored back becomes a no-op. */
	if (opcode == OP_MOVE)
		return OP_NOP;
#endif

#if defined(TARGET_AMD64)
	/* Only pointer sized/32 bit/64 bit stores can be folded. */
	if (store_opcode != OP_STORE_MEMBASE_REG && store_opcode != OP_STOREI4_MEMBASE_REG && store_opcode != OP_STOREI8_MEMBASE_REG)
		return -1;

	/* 32 bit variants. */
	if (opcode == OP_IADD)
		return OP_X86_ADD_MEMBASE_REG;
	if (opcode == OP_ISUB)
		return OP_X86_SUB_MEMBASE_REG;
	if (opcode == OP_IAND)
		return OP_X86_AND_MEMBASE_REG;
	if (opcode == OP_IOR)
		return OP_X86_OR_MEMBASE_REG;
	if (opcode == OP_IXOR)
		return OP_X86_XOR_MEMBASE_REG;
	if (opcode == OP_IADD_IMM)
		return OP_X86_ADD_MEMBASE_IMM;
	if (opcode == OP_ISUB_IMM)
		return OP_X86_SUB_MEMBASE_IMM;
	if (opcode == OP_IAND_IMM)
		return OP_X86_AND_MEMBASE_IMM;
	if (opcode == OP_IOR_IMM)
		return OP_X86_OR_MEMBASE_IMM;
	if (opcode == OP_IXOR_IMM)
		return OP_X86_XOR_MEMBASE_IMM;

	/* 64 bit variants. */
	if (opcode == OP_LADD)
		return OP_AMD64_ADD_MEMBASE_REG;
	if (opcode == OP_LSUB)
		return OP_AMD64_SUB_MEMBASE_REG;
	if (opcode == OP_LAND)
		return OP_AMD64_AND_MEMBASE_REG;
	if (opcode == OP_LOR)
		return OP_AMD64_OR_MEMBASE_REG;
	if (opcode == OP_LXOR)
		return OP_AMD64_XOR_MEMBASE_REG;
	if (opcode == OP_ADD_IMM || opcode == OP_LADD_IMM)
		return OP_AMD64_ADD_MEMBASE_IMM;
	if (opcode == OP_SUB_IMM || opcode == OP_LSUB_IMM)
		return OP_AMD64_SUB_MEMBASE_IMM;
	if (opcode == OP_AND_IMM || opcode == OP_LAND_IMM)
		return OP_AMD64_AND_MEMBASE_IMM;
	if (opcode == OP_OR_IMM || opcode == OP_LOR_IMM)
		return OP_AMD64_OR_MEMBASE_IMM;
	if (opcode == OP_XOR_IMM || opcode == OP_LXOR_IMM)
		return OP_AMD64_XOR_MEMBASE_IMM;
	/* A move whose result is immediately stored back becomes a no-op. */
	if (opcode == OP_MOVE)
		return OP_NOP;
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return a target specific opcode which stores the boolean result of a
 * comparison (OPCODE) directly to memory using a SETcc instruction, or -1
 * if the combination cannot be folded.
 *
 * The original switch relied on an unannotated fallthrough from OP_ICEQ
 * into OP_CNE whose correctness depended on both arms testing the same
 * condition; hoisting the shared store_opcode check removes the hazard
 * while preserving the exact same results.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* SETcc writes a single byte, so only 8 bit stores can be folded. */
	if (store_opcode == OP_STOREI1_MEMBASE_REG) {
		switch (opcode) {
		case OP_ICEQ:
			return OP_X86_SETEQ_MEMBASE;
		case OP_CNE:
			return OP_X86_SETNE_MEMBASE;
		default:
			break;
		}
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return a target specific opcode which performs OPCODE with its first
 * source operand read directly from memory (the operand was produced by
 * LOAD_OPCODE). Returns -1 when the load cannot be folded.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer sized/32 bit loads can be folded. */
	if (load_opcode != OP_LOAD_MEMBASE && load_opcode != OP_LOADI4_MEMBASE && load_opcode != OP_LOADU4_MEMBASE)
		return -1;

	if (opcode == OP_X86_PUSH)
		return OP_X86_PUSH_MEMBASE;
	if (opcode == OP_COMPARE_IMM || opcode == OP_ICOMPARE_IMM)
		return OP_X86_COMPARE_MEMBASE_IMM;
	if (opcode == OP_COMPARE || opcode == OP_ICOMPARE)
		return OP_X86_COMPARE_MEMBASE_REG;
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		/* Under ILP32, pointer sized loads are 32 bit wide. */
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return a target specific opcode which performs OPCODE with its second
 * source operand read directly from memory (the operand was produced by
 * LOAD_OPCODE). Returns -1 when the load cannot be folded.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only pointer sized/32 bit loads can be folded. */
	if (load_opcode != OP_LOAD_MEMBASE && load_opcode != OP_LOADI4_MEMBASE && load_opcode != OP_LOADU4_MEMBASE)
		return -1;

	if (opcode == OP_COMPARE || opcode == OP_ICOMPARE)
		return OP_X86_COMPARE_REG_MEMBASE;
	if (opcode == OP_IADD)
		return OP_X86_ADD_REG_MEMBASE;
	if (opcode == OP_ISUB)
		return OP_X86_SUB_REG_MEMBASE;
	if (opcode == OP_IAND)
		return OP_X86_AND_REG_MEMBASE;
	if (opcode == OP_IOR)
		return OP_X86_OR_REG_MEMBASE;
	if (opcode == OP_IXOR)
		return OP_X86_XOR_REG_MEMBASE;
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		/* 32 bit wide second operand. */
		if (opcode == OP_ICOMPARE)
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		if (opcode == OP_IADD)
			return OP_X86_ADD_REG_MEMBASE;
		if (opcode == OP_ISUB)
			return OP_X86_SUB_REG_MEMBASE;
		if (opcode == OP_IAND)
			return OP_X86_AND_REG_MEMBASE;
		if (opcode == OP_IOR)
			return OP_X86_OR_REG_MEMBASE;
		if (opcode == OP_IXOR)
			return OP_X86_XOR_REG_MEMBASE;
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		/* 64 bit wide second operand. */
		if (opcode == OP_COMPARE || opcode == OP_LCOMPARE)
			return OP_AMD64_COMPARE_REG_MEMBASE;
		if (opcode == OP_LADD)
			return OP_AMD64_ADD_REG_MEMBASE;
		if (opcode == OP_LSUB)
			return OP_AMD64_SUB_REG_MEMBASE;
		if (opcode == OP_LAND)
			return OP_AMD64_AND_REG_MEMBASE;
		if (opcode == OP_LOR)
			return OP_AMD64_OR_REG_MEMBASE;
		if (opcode == OP_LXOR)
			return OP_AMD64_XOR_REG_MEMBASE;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which the current
 * target emulates in software, since those have no usable immediate form.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets. */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder are emulated on this target. */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	/* Integer multiplication is emulated on this target. */
	if (opcode == OP_IMUL)
		return -1;
#endif

	return mono_op_to_op_imm (opcode);
}
12781 * mono_handle_global_vregs:
12783 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12784 * for them.
12786 void
12787 mono_handle_global_vregs (MonoCompile *cfg)
12789 gint32 *vreg_to_bb;
12790 MonoBasicBlock *bb;
12791 int i, pos;
12793 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12795 #ifdef MONO_ARCH_SIMD_INTRINSICS
12796 if (cfg->uses_simd_intrinsics)
12797 mono_simd_simplify_indirection (cfg);
12798 #endif
12800 /* Find local vregs used in more than one bb */
12801 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12802 MonoInst *ins = bb->code;
12803 int block_num = bb->block_num;
12805 if (cfg->verbose_level > 2)
12806 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12808 cfg->cbb = bb;
12809 for (; ins; ins = ins->next) {
12810 const char *spec = INS_INFO (ins->opcode);
12811 int regtype = 0, regindex;
12812 gint32 prev_bb;
12814 if (G_UNLIKELY (cfg->verbose_level > 2))
12815 mono_print_ins (ins);
12817 g_assert (ins->opcode >= MONO_CEE_LAST);
12819 for (regindex = 0; regindex < 4; regindex ++) {
12820 int vreg = 0;
12822 if (regindex == 0) {
12823 regtype = spec [MONO_INST_DEST];
12824 if (regtype == ' ')
12825 continue;
12826 vreg = ins->dreg;
12827 } else if (regindex == 1) {
12828 regtype = spec [MONO_INST_SRC1];
12829 if (regtype == ' ')
12830 continue;
12831 vreg = ins->sreg1;
12832 } else if (regindex == 2) {
12833 regtype = spec [MONO_INST_SRC2];
12834 if (regtype == ' ')
12835 continue;
12836 vreg = ins->sreg2;
12837 } else if (regindex == 3) {
12838 regtype = spec [MONO_INST_SRC3];
12839 if (regtype == ' ')
12840 continue;
12841 vreg = ins->sreg3;
12844 #if SIZEOF_REGISTER == 4
12845 /* In the LLVM case, the long opcodes are not decomposed */
12846 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12848 * Since some instructions reference the original long vreg,
12849 * and some reference the two component vregs, it is quite hard
12850 * to determine when it needs to be global. So be conservative.
12852 if (!get_vreg_to_inst (cfg, vreg)) {
12853 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12855 if (cfg->verbose_level > 2)
12856 printf ("LONG VREG R%d made global.\n", vreg);
12860 * Make the component vregs volatile since the optimizations can
12861 * get confused otherwise.
12863 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12864 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12866 #endif
12868 g_assert (vreg != -1);
12870 prev_bb = vreg_to_bb [vreg];
12871 if (prev_bb == 0) {
12872 /* 0 is a valid block num */
12873 vreg_to_bb [vreg] = block_num + 1;
12874 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
12875 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12876 continue;
12878 if (!get_vreg_to_inst (cfg, vreg)) {
12879 if (G_UNLIKELY (cfg->verbose_level > 2))
12880 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
12882 switch (regtype) {
12883 case 'i':
12884 if (vreg_is_ref (cfg, vreg))
12885 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12886 else
12887 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12888 break;
12889 case 'l':
12890 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12891 break;
12892 case 'f':
12893 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12894 break;
12895 case 'v':
12896 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12897 break;
12898 default:
12899 g_assert_not_reached ();
12903 /* Flag as having been used in more than one bb */
12904 vreg_to_bb [vreg] = -1;
12910 /* If a variable is used in only one bblock, convert it into a local vreg */
12911 for (i = 0; i < cfg->num_varinfo; i++) {
12912 MonoInst *var = cfg->varinfo [i];
12913 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12915 switch (var->type) {
12916 case STACK_I4:
12917 case STACK_OBJ:
12918 case STACK_PTR:
12919 case STACK_MP:
12920 case STACK_VTYPE:
12921 #if SIZEOF_REGISTER == 8
12922 case STACK_I8:
12923 #endif
12924 #if !defined(TARGET_X86)
12925 /* Enabling this screws up the fp stack on x86 */
12926 case STACK_R8:
12927 #endif
12928 if (mono_arch_is_soft_float ())
12929 break;
12931 /* Arguments are implicitly global */
12932 /* Putting R4 vars into registers doesn't work currently */
12933 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12934 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12936 * Make sure that the variable's liveness interval doesn't contain a call, since
12937 * that would cause the lvreg to be spilled, making the whole optimization
12938 * useless.
12940 /* This is too slow for JIT compilation */
12941 #if 0
12942 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12943 MonoInst *ins;
12944 int def_index, call_index, ins_index;
12945 gboolean spilled = FALSE;
12947 def_index = -1;
12948 call_index = -1;
12949 ins_index = 0;
12950 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12951 const char *spec = INS_INFO (ins->opcode);
12953 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12954 def_index = ins_index;
12956 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12957 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12958 if (call_index > def_index) {
12959 spilled = TRUE;
12960 break;
12964 if (MONO_IS_CALL (ins))
12965 call_index = ins_index;
12967 ins_index ++;
12970 if (spilled)
12971 break;
12973 #endif
12975 if (G_UNLIKELY (cfg->verbose_level > 2))
12976 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12977 var->flags |= MONO_INST_IS_DEAD;
12978 cfg->vreg_to_inst [var->dreg] = NULL;
12980 break;
12985 * Compress the varinfo and vars tables so the liveness computation is faster and
12986 * takes up less space.
12988 pos = 0;
12989 for (i = 0; i < cfg->num_varinfo; ++i) {
12990 MonoInst *var = cfg->varinfo [i];
12991 if (pos < i && cfg->locals_start == i)
12992 cfg->locals_start = pos;
12993 if (!(var->flags & MONO_INST_IS_DEAD)) {
12994 if (pos < i) {
12995 cfg->varinfo [pos] = cfg->varinfo [i];
12996 cfg->varinfo [pos]->inst_c0 = pos;
12997 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12998 cfg->vars [pos].idx = pos;
12999 #if SIZEOF_REGISTER == 4
13000 if (cfg->varinfo [pos]->type == STACK_I8) {
13001 /* Modify the two component vars too */
13002 MonoInst *var1;
13004 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13005 var1->inst_c0 = pos;
13006 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13007 var1->inst_c0 = pos;
13009 #endif
13011 pos ++;
13014 cfg->num_varinfo = pos;
13015 if (cfg->locals_start > cfg->num_varinfo)
13016 cfg->locals_start = cfg->num_varinfo;
13020 * mono_spill_global_vars:
13022 * Generate spill code for variables which are not allocated to registers,
13023 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13024 * code is generated which could be optimized by the local optimization passes.
13026 void
13027 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13029 MonoBasicBlock *bb;
13030 char spec2 [16];
13031 int orig_next_vreg;
13032 guint32 *vreg_to_lvreg;
13033 guint32 *lvregs;
13034 guint32 i, lvregs_len;
13035 gboolean dest_has_lvreg = FALSE;
13036 guint32 stacktypes [128];
13037 MonoInst **live_range_start, **live_range_end;
13038 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13039 int *gsharedvt_vreg_to_idx = NULL;
13041 *need_local_opts = FALSE;
13043 memset (spec2, 0, sizeof (spec2));
13045 /* FIXME: Move this function to mini.c */
13046 stacktypes ['i'] = STACK_PTR;
13047 stacktypes ['l'] = STACK_I8;
13048 stacktypes ['f'] = STACK_R8;
13049 #ifdef MONO_ARCH_SIMD_INTRINSICS
13050 stacktypes ['x'] = STACK_VTYPE;
13051 #endif
13053 #if SIZEOF_REGISTER == 4
13054 /* Create MonoInsts for longs */
13055 for (i = 0; i < cfg->num_varinfo; i++) {
13056 MonoInst *ins = cfg->varinfo [i];
13058 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13059 switch (ins->type) {
13060 case STACK_R8:
13061 case STACK_I8: {
13062 MonoInst *tree;
13064 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13065 break;
13067 g_assert (ins->opcode == OP_REGOFFSET);
13069 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13070 g_assert (tree);
13071 tree->opcode = OP_REGOFFSET;
13072 tree->inst_basereg = ins->inst_basereg;
13073 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13075 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13076 g_assert (tree);
13077 tree->opcode = OP_REGOFFSET;
13078 tree->inst_basereg = ins->inst_basereg;
13079 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13080 break;
13082 default:
13083 break;
13087 #endif
13089 if (cfg->compute_gc_maps) {
13090 /* registers need liveness info even for !non refs */
13091 for (i = 0; i < cfg->num_varinfo; i++) {
13092 MonoInst *ins = cfg->varinfo [i];
13094 if (ins->opcode == OP_REGVAR)
13095 ins->flags |= MONO_INST_GC_TRACK;
13099 if (cfg->gsharedvt) {
13100 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13102 for (i = 0; i < cfg->num_varinfo; ++i) {
13103 MonoInst *ins = cfg->varinfo [i];
13104 int idx;
13106 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13107 if (i >= cfg->locals_start) {
13108 /* Local */
13109 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13110 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13111 ins->opcode = OP_GSHAREDVT_LOCAL;
13112 ins->inst_imm = idx;
13113 } else {
13114 /* Arg */
13115 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13116 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13122 /* FIXME: widening and truncation */
13125 * As an optimization, when a variable allocated to the stack is first loaded into
13126 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13127 * the variable again.
13129 orig_next_vreg = cfg->next_vreg;
13130 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13131 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13132 lvregs_len = 0;
13135 * These arrays contain the first and last instructions accessing a given
13136 * variable.
13137 * Since we emit bblocks in the same order we process them here, and we
13138 * don't split live ranges, these will precisely describe the live range of
13139 * the variable, i.e. the instruction range where a valid value can be found
13140 * in the variables location.
13141 * The live range is computed using the liveness info computed by the liveness pass.
13142 * We can't use vmv->range, since that is an abstract live range, and we need
13143 * one which is instruction precise.
13144 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13146 /* FIXME: Only do this if debugging info is requested */
13147 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13148 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13149 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13150 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13152 /* Add spill loads/stores */
13153 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13154 MonoInst *ins;
13156 if (cfg->verbose_level > 2)
13157 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13159 /* Clear vreg_to_lvreg array */
13160 for (i = 0; i < lvregs_len; i++)
13161 vreg_to_lvreg [lvregs [i]] = 0;
13162 lvregs_len = 0;
13164 cfg->cbb = bb;
13165 MONO_BB_FOR_EACH_INS (bb, ins) {
13166 const char *spec = INS_INFO (ins->opcode);
13167 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13168 gboolean store, no_lvreg;
13169 int sregs [MONO_MAX_SRC_REGS];
13171 if (G_UNLIKELY (cfg->verbose_level > 2))
13172 mono_print_ins (ins);
13174 if (ins->opcode == OP_NOP)
13175 continue;
13178 * We handle LDADDR here as well, since it can only be decomposed
13179 * when variable addresses are known.
13181 if (ins->opcode == OP_LDADDR) {
13182 MonoInst *var = ins->inst_p0;
13184 if (var->opcode == OP_VTARG_ADDR) {
13185 /* Happens on SPARC/S390 where vtypes are passed by reference */
13186 MonoInst *vtaddr = var->inst_left;
13187 if (vtaddr->opcode == OP_REGVAR) {
13188 ins->opcode = OP_MOVE;
13189 ins->sreg1 = vtaddr->dreg;
13191 else if (var->inst_left->opcode == OP_REGOFFSET) {
13192 ins->opcode = OP_LOAD_MEMBASE;
13193 ins->inst_basereg = vtaddr->inst_basereg;
13194 ins->inst_offset = vtaddr->inst_offset;
13195 } else
13196 NOT_IMPLEMENTED;
13197 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13198 /* gsharedvt arg passed by ref */
13199 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13201 ins->opcode = OP_LOAD_MEMBASE;
13202 ins->inst_basereg = var->inst_basereg;
13203 ins->inst_offset = var->inst_offset;
13204 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13205 MonoInst *load, *load2, *load3;
13206 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13207 int reg1, reg2, reg3;
13208 MonoInst *info_var = cfg->gsharedvt_info_var;
13209 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13212 * gsharedvt local.
13213 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13216 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13218 g_assert (info_var);
13219 g_assert (locals_var);
13221 /* Mark the instruction used to compute the locals var as used */
13222 cfg->gsharedvt_locals_var_ins = NULL;
13224 /* Load the offset */
13225 if (info_var->opcode == OP_REGOFFSET) {
13226 reg1 = alloc_ireg (cfg);
13227 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13228 } else if (info_var->opcode == OP_REGVAR) {
13229 load = NULL;
13230 reg1 = info_var->dreg;
13231 } else {
13232 g_assert_not_reached ();
13234 reg2 = alloc_ireg (cfg);
13235 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13236 /* Load the locals area address */
13237 reg3 = alloc_ireg (cfg);
13238 if (locals_var->opcode == OP_REGOFFSET) {
13239 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13240 } else if (locals_var->opcode == OP_REGVAR) {
13241 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13242 } else {
13243 g_assert_not_reached ();
13245 /* Compute the address */
13246 ins->opcode = OP_PADD;
13247 ins->sreg1 = reg3;
13248 ins->sreg2 = reg2;
13250 mono_bblock_insert_before_ins (bb, ins, load3);
13251 mono_bblock_insert_before_ins (bb, load3, load2);
13252 if (load)
13253 mono_bblock_insert_before_ins (bb, load2, load);
13254 } else {
13255 g_assert (var->opcode == OP_REGOFFSET);
13257 ins->opcode = OP_ADD_IMM;
13258 ins->sreg1 = var->inst_basereg;
13259 ins->inst_imm = var->inst_offset;
13262 *need_local_opts = TRUE;
13263 spec = INS_INFO (ins->opcode);
13266 if (ins->opcode < MONO_CEE_LAST) {
13267 mono_print_ins (ins);
13268 g_assert_not_reached ();
13272 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13273 * src register.
13274 * FIXME:
13276 if (MONO_IS_STORE_MEMBASE (ins)) {
13277 tmp_reg = ins->dreg;
13278 ins->dreg = ins->sreg2;
13279 ins->sreg2 = tmp_reg;
13280 store = TRUE;
13282 spec2 [MONO_INST_DEST] = ' ';
13283 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13284 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13285 spec2 [MONO_INST_SRC3] = ' ';
13286 spec = spec2;
13287 } else if (MONO_IS_STORE_MEMINDEX (ins))
13288 g_assert_not_reached ();
13289 else
13290 store = FALSE;
13291 no_lvreg = FALSE;
13293 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13294 printf ("\t %.3s %d", spec, ins->dreg);
13295 num_sregs = mono_inst_get_src_registers (ins, sregs);
13296 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13297 printf (" %d", sregs [srcindex]);
13298 printf ("\n");
13301 /***************/
13302 /* DREG */
13303 /***************/
13304 regtype = spec [MONO_INST_DEST];
13305 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13306 prev_dreg = -1;
13308 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13309 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13310 MonoInst *store_ins;
13311 int store_opcode;
13312 MonoInst *def_ins = ins;
13313 int dreg = ins->dreg; /* The original vreg */
13315 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13317 if (var->opcode == OP_REGVAR) {
13318 ins->dreg = var->dreg;
13319 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13321 * Instead of emitting a load+store, use a _membase opcode.
13323 g_assert (var->opcode == OP_REGOFFSET);
13324 if (ins->opcode == OP_MOVE) {
13325 NULLIFY_INS (ins);
13326 def_ins = NULL;
13327 } else {
13328 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13329 ins->inst_basereg = var->inst_basereg;
13330 ins->inst_offset = var->inst_offset;
13331 ins->dreg = -1;
13333 spec = INS_INFO (ins->opcode);
13334 } else {
13335 guint32 lvreg;
13337 g_assert (var->opcode == OP_REGOFFSET);
13339 prev_dreg = ins->dreg;
13341 /* Invalidate any previous lvreg for this vreg */
13342 vreg_to_lvreg [ins->dreg] = 0;
13344 lvreg = 0;
13346 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13347 regtype = 'l';
13348 store_opcode = OP_STOREI8_MEMBASE_REG;
13351 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13353 #if SIZEOF_REGISTER != 8
13354 if (regtype == 'l') {
13355 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13356 mono_bblock_insert_after_ins (bb, ins, store_ins);
13357 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13358 mono_bblock_insert_after_ins (bb, ins, store_ins);
13359 def_ins = store_ins;
13361 else
13362 #endif
13364 g_assert (store_opcode != OP_STOREV_MEMBASE);
13366 /* Try to fuse the store into the instruction itself */
13367 /* FIXME: Add more instructions */
13368 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13369 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13370 ins->inst_imm = ins->inst_c0;
13371 ins->inst_destbasereg = var->inst_basereg;
13372 ins->inst_offset = var->inst_offset;
13373 spec = INS_INFO (ins->opcode);
13374 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13375 ins->opcode = store_opcode;
13376 ins->inst_destbasereg = var->inst_basereg;
13377 ins->inst_offset = var->inst_offset;
13379 no_lvreg = TRUE;
13381 tmp_reg = ins->dreg;
13382 ins->dreg = ins->sreg2;
13383 ins->sreg2 = tmp_reg;
13384 store = TRUE;
13386 spec2 [MONO_INST_DEST] = ' ';
13387 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13388 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13389 spec2 [MONO_INST_SRC3] = ' ';
13390 spec = spec2;
13391 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13392 // FIXME: The backends expect the base reg to be in inst_basereg
13393 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13394 ins->dreg = -1;
13395 ins->inst_basereg = var->inst_basereg;
13396 ins->inst_offset = var->inst_offset;
13397 spec = INS_INFO (ins->opcode);
13398 } else {
13399 /* printf ("INS: "); mono_print_ins (ins); */
13400 /* Create a store instruction */
13401 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13403 /* Insert it after the instruction */
13404 mono_bblock_insert_after_ins (bb, ins, store_ins);
13406 def_ins = store_ins;
13409 * We can't assign ins->dreg to var->dreg here, since the
13410 * sregs could use it. So set a flag, and do it after
13411 * the sregs.
13413 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13414 dest_has_lvreg = TRUE;
13419 if (def_ins && !live_range_start [dreg]) {
13420 live_range_start [dreg] = def_ins;
13421 live_range_start_bb [dreg] = bb;
13424 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13425 MonoInst *tmp;
13427 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13428 tmp->inst_c1 = dreg;
13429 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13433 /************/
13434 /* SREGS */
13435 /************/
13436 num_sregs = mono_inst_get_src_registers (ins, sregs);
13437 for (srcindex = 0; srcindex < 3; ++srcindex) {
13438 regtype = spec [MONO_INST_SRC1 + srcindex];
13439 sreg = sregs [srcindex];
13441 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13442 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13443 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13444 MonoInst *use_ins = ins;
13445 MonoInst *load_ins;
13446 guint32 load_opcode;
13448 if (var->opcode == OP_REGVAR) {
13449 sregs [srcindex] = var->dreg;
13450 //mono_inst_set_src_registers (ins, sregs);
13451 live_range_end [sreg] = use_ins;
13452 live_range_end_bb [sreg] = bb;
13454 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13455 MonoInst *tmp;
13457 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13458 /* var->dreg is a hreg */
13459 tmp->inst_c1 = sreg;
13460 mono_bblock_insert_after_ins (bb, ins, tmp);
13463 continue;
13466 g_assert (var->opcode == OP_REGOFFSET);
13468 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13470 g_assert (load_opcode != OP_LOADV_MEMBASE);
13472 if (vreg_to_lvreg [sreg]) {
13473 g_assert (vreg_to_lvreg [sreg] != -1);
13475 /* The variable is already loaded to an lvreg */
13476 if (G_UNLIKELY (cfg->verbose_level > 2))
13477 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13478 sregs [srcindex] = vreg_to_lvreg [sreg];
13479 //mono_inst_set_src_registers (ins, sregs);
13480 continue;
13483 /* Try to fuse the load into the instruction */
13484 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13485 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13486 sregs [0] = var->inst_basereg;
13487 //mono_inst_set_src_registers (ins, sregs);
13488 ins->inst_offset = var->inst_offset;
13489 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13490 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13491 sregs [1] = var->inst_basereg;
13492 //mono_inst_set_src_registers (ins, sregs);
13493 ins->inst_offset = var->inst_offset;
13494 } else {
13495 if (MONO_IS_REAL_MOVE (ins)) {
13496 ins->opcode = OP_NOP;
13497 sreg = ins->dreg;
13498 } else {
13499 //printf ("%d ", srcindex); mono_print_ins (ins);
13501 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13503 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13504 if (var->dreg == prev_dreg) {
13506 * sreg refers to the value loaded by the load
13507 * emitted below, but we need to use ins->dreg
13508 * since it refers to the store emitted earlier.
13510 sreg = ins->dreg;
13512 g_assert (sreg != -1);
13513 vreg_to_lvreg [var->dreg] = sreg;
13514 g_assert (lvregs_len < 1024);
13515 lvregs [lvregs_len ++] = var->dreg;
13519 sregs [srcindex] = sreg;
13520 //mono_inst_set_src_registers (ins, sregs);
13522 #if SIZEOF_REGISTER != 8
13523 if (regtype == 'l') {
13524 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13525 mono_bblock_insert_before_ins (bb, ins, load_ins);
13526 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13527 mono_bblock_insert_before_ins (bb, ins, load_ins);
13528 use_ins = load_ins;
13530 else
13531 #endif
13533 #if SIZEOF_REGISTER == 4
13534 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13535 #endif
13536 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13537 mono_bblock_insert_before_ins (bb, ins, load_ins);
13538 use_ins = load_ins;
13542 if (var->dreg < orig_next_vreg) {
13543 live_range_end [var->dreg] = use_ins;
13544 live_range_end_bb [var->dreg] = bb;
13547 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13548 MonoInst *tmp;
13550 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13551 tmp->inst_c1 = var->dreg;
13552 mono_bblock_insert_after_ins (bb, ins, tmp);
13556 mono_inst_set_src_registers (ins, sregs);
13558 if (dest_has_lvreg) {
13559 g_assert (ins->dreg != -1);
13560 vreg_to_lvreg [prev_dreg] = ins->dreg;
13561 g_assert (lvregs_len < 1024);
13562 lvregs [lvregs_len ++] = prev_dreg;
13563 dest_has_lvreg = FALSE;
13566 if (store) {
13567 tmp_reg = ins->dreg;
13568 ins->dreg = ins->sreg2;
13569 ins->sreg2 = tmp_reg;
13572 if (MONO_IS_CALL (ins)) {
13573 /* Clear vreg_to_lvreg array */
13574 for (i = 0; i < lvregs_len; i++)
13575 vreg_to_lvreg [lvregs [i]] = 0;
13576 lvregs_len = 0;
13577 } else if (ins->opcode == OP_NOP) {
13578 ins->dreg = -1;
13579 MONO_INST_NULLIFY_SREGS (ins);
13582 if (cfg->verbose_level > 2)
13583 mono_print_ins_index (1, ins);
13586 /* Extend the live range based on the liveness info */
13587 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13588 for (i = 0; i < cfg->num_varinfo; i ++) {
13589 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13591 if (vreg_is_volatile (cfg, vi->vreg))
13592 /* The liveness info is incomplete */
13593 continue;
13595 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13596 /* Live from at least the first ins of this bb */
13597 live_range_start [vi->vreg] = bb->code;
13598 live_range_start_bb [vi->vreg] = bb;
13601 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13602 /* Live at least until the last ins of this bb */
13603 live_range_end [vi->vreg] = bb->last_ins;
13604 live_range_end_bb [vi->vreg] = bb;
13610 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13612 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13613 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13615 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13616 for (i = 0; i < cfg->num_varinfo; ++i) {
13617 int vreg = MONO_VARINFO (cfg, i)->vreg;
13618 MonoInst *ins;
13620 if (live_range_start [vreg]) {
13621 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13622 ins->inst_c0 = i;
13623 ins->inst_c1 = vreg;
13624 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13626 if (live_range_end [vreg]) {
13627 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13628 ins->inst_c0 = i;
13629 ins->inst_c1 = vreg;
13630 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13631 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13632 else
13633 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13637 #endif
13639 if (cfg->gsharedvt_locals_var_ins) {
13640 /* Nullify if unused */
13641 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13642 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13645 g_free (live_range_start);
13646 g_free (live_range_end);
13647 g_free (live_range_start_bb);
13648 g_free (live_range_end_bb);
13652 * FIXME:
13653 * - use 'iadd' instead of 'int_add'
13654 * - handling ovf opcodes: decompose in method_to_ir.
13655 * - unify iregs/fregs
13656 * -> partly done, the missing parts are:
13657 * - a more complete unification would involve unifying the hregs as well, so
13658 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13659 * would no longer map to the machine hregs, so the code generators would need to
13660 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13661 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13662 * fp/non-fp branches speeds it up by about 15%.
13663 * - use sext/zext opcodes instead of shifts
13664 * - add OP_ICALL
13665 * - get rid of TEMPLOADs if possible and use vregs instead
13666 * - clean up usage of OP_P/OP_ opcodes
13667 * - cleanup usage of DUMMY_USE
13668 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13669 * stack
13670 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13671 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13672 * - make sure handle_stack_args () is called before the branch is emitted
13673 * - when the new IR is done, get rid of all unused stuff
13674 * - COMPARE/BEQ as separate instructions or unify them ?
13675 * - keeping them separate allows specialized compare instructions like
13676 * compare_imm, compare_membase
13677 * - most back ends unify fp compare+branch, fp compare+ceq
13678 * - integrate mono_save_args into inline_method
13679 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13680 * - handle long shift opts on 32 bit platforms somehow: they require
13681 * 3 sregs (2 for arg1 and 1 for arg2)
13682 * - make byref a 'normal' type.
13683 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13684 * variable if needed.
13685 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13686 * like inline_method.
13687 * - remove inlining restrictions
13688 * - fix LNEG and enable cfold of INEG
13689 * - generalize x86 optimizations like ldelema as a peephole optimization
13690 * - add store_mem_imm for amd64
13691 * - optimize the loading of the interruption flag in the managed->native wrappers
13692 * - avoid special handling of OP_NOP in passes
13693 * - move code inserting instructions into one function/macro.
13694 * - try a coalescing phase after liveness analysis
13695 * - add float -> vreg conversion + local optimizations on !x86
13696 * - figure out how to handle decomposed branches during optimizations, ie.
13697 * compare+branch, op_jump_table+op_br etc.
13698 * - promote RuntimeXHandles to vregs
13699 * - vtype cleanups:
13700 * - add a NEW_VARLOADA_VREG macro
13701 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13702 * accessing vtype fields.
13703 * - get rid of I8CONST on 64 bit platforms
13704 * - dealing with the increase in code size due to branches created during opcode
13705 * decomposition:
13706 * - use extended basic blocks
13707 * - all parts of the JIT
13708 * - handle_global_vregs () && local regalloc
13709 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13710 * - sources of increase in code size:
13711 * - vtypes
13712 * - long compares
13713 * - isinst and castclass
13714 * - lvregs not allocated to global registers even if used multiple times
13715 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13716 * meaningful.
13717 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13718 * - add all micro optimizations from the old JIT
13719 * - put tree optimizations into the deadce pass
13720 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13721 * specific function.
13722 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13723 * fcompare + branchCC.
13724 * - create a helper function for allocating a stack slot, taking into account
13725 * MONO_CFG_HAS_SPILLUP.
13726 * - merge r68207.
13727 * - merge the ia64 switch changes.
13728 * - optimize mono_regstate2_alloc_int/float.
13729 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13730 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13731 * parts of the tree could be separated by other instructions, killing the tree
13732 * arguments, or stores killing loads etc. Also, should we fold loads into other
13733 * instructions if the result of the load is used multiple times ?
13734 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13735 * - LAST MERGE: 108395.
13736 * - when returning vtypes in registers, generate IR and append it to the end of the
13737 * last bb instead of doing it in the epilog.
13738 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13743 NOTES
13744 -----
13746 - When to decompose opcodes:
13747 - earlier: this makes some optimizations hard to implement, since the low level IR
13748 no longer contains the necessary information. But it is easier to do.
13749 - later: harder to implement, enables more optimizations.
13750 - Branches inside bblocks:
13751 - created when decomposing complex opcodes.
13752 - branches to another bblock: harmless, but not tracked by the branch
13753 optimizations, so need to branch to a label at the start of the bblock.
13754 - branches to inside the same bblock: very problematic, trips up the local
13755 reg allocator. Can be fixed by splitting the current bblock, but that is a
13756 complex operation, since some local vregs can become global vregs etc.
13757 - Local/global vregs:
13758 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13759 local register allocator.
13760 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13761 structure, created by mono_create_var (). Assigned to hregs or the stack by
13762 the global register allocator.
13763 - When to do optimizations like alu->alu_imm:
13764 - earlier -> saves work later on since the IR will be smaller/simpler
13765 - later -> can work on more instructions
13766 - Handling of valuetypes:
13767 - When a vtype is pushed on the stack, a new temporary is created, an
13768 instruction computing its address (LDADDR) is emitted and pushed on
13769 the stack. Need to optimize cases when the vtype is used immediately as in
13770 argument passing, stloc etc.
13771 - Instead of the to_end stuff in the old JIT, simply call the function handling
13772 the values on the stack before emitting the last instruction of the bb.
13775 #endif /* DISABLE_JIT */