[runtime] Properly handle rethrow outside of catch blocks. Fixes #20412
[mono-project.git] / mono / mini / method-to-ir.c
blobfaeb658ef8c18a8e41f16f248940717f82b3c947
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
13 #include <config.h>
15 #ifndef DISABLE_JIT
17 #include <signal.h>
19 #ifdef HAVE_UNISTD_H
20 #include <unistd.h>
21 #endif
23 #include <math.h>
24 #include <string.h>
25 #include <ctype.h>
27 #ifdef HAVE_SYS_TIME_H
28 #include <sys/time.h>
29 #endif
31 #ifdef HAVE_ALLOCA_H
32 #include <alloca.h>
33 #endif
35 #include <mono/utils/memcheck.h>
36 #include "mini.h"
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
64 #include "trace.h"
66 #include "ir-emit.h"
68 #include "jit-icalls.h"
69 #include "jit.h"
70 #include "debugger-agent.h"
/* Cost of a branch instruction, used by the inlining heuristics */
#define BRANCH_COST 10
/* Maximum IL size (in bytes) of a method considered for inlining */
#define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE:
 *
 *   Abort inlining of the current (inlined) method body and fall back to
 * emitting a normal call.  Only acts when we are actually compiling an
 * inlined body (cfg->method != method) of a non-wrapper method.
 */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
		if (cfg->verbose_level >= 2) \
			printf ("inline failed: %s\n", msg); \
		goto inline_failure; \
	} \
	} while (0)
/* Bail out of the IL-to-IR loop when an exception was already recorded on the cfg */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)
/* Record a MethodAccessException (CIL_METHOD is not accessible from METHOD) and abort */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);		\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)
/* Record a FieldAccessException (FIELD is not accessible from METHOD) and abort */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);		\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)
/*
 * GENERIC_SHARING_FAILURE:
 *
 *   Abort JITting when generic sharing cannot handle OPCODE: record
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED on the cfg and jump to the
 * method-level exception_exit label.  No-op when the method is not being
 * compiled with a generic sharing context.
 * NOTE(review): the scraped block lacked the `}` closing the if; restored.
 */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2) \
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
			goto exception_exit;	\
		}			\
	} while (0)
/*
 * GSHAREDVT_FAILURE:
 *
 *   Fall back from gsharedvt compilation when OPCODE is not supported:
 * record the failure message on the cfg, flag a generic-sharing failure and
 * bail out through exception_exit.  No-op outside gsharedvt mode.
 * NOTE(review): the scraped block lacked the `}` closing the if; restored.
 */
#define GSHAREDVT_FAILURE(opcode) do {		\
	if (cfg->gsharedvt) {					\
		cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
		if (cfg->verbose_level >= 2)					\
			printf ("%s\n", cfg->exception_message); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit;					\
	}						\
	} while (0)
/* Record an out-of-memory condition on the cfg and abort compilation */
#define OUT_OF_MEMORY_FAILURE do {	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY);		\
		goto exception_exit;	\
	} while (0)
/* Mark the method as not AOT-compilable, logging the call site when verbose */
#define DISABLE_AOT(cfg) do { \
		if ((cfg)->verbose_level >= 2)					  \
			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__);	\
		(cfg)->disable_aot = TRUE;							  \
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
142 static MonoMethodSignature *helper_sig_class_init_trampoline;
143 static MonoMethodSignature *helper_sig_domain_get;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
151 * Instruction metadata
153 #ifdef MINI_OP
154 #undef MINI_OP
155 #endif
156 #ifdef MINI_OP3
157 #undef MINI_OP3
158 #endif
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
161 #define NONE ' '
162 #define IREG 'i'
163 #define FREG 'f'
164 #define VREG 'v'
165 #define XREG 'x'
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
167 #define LREG IREG
168 #else
169 #define LREG 'l'
170 #endif
171 /* keep in sync with the enum in mini.h */
172 const char
173 ins_info[] = {
174 #include "mini-ops.h"
176 #undef MINI_OP
177 #undef MINI_OP3
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
188 #undef MINI_OP
189 #undef MINI_OP3
/* Initialize the liveness/allocation bookkeeping fields of variable info VI with index ID */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
197 void
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
205 guint32
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
211 guint32
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
217 guint32
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
223 guint32
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
229 guint32
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
240 guint32
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
251 guint32
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
262 guint32
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
269 else
270 return alloc_ireg (cfg);
273 guint
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
276 if (type->byref)
277 return OP_MOVE;
279 type = mini_replace_type (type);
280 handle_enum:
281 switch (type->type) {
282 case MONO_TYPE_I1:
283 case MONO_TYPE_U1:
284 case MONO_TYPE_BOOLEAN:
285 return OP_MOVE;
286 case MONO_TYPE_I2:
287 case MONO_TYPE_U2:
288 case MONO_TYPE_CHAR:
289 return OP_MOVE;
290 case MONO_TYPE_I4:
291 case MONO_TYPE_U4:
292 return OP_MOVE;
293 case MONO_TYPE_I:
294 case MONO_TYPE_U:
295 case MONO_TYPE_PTR:
296 case MONO_TYPE_FNPTR:
297 return OP_MOVE;
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
303 return OP_MOVE;
304 case MONO_TYPE_I8:
305 case MONO_TYPE_U8:
306 #if SIZEOF_REGISTER == 8
307 return OP_MOVE;
308 #else
309 return OP_LMOVE;
310 #endif
311 case MONO_TYPE_R4:
312 return OP_FMOVE;
313 case MONO_TYPE_R8:
314 return OP_FMOVE;
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
318 goto handle_enum;
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
321 return OP_XMOVE;
322 return OP_VMOVE;
323 case MONO_TYPE_TYPEDBYREF:
324 return OP_VMOVE;
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
327 goto handle_enum;
328 case MONO_TYPE_VAR:
329 case MONO_TYPE_MVAR:
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
332 return OP_VMOVE;
333 else
334 return OP_MOVE;
335 default:
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
338 return -1;
341 void
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
344 int i;
345 MonoInst *tree;
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 printf (", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 printf (" ]\n");
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
358 void
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 *
 * UNVERIFIED: handle IL that fails verification.  Under gsharedvt we record a
 * sharing failure and fall back to compiling the specific instantiation;
 * otherwise either break into the debugger (break_on_unverified debug option)
 * or jump to the method-level 'unverified' label.
 * NOTE(review): the scraped block lacked the `}` closing the gsharedvt if; restored.
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2)									\
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit;											\
	}																	\
	if (mini_get_debug_options ()->break_on_unverified) \
		G_BREAKPOINT (); \
	else \
		goto unverified; \
} while (0)
/* Abort compilation because something failed to load; honours the break_on_unverified debug option */
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)

/* Like LOAD_ERROR, but records the offending class in cfg->exception_ptr first */
#define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK:
 *
 *   Look up (or lazily create and register) the basic block starting at the
 * IL address IP, storing it in TBLOCK.  IP must lie inside the method body,
 * otherwise the IL is rejected through UNVERIFIED.
 * NOTE(review): the scraped block lacked the `}` closing the if; restored.
 */
#define GET_BBLOCK(cfg,tblock,ip) do {	\
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) {	\
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock));	\
			(tblock)->cil_code = (ip);	\
			ADD_BBLOCK (cfg, (tblock));	\
		} \
	} while (0)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/*
 * EMIT_NEW_X86_LEA:
 *
 *   Emit an x86/amd64 LEA computing sr1 + (sr2 << shift) + imm into a
 * freshly allocated managed-pointer vreg, storing the new instruction in DEST.
 */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
#if SIZEOF_REGISTER == 8
/*
 * ADD_WIDEN_OP:
 *
 *   On 64 bit targets, when a pointer-sized and an int32 operand are mixed,
 * widen the second operand of INS by emitting an OP_SEXT_I4 and rewriting
 * sreg2.  No-op on 32 bit targets.
 * NOTE(review): the scraped block lacked the `}` closing the if; restored.
 */
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {	\
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
/*
 * ADD_BINOP:
 *
 *   Pop two values off the eval stack, emit the type-specialized form of the
 * generic binary opcode OP (via type_from_op), and push the result.
 */
#define ADD_BINOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp -= 2;	\
		ins->sreg1 = sp [0]->dreg;	\
		ins->sreg2 = sp [1]->dreg;	\
		type_from_op (ins, sp [0], sp [1]);	\
		CHECK_TYPE (ins);	\
		/* Have to insert a widening op */		 \
        ADD_WIDEN_OP (ins, sp [0], sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

/*
 * ADD_UNOP:
 *
 *   Pop one value off the eval stack, emit the type-specialized form of the
 * generic unary opcode OP, and push the result.
 */
#define ADD_UNOP(op) do {	\
		MONO_INST_NEW (cfg, ins, (op));	\
		sp--;	\
		ins->sreg1 = sp [0]->dreg;	\
		type_from_op (ins, sp [0], NULL);	\
		CHECK_TYPE (ins);	\
        (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins);	\
	} while (0)
/*
 * ADD_BINCOND:
 *
 *   Emit a two-way conditional branch: pop two operands, emit a typed
 * compare followed by the branch INS, link the current bblock to the true
 * target (at 'target') and the false target (NEXT_BLOCK when non-NULL,
 * otherwise the bblock at the fall-through ip), flushing the eval stack if
 * it is non-empty.
 * NOTE(review): the scraped block lacked the braces closing the else and
 * the stack-flush if; restored.
 */
#define ADD_BINCOND(next_block) do {	\
		MonoInst *cmp;	\
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE);	\
		cmp->sreg1 = sp [0]->dreg;	\
		cmp->sreg2 = sp [1]->dreg;	\
		type_from_op (cmp, sp [0], sp [1]);	\
		CHECK_TYPE (cmp);	\
		type_from_op (ins, sp [0], sp [1]);	\
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);	\
		GET_BBLOCK (cfg, tblock, target);		\
		link_bblock (cfg, bblock, tblock);	\
		ins->inst_true_bb = tblock;	\
		if ((next_block)) {	\
			link_bblock (cfg, bblock, (next_block));	\
			ins->inst_false_bb = (next_block);	\
			start_new_bblock = 1;	\
		} else {	\
			GET_BBLOCK (cfg, tblock, ip);		\
			link_bblock (cfg, bblock, tblock);	\
			ins->inst_false_bb = tblock;	\
			start_new_bblock = 2;	\
		}	\
		if (sp != stack_start) {									\
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (bblock, cmp); \
		MONO_ADD_INS (bblock, ins);	\
	} while (0)
483 /* *
484 * link_bblock: Links two basic blocks
486 * links two basic blocks in the control flow graph, the 'from'
487 * argument is the starting block and the 'to' argument is the block
488 * the control flow ends to after 'from'.
490 static void
491 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
493 MonoBasicBlock **newa;
494 int i, found;
496 #if 0
497 if (from->cil_code) {
498 if (to->cil_code)
499 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
500 else
501 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
502 } else {
503 if (to->cil_code)
504 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
505 else
506 printf ("edge from entry to exit\n");
508 #endif
510 found = FALSE;
511 for (i = 0; i < from->out_count; ++i) {
512 if (to == from->out_bb [i]) {
513 found = TRUE;
514 break;
517 if (!found) {
518 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
519 for (i = 0; i < from->out_count; ++i) {
520 newa [i] = from->out_bb [i];
522 newa [i] = to;
523 from->out_count++;
524 from->out_bb = newa;
527 found = FALSE;
528 for (i = 0; i < to->in_count; ++i) {
529 if (from == to->in_bb [i]) {
530 found = TRUE;
531 break;
534 if (!found) {
535 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
536 for (i = 0; i < to->in_count; ++i) {
537 newa [i] = to->in_bb [i];
539 newa [i] = from;
540 to->in_count++;
541 to->in_bb = newa;
545 void
546 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
548 link_bblock (cfg, from, to);
552 * mono_find_block_region:
554 * We mark each basic block with a region ID. We use that to avoid BB
555 * optimizations when blocks are in different regions.
557 * Returns:
558 * A region token that encodes where this region is, and information
559 * about the clause owner for this block.
561 * The region encodes the try/catch/filter clause that owns this block
562 * as well as the type. -1 is a special value that represents a block
563 * that is in none of try/catch/filter.
565 static int
566 mono_find_block_region (MonoCompile *cfg, int offset)
568 MonoMethodHeader *header = cfg->header;
569 MonoExceptionClause *clause;
570 int i;
572 for (i = 0; i < header->num_clauses; ++i) {
573 clause = &header->clauses [i];
574 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
575 (offset < (clause->handler_offset)))
576 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
578 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
579 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
580 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
581 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
582 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
583 else
584 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
587 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
588 return ((i + 1) << 8) | clause->flags;
591 return -1;
594 static GList*
595 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
597 MonoMethodHeader *header = cfg->header;
598 MonoExceptionClause *clause;
599 int i;
600 GList *res = NULL;
602 for (i = 0; i < header->num_clauses; ++i) {
603 clause = &header->clauses [i];
604 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
605 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
606 if (clause->flags == type)
607 res = g_list_append (res, clause);
610 return res;
613 static void
614 mono_create_spvar_for_region (MonoCompile *cfg, int region)
616 MonoInst *var;
618 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
619 if (var)
620 return;
622 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
623 /* prevent it from being register allocated */
624 var->flags |= MONO_INST_VOLATILE;
626 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
629 MonoInst *
630 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
632 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
635 static MonoInst*
636 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
638 MonoInst *var;
640 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
641 if (var)
642 return var;
644 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
645 /* prevent it from being register allocated */
646 var->flags |= MONO_INST_VOLATILE;
648 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
650 return var;
654 * Returns the type used in the eval stack when @type is loaded.
655 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
657 void
658 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
660 MonoClass *klass;
662 type = mini_replace_type (type);
663 inst->klass = klass = mono_class_from_mono_type (type);
664 if (type->byref) {
665 inst->type = STACK_MP;
666 return;
669 handle_enum:
670 switch (type->type) {
671 case MONO_TYPE_VOID:
672 inst->type = STACK_INV;
673 return;
674 case MONO_TYPE_I1:
675 case MONO_TYPE_U1:
676 case MONO_TYPE_BOOLEAN:
677 case MONO_TYPE_I2:
678 case MONO_TYPE_U2:
679 case MONO_TYPE_CHAR:
680 case MONO_TYPE_I4:
681 case MONO_TYPE_U4:
682 inst->type = STACK_I4;
683 return;
684 case MONO_TYPE_I:
685 case MONO_TYPE_U:
686 case MONO_TYPE_PTR:
687 case MONO_TYPE_FNPTR:
688 inst->type = STACK_PTR;
689 return;
690 case MONO_TYPE_CLASS:
691 case MONO_TYPE_STRING:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_SZARRAY:
694 case MONO_TYPE_ARRAY:
695 inst->type = STACK_OBJ;
696 return;
697 case MONO_TYPE_I8:
698 case MONO_TYPE_U8:
699 inst->type = STACK_I8;
700 return;
701 case MONO_TYPE_R4:
702 case MONO_TYPE_R8:
703 inst->type = STACK_R8;
704 return;
705 case MONO_TYPE_VALUETYPE:
706 if (type->data.klass->enumtype) {
707 type = mono_class_enum_basetype (type->data.klass);
708 goto handle_enum;
709 } else {
710 inst->klass = klass;
711 inst->type = STACK_VTYPE;
712 return;
714 case MONO_TYPE_TYPEDBYREF:
715 inst->klass = mono_defaults.typed_reference_class;
716 inst->type = STACK_VTYPE;
717 return;
718 case MONO_TYPE_GENERICINST:
719 type = &type->data.generic_class->container_class->byval_arg;
720 goto handle_enum;
721 case MONO_TYPE_VAR:
722 case MONO_TYPE_MVAR:
723 g_assert (cfg->generic_sharing_context);
724 if (mini_is_gsharedvt_type (cfg, type)) {
725 g_assert (cfg->gsharedvt);
726 inst->type = STACK_VTYPE;
727 } else {
728 inst->type = STACK_OBJ;
730 return;
731 default:
732 g_error ("unknown type 0x%02x in eval stack type", type->type);
737 * The following tables are used to quickly validate the IL code in type_from_op ().
739 static const char
740 bin_num_table [STACK_MAX] [STACK_MAX] = {
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
751 static const char
752 neg_table [] = {
753 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
756 /* reduce the size of this table */
757 static const char
758 bin_int_table [STACK_MAX] [STACK_MAX] = {
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
761 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
762 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
763 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
764 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
765 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
766 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
769 static const char
770 bin_comp_table [STACK_MAX] [STACK_MAX] = {
771 /* Inv i L p F & O vt */
772 {0},
773 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
774 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
775 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
776 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
777 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
778 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
779 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
782 /* reduce the size of this table */
783 static const char
784 shift_table [STACK_MAX] [STACK_MAX] = {
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
787 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
796 * Tables to map from the non-specific opcode to the matching
797 * type-specific opcode.
799 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
800 static const guint16
801 binops_op_map [STACK_MAX] = {
802 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
805 /* handles from CEE_NEG to CEE_CONV_U8 */
806 static const guint16
807 unops_op_map [STACK_MAX] = {
808 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
811 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
812 static const guint16
813 ovfops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
817 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
818 static const guint16
819 ovf2ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
823 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
824 static const guint16
825 ovf3ops_op_map [STACK_MAX] = {
826 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
829 /* handles from CEE_BEQ to CEE_BLT_UN */
830 static const guint16
831 beqops_op_map [STACK_MAX] = {
832 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
835 /* handles from CEE_CEQ to CEE_CLT_UN */
836 static const guint16
837 ceqops_op_map [STACK_MAX] = {
838 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
842 * Sets ins->type (the type on the eval stack) according to the
843 * type of the opcode and the arguments to it.
844 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
846 * FIXME: this function sets ins->type unconditionally in some cases, but
847 * it should set it to invalid for some types (a conv.x on an object)
849 static void
850 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
852 switch (ins->opcode) {
853 /* binops */
854 case CEE_ADD:
855 case CEE_SUB:
856 case CEE_MUL:
857 case CEE_DIV:
858 case CEE_REM:
859 /* FIXME: check unverifiable args for STACK_MP */
860 ins->type = bin_num_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
862 break;
863 case CEE_DIV_UN:
864 case CEE_REM_UN:
865 case CEE_AND:
866 case CEE_OR:
867 case CEE_XOR:
868 ins->type = bin_int_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
870 break;
871 case CEE_SHL:
872 case CEE_SHR:
873 case CEE_SHR_UN:
874 ins->type = shift_table [src1->type] [src2->type];
875 ins->opcode += binops_op_map [ins->type];
876 break;
877 case OP_COMPARE:
878 case OP_LCOMPARE:
879 case OP_ICOMPARE:
880 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
881 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
882 ins->opcode = OP_LCOMPARE;
883 else if (src1->type == STACK_R8)
884 ins->opcode = OP_FCOMPARE;
885 else
886 ins->opcode = OP_ICOMPARE;
887 break;
888 case OP_ICOMPARE_IMM:
889 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
890 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
891 ins->opcode = OP_LCOMPARE_IMM;
892 break;
893 case CEE_BEQ:
894 case CEE_BGE:
895 case CEE_BGT:
896 case CEE_BLE:
897 case CEE_BLT:
898 case CEE_BNE_UN:
899 case CEE_BGE_UN:
900 case CEE_BGT_UN:
901 case CEE_BLE_UN:
902 case CEE_BLT_UN:
903 ins->opcode += beqops_op_map [src1->type];
904 break;
905 case OP_CEQ:
906 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
907 ins->opcode += ceqops_op_map [src1->type];
908 break;
909 case OP_CGT:
910 case OP_CGT_UN:
911 case OP_CLT:
912 case OP_CLT_UN:
913 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
914 ins->opcode += ceqops_op_map [src1->type];
915 break;
916 /* unops */
917 case CEE_NEG:
918 ins->type = neg_table [src1->type];
919 ins->opcode += unops_op_map [ins->type];
920 break;
921 case CEE_NOT:
922 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
923 ins->type = src1->type;
924 else
925 ins->type = STACK_INV;
926 ins->opcode += unops_op_map [ins->type];
927 break;
928 case CEE_CONV_I1:
929 case CEE_CONV_I2:
930 case CEE_CONV_I4:
931 case CEE_CONV_U4:
932 ins->type = STACK_I4;
933 ins->opcode += unops_op_map [src1->type];
934 break;
935 case CEE_CONV_R_UN:
936 ins->type = STACK_R8;
937 switch (src1->type) {
938 case STACK_I4:
939 case STACK_PTR:
940 ins->opcode = OP_ICONV_TO_R_UN;
941 break;
942 case STACK_I8:
943 ins->opcode = OP_LCONV_TO_R_UN;
944 break;
946 break;
947 case CEE_CONV_OVF_I1:
948 case CEE_CONV_OVF_U1:
949 case CEE_CONV_OVF_I2:
950 case CEE_CONV_OVF_U2:
951 case CEE_CONV_OVF_I4:
952 case CEE_CONV_OVF_U4:
953 ins->type = STACK_I4;
954 ins->opcode += ovf3ops_op_map [src1->type];
955 break;
956 case CEE_CONV_OVF_I_UN:
957 case CEE_CONV_OVF_U_UN:
958 ins->type = STACK_PTR;
959 ins->opcode += ovf2ops_op_map [src1->type];
960 break;
961 case CEE_CONV_OVF_I1_UN:
962 case CEE_CONV_OVF_I2_UN:
963 case CEE_CONV_OVF_I4_UN:
964 case CEE_CONV_OVF_U1_UN:
965 case CEE_CONV_OVF_U2_UN:
966 case CEE_CONV_OVF_U4_UN:
967 ins->type = STACK_I4;
968 ins->opcode += ovf2ops_op_map [src1->type];
969 break;
970 case CEE_CONV_U:
971 ins->type = STACK_PTR;
972 switch (src1->type) {
973 case STACK_I4:
974 ins->opcode = OP_ICONV_TO_U;
975 break;
976 case STACK_PTR:
977 case STACK_MP:
978 #if SIZEOF_VOID_P == 8
979 ins->opcode = OP_LCONV_TO_U;
980 #else
981 ins->opcode = OP_MOVE;
982 #endif
983 break;
984 case STACK_I8:
985 ins->opcode = OP_LCONV_TO_U;
986 break;
987 case STACK_R8:
988 ins->opcode = OP_FCONV_TO_U;
989 break;
991 break;
992 case CEE_CONV_I8:
993 case CEE_CONV_U8:
994 ins->type = STACK_I8;
995 ins->opcode += unops_op_map [src1->type];
996 break;
997 case CEE_CONV_OVF_I8:
998 case CEE_CONV_OVF_U8:
999 ins->type = STACK_I8;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1001 break;
1002 case CEE_CONV_OVF_U8_UN:
1003 case CEE_CONV_OVF_I8_UN:
1004 ins->type = STACK_I8;
1005 ins->opcode += ovf2ops_op_map [src1->type];
1006 break;
1007 case CEE_CONV_R4:
1008 case CEE_CONV_R8:
1009 ins->type = STACK_R8;
1010 ins->opcode += unops_op_map [src1->type];
1011 break;
1012 case OP_CKFINITE:
1013 ins->type = STACK_R8;
1014 break;
1015 case CEE_CONV_U2:
1016 case CEE_CONV_U1:
1017 ins->type = STACK_I4;
1018 ins->opcode += ovfops_op_map [src1->type];
1019 break;
1020 case CEE_CONV_I:
1021 case CEE_CONV_OVF_I:
1022 case CEE_CONV_OVF_U:
1023 ins->type = STACK_PTR;
1024 ins->opcode += ovfops_op_map [src1->type];
1025 break;
1026 case CEE_ADD_OVF:
1027 case CEE_ADD_OVF_UN:
1028 case CEE_MUL_OVF:
1029 case CEE_MUL_OVF_UN:
1030 case CEE_SUB_OVF:
1031 case CEE_SUB_OVF_UN:
1032 ins->type = bin_num_table [src1->type] [src2->type];
1033 ins->opcode += ovfops_op_map [src1->type];
1034 if (ins->type == STACK_R8)
1035 ins->type = STACK_INV;
1036 break;
1037 case OP_LOAD_MEMBASE:
1038 ins->type = STACK_PTR;
1039 break;
1040 case OP_LOADI1_MEMBASE:
1041 case OP_LOADU1_MEMBASE:
1042 case OP_LOADI2_MEMBASE:
1043 case OP_LOADU2_MEMBASE:
1044 case OP_LOADI4_MEMBASE:
1045 case OP_LOADU4_MEMBASE:
1046 ins->type = STACK_PTR;
1047 break;
1048 case OP_LOADI8_MEMBASE:
1049 ins->type = STACK_I8;
1050 break;
1051 case OP_LOADR4_MEMBASE:
1052 case OP_LOADR8_MEMBASE:
1053 ins->type = STACK_R8;
1054 break;
1055 default:
1056 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1057 break;
1060 if (ins->type == STACK_MP)
1061 ins->klass = mono_defaults.object_class;
/*
 * ldind_type:
 *
 *   Evaluation-stack type pushed by each indirect load; presumably indexed
 * by (opcode - CEE_LDIND_I1), i.e. i1, u1, i2, u2, i4, u4, i8, i, r4, r8,
 * ref — TODO confirm against the CEE_LDIND_* opcode ordering.
 */
static const char
ldind_type [] = {
STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1069 #if 0
1071 static const char
1072 param_table [STACK_MAX] [STACK_MAX] = {
1073 {0},
1076 static int
1077 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1078 int i;
1080 if (sig->hasthis) {
1081 switch (args->type) {
1082 case STACK_I4:
1083 case STACK_I8:
1084 case STACK_R8:
1085 case STACK_VTYPE:
1086 case STACK_INV:
1087 return 0;
1089 args++;
1091 for (i = 0; i < sig->param_count; ++i) {
1092 switch (args [i].type) {
1093 case STACK_INV:
1094 return 0;
1095 case STACK_MP:
1096 if (!sig->params [i]->byref)
1097 return 0;
1098 continue;
1099 case STACK_OBJ:
1100 if (sig->params [i]->byref)
1101 return 0;
1102 switch (sig->params [i]->type) {
1103 case MONO_TYPE_CLASS:
1104 case MONO_TYPE_STRING:
1105 case MONO_TYPE_OBJECT:
1106 case MONO_TYPE_SZARRAY:
1107 case MONO_TYPE_ARRAY:
1108 break;
1109 default:
1110 return 0;
1112 continue;
1113 case STACK_R8:
1114 if (sig->params [i]->byref)
1115 return 0;
1116 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1117 return 0;
1118 continue;
1119 case STACK_PTR:
1120 case STACK_I4:
1121 case STACK_I8:
1122 case STACK_VTYPE:
1123 break;
1125 /*if (!param_table [args [i].type] [sig->params [i]->type])
1126 return 0;*/
1128 return 1;
1130 #endif
1133 * When we need a pointer to the current domain many times in a method, we
1134 * call mono_domain_get() once and we store the result in a local variable.
1135 * This function returns the variable that represents the MonoDomain*.
1137 inline static MonoInst *
1138 mono_get_domainvar (MonoCompile *cfg)
1140 if (!cfg->domainvar)
1141 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1142 return cfg->domainvar;
1146 * The got_var contains the address of the Global Offset Table when AOT
1147 * compiling.
1149 MonoInst *
1150 mono_get_got_var (MonoCompile *cfg)
1152 #ifdef MONO_ARCH_NEED_GOT_VAR
1153 if (!cfg->compile_aot)
1154 return NULL;
1155 if (!cfg->got_var) {
1156 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1158 return cfg->got_var;
1159 #else
1160 return NULL;
1161 #endif
1164 static MonoInst *
1165 mono_get_vtable_var (MonoCompile *cfg)
1167 g_assert (cfg->generic_sharing_context);
1169 if (!cfg->rgctx_var) {
1170 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1171 /* force the var to be stack allocated */
1172 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1175 return cfg->rgctx_var;
1178 static MonoType*
1179 type_from_stack_type (MonoInst *ins) {
1180 switch (ins->type) {
1181 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1182 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1183 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1184 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1185 case STACK_MP:
1186 return &ins->klass->this_arg;
1187 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1188 case STACK_VTYPE: return &ins->klass->byval_arg;
1189 default:
1190 g_error ("stack type %d to monotype not handled\n", ins->type);
1192 return NULL;
1195 static G_GNUC_UNUSED int
1196 type_to_stack_type (MonoType *t)
1198 t = mono_type_get_underlying_type (t);
1199 switch (t->type) {
1200 case MONO_TYPE_I1:
1201 case MONO_TYPE_U1:
1202 case MONO_TYPE_BOOLEAN:
1203 case MONO_TYPE_I2:
1204 case MONO_TYPE_U2:
1205 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_I4:
1207 case MONO_TYPE_U4:
1208 return STACK_I4;
1209 case MONO_TYPE_I:
1210 case MONO_TYPE_U:
1211 case MONO_TYPE_PTR:
1212 case MONO_TYPE_FNPTR:
1213 return STACK_PTR;
1214 case MONO_TYPE_CLASS:
1215 case MONO_TYPE_STRING:
1216 case MONO_TYPE_OBJECT:
1217 case MONO_TYPE_SZARRAY:
1218 case MONO_TYPE_ARRAY:
1219 return STACK_OBJ;
1220 case MONO_TYPE_I8:
1221 case MONO_TYPE_U8:
1222 return STACK_I8;
1223 case MONO_TYPE_R4:
1224 case MONO_TYPE_R8:
1225 return STACK_R8;
1226 case MONO_TYPE_VALUETYPE:
1227 case MONO_TYPE_TYPEDBYREF:
1228 return STACK_VTYPE;
1229 case MONO_TYPE_GENERICINST:
1230 if (mono_type_generic_inst_is_valuetype (t))
1231 return STACK_VTYPE;
1232 else
1233 return STACK_OBJ;
1234 break;
1235 default:
1236 g_assert_not_reached ();
1239 return -1;
1242 static MonoClass*
1243 array_access_to_klass (int opcode)
1245 switch (opcode) {
1246 case CEE_LDELEM_U1:
1247 return mono_defaults.byte_class;
1248 case CEE_LDELEM_U2:
1249 return mono_defaults.uint16_class;
1250 case CEE_LDELEM_I:
1251 case CEE_STELEM_I:
1252 return mono_defaults.int_class;
1253 case CEE_LDELEM_I1:
1254 case CEE_STELEM_I1:
1255 return mono_defaults.sbyte_class;
1256 case CEE_LDELEM_I2:
1257 case CEE_STELEM_I2:
1258 return mono_defaults.int16_class;
1259 case CEE_LDELEM_I4:
1260 case CEE_STELEM_I4:
1261 return mono_defaults.int32_class;
1262 case CEE_LDELEM_U4:
1263 return mono_defaults.uint32_class;
1264 case CEE_LDELEM_I8:
1265 case CEE_STELEM_I8:
1266 return mono_defaults.int64_class;
1267 case CEE_LDELEM_R4:
1268 case CEE_STELEM_R4:
1269 return mono_defaults.single_class;
1270 case CEE_LDELEM_R8:
1271 case CEE_STELEM_R8:
1272 return mono_defaults.double_class;
1273 case CEE_LDELEM_REF:
1274 case CEE_STELEM_REF:
1275 return mono_defaults.object_class;
1276 default:
1277 g_assert_not_reached ();
1279 return NULL;
1283 * We try to share variables when possible
1285 static MonoInst *
1286 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1288 MonoInst *res;
1289 int pos, vnum;
1291 /* inlining can result in deeper stacks */
1292 if (slot >= cfg->header->max_stack)
1293 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1295 pos = ins->type - 1 + slot * STACK_MAX;
1297 switch (ins->type) {
1298 case STACK_I4:
1299 case STACK_I8:
1300 case STACK_R8:
1301 case STACK_PTR:
1302 case STACK_MP:
1303 case STACK_OBJ:
1304 if ((vnum = cfg->intvars [pos]))
1305 return cfg->varinfo [vnum];
1306 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1307 cfg->intvars [pos] = res->inst_c0;
1308 break;
1309 default:
1310 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1312 return res;
1315 static void
1316 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1319 * Don't use this if a generic_context is set, since that means AOT can't
1320 * look up the method using just the image+token.
1321 * table == 0 means this is a reference made from a wrapper.
1323 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1324 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1325 jump_info_token->image = image;
1326 jump_info_token->token = token;
1327 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1332 * This function is called to handle items that are left on the evaluation stack
1333 * at basic block boundaries. What happens is that we save the values to local variables
1334 * and we reload them later when first entering the target basic block (with the
1335 * handle_loaded_temps () function).
1336 * A single joint point will use the same variables (stored in the array bb->out_stack or
1337 * bb->in_stack, if the basic block is before or after the joint point).
1339 * This function needs to be called _before_ emitting the last instruction of
1340 * the bb (i.e. before emitting a branch).
1341 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT values in SP (the evaluation stack) to local variables
 * at the end of the current basic block, so successor blocks can reload
 * them. Successors share the variables through bb->out_stack/outb->in_stack;
 * a mismatch in stack depth at a join marks the method unverifiable.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
int i, bindex;
MonoBasicBlock *bb = cfg->cbb;
MonoBasicBlock *outb;
MonoInst *inst, **locals;
gboolean found;
if (!count)
return;
if (cfg->verbose_level > 3)
printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb with a non-empty stack: pick or build out_stack. */
if (!bb->out_scount) {
bb->out_scount = count;
//printf ("bblock %d has out:", bb->block_num);
found = FALSE;
/* Prefer reusing the in_stack of a successor, if one already has one. */
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
//printf (" %d", outb->block_num);
if (outb->in_stack) {
found = TRUE;
bb->out_stack = outb->in_stack;
break;
//printf ("\n");
if (!found) {
bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
for (i = 0; i < count; ++i) {
* try to reuse temps already allocated for this purpose, if they occupy the same
* stack slot and if they are of the same type.
* This won't cause conflicts since if 'local' is used to
* store one of the values in the in_stack of a bblock, then
* the same variable will be used for the same outgoing stack
* slot as well.
* This doesn't work when inlining methods, since the bblocks
* in the inlined methods do not inherit their in_stack from
* the bblock they are inlined to. See bug #58863 for an
* example.
if (cfg->inlined_method)
bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
else
bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet. */
for (i = 0; i < bb->out_count; ++i) {
outb = bb->out_bb [i];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER)
continue;
if (outb->in_scount) {
if (outb->in_scount != bb->out_scount) {
/* stack depth mismatch at a join point */
cfg->unverifiable = TRUE;
return;
continue; /* check they are the same locals */
outb->in_scount = count;
outb->in_stack = bb->out_stack;
/* Store each stack value into its shared temp and replace it on sp. */
locals = bb->out_stack;
cfg->cbb = bb;
for (i = 0; i < count; ++i) {
EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
* It is possible that the out bblocks already have in_stack assigned, and
* the in_stacks differ. In this case, we will store to all the different
* in_stacks.
found = TRUE;
bindex = 0;
while (found) {
/* Find a bblock which has a different in_stack */
found = FALSE;
while (bindex < bb->out_count) {
outb = bb->out_bb [bindex];
/* exception handlers are linked, but they should not be considered for stack args */
if (outb->flags & BB_EXCEPTION_HANDLER) {
bindex++;
continue;
if (outb->in_stack != locals) {
for (i = 0; i < count; ++i) {
EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
inst->cil_code = sp [i]->cil_code;
sp [i] = locals [i];
if (cfg->verbose_level > 3)
printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
locals = outb->in_stack;
found = TRUE;
break;
bindex ++;
1457 /* Emit code which loads interface_offsets [klass->interface_id]
1458 * The array is stored in memory before vtable.
1460 static void
1461 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1463 if (cfg->compile_aot) {
1464 int ioffset_reg = alloc_preg (cfg);
1465 int iid_reg = alloc_preg (cfg);
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1471 else {
1472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR setting INTF_BIT_REG to a nonzero value iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id
 * set. Two implementations: a runtime helper call when the bitmap is
 * compressed, otherwise a direct byte load + mask.
 */
static void
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
MonoInst *args [2];
MonoInst *res, *ins;
/* Compressed bitmap: delegate the lookup to mono_class_interface_match (). */
NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
MONO_ADD_INS (cfg->cbb, ins);
args [0] = ins;
if (cfg->compile_aot)
EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
else
EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
#else
int ibitmap_byte_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
if (cfg->compile_aot) {
/* The iid is not known at JIT time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers. */
int iid_reg = alloc_preg (cfg);
int shifted_iid_reg = alloc_preg (cfg);
int ibitmap_byte_address_reg = alloc_preg (cfg);
int masked_iid_reg = alloc_preg (cfg);
int iid_one_bit_reg = alloc_preg (cfg);
int iid_bit_reg = alloc_preg (cfg);
MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
} else {
/* iid known at JIT time: load the byte and AND with a constant mask. */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
#endif
1520 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1521 * stored in "klass_reg" implements the interface "klass".
1523 static void
1524 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1526 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1530 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1531 * stored in "vtable_reg" implements the interface "klass".
1533 static void
1534 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1536 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
* Emit code which checks whether the interface id of @klass is smaller than
* the value given by max_iid_reg.
1543 static void
1544 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1545 MonoBasicBlock *false_target)
1547 if (cfg->compile_aot) {
1548 int iid_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1552 else
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1554 if (false_target)
1555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1556 else
1557 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1560 /* Same as above, but obtains max_iid from a vtable */
1561 static void
1562 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1571 /* Same as above, but obtains max_iid from a klass */
1572 static void
1573 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1574 MonoBasicBlock *false_target)
1576 int max_iid_reg = alloc_preg (cfg);
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1579 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit IR implementing an isinst-style subtype test: check whether the
 * MonoClass in KLASS_REG is a subtype of KLASS by looking at its supertypes
 * table. Branch to TRUE_TARGET on a match, to FALSE_TARGET when the idepth
 * is too small. KLASS_INS, when non-NULL, supplies the class to compare
 * against at runtime (generic sharing).
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
int idepth_reg = alloc_preg (cfg);
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
mono_class_setup_supertypes (klass);
/* Only emit the idepth check when the supertypes array may be shorter
 * than klass->idepth entries. */
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it with KLASS. */
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
if (klass_ins) {
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
} else if (cfg->compile_aot) {
int const_reg = alloc_preg (cfg);
MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
} else {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1610 static void
1611 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1616 static void
1617 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1619 int intf_reg = alloc_preg (cfg);
1621 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1622 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1624 if (true_target)
1625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1626 else
1627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1631 * Variant of the above that takes a register to the class, not the vtable.
1633 static void
1634 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1636 int intf_bit_reg = alloc_preg (cfg);
1638 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1639 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1641 if (true_target)
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1643 else
1644 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1647 static inline void
1648 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1650 if (klass_inst) {
1651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1652 } else if (cfg->compile_aot) {
1653 int const_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1656 } else {
1657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1659 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1662 static inline void
1663 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1665 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1668 static inline void
1669 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1671 if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1675 } else {
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1681 static void
1682 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit IR implementing castclass: verify that the class in KLASS_REG is
 * castable to KLASS, throwing InvalidCastException otherwise. Arrays get a
 * rank check plus a (possibly recursive) element-class check; other classes
 * are checked via the supertypes table. OBJ_REG may be -1 to skip the
 * SZARRAY bounds check (used for arrays of arrays).
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
if (klass->rank) {
int rank_reg = alloc_preg (cfg);
int eclass_reg = alloc_preg (cfg);
g_assert (!klass_inst);
/* Rank must match exactly. */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
// MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes involving System.Enum/object (array
 * covariance corner cases), otherwise recurse on the element class. */
if (klass->cast_class == mono_defaults.object_class) {
int parent_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
} else if (klass->cast_class == mono_defaults.enum_class->parent) {
mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
} else if (klass->cast_class == mono_defaults.enum_class) {
mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
} else {
// Pass -1 as obj_reg to skip the check below for arrays of arrays
mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
/* Check that the object is a vector too */
int bounds_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
} else {
int idepth_reg = alloc_preg (cfg);
int stypes_reg = alloc_preg (cfg);
int stype = alloc_preg (cfg);
mono_class_setup_supertypes (klass);
/* Non-array: check supertypes [klass->idepth - 1] == klass. */
if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1739 static void
1740 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1742 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR storing VAL (asserted to be 0) into SIZE bytes at
 * DESTREG + OFFSET, using the widest stores ALIGN permits. Small
 * well-aligned sizes become a single immediate store.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
int val_reg;
g_assert (val == 0);
if (align == 0)
align = 4;
/* Single store fast path for small, sufficiently aligned sizes. */
if ((size <= SIZEOF_REGISTER) && (size <= align)) {
switch (size) {
case 1:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
return;
case 2:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
return;
case 4:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
return;
#if SIZEOF_REGISTER == 8
case 8:
MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
return;
#endif
/* Materialize the value once and store it repeatedly. */
val_reg = alloc_preg (cfg);
if (SIZEOF_REGISTER == 8)
MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
else
MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
if (align < 4) {
/* This could be optimized further if necessary */
while (size >= 1) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
offset += 1;
size -= 1;
return;
#if !NO_UNALIGNED_ACCESS
if (SIZEOF_REGISTER == 8) {
/* Align to 8 bytes before switching to 8-byte stores. */
if (offset % 8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
offset += 4;
size -= 4;
while (size >= 8) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
offset += 8;
size -= 8;
#endif
/* Remaining tail: 4-, 2-, then 1-byte stores. */
while (size >= 4) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
offset += 4;
size -= 4;
while (size >= 2) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
offset += 2;
size -= 2;
while (size >= 1) {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
offset += 1;
size -= 1;
/*
 * mini_emit_memcpy:
 *
 *   Emit IR copying SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET,
 * unrolled into the widest loads/stores that ALIGN permits.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
int cur_reg;
if (align == 0)
align = 4;
/*FIXME arbitrary hack to avoid unbound code expansion.*/
g_assert (size < 10000);
if (align < 4) {
/* This could be optimized further if necessary */
while (size >= 1) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
doffset += 1;
soffset += 1;
size -= 1;
#if !NO_UNALIGNED_ACCESS
/* 8-byte copies first on 64-bit targets. */
if (SIZEOF_REGISTER == 8) {
while (size >= 8) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
doffset += 8;
soffset += 8;
size -= 8;
#endif
/* Remaining tail: 4-, 2-, then 1-byte copies. */
while (size >= 4) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
doffset += 4;
soffset += 4;
size -= 4;
while (size >= 2) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
doffset += 2;
soffset += 2;
size -= 2;
while (size >= 1) {
cur_reg = alloc_preg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
doffset += 1;
soffset += 1;
size -= 1;
1885 static void
1886 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1888 MonoInst *ins, *c;
1890 if (cfg->compile_aot) {
1891 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1892 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1893 ins->sreg1 = sreg1;
1894 ins->sreg2 = c->dreg;
1895 MONO_ADD_INS (cfg->cbb, ins);
1896 } else {
1897 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1898 ins->sreg1 = sreg1;
1899 ins->inst_offset = mini_get_tls_offset (tls_key);
1900 MONO_ADD_INS (cfg->cbb, ins);
1905 * emit_push_lmf:
1907 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 *
 *   Emit IR to push the current LMF (Last Managed Frame) onto the LMF
 * stack. No-op unless the method uses IR-based LMF handling (cfg->lmf_ir).
 */
static void
emit_push_lmf (MonoCompile *cfg)
* Emit IR to push the LMF:
* lmf_addr = <lmf_addr from tls>
* lmf->lmf_addr = lmf_addr
* lmf->prev_lmf = *lmf_addr
* *lmf_addr = lmf
int lmf_reg, prev_lmf_reg;
MonoInst *ins, *lmf_ins;
if (!cfg->lmf_ir)
return;
/* Fast path: the LMF itself lives in a TLS slot. */
if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Load current lmf */
lmf_ins = mono_get_lmf_intrinsic (cfg);
g_assert (lmf_ins);
MONO_ADD_INS (cfg->cbb, lmf_ins);
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
/* Save previous_lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
} else {
* Store lmf_addr in a variable, so it can be allocated to a global register.
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
#ifdef HOST_WIN32
ins = mono_get_jit_tls_intrinsic (cfg);
if (ins) {
int jit_tls_dreg = ins->dreg;
MONO_ADD_INS (cfg->cbb, ins);
lmf_reg = alloc_preg (cfg);
EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
} else {
/* Fall back to a helper call when there is no jit_tls intrinsic. */
lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
#else
lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
if (lmf_ins) {
MONO_ADD_INS (cfg->cbb, lmf_ins);
} else {
#ifdef TARGET_IOS
MonoInst *args [16], *jit_tls_ins, *ins;
/* Inline mono_get_lmf_addr () */
/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
/* Load mono_jit_tls_id */
EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
/* call pthread_getspecific () */
jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
/* lmf_addr = &jit_tls->lmf */
EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
lmf_ins = ins;
#else
lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
#endif
#endif
lmf_ins->dreg = cfg->lmf_addr_var->dreg;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
prev_lmf_reg = alloc_preg (cfg);
/* Save previous_lmf */
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf */
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1992 * emit_pop_lmf:
1994 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 *
 *   Emit IR to pop the current LMF from the LMF stack, restoring the
 * previous one. No-op unless the method uses IR-based LMF handling.
 */
static void
emit_pop_lmf (MonoCompile *cfg)
int lmf_reg, lmf_addr_reg, prev_lmf_reg;
MonoInst *ins;
if (!cfg->lmf_ir)
return;
EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
lmf_reg = ins->dreg;
/* Fast path: the LMF itself lives in a TLS slot. */
if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Load previous_lmf */
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
} else {
* Emit IR to pop the LMF:
* *(lmf->lmf_addr) = lmf->prev_lmf
/* This could be called before emit_push_lmf () */
if (!cfg->lmf_addr_var)
cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
lmf_addr_reg = cfg->lmf_addr_var->dreg;
prev_lmf_reg = alloc_preg (cfg);
EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
2030 static void
2031 emit_instrumentation_call (MonoCompile *cfg, void *func)
2033 MonoInst *iargs [1];
2036 * Avoid instrumenting inlined methods since it can
2037 * distort profiling results.
2039 if (cfg->method != cfg->current_method)
2040 return;
2042 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2043 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2044 mono_emit_jit_icall (cfg, func, iargs);
2048 static int
2049 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2051 if (type->byref)
2052 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2054 handle_enum:
2055 type = mini_get_basic_type_from_generic (gsctx, type);
2056 type = mini_replace_type (type);
2057 switch (type->type) {
2058 case MONO_TYPE_VOID:
2059 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2060 case MONO_TYPE_I1:
2061 case MONO_TYPE_U1:
2062 case MONO_TYPE_BOOLEAN:
2063 case MONO_TYPE_I2:
2064 case MONO_TYPE_U2:
2065 case MONO_TYPE_CHAR:
2066 case MONO_TYPE_I4:
2067 case MONO_TYPE_U4:
2068 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2069 case MONO_TYPE_I:
2070 case MONO_TYPE_U:
2071 case MONO_TYPE_PTR:
2072 case MONO_TYPE_FNPTR:
2073 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2074 case MONO_TYPE_CLASS:
2075 case MONO_TYPE_STRING:
2076 case MONO_TYPE_OBJECT:
2077 case MONO_TYPE_SZARRAY:
2078 case MONO_TYPE_ARRAY:
2079 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2080 case MONO_TYPE_I8:
2081 case MONO_TYPE_U8:
2082 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2083 case MONO_TYPE_R4:
2084 case MONO_TYPE_R8:
2085 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2086 case MONO_TYPE_VALUETYPE:
2087 if (type->data.klass->enumtype) {
2088 type = mono_class_enum_basetype (type->data.klass);
2089 goto handle_enum;
2090 } else
2091 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2092 case MONO_TYPE_TYPEDBYREF:
2093 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2094 case MONO_TYPE_GENERICINST:
2095 type = &type->data.generic_class->container_class->byval_arg;
2096 goto handle_enum;
2097 case MONO_TYPE_VAR:
2098 case MONO_TYPE_MVAR:
2099 /* gsharedvt */
2100 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2101 default:
2102 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2104 return -1;
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	target = mini_replace_type (target);
	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		/* Anything else on the stack cannot be stored into a byref target. */
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* Nothing can be stored into a void target. */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* Value types must match exactly, both stack kind and class. */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			/* Same exact-match rule as MONO_TYPE_VALUETYPE. */
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* Type variables only appear here under generic sharing. */
		g_assert (cfg->generic_sharing_context);
		if (mini_type_var_is_vt (cfg, simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
		}
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
2229 * Prepare arguments for passing to a function call.
2230 * Return a non-zero value if the arguments can't be passed to the given
2231 * signature.
2232 * The type checks are not yet complete and some conversions may need
2233 * casts on 32 or 64 bit architectures.
2235 * FIXME: implement this using target_type_is_incompatible ()
2237 static int
2238 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2240 MonoType *simple_type;
2241 int i;
2243 if (sig->hasthis) {
2244 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2245 return 1;
2246 args++;
2248 for (i = 0; i < sig->param_count; ++i) {
2249 if (sig->params [i]->byref) {
2250 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2251 return 1;
2252 continue;
2254 simple_type = sig->params [i];
2255 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2256 handle_enum:
2257 switch (simple_type->type) {
2258 case MONO_TYPE_VOID:
2259 return 1;
2260 continue;
2261 case MONO_TYPE_I1:
2262 case MONO_TYPE_U1:
2263 case MONO_TYPE_BOOLEAN:
2264 case MONO_TYPE_I2:
2265 case MONO_TYPE_U2:
2266 case MONO_TYPE_CHAR:
2267 case MONO_TYPE_I4:
2268 case MONO_TYPE_U4:
2269 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2270 return 1;
2271 continue;
2272 case MONO_TYPE_I:
2273 case MONO_TYPE_U:
2274 case MONO_TYPE_PTR:
2275 case MONO_TYPE_FNPTR:
2276 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2277 return 1;
2278 continue;
2279 case MONO_TYPE_CLASS:
2280 case MONO_TYPE_STRING:
2281 case MONO_TYPE_OBJECT:
2282 case MONO_TYPE_SZARRAY:
2283 case MONO_TYPE_ARRAY:
2284 if (args [i]->type != STACK_OBJ)
2285 return 1;
2286 continue;
2287 case MONO_TYPE_I8:
2288 case MONO_TYPE_U8:
2289 if (args [i]->type != STACK_I8)
2290 return 1;
2291 continue;
2292 case MONO_TYPE_R4:
2293 case MONO_TYPE_R8:
2294 if (args [i]->type != STACK_R8)
2295 return 1;
2296 continue;
2297 case MONO_TYPE_VALUETYPE:
2298 if (simple_type->data.klass->enumtype) {
2299 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2300 goto handle_enum;
2302 if (args [i]->type != STACK_VTYPE)
2303 return 1;
2304 continue;
2305 case MONO_TYPE_TYPEDBYREF:
2306 if (args [i]->type != STACK_VTYPE)
2307 return 1;
2308 continue;
2309 case MONO_TYPE_GENERICINST:
2310 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2311 goto handle_enum;
2312 case MONO_TYPE_VAR:
2313 case MONO_TYPE_MVAR:
2314 /* gsharedvt */
2315 if (args [i]->type != STACK_VTYPE)
2316 return 1;
2317 continue;
2318 default:
2319 g_error ("unknown type 0x%02x in check_call_signature",
2320 simple_type->type);
2323 return 0;
2326 static int
2327 callvirt_to_call (int opcode)
2329 switch (opcode) {
2330 case OP_CALL_MEMBASE:
2331 return OP_CALL;
2332 case OP_VOIDCALL_MEMBASE:
2333 return OP_VOIDCALL;
2334 case OP_FCALL_MEMBASE:
2335 return OP_FCALL;
2336 case OP_VCALL_MEMBASE:
2337 return OP_VCALL;
2338 case OP_LCALL_MEMBASE:
2339 return OP_LCALL;
2340 default:
2341 g_assert_not_reached ();
2344 return -1;
/* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Emit IR materializing the IMT argument into a vreg and attach it to CALL
 * as an out-of-band register argument. When IMT_ARG is NULL, METHOD itself is
 * loaded as the argument (as a constant, AOT const when compiling AOT).
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		/* LLVM path: always materialize into a vreg and record it on the call. */
		method_reg = alloc_preg (cfg);

		if (imt_arg) {
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
		} else {
			MonoInst *ins;
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

#ifdef ENABLE_LLVM
		call->imt_arg_reg = method_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* Need this to keep the IMT arg alive */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	/* Non-LLVM path on archs with a dedicated IMT register. */
	method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	/* Archs without an IMT register delegate to the backend. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
2401 static MonoJumpInfo *
2402 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2404 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2406 ji->ip.i = ip;
2407 ji->type = type;
2408 ji->data.target = target;
2410 return ji;
2413 static int
2414 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2416 if (cfg->generic_sharing_context)
2417 return mono_class_check_context_used (klass);
2418 else
2419 return 0;
2422 static int
2423 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2425 if (cfg->generic_sharing_context)
2426 return mono_method_check_context_used (method);
2427 else
2428 return 0;
/*
 * check_method_sharing:
 *
 *   Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
 * At most one of the two flags is set (asserted below); both out parameters are
 * optional.
 */
static void
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
{
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	/* Static or valuetype methods of generic classes may need the vtable. */
	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
		(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable (cmethod, TRUE)) {
			sharable = TRUE;
		} else {
			gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
			MonoGenericContext *context = mini_class_get_context (cmethod->klass);
			gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

			sharable = sharing_enabled && context_sharable;
		}

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 * generic method).
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
			pass_vtable = TRUE;
	}

	/* Generic methods (with a method instantiation) may need an mrgctx instead. */
	if (mini_method_get_context (cmethod) &&
		mini_method_get_context (cmethod)->method_inst) {
		g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable (cmethod, TRUE)) {
			pass_mrgctx = TRUE;
		} else {
			gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
			MonoGenericContext *context = mini_method_get_context (cmethod);
			gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

			if (sharing_enabled && context_sharable)
				pass_mrgctx = TRUE;
			/* gsharedvt compiles also pass an mrgctx for gsharedvt signatures. */
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
				pass_mrgctx = TRUE;
		}
	}

	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
}
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * and run the arch specific outgoing-argument lowering on it.
 * CALLI/VIRTUAL/TAIL select the call opcode variant, RGCTX marks calls which
 * receive an rgctx argument, UNBOX_TRAMPOLINE requests an unbox trampoline.
 * The returned call instruction is NOT added to the current basic block;
 * callers do that themselves.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
{
	MonoType *sig_ret;
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	int i;
#endif

	if (tail) {
		/* A tail call leaves the method, so emit the leave probe first. */
		emit_instrumentation_call (cfg, mono_profiler_method_leave);

		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	} else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_replace_type (sig->ret);

	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);

	if (tail) {
		if (mini_type_is_vtype (cfg, sig_ret)) {
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (mini_type_is_vtype (cfg, sig_ret)) {
		/* Valuetype return: allocate a local to receive it. */
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

	call->need_unbox_trampoline = unbox_trampoline;

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-parameter area and remember the method makes calls. */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value in RGCTX_REG to CALL as an out-of-band register
 * argument in the arch specific rgctx register. Only supported on archs
 * which define MONO_ARCH_RGCTX_REG.
 */
static void
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
#ifdef ENABLE_LLVM
	call->rgctx_arg_reg = rgctx_reg;
#endif
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG, optionally passing
 * an IMT argument and/or an rgctx argument, and add it to the current basic
 * block. For pinvoke wrappers (when cfg->check_pinvoke_callconv is set) the
 * stack pointer is saved before and compared after the call, throwing
 * ExecutionEngineException on a mismatch.
 */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
	MonoCallInst *call;
	MonoInst *ins;
	int rgctx_reg = -1;
	gboolean check_sp = FALSE;

	/* Only check the SP around the native call inside pinvoke wrappers. */
	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);

		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
			check_sp = TRUE;
	}

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (check_sp) {
		if (!cfg->stack_inbalance_var)
			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

		/* Save the stack pointer before the call. */
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);

	/* The call target address is the first source register. */
	call->inst.sreg1 = addr->dreg;

	if (imt_arg)
		emit_imt_argument (cfg, call, NULL, imt_arg);

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (check_sp) {
		int sp_reg;

		sp_reg = mono_alloc_preg (cfg);

		/* Read the SP after the call to compare against the saved value. */
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = sp_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		/* Restore the stack so we don't crash when throwing the exception */
		MONO_INST_NEW (cfg, ins, OP_SET_SP);
		ins->sreg1 = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
	}

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2672 static MonoInst*
2673 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2675 static MonoInst*
2676 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2677 static MonoInst*
2678 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with arguments ARGS and add it to the current
 * basic block. A non-NULL THIS selects virtual dispatch; IMT_ARG and
 * RGCTX_ARG, when set, are passed through to the call. SIG may be NULL,
 * in which case the method's own signature is used.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
{
#ifndef DISABLE_REMOTING
	gboolean might_be_remote = FALSE;
#endif
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;
	int rgctx_reg = 0;
	gboolean need_unbox_trampoline;

	if (!sig)
		sig = mono_method_signature (method);

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	context_used = mini_method_check_context_used (cfg, method);

#ifndef DISABLE_REMOTING
	/* Calls on MarshalByRef objects may have to go through the remoting machinery. */
	might_be_remote = this && sig->hasthis &&
		(mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);

	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
	}
#endif

	need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);

#ifndef DISABLE_REMOTING
	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
#endif
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;
	call->tail_call = tail;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;
		int offset;

		this_reg = this->dreg;

		/* Delegate Invoke goes through delegate->invoke_impl. */
		if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
			MonoInst *dummy_use;

			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			/* We must emit a dummy use here because the delegate trampoline will
			replace the 'this' argument with the delegate target making this activation
			no longer a root for the delegate.
			This is an issue for delegates that target collectible code such as dynamic
			methods of GC'able assemblies.

			For a test case look into #667921.

			FIXME: a dummy use is not the best way to do it as the local register allocator
			will put it on a caller save register and spil it around the call.
			Ideally, we would either put it on a callee save register or only do the store part.
			 */
			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);

			return (MonoInst*)call;
		}

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(mono_class_is_marshalbyref (method->klass) && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
#ifndef DISABLE_REMOTING
			if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}
#endif

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
		} else {
			/* True virtual dispatch: load the vtable and select the slot. */
			vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
			if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
				slot_reg = -1;
				if (mono_use_imt) {
					/* IMT slots live at negative offsets from the vtable. */
					guint32 imt_slot = mono_method_get_imt_slot (method);
					emit_imt_argument (cfg, call, call->method, imt_arg);
					slot_reg = vtable_reg;
					offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
				}
				if (slot_reg == -1) {
					slot_reg = alloc_preg (cfg);
					mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
					offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
				}
			} else {
				slot_reg = vtable_reg;
				offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
				if (imt_arg) {
					g_assert (mono_method_signature (method)->generic_param_count);
					emit_imt_argument (cfg, call, call->method, imt_arg);
				}
			}

			call->inst.sreg1 = slot_reg;
			call->inst.inst_offset = offset;
			call->virtual = TRUE;
		}
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	if (rgctx_arg)
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);

	return (MonoInst*)call;
}
2848 MonoInst*
2849 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2851 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
2854 MonoInst*
2855 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2856 MonoInst **args)
2858 MonoCallInst *call;
2860 g_assert (sig);
2862 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2863 call->fptr = func;
2865 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2867 return (MonoInst*)call;
2870 MonoInst*
2871 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2873 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2875 g_assert (info);
2877 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2881 * mono_emit_abs_call:
2883 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2885 inline static MonoInst*
2886 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2887 MonoMethodSignature *sig, MonoInst **args)
2889 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2890 MonoInst *ins;
2893 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2894 * handle it.
2896 if (cfg->abs_patches == NULL)
2897 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2898 g_hash_table_insert (cfg->abs_patches, ji, ji);
2899 ins = mono_emit_native_call (cfg, ji, sig, args);
2900 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2901 return ins;
2904 static MonoInst*
2905 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2907 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2908 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2909 int widen_op = -1;
2912 * Native code might return non register sized integers
2913 * without initializing the upper bits.
2915 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2916 case OP_LOADI1_MEMBASE:
2917 widen_op = OP_ICONV_TO_I1;
2918 break;
2919 case OP_LOADU1_MEMBASE:
2920 widen_op = OP_ICONV_TO_U1;
2921 break;
2922 case OP_LOADI2_MEMBASE:
2923 widen_op = OP_ICONV_TO_I2;
2924 break;
2925 case OP_LOADU2_MEMBASE:
2926 widen_op = OP_ICONV_TO_U2;
2927 break;
2928 default:
2929 break;
2932 if (widen_op != -1) {
2933 int dreg = alloc_preg (cfg);
2934 MonoInst *widen;
2936 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2937 widen->type = ins->type;
2938 ins = widen;
2943 return ins;
2946 static MonoMethod*
2947 get_memcpy_method (void)
2949 static MonoMethod *memcpy_method = NULL;
2950 if (!memcpy_method) {
2951 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2952 if (!memcpy_method)
2953 g_error ("Old corlib found. Install a new one");
2955 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set one bit in WB_BITMAP for each pointer-sized slot of KLASS (starting
 * at byte OFFSET) which holds a reference and therefore needs a write
 * barrier. Recurses into embedded value types which contain references.
 */
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		/* For valuetypes, field->offset includes the MonoObject header; strip it. */
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
			/* References must be pointer-aligned for the bitmap to be valid. */
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE into the location
 * pointed to by PTR. No-op when the compile does not generate write
 * barriers (cfg->gen_write_barriers unset).
 */
static void
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
{
	int card_table_shift_bits;
	gpointer card_table_mask;
	guint8 *card_table;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;
	gboolean has_card_table_wb = FALSE;

	if (!cfg->gen_write_barriers)
		return;

	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);

	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);

#ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
	has_card_table_wb = TRUE;
#endif

	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
		/* The arch provides a dedicated card table write barrier opcode. */
		MonoInst *wbarrier;

		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table) {
		/* Mark the card by hand: *(card_table + ((ptr >> shift) & mask)) = 1 */
		int offset_reg = alloc_preg (cfg);
		int card_reg = alloc_preg (cfg);
		MonoInst *ins;

		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);

		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
		} else {
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = card_table;
			ins->dreg = card_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		}

		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
	} else {
		/* No card table available: call the GC's write barrier method. */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	}

	/* Keep VALUE alive until after the barrier. */
	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
}
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled copy of a KLASS-typed value from the address in
 * IARGS [1] to the address in IARGS [0], inserting GC write barriers for
 * the pointer-sized slots that may hold references (bitmap computed by
 * create_write_barrier_bitmap ()). Returns TRUE if code was emitted,
 * FALSE if the caller must fall back to a generic copy.
 * NOTE(review): clobbers iargs [0] (reused as a scratch MonoInst) — callers
 * must not rely on it afterwards.
 */
3041 static gboolean
3042 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3044 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3045 unsigned need_wb = 0;
3047 if (align == 0)
3048 align = 4;
3050 /*types with references can't have alignment smaller than sizeof(void*) */
3051 if (align < SIZEOF_VOID_P)
3052 return FALSE;
3054 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3055 if (size > 32 * SIZEOF_VOID_P)
3056 return FALSE;
3058 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3060 /* We don't unroll more than 5 stores to avoid code bloat. */
3061 if (size > 5 * SIZEOF_VOID_P) {
3062 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
3063 size += (SIZEOF_VOID_P - 1);
3064 size &= ~(SIZEOF_VOID_P - 1);
/* Large copy: call the runtime helper with the size and wb bitmap. */
3066 EMIT_NEW_ICONST (cfg, iargs [2], size);
3067 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3068 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3069 return TRUE;
3072 destreg = iargs [0]->dreg;
3073 srcreg = iargs [1]->dreg;
3074 offset = 0;
3076 dest_ptr_reg = alloc_preg (cfg);
3077 tmp_reg = alloc_preg (cfg);
3079 /*tmp = dreg*/
3080 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy one pointer-sized slot per iteration, with a barrier when the
 * corresponding bit of the bitmap says the slot can hold a reference. */
3082 while (size >= SIZEOF_VOID_P) {
3083 MonoInst *load_inst;
3084 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3085 load_inst->dreg = tmp_reg;
3086 load_inst->inst_basereg = srcreg;
3087 load_inst->inst_offset = offset;
3088 MONO_ADD_INS (cfg->cbb, load_inst);
3090 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3092 if (need_wb & 0x1)
3093 emit_write_barrier (cfg, iargs [0], load_inst);
3095 offset += SIZEOF_VOID_P;
3096 size -= SIZEOF_VOID_P;
3097 need_wb >>= 1;
3099 /*tmp += sizeof (void*)*/
3100 if (size >= SIZEOF_VOID_P) {
3101 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3102 MONO_ADD_INS (cfg->cbb, iargs [0]);
3106 /* Those cannot be references since size < sizeof (void*) */
3107 while (size >= 4) {
3108 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3110 offset += 4;
3111 size -= 4;
3114 while (size >= 2) {
3115 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3116 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3117 offset += 2;
3118 size -= 2;
3121 while (size >= 1) {
3122 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3123 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3124 offset += 1;
3125 size -= 1;
3128 return TRUE;
3132 * Emit code to copy a valuetype of type @klass whose address is stored in
3133 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type KLASS from the address in
 * SRC->dreg to the address in DEST->dreg. If NATIVE is set the native
 * (marshalled) size is used and the struct must not contain references.
 * Handles gsharedvt types (size/memcpy fetched from the rgctx), GC write
 * barriers, and small-copy inlining.
 */
3135 void
3136 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3138 MonoInst *iargs [4];
3139 int context_used, n;
3140 guint32 align = 0;
3141 MonoMethod *memcpy_method;
3142 MonoInst *size_ins = NULL;
3143 MonoInst *memcpy_ins = NULL;
3145 g_assert (klass);
3147 * This check breaks with spilled vars... need to handle it during verification anyway.
3148 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* For gsharedvt the size/memcpy function are only known at runtime. */
3151 if (mini_is_gsharedvt_klass (cfg, klass)) {
3152 g_assert (!native);
3153 context_used = mini_class_check_context_used (cfg, klass);
3154 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3155 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3158 if (native)
3159 n = mono_class_native_size (klass, &align);
3160 else
3161 n = mono_class_value_size (klass, &align);
3163 /* if native is true there should be no references in the struct */
3164 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3165 /* Avoid barriers when storing to the stack */
3166 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3167 (dest->opcode == OP_LDADDR))) {
3168 int context_used;
3170 iargs [0] = dest;
3171 iargs [1] = src;
3173 context_used = mini_class_check_context_used (cfg, klass);
3175 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3176 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3177 return;
3178 } else if (context_used) {
3179 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3180 } else {
3181 if (cfg->compile_aot) {
3182 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3183 } else {
3184 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The icall needs the gc descriptor; compute it eagerly at JIT time. */
3185 mono_class_compute_gc_descriptor (klass);
/* Fall back to a barrier-aware runtime copy. */
3189 if (size_ins)
3190 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3191 else
3192 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3193 return;
/* No barriers needed: inline small copies, otherwise call memcpy. */
3197 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3198 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3199 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3200 } else {
3201 iargs [0] = dest;
3202 iargs [1] = src;
3203 if (size_ins)
3204 iargs [2] = size_ins;
3205 else
3206 EMIT_NEW_ICONST (cfg, iargs [2], n);
3208 memcpy_method = get_memcpy_method ();
3209 if (memcpy_ins)
3210 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3211 else
3212 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3216 static MonoMethod*
3217 get_memset_method (void)
3219 static MonoMethod *memset_method = NULL;
3220 if (!memset_method) {
3221 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3222 if (!memset_method)
3223 g_error ("Old corlib found. Install a new one");
3225 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype of type KLASS at the
 * address in DEST->dreg (CIL 'initobj'). gsharedvt types call a managed
 * bzero helper with the runtime-determined size; small types are inlined,
 * larger ones call the managed memset helper.
 */
3228 void
3229 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3231 MonoInst *iargs [3];
3232 int n, context_used;
3233 guint32 align;
3234 MonoMethod *memset_method;
3235 MonoInst *size_ins = NULL;
3236 MonoInst *bzero_ins = NULL;
3237 static MonoMethod *bzero_method;
3239 /* FIXME: Optimize this for the case when dest is an LDADDR */
3241 mono_class_init (klass);
3242 if (mini_is_gsharedvt_klass (cfg, klass)) {
3243 context_used = mini_class_check_context_used (cfg, klass);
/* Size and bzero function come from the gsharedvt info at runtime. */
3244 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3245 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3246 if (!bzero_method)
3247 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3248 g_assert (bzero_method);
3249 iargs [0] = dest;
3250 iargs [1] = size_ins;
/* Indirect call through the rgctx-provided bzero implementation. */
3251 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3252 return;
3255 n = mono_class_value_size (klass, &align);
/* Small types are cleared inline, larger ones via the memset helper. */
3257 if (n <= sizeof (gpointer) * 5) {
3258 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3260 else {
3261 memset_method = get_memset_method ();
3262 iargs [0] = dest;
3263 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3264 EMIT_NEW_ICONST (cfg, iargs [2], n);
3265 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3269 static MonoInst*
3270 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3272 MonoInst *this = NULL;
3274 g_assert (cfg->generic_sharing_context);
3276 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3277 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3278 !method->klass->valuetype)
3279 EMIT_NEW_ARGLOAD (cfg, this, 0);
3281 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3282 MonoInst *mrgctx_loc, *mrgctx_var;
3284 g_assert (!this);
3285 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3287 mrgctx_loc = mono_get_vtable_var (cfg);
3288 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3290 return mrgctx_var;
3291 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3292 MonoInst *vtable_loc, *vtable_var;
3294 g_assert (!this);
3296 vtable_loc = mono_get_vtable_var (cfg);
3297 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3299 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3300 MonoInst *mrgctx_var = vtable_var;
3301 int vtable_reg;
3303 vtable_reg = alloc_preg (cfg);
3304 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3305 vtable_var->type = STACK_PTR;
3308 return vtable_var;
3309 } else {
3310 MonoInst *ins;
3311 int vtable_reg;
3313 vtable_reg = alloc_preg (cfg);
3314 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3315 return ins;
3319 static MonoJumpInfoRgctxEntry *
3320 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3322 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3323 res->method = method;
3324 res->in_mrgctx = in_mrgctx;
3325 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3326 res->data->type = patch_type;
3327 res->data->data.target = patch_data;
3328 res->info_type = info_type;
3330 return res;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * starting from the context value RGCTX. Returns the fetched value.
 */
3333 static inline MonoInst*
3334 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3336 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for KLASS from the
 * runtime generic context of the current method.
 */
3339 static MonoInst*
3340 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3341 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3343 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3344 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3346 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for signature SIG from
 * the runtime generic context of the current method.
 */
3349 static MonoInst*
3350 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3351 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3353 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3354 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3356 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE information describing the gsharedvt
 * call (SIG, CMETHOD) from the runtime generic context of the current
 * method.
 */
3359 static MonoInst*
3360 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3361 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3363 MonoJumpInfoGSharedVtCall *call_info;
3364 MonoJumpInfoRgctxEntry *entry;
3365 MonoInst *rgctx;
/* The patch data is a (sig, method) pair allocated from the mempool. */
3367 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3368 call_info->sig = sig;
3369 call_info->method = cmethod;
3371 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3372 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3374 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt per-method info block (INFO) for
 * CMETHOD from the runtime generic context of the current method.
 */
3378 static MonoInst*
3379 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3380 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3382 MonoJumpInfoRgctxEntry *entry;
3383 MonoInst *rgctx;
3385 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3386 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3388 return emit_rgctx_fetch (cfg, rgctx, entry);
3392 * emit_get_rgctx_method:
3394 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3395 * normal constants, else emit a load from the rgctx.
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If CONTEXT_USED
 * is 0, emit normal constants, else emit a lazy fetch from the rgctx.
 */
3397 static MonoInst*
3398 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3399 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3401 if (!context_used) {
3402 MonoInst *ins;
/* Non-shared code: the method is fully known, so emit a constant. */
3404 switch (rgctx_type) {
3405 case MONO_RGCTX_INFO_METHOD:
3406 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3407 return ins;
3408 case MONO_RGCTX_INFO_METHOD_RGCTX:
3409 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3410 return ins;
3411 default:
3412 g_assert_not_reached ();
3414 } else {
3415 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3416 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3418 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE information for FIELD from the
 * runtime generic context of the current method.
 */
3422 static MonoInst*
3423 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3424 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3426 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3427 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3429 return emit_rgctx_fetch (cfg, rgctx, entry);
3432 static int
3433 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3435 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3436 MonoRuntimeGenericContextInfoTemplate *template;
3437 int i, idx;
3439 g_assert (info);
3441 for (i = 0; i < info->num_entries; ++i) {
3442 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3444 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3445 return i;
3448 if (info->num_entries == info->count_entries) {
3449 MonoRuntimeGenericContextInfoTemplate *new_entries;
3450 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3452 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3454 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3455 info->entries = new_entries;
3456 info->count_entries = new_count_entries;
3459 idx = info->num_entries;
3460 template = &info->entries [idx];
3461 template->info_type = rgctx_type;
3462 template->data = data;
3464 info->num_entries ++;
3466 return idx;
3470 * emit_get_gsharedvt_info:
3472 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Similar to emit_get_rgctx_.., but load the data from the gsharedvt
 * info var instead of calling an rgctx fetch trampoline. The slot index
 * is fixed at JIT time by get_gsharedvt_info_slot ().
 */
3474 static MonoInst*
3475 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3477 MonoInst *ins;
3478 int idx, dreg;
3480 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3481 /* Load info->entries [idx] */
3482 dreg = alloc_preg (cfg);
3483 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3485 return ins;
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch RGCTX_TYPE info for KLASS from the
 * gsharedvt info var, keyed by the class's byval type.
 */
3488 static MonoInst*
3489 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3491 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3495 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing
 * the vtable (a constant, or fetched from the rgctx under generic
 * sharing) in MONO_ARCH_VTABLE_REG.
 * On return the caller must check @klass for load errors.
 */
3497 static void
3498 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3500 MonoInst *vtable_arg;
3501 MonoCallInst *call;
3502 int context_used;
3504 context_used = mini_class_check_context_used (cfg, klass);
3506 if (context_used) {
3507 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3508 klass, MONO_RGCTX_INFO_VTABLE);
3509 } else {
3510 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
/* A NULL vtable means the class failed to load; caller checks klass. */
3512 if (!vtable)
3513 return;
3514 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature. */
3517 if (COMPILE_LLVM (cfg))
3518 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3519 else
3520 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3521 #ifdef MONO_ARCH_VTABLE_REG
3522 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3523 cfg->uses_vtable_reg = TRUE;
3524 #else
3525 NOT_IMPLEMENTED;
3526 #endif
3529 static void
3530 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3532 MonoInst *ins;
3534 if (cfg->gen_seq_points && cfg->method == method) {
3535 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3536 if (nonempty_stack)
3537 ins->flags |= MONO_INST_NONEMPTY_STACK;
3538 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code that records the source and
 * destination classes of the upcoming cast into the JIT TLS data, so a
 * failed cast can produce a detailed error message. If NULL_CHECK is set,
 * the recording is skipped for null objects (a null cast always succeeds).
 * OUT_BBLOCK, when non-NULL, receives the basic block that is current
 * after emission.
 */
3542 static void
3543 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3545 if (mini_get_debug_options ()->better_cast_details) {
3546 int to_klass_reg = alloc_preg (cfg);
3547 int vtable_reg = alloc_preg (cfg);
3548 int klass_reg = alloc_preg (cfg);
3549 MonoBasicBlock *is_null_bb = NULL;
3550 MonoInst *tls_get;
3552 if (null_check) {
3553 NEW_BBLOCK (cfg, is_null_bb);
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3559 tls_get = mono_get_jit_tls_intrinsic (cfg);
3560 if (!tls_get) {
3561 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3562 exit (1);
3565 MONO_ADD_INS (cfg->cbb, tls_get);
/* Read the object's class through its vtable. */
3566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Store the (from, to) class pair in the JIT TLS data. */
3569 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3570 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3571 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3573 if (null_check) {
3574 MONO_START_BB (cfg, is_null_bb);
3575 if (out_bblock)
3576 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-details record saved by save_cast_details () once the
 * cast has succeeded, so stale data is not reported later.
 */
3581 static void
3582 reset_cast_details (MonoCompile *cfg)
3584 /* Reset the variables holding the cast details */
3585 if (mini_get_debug_options ()->better_cast_details) {
3586 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3588 MONO_ADD_INS (cfg->cbb, tls_get);
3589 /* It is enough to reset the from field */
3590 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3595 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for array covariance checks
 * on stelem). Depending on compilation mode the comparison is done on the
 * class pointer (shared code) or directly on the vtable pointer.
 * On return the caller must check @array_class for load errors.
 */
3597 static void
3598 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3600 int vtable_reg = alloc_preg (cfg);
3601 int context_used;
3603 context_used = mini_class_check_context_used (cfg, array_class);
3605 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3607 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3609 if (cfg->opt & MONO_OPT_SHARED) {
3610 int class_reg = alloc_preg (cfg);
3611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3612 if (cfg->compile_aot) {
3613 int klass_reg = alloc_preg (cfg);
3614 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3615 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3616 } else {
3617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3619 } else if (context_used) {
3620 MonoInst *vtable_ins;
/* Shared generic code: fetch the expected vtable from the rgctx. */
3622 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3623 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3624 } else {
3625 if (cfg->compile_aot) {
3626 int vt_reg;
3627 MonoVTable *vtable;
3629 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3630 return;
3631 vt_reg = alloc_preg (cfg);
3632 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3633 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3634 } else {
3635 MonoVTable *vtable;
3636 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3637 return;
3638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3642 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3644 reset_cast_details (cfg);
3648 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3649 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handle unbox of a Nullable<T> by calling the managed Nullable Unbox
 * method. If context_used is non zero, shared generic code is generated
 * and the method address is fetched from the rgctx; otherwise a direct
 * call is emitted.
 */
3651 static MonoInst*
3652 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3654 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3656 if (context_used) {
3657 MonoInst *rgctx, *addr;
3659 /* FIXME: What if the class is shared? We might not
3660 have to get the address of the method from the
3661 RGCTX. */
3662 addr = emit_get_rgctx_method (cfg, context_used, method,
3663 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3665 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3667 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3668 } else {
3669 gboolean pass_vtable, pass_mrgctx;
3670 MonoInst *rgctx_arg = NULL;
3672 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3673 g_assert (!pass_mrgctx);
/* If the callee is compiled shared, pass its vtable as the extra arg. */
3675 if (pass_vtable) {
3676 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3678 g_assert (vtable);
3679 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3682 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit code to unbox the object in SP [0] to a valuetype of type KLASS:
 * a type check (rank == 0 and matching element class), then the address
 * of the value, which is the object pointer plus the MonoObject header
 * size. Returns the address instruction (type STACK_MP).
 */
3686 static MonoInst*
3687 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3689 MonoInst *add;
3690 int obj_reg;
3691 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3692 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3693 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3694 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3696 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3697 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3698 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3700 /* FIXME: generics */
3701 g_assert (klass->rank == 0);
3703 // Check rank == 0
3704 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3710 if (context_used) {
3711 MonoInst *element_class;
3713 /* This assertion is from the unboxcast insn */
3714 g_assert (klass->rank == 0);
/* Shared code: compare against the element class from the rgctx. */
3716 element_class = emit_get_rgctx_klass (cfg, context_used,
3717 klass->element_class, MONO_RGCTX_INFO_KLASS);
3719 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3720 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3721 } else {
3722 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3723 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3724 reset_cast_details (cfg);
/* The unboxed value lives right after the MonoObject header. */
3727 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3728 MONO_ADD_INS (cfg->cbb, add);
3729 add->type = STACK_MP;
3730 add->klass = klass;
3732 return add;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit code to unbox OBJ to a gsharedvt type KLASS, whose concrete kind
 * (valuetype, reference type, or Nullable<T>) is only known at runtime.
 * A three-way branch on the CLASS_BOX_TYPE rgctx info selects the right
 * strategy; all paths leave an address in addr_reg from which the value
 * is loaded. *OUT_CBB is set to the basic block current after emission.
 */
3735 static MonoInst*
3736 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3738 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3739 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3740 MonoInst *ins;
3741 int dreg, addr_reg;
3743 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3745 /* obj */
3746 args [0] = obj;
3748 /* klass */
3749 args [1] = klass_inst;
3751 /* CASTCLASS */
3752 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3754 NEW_BBLOCK (cfg, is_ref_bb);
3755 NEW_BBLOCK (cfg, is_nullable_bb);
3756 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: 1 = reference, 2 = nullable. */
3757 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3758 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3759 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3764 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3765 addr_reg = alloc_dreg (cfg, STACK_MP);
3767 /* Non-ref case */
3768 /* UNBOX */
3769 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3770 MONO_ADD_INS (cfg->cbb, addr);
3772 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3774 /* Ref case */
3775 MONO_START_BB (cfg, is_ref_bb);
3777 /* Save the ref to a temporary */
3778 dreg = alloc_ireg (cfg);
3779 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3780 addr->dreg = addr_reg;
3781 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3782 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3784 /* Nullable case */
3785 MONO_START_BB (cfg, is_nullable_bb);
/* Call the Nullable unbox helper through an rgctx-provided address;
 * the signature cannot be constructed at JIT time, so build it by hand. */
3788 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3789 MonoInst *unbox_call;
3790 MonoMethodSignature *unbox_sig;
3791 MonoInst *var;
3793 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3795 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3796 unbox_sig->ret = &klass->byval_arg;
3797 unbox_sig->param_count = 1;
3798 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3799 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3801 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3802 addr->dreg = addr_reg;
3805 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3807 /* End */
3808 MONO_START_BB (cfg, end_bb);
3810 /* LDOBJ */
3811 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3813 *out_cbb = cfg->cbb;
3815 return ins;
3819 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code to allocate an instance of KLASS. FOR_BOX is passed through
 * to the GC so a boxing-specialized managed allocator can be used.
 * Selects between managed allocators, domain-aware allocation (shared
 * code), a corlib-token helper (AOT out-of-line code) and plain icalls.
 * Returns NULL and sets the cfg exception on error.
 */
3821 static MonoInst*
3822 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3824 MonoInst *iargs [2];
3825 void *alloc_ftn;
3827 if (context_used) {
3828 MonoInst *data;
3829 int rgctx_info;
3830 MonoInst *iargs [2];
3832 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* With MONO_OPT_SHARED the rgctx provides the class, else the vtable. */
3834 if (cfg->opt & MONO_OPT_SHARED)
3835 rgctx_info = MONO_RGCTX_INFO_KLASS;
3836 else
3837 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3838 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3840 if (cfg->opt & MONO_OPT_SHARED) {
3841 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3842 iargs [1] = data;
3843 alloc_ftn = mono_object_new;
3844 } else {
3845 iargs [0] = data;
3846 alloc_ftn = mono_object_new_specific;
3849 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3850 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3852 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3855 if (cfg->opt & MONO_OPT_SHARED) {
3856 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3857 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3859 alloc_ftn = mono_object_new;
3860 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3861 /* This happens often in argument checking code, eg. throw new FooException... */
3862 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3863 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3864 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3865 } else {
3866 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3867 MonoMethod *managed_alloc = NULL;
3868 gboolean pass_lw;
3870 if (!vtable) {
3871 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3872 cfg->exception_ptr = klass;
3873 return NULL;
3876 #ifndef MONO_CROSS_COMPILE
3877 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3878 #endif
3880 if (managed_alloc) {
3881 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3882 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3884 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3885 if (pass_lw) {
/* The allocator wants the instance size in pointer-sized words. */
3886 guint32 lw = vtable->klass->instance_size;
3887 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3888 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3889 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3891 else {
3892 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3896 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3900 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit code to box the value VAL of type KLASS. Handles three cases:
 * Nullable<T> (calls the managed Box method), gsharedvt types (runtime
 * three-way branch on the box kind), and plain valuetypes (allocate and
 * store). *OUT_CBB is set to the basic block current after emission.
 * Returns NULL and sets the cfg exception on error.
 */
3902 static MonoInst*
3903 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3905 MonoInst *alloc, *ins;
3907 *out_cbb = cfg->cbb;
3909 if (mono_class_is_nullable (klass)) {
3910 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3912 if (context_used) {
3913 /* FIXME: What if the class is shared? We might not
3914 have to get the method address from the RGCTX. */
3915 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3916 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3917 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3919 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3920 } else {
3921 gboolean pass_vtable, pass_mrgctx;
3922 MonoInst *rgctx_arg = NULL;
3924 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3925 g_assert (!pass_mrgctx);
3927 if (pass_vtable) {
3928 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3930 g_assert (vtable);
3931 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3934 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3938 if (mini_is_gsharedvt_klass (cfg, klass)) {
3939 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3940 MonoInst *res, *is_ref, *src_var, *addr;
3941 int addr_reg, dreg;
3943 dreg = alloc_ireg (cfg);
3945 NEW_BBLOCK (cfg, is_ref_bb);
3946 NEW_BBLOCK (cfg, is_nullable_bb);
3947 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: 1 = reference, 2 = nullable. */
3948 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3952 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3953 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3955 /* Non-ref case */
3956 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3957 if (!alloc)
3958 return NULL;
3959 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3960 ins->opcode = OP_STOREV_MEMBASE;
3962 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3963 res->type = STACK_OBJ;
3964 res->klass = klass;
3965 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3967 /* Ref case */
/* Boxing a reference is the identity: just load the reference itself. */
3968 MONO_START_BB (cfg, is_ref_bb);
3969 addr_reg = alloc_ireg (cfg);
3971 /* val is a vtype, so has to load the value manually */
3972 src_var = get_vreg_to_inst (cfg, val->dreg);
3973 if (!src_var)
3974 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3975 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3976 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3977 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3979 /* Nullable case */
3980 MONO_START_BB (cfg, is_nullable_bb);
3983 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3984 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3985 MonoInst *box_call;
3986 MonoMethodSignature *box_sig;
3989 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3990 * construct that method at JIT time, so have to do things by hand.
3992 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3993 box_sig->ret = &mono_defaults.object_class->byval_arg;
3994 box_sig->param_count = 1;
3995 box_sig->params [0] = &klass->byval_arg;
3996 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3997 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3998 res->type = STACK_OBJ;
3999 res->klass = klass;
4002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4004 MONO_START_BB (cfg, end_bb);
4006 *out_cbb = cfg->cbb;
4008 return res;
4009 } else {
/* Plain valuetype: allocate the box object and store the value after
 * the MonoObject header. */
4010 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4011 if (!alloc)
4012 return NULL;
4014 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4015 return alloc;
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return TRUE if KLASS is a generic instance (or, with generic sharing, an
 * open generic type) which has at least one co-/contra-variant type argument
 * that is a reference type. Casts involving such classes cannot be decided
 * by a simple class pointer comparison, so callers fall back to the slow
 * cache-based cast helpers.
 */
static gboolean
mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
{
	int i;
	MonoGenericContainer *container;
	MonoGenericInst *ginst;

	if (klass->generic_class) {
		/* Inflated generic instance: variance flags live on the generic definition */
		container = klass->generic_class->container_class->generic_container;
		ginst = klass->generic_class->context.class_inst;
	} else if (klass->generic_container && context_used) {
		/* Open generic definition compiled with generic sharing */
		container = klass->generic_container;
		ginst = container->context.class_inst;
	} else {
		return FALSE;
	}

	for (i = 0; i < container->type_argc; ++i) {
		MonoType *type;
		/* Only variant (in/out) type parameters can make the cast complex */
		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
			continue;
		type = ginst->type_argv [i];
		if (mini_type_is_reference (cfg, type))
			return TRUE;
	}

	return FALSE;
}
// FIXME: This doesn't work yet (class libs tests fail?)
/*
 * is_complex_isinst:
 * Whether an isinst/castclass against KLASS needs the generic (wrapper based)
 * code path instead of an inline check. The leading TRUE currently forces the
 * complex path for every class because of the FIXME above; the remaining
 * tests are the intended heuristic (interfaces, arrays, nullables,
 * MarshalByRef, sealed classes and generic type variables).
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4051 static MonoInst*
4052 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4054 MonoMethod *mono_castclass;
4055 MonoInst *res;
4057 mono_castclass = mono_marshal_get_castclass_with_cache ();
4059 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4060 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4061 reset_cast_details (cfg);
4063 return res;
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: check that SRC is an
 * instance of KLASS (null always passes), throwing InvalidCastException
 * otherwise, and return SRC unchanged. CONTEXT_USED is nonzero when
 * compiling shared generic code, in which case the target class (or a cast
 * cache) is fetched through the rgctx.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
			/* Complex cast: go through the castclass-with-cache wrapper */
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return emit_castclass_with_cache (cfg, klass, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
	}

	/* A null reference always passes castclass */
	NEW_BBLOCK (cfg, is_null_bb);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed class: a single class/vtable pointer comparison suffices */
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' opcode: the result is SRC when SRC is
 * null or an instance of KLASS, and null otherwise. CONTEXT_USED is nonzero
 * when compiling shared generic code.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_ireg_ref (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [3];

		if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
			/* Complex check: go through the isinst-with-cache wrapper */
			MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
			MonoInst *cache_ins;

			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);

			/* obj */
			args [0] = src;

			/* klass - it's the second element of the cache entry*/
			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));

			/* cache */
			args [2] = cache_ins;

			return mono_emit_method_call (cfg, mono_isinst, args, NULL);
		}

		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	/* null input -> result is the (null) input itself */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array type: check rank first, then the element class */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* Sealed class: a single class pointer comparison suffices */
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	/* Failure path: the result register is set to null */
	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
#ifndef DISABLE_REMOTING
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
#else
	MonoBasicBlock *true_bb, *false_bb, *end_bb;
#endif
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);
#endif

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);
#ifndef DISABLE_REMOTING
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);
#endif

	/* null is never an instance of anything -> result 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);
#endif

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
#ifndef DISABLE_REMOTING
		/* If the interface check fails, the object may still be a transparent proxy */
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* A proxy with custom type info has an undecidable type -> result 2 */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
#else
		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
#endif
	} else {
#ifndef DISABLE_REMOTING
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* For proxies, test against the remote (proxied) class instead */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		/* Proxy with custom type info: a failed check means "undecidable" -> result 2 */
		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
#else
		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
#endif
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
#endif

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
#ifndef DISABLE_REMOTING
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
#else
	MonoBasicBlock *ok_result_bb;
#endif
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);

#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, end_bb);
#endif

	NEW_BBLOCK (cfg, ok_result_bb);

	/* A null reference always passes the cast -> result 0 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		/* Anything that is not a transparent proxy throws InvalidCastException here */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		/* A proxy without custom type info cannot be checked -> throw */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Proxy with custom type info: type cannot be determined -> result 1 */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
#else
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
#endif
	} else {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* For proxies, check against the remote (proxied) class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Regular object: a failing cast throws */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
#else
		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
#endif
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, end_bb);
#endif

	/* FIXME: */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 *   Emit IR inlining the work of mono_delegate_ctor (): allocate the
 * delegate object, store its target and method fields, set up the per-domain
 * compiled-code slot and the invoke trampoline. TARGET is the 'this' object
 * for the delegate, METHOD the method it wraps.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	MonoInst *ptr;
	int dreg;
	MonoDelegateTrampInfo *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE, 0);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			/* The GC needs a write barrier for the reference store above */
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target);
		}
	}

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			/* Lazily create the per-domain method -> code-slot hash under the domain lock */
			domain = mono_domain_get ();
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			if (cfg->compile_aot)
				EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
			else
				EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		MonoClassMethodPair *del_tramp;

		del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
		del_tramp->klass = klass;
		del_tramp->method = context_used ? NULL : method;
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
	} else {
		trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
4579 static MonoInst*
4580 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4582 MonoJitICallInfo *info;
4584 /* Need to register the icall so it gets an icall wrapper */
4585 info = mono_get_array_new_va_icall (rank);
4587 cfg->flags |= MONO_CFG_HAS_VARARGS;
4589 /* mono_array_new_va () needs a vararg calling convention */
4590 cfg->disable_llvm = TRUE;
4592 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4593 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Insert an OP_LOAD_GOTADDR instruction at the very start of the entry
 * bblock, materializing the GOT address into cfg->got_var, and keep the
 * variable alive for the whole method with a dummy use in the exit bblock.
 * Does nothing when there is no got_var or it was already allocated.
 */
static void
mono_emit_load_got_addr (MonoCompile *cfg)
{
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)
		return;

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->cil_code = cfg->header->code;
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		/* Splice it in front of the existing instruction list */
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
	}
	else
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
}
/*
 * Maximum IL code size (in bytes) a method may have and still be considered
 * for inlining; lazily initialized from the MONO_INLINELIMIT env var
 * (falling back to INLINE_LENGTH_LIMIT) on first use.
 */
static int inline_limit;
static gboolean inline_limit_inited;
4634 static gboolean
4635 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4637 MonoMethodHeaderSummary header;
4638 MonoVTable *vtable;
4639 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4640 MonoMethodSignature *sig = mono_method_signature (method);
4641 int i;
4642 #endif
4644 if (cfg->generic_sharing_context)
4645 return FALSE;
4647 if (cfg->inline_depth > 10)
4648 return FALSE;
4650 #ifdef MONO_ARCH_HAVE_LMF_OPS
4651 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4652 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4653 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4654 return TRUE;
4655 #endif
4658 if (!mono_method_get_header_summary (method, &header))
4659 return FALSE;
4661 /*runtime, icall and pinvoke are checked by summary call*/
4662 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4663 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4664 (mono_class_is_marshalbyref (method->klass)) ||
4665 header.has_clauses)
4666 return FALSE;
4668 /* also consider num_locals? */
4669 /* Do the size check early to avoid creating vtables */
4670 if (!inline_limit_inited) {
4671 if (g_getenv ("MONO_INLINELIMIT"))
4672 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4673 else
4674 inline_limit = INLINE_LENGTH_LIMIT;
4675 inline_limit_inited = TRUE;
4677 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4678 return FALSE;
4681 * if we can initialize the class of the method right away, we do,
4682 * otherwise we don't allow inlining if the class needs initialization,
4683 * since it would mean inserting a call to mono_runtime_class_init()
4684 * inside the inlined code
4686 if (!(cfg->opt & MONO_OPT_SHARED)) {
4687 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4688 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4689 vtable = mono_class_vtable (cfg->domain, method->klass);
4690 if (!vtable)
4691 return FALSE;
4692 if (!cfg->compile_aot)
4693 mono_runtime_class_init (vtable);
4694 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4695 if (cfg->run_cctors && method->klass->has_cctor) {
4696 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4697 if (!method->klass->runtime_info)
4698 /* No vtable created yet */
4699 return FALSE;
4700 vtable = mono_class_vtable (cfg->domain, method->klass);
4701 if (!vtable)
4702 return FALSE;
4703 /* This makes so that inline cannot trigger */
4704 /* .cctors: too many apps depend on them */
4705 /* running with a specific order... */
4706 if (! vtable->initialized)
4707 return FALSE;
4708 mono_runtime_class_init (vtable);
4710 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4711 if (!method->klass->runtime_info)
4712 /* No vtable created yet */
4713 return FALSE;
4714 vtable = mono_class_vtable (cfg->domain, method->klass);
4715 if (!vtable)
4716 return FALSE;
4717 if (!vtable->initialized)
4718 return FALSE;
4720 } else {
4722 * If we're compiling for shared code
4723 * the cctor will need to be run at aot method load time, for example,
4724 * or at the end of the compilation of the inlining method.
4726 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4727 return FALSE;
4731 * CAS - do not inline methods with declarative security
4732 * Note: this has to be before any possible return TRUE;
4734 if (mono_security_method_has_declsec (method))
4735 return FALSE;
4737 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4738 if (mono_arch_is_soft_float ()) {
4739 /* FIXME: */
4740 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4741 return FALSE;
4742 for (i = 0; i < sig->param_count; ++i)
4743 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4744 return FALSE;
4746 #endif
4748 return TRUE;
4751 static gboolean
4752 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4754 if (!cfg->compile_aot) {
4755 g_assert (vtable);
4756 if (vtable->initialized)
4757 return FALSE;
4760 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4761 if (cfg->method == method)
4762 return FALSE;
4765 if (!mono_class_needs_cctor_run (klass, method))
4766 return FALSE;
4768 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4769 /* The initialization is already done before the method is called */
4770 return FALSE;
4772 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element type KLASS. When BCHECK is set, an array bounds
 * check is emitted first. For gsharedvt element types the element size is
 * fetched from the rgctx at run time.
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
	int context_used;

	if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
		/* Element size is not known at JIT time; -1 is a marker checked below */
		size = -1;
	} else {
		mono_class_init (klass);
		size = mono_class_array_element_size (klass);
	}

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		/* Power-of-two sizes: a single LEA computes base + index*size + vector offset */
		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
	}
#endif

	add_reg = alloc_ireg_mp (cfg);

	if (size == -1) {
		MonoInst *rgctx_ins;

		/* gsharedvt */
		g_assert (cfg->generic_sharing_context);
		context_used = mini_class_check_context_used (cfg, klass);
		g_assert (context_used);
		rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	}
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV

/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index_ins1, index_ins2] of the
 * rank-2 multi-dimensional array ARR, including lower-bound adjustment and a
 * range check on both dimensions. Assumes the two MonoArrayBounds entries for
 * the dimensions are laid out consecutively at arr->bounds. Needs a real
 * pointer-sized multiply, hence the MONO_ARCH_EMULATE_MUL_DIV guard.
 * Returns the address instruction (type STACK_MP, klass = array class).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
	} else {
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		index1 = tmpreg;
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
		index2 = tmpreg;
	}
#else
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
	tmpreg = -1;
#endif

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, 
				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* dim 1: realidx1 = index1 - lower_bound; throw unless realidx1 < length (unsigned compare) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg, 
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg, 
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* dim 2: same check against the second, adjacent MonoArrayBounds entry */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg, 
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg, 
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* address = arr + ((realidx1 * length2 + realidx2) * elem_size) + offsetof vector */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
4926 static MonoInst*
4927 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4929 int rank;
4930 MonoInst *addr;
4931 MonoMethod *addr_method;
4932 int element_size;
4934 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4936 if (rank == 1)
4937 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4939 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4940 /* emit_ldelema_2 depends on OP_LMUL */
4941 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4942 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4944 #endif
4946 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4947 addr_method = mono_marshal_get_array_address (rank, element_size);
4948 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4950 return addr;
/* Default break policy: honor every break instruction/Debugger.Break () call. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}

/* Current policy callback; replaced by embedders via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4962 * mono_set_break_policy:
4963 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4966 * (both break IL instructions and Debugger.Break () method calls), for example
4967 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4968 * untrusted or semi-trusted code.
4970 * @policy_callback will be called every time a break point instruction needs to
4971 * be inserted with the method argument being the method that calls Debugger.Break()
4972 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4973 * if it wants the breakpoint to not be effective in the given method.
4974 * #MONO_BREAK_POLICY_ALWAYS is the default.
4976 void
4977 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4979 if (policy_callback)
4980 break_policy_func = policy_callback;
4981 else
4982 break_policy_func = always_insert_breakpoint;
4985 static gboolean
4986 should_insert_brekpoint (MonoMethod *method) {
4987 switch (break_policy_func (method)) {
4988 case MONO_BREAK_POLICY_ALWAYS:
4989 return TRUE;
4990 case MONO_BREAK_POLICY_NEVER:
4991 return FALSE;
4992 case MONO_BREAK_POLICY_ON_DBG:
4993 g_warning ("mdb no longer supported");
4994 return FALSE;
4995 default:
4996 g_warning ("Incorrect value returned from break policy callback");
4997 return FALSE;
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = address of the value.
 * is_set != 0 stores *args [2] into the element, otherwise loads the element
 * into *args [2]. Returns the store instruction.
 */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
{
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		/* storing a reference into the array needs a GC write barrier */
		if (mini_type_is_reference (cfg, fsig->params [2]))
			emit_write_barrier (cfg, addr, load);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	}
	return store;
}
/* TRUE if KLASS is (or, under sharing, may be) a reference type. */
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
	return mini_type_is_reference (cfg, &klass->byval_arg);
}
/*
 * emit_array_store:
 *
 *   Emit IR storing sp [2] into element sp [1] of the array sp [0] whose
 * element class is KLASS. With SAFETY_CHECKS, reference stores go through the
 * virtual stelemref helper (covariance + bounds check); storing a NULL
 * constant skips the type check. Returns the emitted call/store instruction,
 * or NULL when the stack types are unexpected and the caller must bail out.
 */
static MonoInst*
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
{
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		if (!helper->slot)
			mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		if (sp [0]->type != STACK_OBJ)
			return NULL;
		if (sp [2]->type != STACK_OBJ)
			return NULL;

		iargs [2] = sp [2];
		iargs [1] = sp [1];
		iargs [0] = sp [0];

		/* virtual call so the helper specialized for the array's real type runs */
		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
	} else {
		MonoInst *ins;

		if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
			MonoInst *addr;

			// FIXME-VT: OP_ICONST optimization
			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			ins->opcode = OP_STOREV_MEMBASE;
		} else if (sp [1]->opcode == OP_ICONST) {
			/* constant index: fold the element offset at compile time */
			int array_reg = sp [0]->dreg;
			int index_reg = sp [1]->dreg;
			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

			if (safety_checks)
				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		} else {
			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
			/* null stores reach here too, but the barrier is still safe */
			if (generic_class_is_reference_type (cfg, klass))
				emit_write_barrier (cfg, addr, sp [2]);
		}
		return ins;
	}
}
5080 static MonoInst*
5081 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5083 MonoClass *eklass;
5085 if (is_set)
5086 eklass = mono_class_from_mono_type (fsig->params [2]);
5087 else
5088 eklass = mono_class_from_mono_type (fsig->ret);
5090 if (is_set) {
5091 return emit_array_store (cfg, eklass, args, FALSE);
5092 } else {
5093 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5094 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5095 return ins;
5099 static gboolean
5100 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5102 uint32_t align;
5104 //Only allow for valuetypes
5105 if (!param_klass->valuetype || !return_klass->valuetype)
5106 return FALSE;
5108 //That are blitable
5109 if (param_klass->has_references || return_klass->has_references)
5110 return FALSE;
5112 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5113 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5114 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5115 return FALSE;
5117 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5118 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5119 return FALSE;
5121 //And have the same size
5122 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5123 return FALSE;
5124 return TRUE;
5127 static MonoInst*
5128 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5130 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5131 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5133 //Valuetypes that are semantically equivalent
5134 if (is_unsafe_mov_compatible (param_klass, return_klass))
5135 return args [0];
5137 //Arrays of valuetypes that are semantically equivalent
5138 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
5139 return args [0];
5141 return NULL;
/*
 * Intrinsic hook for constructor calls: tries SIMD ctor intrinsics first
 * (when enabled), then the native-types intrinsics. NULL means no intrinsic.
 */
static MonoInst*
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
#ifdef MONO_ARCH_SIMD_INTRINSICS
	MonoInst *ins = NULL;

	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
}
/* Emit an OP_MEMORY_BARRIER of the given kind (e.g. FullBarrier) into the current bblock. */
static MonoInst*
emit_memory_barrier (MonoCompile *cfg, int kind)
{
	MonoInst *ins = NULL;
	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
	MONO_ADD_INS (cfg->cbb, ins);
	ins->backend.memory_barrier_kind = kind;

	return ins;
}
5171 static MonoInst*
5172 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5174 MonoInst *ins = NULL;
5175 int opcode = 0;
5177 /* The LLVM backend supports these intrinsics */
5178 if (cmethod->klass == mono_defaults.math_class) {
5179 if (strcmp (cmethod->name, "Sin") == 0) {
5180 opcode = OP_SIN;
5181 } else if (strcmp (cmethod->name, "Cos") == 0) {
5182 opcode = OP_COS;
5183 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5184 opcode = OP_SQRT;
5185 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5186 opcode = OP_ABS;
5189 if (opcode) {
5190 MONO_INST_NEW (cfg, ins, opcode);
5191 ins->type = STACK_R8;
5192 ins->dreg = mono_alloc_freg (cfg);
5193 ins->sreg1 = args [0]->dreg;
5194 MONO_ADD_INS (cfg->cbb, ins);
5197 opcode = 0;
5198 if (cfg->opt & MONO_OPT_CMOV) {
5199 if (strcmp (cmethod->name, "Min") == 0) {
5200 if (fsig->params [0]->type == MONO_TYPE_I4)
5201 opcode = OP_IMIN;
5202 if (fsig->params [0]->type == MONO_TYPE_U4)
5203 opcode = OP_IMIN_UN;
5204 else if (fsig->params [0]->type == MONO_TYPE_I8)
5205 opcode = OP_LMIN;
5206 else if (fsig->params [0]->type == MONO_TYPE_U8)
5207 opcode = OP_LMIN_UN;
5208 } else if (strcmp (cmethod->name, "Max") == 0) {
5209 if (fsig->params [0]->type == MONO_TYPE_I4)
5210 opcode = OP_IMAX;
5211 if (fsig->params [0]->type == MONO_TYPE_U4)
5212 opcode = OP_IMAX_UN;
5213 else if (fsig->params [0]->type == MONO_TYPE_I8)
5214 opcode = OP_LMAX;
5215 else if (fsig->params [0]->type == MONO_TYPE_U8)
5216 opcode = OP_LMAX_UN;
5220 if (opcode) {
5221 MONO_INST_NEW (cfg, ins, opcode);
5222 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5223 ins->dreg = mono_alloc_ireg (cfg);
5224 ins->sreg1 = args [0]->dreg;
5225 ins->sreg2 = args [1]->dreg;
5226 MONO_ADD_INS (cfg->cbb, ins);
5230 return ins;
5233 static MonoInst*
5234 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5236 if (cmethod->klass == mono_defaults.array_class) {
5237 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5238 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5239 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5240 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5241 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5242 return emit_array_unsafe_mov (cfg, fsig, args);
5245 return NULL;
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with an
 * inline intrinsic IR sequence. Returns the instruction producing the call's
 * result, or NULL when no intrinsic applies and a normal call must be emitted.
 * Dispatches on the declaring class: String, Object, Array, RuntimeHelpers,
 * Thread, Monitor, Interlocked, Debugger/Environment, Math, and the
 * MonoMac/monotouch Selector.GetHandle AOT optimization, then falls back to
 * SIMD/native-type/LLVM/arch-specific intrinsic hooks.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	MonoInst *ins = NULL;

	/* Looked up once, cached for all subsequent compilations */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			/* fold base + index*2 + offset into a single lea */
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
								   add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else 
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		/* hashing based on the object address only works with a non-moving GC */
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor is empty, replace with a nop */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* matches both GetGenericValueImpl and SetGenericValueImpl */
		if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			/* a NULL bounds pointer means the array is a szarray */
			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
										 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
									   args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
		}
#endif

		/* fast reject: the remaining intrinsics are all getters */
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, 
												 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg, 
										 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			return emit_memory_barrier (cfg, FullBarrier);
		}
	} else if (cmethod->klass == mono_defaults.monitor_class) {

		/* FIXME this should be integrated to the check below once we support the trampoline version */
#if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
			MonoMethod *fast_method = NULL;

			/* Avoid infinite recursion */
			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
				return NULL;

			fast_method = mono_monitor_get_fast_path (cmethod);
			if (!fast_method)
				return NULL;

			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
		}
#endif

#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/* 
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				/* trampoline variant: the object is passed in a fixed register */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
			}

			return (MonoInst*)call;
		}
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		{
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
		}
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			MonoInst *load_ins;

			emit_memory_barrier (cfg, FullBarrier);

			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
			load_ins->dreg = mono_alloc_preg (cfg);
			load_ins->inst_basereg = args [0]->dreg;
			load_ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, load_ins);

			emit_memory_barrier (cfg, FullBarrier);

			ins = load_ins;
		}
#endif

		if (strcmp (cmethod->name, "Increment") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4) {
				opcode = OP_ATOMIC_ADD_I4;
				cfg->has_atomic_add_i4 = TRUE;
			}
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_I8;
#endif
			if (opcode) {
				if (!mono_arch_opcode_supported (opcode))
					return NULL;
				/* atomic add of the constant 1 */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4) {
				opcode = OP_ATOMIC_ADD_I4;
				cfg->has_atomic_add_i4 = TRUE;
			}
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_I8;
#endif
			if (opcode) {
				if (!mono_arch_opcode_supported (opcode))
					return NULL;
				/* atomic add of the constant -1 */
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4) {
				opcode = OP_ATOMIC_ADD_I4;
				cfg->has_atomic_add_i4 = TRUE;
			}
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_I8;
#endif
			if (opcode) {
				if (!mono_arch_opcode_supported (opcode))
					return NULL;
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			}
		}

		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4) {
				opcode = OP_ATOMIC_EXCHANGE_I4;
				cfg->has_atomic_exchange_i4 = TRUE;
			}
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
				opcode = OP_ATOMIC_EXCHANGE_I4;
				cfg->has_atomic_exchange_i4 = TRUE;
			}
#endif
			else
				return NULL;

			if (!mono_arch_opcode_supported (opcode))
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();
			}

			/* storing a new reference into *args [0] needs a GC write barrier */
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1]);
		}

		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
					return NULL;
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
				cfg->has_atomic_cas_i4 = TRUE;
			} else if (size == 8) {
				if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
					return NULL;
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */
			}
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1]);
		}

		if (strcmp (cmethod->name, "MemoryBarrier") == 0)
			ins = emit_memory_barrier (cfg, FullBarrier);

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* the embedder-installed break policy decides whether to honor it */
			if (should_insert_brekpoint (cfg->method)) {
				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
			} else {
				MONO_INST_NEW (cfg, ins, OP_NOP);
				MONO_ADD_INS (cfg->cbb, ins);
			}
			return ins;
		}
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
		}
	} else if (cmethod->klass == mono_defaults.math_class) {
		/* 
		 * There is general branches code for Min/Max, but it does not work for 
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */
	} else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
#ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
		/* AOT optimization: turn Selector.GetHandle ("...") on a string literal
		 * into an OP_OBJC_GET_SELECTOR resolved at load time. */
		MonoInst *pi;
		MonoJumpInfoToken *ji;
		MonoString *s;

		cfg->disable_llvm = TRUE;

		if (args [0]->opcode == OP_GOT_ENTRY) {
			pi = args [0]->inst_p1;
			g_assert (pi->opcode == OP_PATCH_INFO);
			g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
			ji = pi->inst_p0;
		} else {
			g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
			ji = args [0]->inst_p0;
		}

		NULLIFY_INS (args [0]);

		// FIXME: Ugly
		s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
		MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
		ins->dreg = mono_alloc_ireg (cfg);
		// FIXME: Leaks
		ins->inst_p0 = mono_string_to_utf8 (s);
		MONO_ADD_INS (cfg->cbb, ins);
		return ins;
#endif
	}

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}
#endif

	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
	if (ins)
		return ins;

	if (COMPILE_LLVM (cfg)) {
		ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
	}

	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
}
/*
 * This entry point could be used later for arbitrary method
 * redirection.
 *
 * Currently only redirects String.InternalAllocateStr to the GC's managed
 * allocator (when profiling allocations is off and code is not shared).
 * Returns the redirected call instruction, or NULL to emit the normal call.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,  
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
{
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
#endif
			if (!managed_alloc)
				return NULL;
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
		}
	}
	return NULL;
}
5793 static void
5794 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5796 MonoInst *store, *temp;
5797 int i;
5799 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5800 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5803 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5804 * would be different than the MonoInst's used to represent arguments, and
5805 * the ldelema implementation can't deal with that.
5806 * Solution: When ldelema is used on an inline argument, create a var for
5807 * it, emit ldelema on that var, and emit the saving code below in
5808 * inline_method () if needed.
5810 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5811 cfg->args [i] = temp;
5812 /* This uses cfg->args [i] which is set by the preceeding line */
5813 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5814 store->cil_code = sp [0]->cil_code;
5815 sp++;
/*
 * Debugging aids: when enabled, inlining can be restricted by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit () helpers below).
 * NOTE(review): both are enabled here; the checks are cheap no-ops when the
 * env variables are unset, but consider disabling for release builds.
 */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts with
 *   the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env variable.
 *   Returns TRUE when the variable is unset/empty or the name matches.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	/* cached across calls; g_getenv's result must not be freed */
	static const char *limit = NULL;
	char *full_name;
	gboolean matches;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining inside callers whose full name starts
 *   with the prefix given by MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 *   Returns TRUE when the variable is unset/empty or the name matches.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	/* cached across calls; g_getenv's result must not be freed */
	static const char *limit = NULL;
	char *full_name;
	gboolean matches;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);

	return matches;
}
#endif
5882 static void
5883 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5885 static double r8_0 = 0.0;
5886 MonoInst *ins;
5887 int t;
5889 rtype = mini_replace_type (rtype);
5890 t = rtype->type;
5892 if (rtype->byref) {
5893 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5894 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5895 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5896 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5897 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5898 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5899 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5900 ins->type = STACK_R8;
5901 ins->inst_p0 = (void*)&r8_0;
5902 ins->dreg = dreg;
5903 MONO_ADD_INS (cfg->cbb, ins);
5904 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5905 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5906 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5907 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5908 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5909 } else {
5910 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5914 static void
5915 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5917 int t;
5919 rtype = mini_replace_type (rtype);
5920 t = rtype->type;
5922 if (rtype->byref) {
5923 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5924 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5925 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5926 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5927 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5928 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5929 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5930 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5931 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5932 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5933 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5934 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5935 } else {
5936 emit_init_rvar (cfg, dreg, rtype);
5940 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
5941 static void
5942 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5944 MonoInst *var = cfg->locals [local];
5945 if (COMPILE_SOFT_FLOAT (cfg)) {
5946 MonoInst *store;
5947 int reg = alloc_dreg (cfg, var->type);
5948 emit_init_rvar (cfg, reg, type);
5949 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5950 } else {
5951 if (init)
5952 emit_init_rvar (cfg, var->dreg, type);
5953 else
5954 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current IL position IP.  The callee's IR is
 *   generated into freshly created start/end bblocks; the caller's compile
 *   state (locals, args, cbb, offsets, generic context, ...) is saved, swapped
 *   for the callee's, and restored afterwards.  Returns the inline cost + 1
 *   (always > 0) on success, 0 when inlining was aborted.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		/* only surface the loader error when inlining is mandatory (wrappers) */
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* save the caller's compile state before recursing into the callee */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	/* restore the caller's compile state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* the cost threshold (60) decides whether the generated IR is kept */
	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			/*
			 * It's possible that the rvar is set in some prev bblock, but not in others.
			 * (#1835).
			 */
			if (rvar) {
				MonoBasicBlock *bb;

				for (i = 0; i < ebblock->in_count; ++i) {
					bb = ebblock->in_bb [i];

					if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
						cfg->cbb = bb;

						emit_init_rvar (cfg, rvar->dreg, fsig->ret);
					}
				}
			}

			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set)
				emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
6154 * Some of these comments may well be out-of-date.
6155 * Design decisions: we do a single pass over the IL code (and we do bblock
6156 * splitting/merging in the few cases when it's required: a back jump to an IL
6157 * address that was not already seen as bblock starting point).
6158 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6159 * Complex operations are decomposed in simpler ones right away. We need to let the
6160 * arch-specific code peek and poke inside this process somehow (except when the
6161 * optimizations can take advantage of the full semantic info of coarse opcodes).
6162 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6163 * MonoInst->opcode initially is the IL opcode or some simplification of that
6164 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6165 * opcode with value bigger than OP_LAST.
6166 * At this point the IR can be handed over to an interpreter, a dumb code generator
6167 * or to the optimizing code generator that will translate it to SSA form.
6169 * Profiling directed optimizations.
6170 * We may compile by default with few or no optimizations and instrument the code
6171 * or the user may indicate what methods to optimize the most either in a config file
6172 * or through repeated runs where the compiler applies offline the optimizations to
6173 * each method and then decides if it was worth it.
/*
 * Verification/abort helper macros used by mono_method_to_ir () and friends.
 * They reference locals (sp, stack_start, header, ip, end, num_args, cfg) and
 * the UNVERIFIED/LOAD_ERROR goto labels of the surrounding function, so they
 * are only usable inside functions that provide them.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
6188 static gboolean
6189 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6191 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6193 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a bblock (via GET_BBLOCK) at
 *   every branch target and at the instruction following a branch.  Also marks
 *   the bblock containing a 'throw' as out-of-line so it can be laid out cold.
 *   Returns 0 on success; on an invalid opcode returns 1 and stores the
 *   failing position in *POS (via the UNVERIFIED macro's goto).
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* advance IP past the operand; record bblock starts at branch targets */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* the switch targets are relative to the end of the whole table */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
exception_exit:
	*pos = ip;
	return 1;
}
6290 static inline MonoMethod *
6291 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6293 MonoMethod *method;
6295 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6296 method = mono_method_get_wrapper_data (m, token);
6297 if (context)
6298 method = mono_class_inflate_generic_method (method, context);
6299 } else {
6300 method = mono_get_method_full (m->klass->image, token, klass, context);
6303 return method;
6306 static inline MonoMethod *
6307 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6309 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6311 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
6312 return NULL;
6314 return method;
6317 static inline MonoClass*
6318 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6320 MonoClass *klass;
6322 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6323 klass = mono_method_get_wrapper_data (method, token);
6324 if (context)
6325 klass = mono_class_inflate_generic_class (klass, context);
6326 } else {
6327 klass = mono_class_get_full (method->klass->image, token, context);
6329 if (klass)
6330 mono_class_init (klass);
6331 return klass;
6334 static inline MonoMethodSignature*
6335 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6337 MonoMethodSignature *fsig;
6339 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6340 MonoError error;
6342 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6343 if (context) {
6344 fsig = mono_inflate_generic_signature (fsig, context, &error);
6345 // FIXME:
6346 g_assert (mono_error_ok (&error));
6348 } else {
6349 fsig = mono_metadata_parse_signature (method->klass->image, token);
6351 return fsig;
6355 * Returns TRUE if the JIT should abort inlining because "callee"
6356 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a call from CALLER to CALLEE.
 *   Returns TRUE if the JIT should abort inlining because "callee"
 *   is influenced by security attributes; FALSE when the call is allowed
 *   (possibly after emitting code that throws a SecurityException at the
 *   call site for ECMA link demands).
 */
static
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
{
	guint32 result;

	/* when inlining (cfg->method != caller), any declsec on the callee blocks it */
	if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
		return TRUE;
	}

	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
		return FALSE;

	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		MonoInst *args [2];

		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
		cfg->exception_data = result;
		return TRUE;
	}

	return FALSE;
}
6389 static MonoMethod*
6390 throw_exception (void)
6392 static MonoMethod *method = NULL;
6394 if (!method) {
6395 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6396 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
6398 g_assert (method);
6399 return method;
6402 static void
6403 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6405 MonoMethod *thrower = throw_exception ();
6406 MonoInst *args [1];
6408 EMIT_NEW_PCONST (cfg, args [0], ex);
6409 mono_emit_method_call (cfg, thrower, args, NULL);
6413 * Return the original method is a wrapper is specified. We can only access
6414 * the custom attributes from the original method.
6416 static MonoMethod*
6417 get_original_method (MonoMethod *method)
6419 if (method->wrapper_type == MONO_WRAPPER_NONE)
6420 return method;
6422 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6423 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6424 return NULL;
6426 /* in other cases we need to find the original method */
6427 return mono_marshal_method_from_wrapper (method);
6430 static void
6431 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6432 MonoBasicBlock *bblock, unsigned char *ip)
6434 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6435 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6436 if (ex)
6437 emit_throw_exception (cfg, ex);
6440 static void
6441 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6442 MonoBasicBlock *bblock, unsigned char *ip)
6444 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6445 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6446 if (ex)
6447 emit_throw_exception (cfg, ex);
6451 * Check that the IL instructions at ip are the array initialization
6452 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at IP are the array initialization
 *   sequence (dup; ldtoken <field>; call RuntimeHelpers::InitializeArray)
 *   following a newarr of element class KLASS with LEN elements.  On match,
 *   return a pointer to the static field data (or, for AOT, the RVA boxed as
 *   a pointer) and store the byte size in *OUT_SIZE and the field token in
 *   *OUT_FIELD_TOKEN.  Returns NULL when the pattern does not match or the
 *   element type cannot be handled.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] == 0x4 checks the ldtoken operand's token table (a field token) */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* the callee must really be RuntimeHelpers.InitializeArray from corlib */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* the field's static data must be large enough for the whole array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!image_is_dynamic (method->klass->image)) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
6531 static void
6532 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6534 char *method_fname = mono_method_full_name (method, TRUE);
6535 char *method_code;
6536 MonoMethodHeader *header = mono_method_get_header (method);
6538 if (header->code_size == 0)
6539 method_code = g_strdup ("method body is empty.");
6540 else
6541 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6542 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6543 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6544 g_free (method_fname);
6545 g_free (method_code);
6546 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on CFG.  The exception_ptr slot is
 *   registered as a GC root before the managed object is stored into it.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
6557 static void
6558 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6560 MonoInst *ins;
6561 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6562 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6563 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6564 /* Optimize reg-reg moves away */
6566 * Can't optimize other opcodes, since sp[0] might point to
6567 * the last ins of a decomposed opcode.
6569 sp [0]->dreg = (cfg)->locals [n]->dreg;
6570 } else {
6571 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6576 * ldloca inhibits many optimizations so try to get rid of it in common
6577 * cases.
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so when the ldloca at IP is
 *   immediately consumed by an initobj in the same bblock, emit the local's
 *   initialization directly and return the IP past the initobj.  Returns
 *   NULL when the pattern does not apply (or the initobj class fails to
 *   load, via CHECK_TYPELOAD's goto to load_error).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;
	MonoType *type;

	/* SIZE selects between the short (ldloca.s) and long encodings */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		type = mini_replace_type (&klass->byval_arg);
		emit_init_local (cfg, local, type, TRUE);
		return ip + 6;
	}
 load_error:
	return NULL;
}
6607 static gboolean
6608 is_exception_class (MonoClass *class)
6610 while (class) {
6611 if (class == mono_defaults.exception_class)
6612 return TRUE;
6613 class = class->parent;
6615 return FALSE;
6619 * is_jit_optimizer_disabled:
6621 * Determine whenever M's assembly has a DebuggableAttribute with the
6622 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 *   IsJITOptimizerDisabled flag set.  The result is cached on the assembly;
 *   a memory barrier orders the cached value before the inited flag.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			/* 0x0001 is the custom attribute blob prolog */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments: skip the first, read IsJITOptimizerDisabled */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
/*
 * is_supported_tail_call:
 *
 *   Return TRUE when a tail. call from METHOD to CMETHOD can actually be
 *   compiled as a tail call: signatures compatible (arch-specific check when
 *   available), no arguments that may point into the caller's stack frame,
 *   no LMF, no pinvoke, and a plain CEE_CALL opcode.
 */
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
	supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;
	if (call_opcode != CEE_CALL)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		if (!mono_debug_count ())
			supported_tail_call = FALSE;
	}
#endif

	return supported_tail_call;
}
6721 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6722 * it to the thread local value based on the tls_offset field. Every other kind of access to
6723 * the field causes an assert.
6725 static gboolean
6726 is_magic_tls_access (MonoClassField *field)
6728 if (strcmp (field->name, "tlsdata"))
6729 return FALSE;
6730 if (strcmp (field->parent->name, "ThreadLocal`1"))
6731 return FALSE;
6732 return field->parent->image == mono_defaults.corlib;
6735 /* emits the code needed to access a managed tls var (like ThreadStatic)
6736 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6737 * pointer for the current thread.
6738 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Emit the code needed to access a managed tls var (like ThreadStatic) with
 *   the value of the tls offset in OFFSET_REG.  THREAD_INS is the
 *   MonoInternalThread pointer for the current thread.  The offset encodes
 *   both the static_data chunk index (top byte) and the offset inside the
 *   chunk (low 24 bits).  Returns the MonoInst* holding the var's address.
 */
static MonoInst*
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
{
	MonoInst *addr;
	int static_data_reg, array_reg, dreg;
	int offset2_reg, idx_reg;
	// inlined access to the tls data
	// idx = (offset >> 24) - 1;
	// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
	static_data_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
	idx_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
	/* scale idx to a pointer-sized array index */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
	array_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
	offset2_reg = alloc_ireg (cfg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
	dreg = alloc_ireg (cfg);
	EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
	return addr;
}
6766 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6767 * this address is cached per-method in cached_tls_addr.
6769 static MonoInst*
6770 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6772 MonoInst *load, *addr, *temp, *store, *thread_ins;
6773 MonoClassField *offset_field;
6775 if (*cached_tls_addr) {
6776 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6777 return addr;
6779 thread_ins = mono_get_thread_intrinsic (cfg);
6780 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6782 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6783 if (thread_ins) {
6784 MONO_ADD_INS (cfg->cbb, thread_ins);
6785 } else {
6786 MonoMethod *thread_method;
6787 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6788 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6790 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6791 addr->klass = mono_class_from_mono_type (tls_field->type);
6792 addr->type = STACK_MP;
6793 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6794 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6796 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6797 return addr;
6801 * mono_method_to_ir:
6803 * Translate the .net IL into linear IR.
6806 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6807 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6808 guint inline_offset, gboolean is_virtual_call)
6810 MonoError error;
6811 MonoInst *ins, **sp, **stack_start;
6812 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6813 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6814 MonoMethod *cmethod, *method_definition;
6815 MonoInst **arg_array;
6816 MonoMethodHeader *header;
6817 MonoImage *image;
6818 guint32 token, ins_flag;
6819 MonoClass *klass;
6820 MonoClass *constrained_call = NULL;
6821 unsigned char *ip, *end, *target, *err_pos;
6822 MonoMethodSignature *sig;
6823 MonoGenericContext *generic_context = NULL;
6824 MonoGenericContainer *generic_container = NULL;
6825 MonoType **param_types;
6826 int i, n, start_new_bblock, dreg;
6827 int num_calls = 0, inline_costs = 0;
6828 int breakpoint_id = 0;
6829 guint num_args;
6830 MonoBoolean security, pinvoke;
6831 MonoSecurityManager* secman = NULL;
6832 MonoDeclSecurityActions actions;
6833 GSList *class_inits = NULL;
6834 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6835 int context_used;
6836 gboolean init_locals, seq_points, skip_dead_blocks;
6837 gboolean disable_inline, sym_seq_points = FALSE;
6838 MonoInst *cached_tls_addr = NULL;
6839 MonoDebugMethodInfo *minfo;
6840 MonoBitSet *seq_point_locs = NULL;
6841 MonoBitSet *seq_point_set_locs = NULL;
6843 disable_inline = is_jit_optimizer_disabled (method);
6845 /* serialization and xdomain stuff may need access to private fields and methods */
6846 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6847 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6848 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6849 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6850 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6851 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6853 dont_verify |= mono_security_smcs_hack_enabled ();
6855 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6856 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6857 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6858 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6859 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6861 image = method->klass->image;
6862 header = mono_method_get_header (method);
6863 if (!header) {
6864 MonoLoaderError *error;
6866 if ((error = mono_loader_get_last_error ())) {
6867 mono_cfg_set_exception (cfg, error->exception_type);
6868 } else {
6869 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6870 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6872 goto exception_exit;
6874 generic_container = mono_method_get_generic_container (method);
6875 sig = mono_method_signature (method);
6876 num_args = sig->hasthis + sig->param_count;
6877 ip = (unsigned char*)header->code;
6878 cfg->cil_start = ip;
6879 end = ip + header->code_size;
6880 cfg->stat_cil_code_size += header->code_size;
6882 seq_points = cfg->gen_seq_points && cfg->method == method;
6883 #ifdef PLATFORM_ANDROID
6884 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6885 #endif
6887 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6888 /* We could hit a seq point before attaching to the JIT (#8338) */
6889 seq_points = FALSE;
6892 if (cfg->gen_seq_points && cfg->method == method) {
6893 minfo = mono_debug_lookup_method (method);
6894 if (minfo) {
6895 int i, n_il_offsets;
6896 int *il_offsets;
6897 int *line_numbers;
6899 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
6900 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6901 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6902 sym_seq_points = TRUE;
6903 for (i = 0; i < n_il_offsets; ++i) {
6904 if (il_offsets [i] < header->code_size)
6905 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6907 g_free (il_offsets);
6908 g_free (line_numbers);
6913 * Methods without init_locals set could cause asserts in various passes
6914 * (#497220). To work around this, we emit dummy initialization opcodes
6915 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6916 * on some platforms.
6918 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6919 init_locals = header->init_locals;
6920 else
6921 init_locals = TRUE;
6923 method_definition = method;
6924 while (method_definition->is_inflated) {
6925 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6926 method_definition = imethod->declaring;
6929 /* SkipVerification is not allowed if core-clr is enabled */
6930 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6931 dont_verify = TRUE;
6932 dont_verify_stloc = TRUE;
6935 if (sig->is_inflated)
6936 generic_context = mono_method_get_context (method);
6937 else if (generic_container)
6938 generic_context = &generic_container->context;
6939 cfg->generic_context = generic_context;
6941 if (!cfg->generic_sharing_context)
6942 g_assert (!sig->has_type_parameters);
6944 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6945 g_assert (method->is_inflated);
6946 g_assert (mono_method_get_context (method)->method_inst);
6948 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6949 g_assert (sig->generic_param_count);
6951 if (cfg->method == method) {
6952 cfg->real_offset = 0;
6953 } else {
6954 cfg->real_offset = inline_offset;
6957 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6958 cfg->cil_offset_to_bb_len = header->code_size;
6960 cfg->current_method = method;
6962 if (cfg->verbose_level > 2)
6963 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6965 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6966 if (sig->hasthis)
6967 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6968 for (n = 0; n < sig->param_count; ++n)
6969 param_types [n + sig->hasthis] = sig->params [n];
6970 cfg->arg_types = param_types;
6972 dont_inline = g_list_prepend (dont_inline, method);
6973 if (cfg->method == method) {
6975 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6976 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6978 /* ENTRY BLOCK */
6979 NEW_BBLOCK (cfg, start_bblock);
6980 cfg->bb_entry = start_bblock;
6981 start_bblock->cil_code = NULL;
6982 start_bblock->cil_length = 0;
6983 #if defined(__native_client_codegen__)
6984 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6985 ins->dreg = alloc_dreg (cfg, STACK_I4);
6986 MONO_ADD_INS (start_bblock, ins);
6987 #endif
6989 /* EXIT BLOCK */
6990 NEW_BBLOCK (cfg, end_bblock);
6991 cfg->bb_exit = end_bblock;
6992 end_bblock->cil_code = NULL;
6993 end_bblock->cil_length = 0;
6994 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6995 g_assert (cfg->num_bblocks == 2);
6997 arg_array = cfg->args;
6999 if (header->num_clauses) {
7000 cfg->spvars = g_hash_table_new (NULL, NULL);
7001 cfg->exvars = g_hash_table_new (NULL, NULL);
7003 /* handle exception clauses */
7004 for (i = 0; i < header->num_clauses; ++i) {
7005 MonoBasicBlock *try_bb;
7006 MonoExceptionClause *clause = &header->clauses [i];
7007 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7008 try_bb->real_offset = clause->try_offset;
7009 try_bb->try_start = TRUE;
7010 try_bb->region = ((i + 1) << 8) | clause->flags;
7011 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7012 tblock->real_offset = clause->handler_offset;
7013 tblock->flags |= BB_EXCEPTION_HANDLER;
7016 * Linking the try block with the EH block hinders inlining as we won't be able to
7017 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7019 if (COMPILE_LLVM (cfg))
7020 link_bblock (cfg, try_bb, tblock);
7022 if (*(ip + clause->handler_offset) == CEE_POP)
7023 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7025 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7026 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7027 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7028 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7029 MONO_ADD_INS (tblock, ins);
7031 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7032 /* finally clauses already have a seq point */
7033 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7034 MONO_ADD_INS (tblock, ins);
7037 /* todo: is a fault block unsafe to optimize? */
7038 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7039 tblock->flags |= BB_EXCEPTION_UNSAFE;
7043 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7044 while (p < end) {
7045 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7047 /* catch and filter blocks get the exception object on the stack */
7048 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7049 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7050 MonoInst *dummy_use;
7052 /* mostly like handle_stack_args (), but just sets the input args */
7053 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7054 tblock->in_scount = 1;
7055 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7056 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7059 * Add a dummy use for the exvar so its liveness info will be
7060 * correct.
7062 cfg->cbb = tblock;
7063 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7065 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7066 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7067 tblock->flags |= BB_EXCEPTION_HANDLER;
7068 tblock->real_offset = clause->data.filter_offset;
7069 tblock->in_scount = 1;
7070 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7071 /* The filter block shares the exvar with the handler block */
7072 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7073 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7074 MONO_ADD_INS (tblock, ins);
7078 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7079 clause->data.catch_class &&
7080 cfg->generic_sharing_context &&
7081 mono_class_check_context_used (clause->data.catch_class)) {
7083 * In shared generic code with catch
7084 * clauses containing type variables
7085 * the exception handling code has to
7086 * be able to get to the rgctx.
7087 * Therefore we have to make sure that
7088 * the vtable/mrgctx argument (for
7089 * static or generic methods) or the
7090 * "this" argument (for non-static
7091 * methods) are live.
7093 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7094 mini_method_get_context (method)->method_inst ||
7095 method->klass->valuetype) {
7096 mono_get_vtable_var (cfg);
7097 } else {
7098 MonoInst *dummy_use;
7100 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7104 } else {
7105 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7106 cfg->cbb = start_bblock;
7107 cfg->args = arg_array;
7108 mono_save_args (cfg, sig, inline_args);
7111 /* FIRST CODE BLOCK */
7112 NEW_BBLOCK (cfg, bblock);
7113 bblock->cil_code = ip;
7114 cfg->cbb = bblock;
7115 cfg->ip = ip;
7117 ADD_BBLOCK (cfg, bblock);
7119 if (cfg->method == method) {
7120 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7121 if (breakpoint_id) {
7122 MONO_INST_NEW (cfg, ins, OP_BREAK);
7123 MONO_ADD_INS (bblock, ins);
7127 if (mono_security_cas_enabled ())
7128 secman = mono_security_manager_get_methods ();
7130 security = (secman && mono_security_method_has_declsec (method));
7131 /* at this point having security doesn't mean we have any code to generate */
7132 if (security && (cfg->method == method)) {
7133 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7134 * And we do not want to enter the next section (with allocation) if we
7135 * have nothing to generate */
7136 security = mono_declsec_get_demands (method, &actions);
7139 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7140 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7141 if (pinvoke) {
7142 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7143 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7144 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7146 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7147 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7148 pinvoke = FALSE;
7150 if (custom)
7151 mono_custom_attrs_free (custom);
7153 if (pinvoke) {
7154 custom = mono_custom_attrs_from_class (wrapped->klass);
7155 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7156 pinvoke = FALSE;
7158 if (custom)
7159 mono_custom_attrs_free (custom);
7161 } else {
7162 /* not a P/Invoke after all */
7163 pinvoke = FALSE;
7167 /* we use a separate basic block for the initialization code */
7168 NEW_BBLOCK (cfg, init_localsbb);
7169 cfg->bb_init = init_localsbb;
7170 init_localsbb->real_offset = cfg->real_offset;
7171 start_bblock->next_bb = init_localsbb;
7172 init_localsbb->next_bb = bblock;
7173 link_bblock (cfg, start_bblock, init_localsbb);
7174 link_bblock (cfg, init_localsbb, bblock);
7176 cfg->cbb = init_localsbb;
7178 if (cfg->gsharedvt && cfg->method == method) {
7179 MonoGSharedVtMethodInfo *info;
7180 MonoInst *var, *locals_var;
7181 int dreg;
7183 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7184 info->method = cfg->method;
7185 info->count_entries = 16;
7186 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7187 cfg->gsharedvt_info = info;
7189 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7190 /* prevent it from being register allocated */
7191 //var->flags |= MONO_INST_VOLATILE;
7192 cfg->gsharedvt_info_var = var;
7194 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7195 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7197 /* Allocate locals */
7198 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7199 /* prevent it from being register allocated */
7200 //locals_var->flags |= MONO_INST_VOLATILE;
7201 cfg->gsharedvt_locals_var = locals_var;
7203 dreg = alloc_ireg (cfg);
7204 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7206 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7207 ins->dreg = locals_var->dreg;
7208 ins->sreg1 = dreg;
7209 MONO_ADD_INS (cfg->cbb, ins);
7210 cfg->gsharedvt_locals_var_ins = ins;
7212 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7214 if (init_locals)
7215 ins->flags |= MONO_INST_INIT;
7219 /* at this point we know, if security is TRUE, that some code needs to be generated */
7220 if (security && (cfg->method == method)) {
7221 MonoInst *args [2];
7223 cfg->stat_cas_demand_generation++;
7225 if (actions.demand.blob) {
7226 /* Add code for SecurityAction.Demand */
7227 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7228 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7229 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7230 mono_emit_method_call (cfg, secman->demand, args, NULL);
7232 if (actions.noncasdemand.blob) {
7233 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7234 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7235 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7236 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7237 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7238 mono_emit_method_call (cfg, secman->demand, args, NULL);
7240 if (actions.demandchoice.blob) {
7241 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7242 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7243 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7244 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7245 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7249 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7250 if (pinvoke) {
7251 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7254 if (mono_security_core_clr_enabled ()) {
7255 /* check if this is native code, e.g. an icall or a p/invoke */
7256 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7257 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7258 if (wrapped) {
7259 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7260 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7262 /* if this ia a native call then it can only be JITted from platform code */
7263 if ((icall || pinvk) && method->klass && method->klass->image) {
7264 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7265 MonoException *ex = icall ? mono_get_exception_security () :
7266 mono_get_exception_method_access ();
7267 emit_throw_exception (cfg, ex);
7274 CHECK_CFG_EXCEPTION;
7276 if (header->code_size == 0)
7277 UNVERIFIED;
7279 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7280 ip = err_pos;
7281 UNVERIFIED;
7284 if (cfg->method == method)
7285 mono_debug_init_method (cfg, bblock, breakpoint_id);
7287 for (n = 0; n < header->num_locals; ++n) {
7288 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7289 UNVERIFIED;
7291 class_inits = NULL;
7293 /* We force the vtable variable here for all shared methods
7294 for the possibility that they might show up in a stack
7295 trace where their exact instantiation is needed. */
7296 if (cfg->generic_sharing_context && method == cfg->method) {
7297 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7298 mini_method_get_context (method)->method_inst ||
7299 method->klass->valuetype) {
7300 mono_get_vtable_var (cfg);
7301 } else {
7302 /* FIXME: Is there a better way to do this?
7303 We need the variable live for the duration
7304 of the whole method. */
7305 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7309 /* add a check for this != NULL to inlined methods */
7310 if (is_virtual_call) {
7311 MonoInst *arg_ins;
7313 NEW_ARGLOAD (cfg, arg_ins, 0);
7314 MONO_ADD_INS (cfg->cbb, arg_ins);
7315 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7318 skip_dead_blocks = !dont_verify;
7319 if (skip_dead_blocks) {
7320 original_bb = bb = mono_basic_block_split (method, &error);
7321 if (!mono_error_ok (&error)) {
7322 mono_error_cleanup (&error);
7323 UNVERIFIED;
7325 g_assert (bb);
7328 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7329 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7331 ins_flag = 0;
7332 start_new_bblock = 0;
7333 cfg->cbb = bblock;
7334 while (ip < end) {
7335 if (cfg->method == method)
7336 cfg->real_offset = ip - header->code;
7337 else
7338 cfg->real_offset = inline_offset;
7339 cfg->ip = ip;
7341 context_used = 0;
7343 if (start_new_bblock) {
7344 bblock->cil_length = ip - bblock->cil_code;
7345 if (start_new_bblock == 2) {
7346 g_assert (ip == tblock->cil_code);
7347 } else {
7348 GET_BBLOCK (cfg, tblock, ip);
7350 bblock->next_bb = tblock;
7351 bblock = tblock;
7352 cfg->cbb = bblock;
7353 start_new_bblock = 0;
7354 for (i = 0; i < bblock->in_scount; ++i) {
7355 if (cfg->verbose_level > 3)
7356 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7357 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7358 *sp++ = ins;
7360 if (class_inits)
7361 g_slist_free (class_inits);
7362 class_inits = NULL;
7363 } else {
7364 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7365 link_bblock (cfg, bblock, tblock);
7366 if (sp != stack_start) {
7367 handle_stack_args (cfg, stack_start, sp - stack_start);
7368 sp = stack_start;
7369 CHECK_UNVERIFIABLE (cfg);
7371 bblock->next_bb = tblock;
7372 bblock = tblock;
7373 cfg->cbb = bblock;
7374 for (i = 0; i < bblock->in_scount; ++i) {
7375 if (cfg->verbose_level > 3)
7376 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7377 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7378 *sp++ = ins;
7380 g_slist_free (class_inits);
7381 class_inits = NULL;
7385 if (skip_dead_blocks) {
7386 int ip_offset = ip - header->code;
7388 if (ip_offset == bb->end)
7389 bb = bb->next;
7391 if (bb->dead) {
7392 int op_size = mono_opcode_size (ip, end);
7393 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7395 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7397 if (ip_offset + op_size == bb->end) {
7398 MONO_INST_NEW (cfg, ins, OP_NOP);
7399 MONO_ADD_INS (bblock, ins);
7400 start_new_bblock = 1;
7403 ip += op_size;
7404 continue;
7408 * Sequence points are points where the debugger can place a breakpoint.
7409 * Currently, we generate these automatically at points where the IL
7410 * stack is empty.
7412 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7414 * Make methods interruptable at the beginning, and at the targets of
7415 * backward branches.
7416 * Also, do this at the start of every bblock in methods with clauses too,
7417 * to be able to handle instructions with inprecise control flow like
7418 * throw/endfinally.
7419 * Backward branches are handled at the end of method-to-ir ().
7421 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7423 /* Avoid sequence points on empty IL like .volatile */
7424 // FIXME: Enable this
7425 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7426 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7427 if (sp != stack_start)
7428 ins->flags |= MONO_INST_NONEMPTY_STACK;
7429 MONO_ADD_INS (cfg->cbb, ins);
7431 if (sym_seq_points)
7432 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7435 bblock->real_offset = cfg->real_offset;
7437 if ((cfg->method == method) && cfg->coverage_info) {
7438 guint32 cil_offset = ip - header->code;
7439 cfg->coverage_info->data [cil_offset].cil_code = ip;
7441 /* TODO: Use an increment here */
7442 #if defined(TARGET_X86)
7443 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7444 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7445 ins->inst_imm = 1;
7446 MONO_ADD_INS (cfg->cbb, ins);
7447 #else
7448 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7449 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7450 #endif
7453 if (cfg->verbose_level > 3)
7454 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7456 switch (*ip) {
7457 case CEE_NOP:
7458 if (seq_points && !sym_seq_points && sp != stack_start) {
7460 * The C# compiler uses these nops to notify the JIT that it should
7461 * insert seq points.
7463 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7464 MONO_ADD_INS (cfg->cbb, ins);
7466 if (cfg->keep_cil_nops)
7467 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7468 else
7469 MONO_INST_NEW (cfg, ins, OP_NOP);
7470 ip++;
7471 MONO_ADD_INS (bblock, ins);
7472 break;
7473 case CEE_BREAK:
7474 if (should_insert_brekpoint (cfg->method)) {
7475 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7476 } else {
7477 MONO_INST_NEW (cfg, ins, OP_NOP);
7479 ip++;
7480 MONO_ADD_INS (bblock, ins);
7481 break;
7482 case CEE_LDARG_0:
7483 case CEE_LDARG_1:
7484 case CEE_LDARG_2:
7485 case CEE_LDARG_3:
7486 CHECK_STACK_OVF (1);
7487 n = (*ip)-CEE_LDARG_0;
7488 CHECK_ARG (n);
7489 EMIT_NEW_ARGLOAD (cfg, ins, n);
7490 ip++;
7491 *sp++ = ins;
7492 break;
7493 case CEE_LDLOC_0:
7494 case CEE_LDLOC_1:
7495 case CEE_LDLOC_2:
7496 case CEE_LDLOC_3:
7497 CHECK_STACK_OVF (1);
7498 n = (*ip)-CEE_LDLOC_0;
7499 CHECK_LOCAL (n);
7500 EMIT_NEW_LOCLOAD (cfg, ins, n);
7501 ip++;
7502 *sp++ = ins;
7503 break;
7504 case CEE_STLOC_0:
7505 case CEE_STLOC_1:
7506 case CEE_STLOC_2:
7507 case CEE_STLOC_3: {
7508 CHECK_STACK (1);
7509 n = (*ip)-CEE_STLOC_0;
7510 CHECK_LOCAL (n);
7511 --sp;
7512 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7513 UNVERIFIED;
7514 emit_stloc_ir (cfg, sp, header, n);
7515 ++ip;
7516 inline_costs += 1;
7517 break;
7519 case CEE_LDARG_S:
7520 CHECK_OPSIZE (2);
7521 CHECK_STACK_OVF (1);
7522 n = ip [1];
7523 CHECK_ARG (n);
7524 EMIT_NEW_ARGLOAD (cfg, ins, n);
7525 *sp++ = ins;
7526 ip += 2;
7527 break;
7528 case CEE_LDARGA_S:
7529 CHECK_OPSIZE (2);
7530 CHECK_STACK_OVF (1);
7531 n = ip [1];
7532 CHECK_ARG (n);
7533 NEW_ARGLOADA (cfg, ins, n);
7534 MONO_ADD_INS (cfg->cbb, ins);
7535 *sp++ = ins;
7536 ip += 2;
7537 break;
7538 case CEE_STARG_S:
7539 CHECK_OPSIZE (2);
7540 CHECK_STACK (1);
7541 --sp;
7542 n = ip [1];
7543 CHECK_ARG (n);
7544 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7545 UNVERIFIED;
7546 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7547 ip += 2;
7548 break;
7549 case CEE_LDLOC_S:
7550 CHECK_OPSIZE (2);
7551 CHECK_STACK_OVF (1);
7552 n = ip [1];
7553 CHECK_LOCAL (n);
7554 EMIT_NEW_LOCLOAD (cfg, ins, n);
7555 *sp++ = ins;
7556 ip += 2;
7557 break;
7558 case CEE_LDLOCA_S: {
7559 unsigned char *tmp_ip;
7560 CHECK_OPSIZE (2);
7561 CHECK_STACK_OVF (1);
7562 CHECK_LOCAL (ip [1]);
7564 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7565 ip = tmp_ip;
7566 inline_costs += 1;
7567 break;
7570 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7571 *sp++ = ins;
7572 ip += 2;
7573 break;
7575 case CEE_STLOC_S:
7576 CHECK_OPSIZE (2);
7577 CHECK_STACK (1);
7578 --sp;
7579 CHECK_LOCAL (ip [1]);
7580 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7581 UNVERIFIED;
7582 emit_stloc_ir (cfg, sp, header, ip [1]);
7583 ip += 2;
7584 inline_costs += 1;
7585 break;
7586 case CEE_LDNULL:
7587 CHECK_STACK_OVF (1);
7588 EMIT_NEW_PCONST (cfg, ins, NULL);
7589 ins->type = STACK_OBJ;
7590 ++ip;
7591 *sp++ = ins;
7592 break;
7593 case CEE_LDC_I4_M1:
7594 CHECK_STACK_OVF (1);
7595 EMIT_NEW_ICONST (cfg, ins, -1);
7596 ++ip;
7597 *sp++ = ins;
7598 break;
7599 case CEE_LDC_I4_0:
7600 case CEE_LDC_I4_1:
7601 case CEE_LDC_I4_2:
7602 case CEE_LDC_I4_3:
7603 case CEE_LDC_I4_4:
7604 case CEE_LDC_I4_5:
7605 case CEE_LDC_I4_6:
7606 case CEE_LDC_I4_7:
7607 case CEE_LDC_I4_8:
7608 CHECK_STACK_OVF (1);
7609 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7610 ++ip;
7611 *sp++ = ins;
7612 break;
7613 case CEE_LDC_I4_S:
7614 CHECK_OPSIZE (2);
7615 CHECK_STACK_OVF (1);
7616 ++ip;
7617 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7618 ++ip;
7619 *sp++ = ins;
7620 break;
7621 case CEE_LDC_I4:
7622 CHECK_OPSIZE (5);
7623 CHECK_STACK_OVF (1);
7624 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7625 ip += 5;
7626 *sp++ = ins;
7627 break;
7628 case CEE_LDC_I8:
7629 CHECK_OPSIZE (9);
7630 CHECK_STACK_OVF (1);
7631 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7632 ins->type = STACK_I8;
7633 ins->dreg = alloc_dreg (cfg, STACK_I8);
7634 ++ip;
7635 ins->inst_l = (gint64)read64 (ip);
7636 MONO_ADD_INS (bblock, ins);
7637 ip += 8;
7638 *sp++ = ins;
7639 break;
7640 case CEE_LDC_R4: {
7641 float *f;
7642 gboolean use_aotconst = FALSE;
7644 #ifdef TARGET_POWERPC
7645 /* FIXME: Clean this up */
7646 if (cfg->compile_aot)
7647 use_aotconst = TRUE;
7648 #endif
7650 /* FIXME: we should really allocate this only late in the compilation process */
7651 f = mono_domain_alloc (cfg->domain, sizeof (float));
7652 CHECK_OPSIZE (5);
7653 CHECK_STACK_OVF (1);
7655 if (use_aotconst) {
7656 MonoInst *cons;
7657 int dreg;
7659 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7661 dreg = alloc_freg (cfg);
7662 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7663 ins->type = STACK_R8;
7664 } else {
7665 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7666 ins->type = STACK_R8;
7667 ins->dreg = alloc_dreg (cfg, STACK_R8);
7668 ins->inst_p0 = f;
7669 MONO_ADD_INS (bblock, ins);
7671 ++ip;
7672 readr4 (ip, f);
7673 ip += 4;
7674 *sp++ = ins;
7675 break;
7677 case CEE_LDC_R8: {
7678 double *d;
7679 gboolean use_aotconst = FALSE;
7681 #ifdef TARGET_POWERPC
7682 /* FIXME: Clean this up */
7683 if (cfg->compile_aot)
7684 use_aotconst = TRUE;
7685 #endif
7687 /* FIXME: we should really allocate this only late in the compilation process */
7688 d = mono_domain_alloc (cfg->domain, sizeof (double));
7689 CHECK_OPSIZE (9);
7690 CHECK_STACK_OVF (1);
7692 if (use_aotconst) {
7693 MonoInst *cons;
7694 int dreg;
7696 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7698 dreg = alloc_freg (cfg);
7699 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7700 ins->type = STACK_R8;
7701 } else {
7702 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7703 ins->type = STACK_R8;
7704 ins->dreg = alloc_dreg (cfg, STACK_R8);
7705 ins->inst_p0 = d;
7706 MONO_ADD_INS (bblock, ins);
7708 ++ip;
7709 readr8 (ip, d);
7710 ip += 8;
7711 *sp++ = ins;
7712 break;
7714 case CEE_DUP: {
7715 MonoInst *temp, *store;
7716 CHECK_STACK (1);
7717 CHECK_STACK_OVF (1);
7718 sp--;
7719 ins = *sp;
7721 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7722 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7724 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7725 *sp++ = ins;
7727 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7728 *sp++ = ins;
7730 ++ip;
7731 inline_costs += 2;
7732 break;
7734 case CEE_POP:
7735 CHECK_STACK (1);
7736 ip++;
7737 --sp;
7739 #ifdef TARGET_X86
7740 if (sp [0]->type == STACK_R8)
7741 /* we need to pop the value from the x86 FP stack */
7742 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7743 #endif
7744 break;
7745 case CEE_JMP: {
7746 MonoCallInst *call;
7748 INLINE_FAILURE ("jmp");
7749 GSHAREDVT_FAILURE (*ip);
7751 CHECK_OPSIZE (5);
7752 if (stack_start != sp)
7753 UNVERIFIED;
7754 token = read32 (ip + 1);
7755 /* FIXME: check the signature matches */
7756 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7758 if (!cmethod || mono_loader_get_last_error ())
7759 LOAD_ERROR;
7761 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7762 GENERIC_SHARING_FAILURE (CEE_JMP);
7764 if (mono_security_cas_enabled ())
7765 CHECK_CFG_EXCEPTION;
7767 emit_instrumentation_call (cfg, mono_profiler_method_leave);
7769 if (ARCH_HAVE_OP_TAIL_CALL) {
7770 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7771 int i, n;
7773 /* Handle tail calls similarly to calls */
7774 n = fsig->param_count + fsig->hasthis;
7776 DISABLE_AOT (cfg);
7778 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7779 call->method = cmethod;
7780 call->tail_call = TRUE;
7781 call->signature = mono_method_signature (cmethod);
7782 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7783 call->inst.inst_p0 = cmethod;
7784 for (i = 0; i < n; ++i)
7785 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7787 mono_arch_emit_call (cfg, call);
7788 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
7789 MONO_ADD_INS (bblock, (MonoInst*)call);
7790 } else {
7791 for (i = 0; i < num_args; ++i)
7792 /* Prevent arguments from being optimized away */
7793 arg_array [i]->flags |= MONO_INST_VOLATILE;
7795 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7796 ins = (MonoInst*)call;
7797 ins->inst_p0 = cmethod;
7798 MONO_ADD_INS (bblock, ins);
7801 ip += 5;
7802 start_new_bblock = 1;
7803 break;
7805 case CEE_CALLI:
7806 case CEE_CALL:
7807 case CEE_CALLVIRT: {
7808 MonoInst *addr = NULL;
7809 MonoMethodSignature *fsig = NULL;
7810 int array_rank = 0;
7811 int virtual = *ip == CEE_CALLVIRT;
7812 int calli = *ip == CEE_CALLI;
7813 gboolean pass_imt_from_rgctx = FALSE;
7814 MonoInst *imt_arg = NULL;
7815 MonoInst *keep_this_alive = NULL;
7816 gboolean pass_vtable = FALSE;
7817 gboolean pass_mrgctx = FALSE;
7818 MonoInst *vtable_arg = NULL;
7819 gboolean check_this = FALSE;
7820 gboolean supported_tail_call = FALSE;
7821 gboolean tail_call = FALSE;
7822 gboolean need_seq_point = FALSE;
7823 guint32 call_opcode = *ip;
7824 gboolean emit_widen = TRUE;
7825 gboolean push_res = TRUE;
7826 gboolean skip_ret = FALSE;
7827 gboolean delegate_invoke = FALSE;
7829 CHECK_OPSIZE (5);
7830 token = read32 (ip + 1);
7832 ins = NULL;
7834 if (calli) {
7835 //GSHAREDVT_FAILURE (*ip);
7836 cmethod = NULL;
7837 CHECK_STACK (1);
7838 --sp;
7839 addr = *sp;
7840 fsig = mini_get_signature (method, token, generic_context);
7841 n = fsig->param_count + fsig->hasthis;
7843 if (method->dynamic && fsig->pinvoke) {
7844 MonoInst *args [3];
7847 * This is a call through a function pointer using a pinvoke
7848 * signature. Have to create a wrapper and call that instead.
7849 * FIXME: This is very slow, need to create a wrapper at JIT time
7850 * instead based on the signature.
7852 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7853 EMIT_NEW_PCONST (cfg, args [1], fsig);
7854 args [2] = addr;
7855 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7857 } else {
7858 MonoMethod *cil_method;
7860 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7861 cil_method = cmethod;
7863 if (constrained_call) {
7864 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7865 if (cfg->verbose_level > 2)
7866 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7867 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7868 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7869 cfg->generic_sharing_context)) {
7870 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7872 } else {
7873 if (cfg->verbose_level > 2)
7874 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7876 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7878 * This is needed since get_method_constrained can't find
7879 * the method in klass representing a type var.
7880 * The type var is guaranteed to be a reference type in this
7881 * case.
7883 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7884 g_assert (!cmethod->klass->valuetype);
7885 } else {
7886 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7891 if (!cmethod || mono_loader_get_last_error ())
7892 LOAD_ERROR;
7893 if (!dont_verify && !cfg->skip_visibility) {
7894 MonoMethod *target_method = cil_method;
7895 if (method->is_inflated) {
7896 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7898 if (!mono_method_can_access_method (method_definition, target_method) &&
7899 !mono_method_can_access_method (method, cil_method))
7900 METHOD_ACCESS_FAILURE;
7903 if (mono_security_core_clr_enabled ())
7904 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7906 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7907 /* MS.NET seems to silently convert this to a callvirt */
7908 virtual = 1;
7912 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7913 * converts to a callvirt.
7915 * tests/bug-515884.il is an example of this behavior
7917 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7918 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7919 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7920 virtual = 1;
7923 if (!cmethod->klass->inited)
7924 if (!mono_class_init (cmethod->klass))
7925 TYPE_LOAD_ERROR (cmethod->klass);
7927 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7928 mini_class_is_system_array (cmethod->klass)) {
7929 array_rank = cmethod->klass->rank;
7930 fsig = mono_method_signature (cmethod);
7931 } else {
7932 fsig = mono_method_signature (cmethod);
7934 if (!fsig)
7935 LOAD_ERROR;
7937 if (fsig->pinvoke) {
7938 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7939 check_for_pending_exc, cfg->compile_aot);
7940 fsig = mono_method_signature (wrapper);
7941 } else if (constrained_call) {
7942 fsig = mono_method_signature (cmethod);
7943 } else {
7944 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7948 mono_save_token_info (cfg, image, token, cil_method);
7950 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7952 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7953 * foo (bar (), baz ())
7954 * works correctly. MS does this also:
7955 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7956 * The problem with this approach is that the debugger will stop after all calls returning a value,
7957 * even for simple cases, like:
7958 * int i = foo ();
7960 /* Special case a few common successor opcodes */
7961 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7962 need_seq_point = TRUE;
7965 n = fsig->param_count + fsig->hasthis;
7967 /* Don't support calls made using type arguments for now */
7969 if (cfg->gsharedvt) {
7970 if (mini_is_gsharedvt_signature (cfg, fsig))
7971 GSHAREDVT_FAILURE (*ip);
7975 if (mono_security_cas_enabled ()) {
7976 if (check_linkdemand (cfg, method, cmethod))
7977 INLINE_FAILURE ("linkdemand");
7978 CHECK_CFG_EXCEPTION;
7981 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7982 g_assert_not_reached ();
7985 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7986 UNVERIFIED;
7988 if (!cfg->generic_sharing_context && cmethod)
7989 g_assert (!mono_method_check_context_used (cmethod));
7991 CHECK_STACK (n);
7993 //g_assert (!virtual || fsig->hasthis);
7995 sp -= n;
7997 if (constrained_call) {
7998 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8000 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
8002 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8003 /* The 'Own method' case below */
8004 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8005 /* 'The type parameter is instantiated as a reference type' case below. */
8006 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8007 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8008 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8009 MonoInst *args [16];
8012 * This case handles calls to
8013 * - object:ToString()/Equals()/GetHashCode(),
8014 * - System.IComparable<T>:CompareTo()
8015 * - System.IEquatable<T>:Equals ()
8016 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8019 args [0] = sp [0];
8020 if (mono_method_check_context_used (cmethod))
8021 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8022 else
8023 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8024 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8026 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8027 if (fsig->hasthis && fsig->param_count) {
8028 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8029 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8030 ins->dreg = alloc_preg (cfg);
8031 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8032 MONO_ADD_INS (cfg->cbb, ins);
8033 args [4] = ins;
8035 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8036 int addr_reg;
8038 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8040 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8041 addr_reg = ins->dreg;
8042 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8043 } else {
8044 EMIT_NEW_ICONST (cfg, args [3], 0);
8045 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8047 } else {
8048 EMIT_NEW_ICONST (cfg, args [3], 0);
8049 EMIT_NEW_ICONST (cfg, args [4], 0);
8051 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8052 emit_widen = FALSE;
8054 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8055 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8056 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8057 MonoInst *add;
8059 /* Unbox */
8060 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8061 MONO_ADD_INS (cfg->cbb, add);
8062 /* Load value */
8063 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8064 MONO_ADD_INS (cfg->cbb, ins);
8065 /* ins represents the call result */
8068 goto call_end;
8069 } else {
8070 GSHAREDVT_FAILURE (*ip);
8074 * We have the `constrained.' prefix opcode.
8076 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8078 * The type parameter is instantiated as a valuetype,
8079 * but that type doesn't override the method we're
8080 * calling, so we need to box `this'.
8082 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8083 ins->klass = constrained_call;
8084 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8085 CHECK_CFG_EXCEPTION;
8086 } else if (!constrained_call->valuetype) {
8087 int dreg = alloc_ireg_ref (cfg);
8090 * The type parameter is instantiated as a reference
8091 * type. We have a managed pointer on the stack, so
8092 * we need to dereference it here.
8094 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8095 ins->type = STACK_OBJ;
8096 sp [0] = ins;
8097 } else {
8098 if (cmethod->klass->valuetype) {
8099 /* Own method */
8100 } else {
8101 /* Interface method */
8102 int ioffset, slot;
8104 mono_class_setup_vtable (constrained_call);
8105 CHECK_TYPELOAD (constrained_call);
8106 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8107 if (ioffset == -1)
8108 TYPE_LOAD_ERROR (constrained_call);
8109 slot = mono_method_get_vtable_slot (cmethod);
8110 if (slot == -1)
8111 TYPE_LOAD_ERROR (cmethod->klass);
8112 cmethod = constrained_call->vtable [ioffset + slot];
8114 if (cmethod->klass == mono_defaults.enum_class) {
8115 /* Enum implements some interfaces, so treat this as the first case */
8116 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8117 ins->klass = constrained_call;
8118 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8119 CHECK_CFG_EXCEPTION;
8122 virtual = 0;
8124 constrained_call = NULL;
8127 if (!calli && check_call_signature (cfg, fsig, sp))
8128 UNVERIFIED;
8130 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8131 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8132 delegate_invoke = TRUE;
8133 #endif
8135 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8136 bblock = cfg->cbb;
8137 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8138 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8139 emit_widen = FALSE;
8142 goto call_end;
8146 * If the callee is a shared method, then its static cctor
8147 * might not get called after the call was patched.
8149 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8150 emit_generic_class_init (cfg, cmethod->klass);
8151 CHECK_TYPELOAD (cmethod->klass);
8154 if (cmethod)
8155 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8157 if (cfg->generic_sharing_context && cmethod) {
8158 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8160 context_used = mini_method_check_context_used (cfg, cmethod);
8162 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8163 /* Generic method interface
8164 calls are resolved via a
8165 helper function and don't
8166 need an imt. */
8167 if (!cmethod_context || !cmethod_context->method_inst)
8168 pass_imt_from_rgctx = TRUE;
8172 * If a shared method calls another
8173 * shared method then the caller must
8174 * have a generic sharing context
8175 * because the magic trampoline
8176 * requires it. FIXME: We shouldn't
8177 * have to force the vtable/mrgctx
8178 * variable here. Instead there
8179 * should be a flag in the cfg to
8180 * request a generic sharing context.
8182 if (context_used &&
8183 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8184 mono_get_vtable_var (cfg);
8187 if (pass_vtable) {
8188 if (context_used) {
8189 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8190 } else {
8191 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8193 CHECK_TYPELOAD (cmethod->klass);
8194 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8198 if (pass_mrgctx) {
8199 g_assert (!vtable_arg);
8201 if (!cfg->compile_aot) {
8203 * emit_get_rgctx_method () calls mono_class_vtable () so check
8204 * for type load errors before.
8206 mono_class_setup_vtable (cmethod->klass);
8207 CHECK_TYPELOAD (cmethod->klass);
8210 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8212 /* !marshalbyref is needed to properly handle generic methods + remoting */
8213 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8214 MONO_METHOD_IS_FINAL (cmethod)) &&
8215 !mono_class_is_marshalbyref (cmethod->klass)) {
8216 if (virtual)
8217 check_this = TRUE;
8218 virtual = 0;
8222 if (pass_imt_from_rgctx) {
8223 g_assert (!pass_vtable);
8224 g_assert (cmethod);
8226 imt_arg = emit_get_rgctx_method (cfg, context_used,
8227 cmethod, MONO_RGCTX_INFO_METHOD);
8230 if (check_this)
8231 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8233 /* Calling virtual generic methods */
8234 if (cmethod && virtual &&
8235 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8236 !(MONO_METHOD_IS_FINAL (cmethod) &&
8237 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8238 fsig->generic_param_count &&
8239 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8240 MonoInst *this_temp, *this_arg_temp, *store;
8241 MonoInst *iargs [4];
8242 gboolean use_imt = FALSE;
8244 g_assert (fsig->is_inflated);
8246 /* Prevent inlining of methods that contain indirect calls */
8247 INLINE_FAILURE ("virtual generic call");
8249 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8250 GSHAREDVT_FAILURE (*ip);
8252 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8253 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8254 use_imt = TRUE;
8255 #endif
8257 if (use_imt) {
8258 g_assert (!imt_arg);
8259 if (!context_used)
8260 g_assert (cmethod->is_inflated);
8261 imt_arg = emit_get_rgctx_method (cfg, context_used,
8262 cmethod, MONO_RGCTX_INFO_METHOD);
8263 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8264 } else {
8265 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8266 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8267 MONO_ADD_INS (bblock, store);
8269 /* FIXME: This should be a managed pointer */
8270 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8272 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8273 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8274 cmethod, MONO_RGCTX_INFO_METHOD);
8275 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8276 addr = mono_emit_jit_icall (cfg,
8277 mono_helper_compile_generic_method, iargs);
8279 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8281 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8284 goto call_end;
8288 * Implement a workaround for the inherent races involved in locking:
8289 * Monitor.Enter ()
8290 * try {
8291 * } finally {
8292 * Monitor.Exit ()
8294 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8295 * try block, the Exit () won't be executed, see:
8296 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8297 * To work around this, we extend such try blocks to include the last x bytes
8298 * of the Monitor.Enter () call.
8300 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8301 MonoBasicBlock *tbb;
8303 GET_BBLOCK (cfg, tbb, ip + 5);
8305 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8306 * from Monitor.Enter like ArgumentNullException.
8308 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8309 /* Mark this bblock as needing to be extended */
8310 tbb->extend_try_block = TRUE;
8314 /* Conversion to a JIT intrinsic */
8315 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8316 bblock = cfg->cbb;
8317 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8318 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8319 emit_widen = FALSE;
8321 goto call_end;
8324 /* Inlining */
8325 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8326 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8327 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8328 !g_list_find (dont_inline, cmethod)) {
8329 int costs;
8330 gboolean always = FALSE;
8332 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8333 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8334 /* Prevent inlining of methods that call wrappers */
8335 INLINE_FAILURE ("wrapper call");
8336 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8337 always = TRUE;
8340 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8341 if (costs) {
8342 cfg->real_offset += 5;
8343 bblock = cfg->cbb;
8345 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8346 /* *sp is already set by inline_method */
8347 sp++;
8348 push_res = FALSE;
8351 inline_costs += costs;
8353 goto call_end;
8357 /* Tail recursion elimination */
8358 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8359 gboolean has_vtargs = FALSE;
8360 int i;
8362 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8363 INLINE_FAILURE ("tail call");
8365 /* keep it simple */
8366 for (i = fsig->param_count - 1; i >= 0; i--) {
8367 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8368 has_vtargs = TRUE;
8371 if (!has_vtargs) {
8372 for (i = 0; i < n; ++i)
8373 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8374 MONO_INST_NEW (cfg, ins, OP_BR);
8375 MONO_ADD_INS (bblock, ins);
8376 tblock = start_bblock->out_bb [0];
8377 link_bblock (cfg, bblock, tblock);
8378 ins->inst_target_bb = tblock;
8379 start_new_bblock = 1;
8381 /* skip the CEE_RET, too */
8382 if (ip_in_bb (cfg, bblock, ip + 5))
8383 skip_ret = TRUE;
8384 push_res = FALSE;
8385 goto call_end;
8389 inline_costs += 10 * num_calls++;
8392 * Making generic calls out of gsharedvt methods.
8394 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8395 MonoRgctxInfoType info_type;
8397 if (virtual) {
8398 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8399 //GSHAREDVT_FAILURE (*ip);
8400 // disable for possible remoting calls
8401 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8402 GSHAREDVT_FAILURE (*ip);
8403 if (fsig->generic_param_count) {
8404 /* virtual generic call */
8405 g_assert (mono_use_imt);
8406 g_assert (!imt_arg);
8407 /* Same as the virtual generic case above */
8408 imt_arg = emit_get_rgctx_method (cfg, context_used,
8409 cmethod, MONO_RGCTX_INFO_METHOD);
8410 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8411 vtable_arg = NULL;
8415 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8416 /* test_0_multi_dim_arrays () in gshared.cs */
8417 GSHAREDVT_FAILURE (*ip);
8419 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8420 keep_this_alive = sp [0];
8422 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8423 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8424 else
8425 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8426 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8428 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8429 goto call_end;
8430 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8432 * We pass the address to the gsharedvt trampoline in the rgctx reg
8434 MonoInst *callee = addr;
8436 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8437 /* Not tested */
8438 GSHAREDVT_FAILURE (*ip);
8440 addr = emit_get_rgctx_sig (cfg, context_used,
8441 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8442 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8443 goto call_end;
8446 /* Generic sharing */
8447 /* FIXME: only do this for generic methods if
8448 they are not shared! */
8449 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8450 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8451 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8452 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8453 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8454 INLINE_FAILURE ("gshared");
8456 g_assert (cfg->generic_sharing_context && cmethod);
8457 g_assert (!addr);
8460 * We are compiling a call to a
8461 * generic method from shared code,
8462 * which means that we have to look up
8463 * the method in the rgctx and do an
8464 * indirect call.
8466 if (fsig->hasthis)
8467 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8469 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8470 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8471 goto call_end;
8474 /* Indirect calls */
8475 if (addr) {
8476 if (call_opcode == CEE_CALL)
8477 g_assert (context_used);
8478 else if (call_opcode == CEE_CALLI)
8479 g_assert (!vtable_arg);
8480 else
8481 /* FIXME: what the hell is this??? */
8482 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8483 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8485 /* Prevent inlining of methods with indirect calls */
8486 INLINE_FAILURE ("indirect call");
8488 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8489 int info_type;
8490 gpointer info_data;
8493 * Instead of emitting an indirect call, emit a direct call
8494 * with the contents of the aotconst as the patch info.
8496 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8497 info_type = addr->inst_c1;
8498 info_data = addr->inst_p0;
8499 } else {
8500 info_type = addr->inst_right->inst_c1;
8501 info_data = addr->inst_right->inst_left;
8504 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8505 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8506 NULLIFY_INS (addr);
8507 goto call_end;
8510 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8511 goto call_end;
8514 /* Array methods */
8515 if (array_rank) {
8516 MonoInst *addr;
8518 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8519 MonoInst *val = sp [fsig->param_count];
8521 if (val->type == STACK_OBJ) {
8522 MonoInst *iargs [2];
8524 iargs [0] = sp [0];
8525 iargs [1] = val;
8527 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8530 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8531 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8532 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8533 emit_write_barrier (cfg, addr, val);
8534 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8535 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8538 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8539 if (!cmethod->klass->element_class->valuetype && !readonly)
8540 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8541 CHECK_TYPELOAD (cmethod->klass);
8543 readonly = FALSE;
8544 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8545 ins = addr;
8546 } else {
8547 g_assert_not_reached ();
8550 emit_widen = FALSE;
8551 goto call_end;
8554 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8555 if (ins)
8556 goto call_end;
8558 /* Tail prefix / tail call optimization */
8560 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8561 /* FIXME: runtime generic context pointer for jumps? */
8562 /* FIXME: handle this for generic sharing eventually */
8563 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8564 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8565 supported_tail_call = TRUE;
8567 if (supported_tail_call) {
8568 MonoCallInst *call;
8570 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8571 INLINE_FAILURE ("tail call");
8573 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8575 if (ARCH_HAVE_OP_TAIL_CALL) {
8576 /* Handle tail calls similarly to normal calls */
8577 tail_call = TRUE;
8578 } else {
8579 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8581 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8582 call->tail_call = TRUE;
8583 call->method = cmethod;
8584 call->signature = mono_method_signature (cmethod);
8587 * We implement tail calls by storing the actual arguments into the
8588 * argument variables, then emitting a CEE_JMP.
8590 for (i = 0; i < n; ++i) {
8591 /* Prevent argument from being register allocated */
8592 arg_array [i]->flags |= MONO_INST_VOLATILE;
8593 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8595 ins = (MonoInst*)call;
8596 ins->inst_p0 = cmethod;
8597 ins->inst_p1 = arg_array [0];
8598 MONO_ADD_INS (bblock, ins);
8599 link_bblock (cfg, bblock, end_bblock);
8600 start_new_bblock = 1;
8602 // FIXME: Eliminate unreachable epilogs
8605 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8606 * only reachable from this call.
8608 GET_BBLOCK (cfg, tblock, ip + 5);
8609 if (tblock == bblock || tblock->in_count == 0)
8610 skip_ret = TRUE;
8611 push_res = FALSE;
8613 goto call_end;
8618 * Synchronized wrappers.
8619 * Its hard to determine where to replace a method with its synchronized
8620 * wrapper without causing an infinite recursion. The current solution is
8621 * to add the synchronized wrapper in the trampolines, and to
8622 * change the called method to a dummy wrapper, and resolve that wrapper
8623 * to the real method in mono_jit_compile_method ().
8625 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8626 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8627 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8628 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8631 /* Common call */
8632 INLINE_FAILURE ("call");
8633 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8634 imt_arg, vtable_arg);
8636 if (tail_call) {
8637 link_bblock (cfg, bblock, end_bblock);
8638 start_new_bblock = 1;
8640 // FIXME: Eliminate unreachable epilogs
8643 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8644 * only reachable from this call.
8646 GET_BBLOCK (cfg, tblock, ip + 5);
8647 if (tblock == bblock || tblock->in_count == 0)
8648 skip_ret = TRUE;
8649 push_res = FALSE;
8652 call_end:
8654 /* End of call, INS should contain the result of the call, if any */
8656 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8657 g_assert (ins);
8658 if (emit_widen)
8659 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8660 else
8661 *sp++ = ins;
8664 if (keep_this_alive) {
8665 MonoInst *dummy_use;
8667 /* See mono_emit_method_call_full () */
8668 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8671 CHECK_CFG_EXCEPTION;
8673 ip += 5;
8674 if (skip_ret) {
8675 g_assert (*ip == CEE_RET);
8676 ip += 1;
8678 ins_flag = 0;
8679 constrained_call = NULL;
8680 if (need_seq_point)
8681 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8682 break;
/*
 * CEE_RET: return from the method being compiled, or from an inlined callee.
 * Inlined returns store into return_var; real returns emit the arch setret
 * and branch to end_bblock. Ends the current basic block.
 */
8684 case CEE_RET:
8685 if (cfg->method != method) {
8686 /* return from inlined method */
8688 * If in_count == 0, that means the ret is unreachable due to
8689 * being preceded by a throw. In that case, inline_method () will
8690 * handle setting the return value
8691 * (test case: test_0_inline_throw ()).
8693 if (return_var && cfg->cbb->in_count) {
8694 MonoType *ret_type = mono_method_signature (method)->ret;
8696 MonoInst *store;
8697 CHECK_STACK (1);
8698 --sp;
/* Only dynamic-method and unwrapped methods are type-checked here; other wrappers may push looser types */
8700 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8701 UNVERIFIED;
8703 //g_assert (returnvar != -1);
8704 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8705 cfg->ret_var_set = TRUE;
8707 } else {
8708 emit_instrumentation_call (cfg, mono_profiler_method_leave);
/* Pop the LMF frame only when this ret is reachable (in_count != 0) */
8710 if (cfg->lmf_var && cfg->cbb->in_count)
8711 emit_pop_lmf (cfg);
8713 if (cfg->ret) {
8714 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8716 if (seq_points && !sym_seq_points) {
8718 * Place a seq point here too even through the IL stack is not
8719 * empty, so a step over on
8720 * call <FOO>
8721 * ret
8722 * will work correctly.
8724 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8725 MONO_ADD_INS (cfg->cbb, ins);
8728 g_assert (!return_var);
8729 CHECK_STACK (1);
8730 --sp;
8732 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8733 UNVERIFIED;
/* Valuetype return: store through vret_addr when the ABI passes a hidden return buffer */
8735 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8736 MonoInst *ret_addr;
8738 if (!cfg->vret_addr) {
8739 MonoInst *ins;
8741 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8742 } else {
8743 EMIT_NEW_RETLOADA (cfg, ret_addr);
8745 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8746 ins->klass = mono_class_from_mono_type (ret_type);
8748 } else {
8749 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* On soft-float targets an R4 return value is converted via an icall before setret */
8750 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8751 MonoInst *iargs [1];
8752 MonoInst *conv;
8754 iargs [0] = *sp;
8755 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8756 mono_arch_emit_setret (cfg, method, conv);
8757 } else {
8758 mono_arch_emit_setret (cfg, method, *sp);
8760 #else
8761 mono_arch_emit_setret (cfg, method, *sp);
8762 #endif
/* The evaluation stack must be empty at ret (ECMA-335) */
8766 if (sp != stack_start)
8767 UNVERIFIED;
8768 MONO_INST_NEW (cfg, ins, OP_BR);
8769 ip++;
8770 ins->inst_target_bb = end_bblock;
8771 MONO_ADD_INS (bblock, ins);
8772 link_bblock (cfg, bblock, end_bblock);
8773 start_new_bblock = 1;
8774 break;
/*
 * Branch opcodes. Short forms decode a signed 8-bit displacement, long forms a
 * 32-bit one; all link the current bblock to the target and terminate it.
 * Pending stack values are spilled via handle_stack_args () so the target
 * bblock's in_stack is consistent.
 */
8775 case CEE_BR_S:
8776 CHECK_OPSIZE (2);
8777 MONO_INST_NEW (cfg, ins, OP_BR);
8778 ip++;
8779 target = ip + 1 + (signed char)(*ip);
8780 ++ip;
8781 GET_BBLOCK (cfg, tblock, target);
8782 link_bblock (cfg, bblock, tblock);
8783 ins->inst_target_bb = tblock;
8784 if (sp != stack_start) {
8785 handle_stack_args (cfg, stack_start, sp - stack_start);
8786 sp = stack_start;
8787 CHECK_UNVERIFIABLE (cfg);
8789 MONO_ADD_INS (bblock, ins);
8790 start_new_bblock = 1;
8791 inline_costs += BRANCH_COST;
8792 break;
/* Short two-way conditional branches: mapped to the long form via BIG_BRANCH_OFFSET */
8793 case CEE_BEQ_S:
8794 case CEE_BGE_S:
8795 case CEE_BGT_S:
8796 case CEE_BLE_S:
8797 case CEE_BLT_S:
8798 case CEE_BNE_UN_S:
8799 case CEE_BGE_UN_S:
8800 case CEE_BGT_UN_S:
8801 case CEE_BLE_UN_S:
8802 case CEE_BLT_UN_S:
8803 CHECK_OPSIZE (2);
8804 CHECK_STACK (2);
8805 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8806 ip++;
8807 target = ip + 1 + *(signed char*)ip;
8808 ip++;
/* ADD_BINCOND emits the compare + conditional branch and links both successor bblocks */
8810 ADD_BINCOND (NULL);
8812 sp = stack_start;
8813 inline_costs += BRANCH_COST;
8814 break;
8815 case CEE_BR:
8816 CHECK_OPSIZE (5);
8817 MONO_INST_NEW (cfg, ins, OP_BR);
8818 ip++;
8820 target = ip + 4 + (gint32)read32(ip);
8821 ip += 4;
8822 GET_BBLOCK (cfg, tblock, target);
8823 link_bblock (cfg, bblock, tblock);
8824 ins->inst_target_bb = tblock;
8825 if (sp != stack_start) {
8826 handle_stack_args (cfg, stack_start, sp - stack_start);
8827 sp = stack_start;
8828 CHECK_UNVERIFIABLE (cfg);
8831 MONO_ADD_INS (bblock, ins);
8833 start_new_bblock = 1;
8834 inline_costs += BRANCH_COST;
8835 break;
/*
 * brtrue/brfalse: compare the single stack value against zero and branch.
 * Implemented as compare-immediate-0 followed by BNE_UN (brtrue) or BEQ (brfalse).
 */
8836 case CEE_BRFALSE_S:
8837 case CEE_BRTRUE_S:
8838 case CEE_BRFALSE:
8839 case CEE_BRTRUE: {
8840 MonoInst *cmp;
8841 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8842 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8843 guint32 opsize = is_short ? 1 : 4;
8845 CHECK_OPSIZE (opsize);
8846 CHECK_STACK (1);
/* ECMA-335 does not allow brtrue/brfalse on value types or floats */
8847 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8848 UNVERIFIED;
8849 ip ++;
8850 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8851 ip += opsize;
8853 sp--;
8855 GET_BBLOCK (cfg, tblock, target);
8856 link_bblock (cfg, bblock, tblock);
8857 GET_BBLOCK (cfg, tblock, ip);
8858 link_bblock (cfg, bblock, tblock);
8860 if (sp != stack_start) {
8861 handle_stack_args (cfg, stack_start, sp - stack_start);
8862 CHECK_UNVERIFIABLE (cfg);
8865 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8866 cmp->sreg1 = sp [0]->dreg;
8867 type_from_op (cmp, sp [0], NULL);
8868 CHECK_TYPE (cmp);
8870 #if SIZEOF_REGISTER == 4
/* No 64-bit compare-immediate on 32-bit targets: materialize a zero I8 const and use OP_LCOMPARE */
8871 if (cmp->opcode == OP_LCOMPARE_IMM) {
8872 /* Convert it to OP_LCOMPARE */
8873 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8874 ins->type = STACK_I8;
8875 ins->dreg = alloc_dreg (cfg, STACK_I8);
8876 ins->inst_l = 0;
8877 MONO_ADD_INS (bblock, ins);
8878 cmp->opcode = OP_LCOMPARE;
8879 cmp->sreg2 = ins->dreg;
8881 #endif
8882 MONO_ADD_INS (bblock, cmp);
8884 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8885 type_from_op (ins, sp [0], NULL);
8886 MONO_ADD_INS (bblock, ins);
8887 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8888 GET_BBLOCK (cfg, tblock, target);
8889 ins->inst_true_bb = tblock;
8890 GET_BBLOCK (cfg, tblock, ip);
8891 ins->inst_false_bb = tblock;
8892 start_new_bblock = 2;
8894 sp = stack_start;
8895 inline_costs += BRANCH_COST;
8896 break;
/* Long two-way conditional branches (32-bit displacement) */
8898 case CEE_BEQ:
8899 case CEE_BGE:
8900 case CEE_BGT:
8901 case CEE_BLE:
8902 case CEE_BLT:
8903 case CEE_BNE_UN:
8904 case CEE_BGE_UN:
8905 case CEE_BGT_UN:
8906 case CEE_BLE_UN:
8907 case CEE_BLT_UN:
8908 CHECK_OPSIZE (5);
8909 CHECK_STACK (2);
8910 MONO_INST_NEW (cfg, ins, *ip);
8911 ip++;
8912 target = ip + 4 + (gint32)read32(ip);
8913 ip += 4;
8915 ADD_BINCOND (NULL);
8917 sp = stack_start;
8918 inline_costs += BRANCH_COST;
8919 break;
/*
 * CEE_SWITCH: multi-way branch on a 32-bit selector. A bounds check against n
 * falls through to the default bblock; in-range selectors are dispatched
 * either via OP_SWITCH (ARM non-AOT, LLVM) or via an explicit jump table:
 * scale the index, add the table base, load the target and branch-register.
 */
8920 case CEE_SWITCH: {
8921 MonoInst *src1;
8922 MonoBasicBlock **targets;
8923 MonoBasicBlock *default_bblock;
8924 MonoJumpInfoBBTable *table;
8925 int offset_reg = alloc_preg (cfg);
8926 int target_reg = alloc_preg (cfg);
8927 int table_reg = alloc_preg (cfg);
8928 int sum_reg = alloc_preg (cfg);
8929 gboolean use_op_switch;
8931 CHECK_OPSIZE (5);
8932 CHECK_STACK (1);
8933 n = read32 (ip + 1);
8934 --sp;
8935 src1 = sp [0];
8936 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8937 UNVERIFIED;
8939 ip += 5;
8940 CHECK_OPSIZE (n * sizeof (guint32));
/* 'target' is first set to the instruction after the jump table = the default successor */
8941 target = ip + n * sizeof (guint32);
8943 GET_BBLOCK (cfg, default_bblock, target);
8944 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8946 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8947 for (i = 0; i < n; ++i) {
8948 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8949 targets [i] = tblock;
8950 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8951 ip += 4;
8954 if (sp != stack_start) {
8956 * Link the current bb with the targets as well, so handle_stack_args
8957 * will set their in_stack correctly.
8959 link_bblock (cfg, bblock, default_bblock);
8960 for (i = 0; i < n; ++i)
8961 link_bblock (cfg, bblock, targets [i]);
8963 handle_stack_args (cfg, stack_start, sp - stack_start);
8964 sp = stack_start;
8965 CHECK_UNVERIFIABLE (cfg);
/* Unsigned compare: any selector >= n (including negative ones) goes to the default */
8968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8970 bblock = cfg->cbb;
8972 for (i = 0; i < n; ++i)
8973 link_bblock (cfg, bblock, targets [i]);
8975 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8976 table->table = targets;
8977 table->table_size = n;
8979 use_op_switch = FALSE;
8980 #ifdef TARGET_ARM
8981 /* ARM implements SWITCH statements differently */
8982 /* FIXME: Make it use the generic implementation */
8983 if (!cfg->compile_aot)
8984 use_op_switch = TRUE;
8985 #endif
8987 if (COMPILE_LLVM (cfg))
8988 use_op_switch = TRUE;
8990 cfg->cbb->has_jump_table = 1;
8992 if (use_op_switch) {
8993 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8994 ins->sreg1 = src1->dreg;
8995 ins->inst_p0 = table;
8996 ins->inst_many_bb = targets;
8997 ins->klass = GUINT_TO_POINTER (n);
8998 MONO_ADD_INS (cfg->cbb, ins);
8999 } else {
/* Scale the selector by the pointer size (shift by 3 on 64-bit, 2 on 32-bit) */
9000 if (sizeof (gpointer) == 8)
9001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9002 else
9003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9005 #if SIZEOF_REGISTER == 8
9006 /* The upper word might not be zero, and we add it to a 64 bit address later */
9007 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9008 #endif
9010 if (cfg->compile_aot) {
9011 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9012 } else {
9013 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9014 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9015 ins->inst_p0 = table;
9016 ins->dreg = table_reg;
9017 MONO_ADD_INS (cfg->cbb, ins);
9020 /* FIXME: Use load_memindex */
9021 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9022 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9023 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9025 start_new_bblock = 1;
9026 inline_costs += (BRANCH_COST * 2);
9027 break;
/*
 * CEE_LDIND_*: indirect load through the address on top of the stack.
 * The destination register class depends on the loaded type (float/long/ref/ptr).
 */
9029 case CEE_LDIND_I1:
9030 case CEE_LDIND_U1:
9031 case CEE_LDIND_I2:
9032 case CEE_LDIND_U2:
9033 case CEE_LDIND_I4:
9034 case CEE_LDIND_U4:
9035 case CEE_LDIND_I8:
9036 case CEE_LDIND_I:
9037 case CEE_LDIND_R4:
9038 case CEE_LDIND_R8:
9039 case CEE_LDIND_REF:
9040 CHECK_STACK (1);
9041 --sp;
9043 switch (*ip) {
9044 case CEE_LDIND_R4:
9045 case CEE_LDIND_R8:
9046 dreg = alloc_freg (cfg);
9047 break;
9048 case CEE_LDIND_I8:
9049 dreg = alloc_lreg (cfg);
9050 break;
9051 case CEE_LDIND_REF:
9052 dreg = alloc_ireg_ref (cfg);
9053 break;
9054 default:
9055 dreg = alloc_preg (cfg);
9058 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9059 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9060 ins->flags |= ins_flag;
9061 MONO_ADD_INS (bblock, ins);
9062 *sp++ = ins;
/* Barrier is emitted AFTER a volatile load (acquire ordering) */
9063 if (ins_flag & MONO_INST_VOLATILE) {
9064 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9065 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9066 emit_memory_barrier (cfg, FullBarrier);
9068 ins_flag = 0;
9069 ++ip;
9070 break;
/*
 * CEE_STIND_*: indirect store; sp[0] is the address, sp[1] the value.
 */
9071 case CEE_STIND_REF:
9072 case CEE_STIND_I1:
9073 case CEE_STIND_I2:
9074 case CEE_STIND_I4:
9075 case CEE_STIND_I8:
9076 case CEE_STIND_R4:
9077 case CEE_STIND_R8:
9078 case CEE_STIND_I:
9079 CHECK_STACK (2);
9080 sp -= 2;
/* Barrier is emitted BEFORE a volatile store (release ordering) */
9082 if (ins_flag & MONO_INST_VOLATILE) {
9083 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9084 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
9085 emit_memory_barrier (cfg, FullBarrier);
9088 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9089 ins->flags |= ins_flag;
9090 ins_flag = 0;
9092 MONO_ADD_INS (bblock, ins);
/* GC write barrier for reference stores, except when storing a known null */
9094 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9095 emit_write_barrier (cfg, sp [0], sp [1]);
9097 inline_costs += 1;
9098 ++ip;
9099 break;
/*
 * Two-operand arithmetic/logical opcodes. Both pop two values, infer the
 * result type with type_from_op (), and fold a constant second operand into
 * an immediate-form opcode when the architecture accepts the immediate.
 */
9101 case CEE_MUL:
9102 CHECK_STACK (2);
9104 MONO_INST_NEW (cfg, ins, (*ip));
9105 sp -= 2;
9106 ins->sreg1 = sp [0]->dreg;
9107 ins->sreg2 = sp [1]->dreg;
9108 type_from_op (ins, sp [0], sp [1]);
9109 CHECK_TYPE (ins);
9110 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9112 /* Use the immediate opcodes if possible */
9113 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9114 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9115 if (imm_opcode != -1) {
9116 ins->opcode = imm_opcode;
9117 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9118 ins->sreg2 = -1;
/* The constant was folded into the immediate, so the ICONST instruction is dead */
9120 sp [1]->opcode = OP_NOP;
9124 MONO_ADD_INS ((cfg)->cbb, (ins));
9126 *sp++ = mono_decompose_opcode (cfg, ins);
9127 ip++;
9128 break;
9129 case CEE_ADD:
9130 case CEE_SUB:
9131 case CEE_DIV:
9132 case CEE_DIV_UN:
9133 case CEE_REM:
9134 case CEE_REM_UN:
9135 case CEE_AND:
9136 case CEE_OR:
9137 case CEE_XOR:
9138 case CEE_SHL:
9139 case CEE_SHR:
9140 case CEE_SHR_UN:
9141 CHECK_STACK (2);
9143 MONO_INST_NEW (cfg, ins, (*ip));
9144 sp -= 2;
9145 ins->sreg1 = sp [0]->dreg;
9146 ins->sreg2 = sp [1]->dreg;
9147 type_from_op (ins, sp [0], sp [1]);
9148 CHECK_TYPE (ins);
9149 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9150 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9152 /* FIXME: Pass opcode to is_inst_imm */
9154 /* Use the immediate opcodes if possible */
9155 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9156 int imm_opcode;
9158 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9159 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9160 /* Keep emulated opcodes which are optimized away later */
9161 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9162 imm_opcode = mono_op_to_op_imm (ins->opcode);
9164 #endif
9165 if (imm_opcode != -1) {
9166 ins->opcode = imm_opcode;
9167 if (sp [1]->opcode == OP_I8CONST) {
9168 #if SIZEOF_REGISTER == 8
9169 ins->inst_imm = sp [1]->inst_l;
9170 #else
/* 32-bit targets carry a 64-bit immediate as two halves */
9171 ins->inst_ls_word = sp [1]->inst_ls_word;
9172 ins->inst_ms_word = sp [1]->inst_ms_word;
9173 #endif
9175 else
9176 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9177 ins->sreg2 = -1;
9179 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9180 if (sp [1]->next == NULL)
9181 sp [1]->opcode = OP_NOP;
9184 MONO_ADD_INS ((cfg)->cbb, (ins));
9186 *sp++ = mono_decompose_opcode (cfg, ins);
9187 ip++;
9188 break;
/*
 * Unary ops and numeric conversions. conv.i8/conv.u8 of an ICONST is folded
 * into an I8CONST up front so later passes see long constants directly;
 * everything else goes through ADD_UNOP. Overflow-checked conversions from R8
 * are routed through a 64-bit intermediate so the range check is exact.
 */
9189 case CEE_NEG:
9190 case CEE_NOT:
9191 case CEE_CONV_I1:
9192 case CEE_CONV_I2:
9193 case CEE_CONV_I4:
9194 case CEE_CONV_R4:
9195 case CEE_CONV_R8:
9196 case CEE_CONV_U4:
9197 case CEE_CONV_I8:
9198 case CEE_CONV_U8:
9199 case CEE_CONV_OVF_I8:
9200 case CEE_CONV_OVF_U8:
9201 case CEE_CONV_R_UN:
9202 CHECK_STACK (1);
9204 /* Special case this earlier so we have long constants in the IR */
9205 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9206 int data = sp [-1]->inst_c0;
9207 sp [-1]->opcode = OP_I8CONST;
9208 sp [-1]->type = STACK_I8;
9209 #if SIZEOF_REGISTER == 8
/* conv.u8 zero-extends, conv.i8 sign-extends */
9210 if ((*ip) == CEE_CONV_U8)
9211 sp [-1]->inst_c0 = (guint32)data;
9212 else
9213 sp [-1]->inst_c0 = data;
9214 #else
9215 sp [-1]->inst_ls_word = data;
9216 if ((*ip) == CEE_CONV_U8)
9217 sp [-1]->inst_ms_word = 0;
9218 else
9219 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9220 #endif
9221 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9223 else {
9224 ADD_UNOP (*ip);
9226 ip++;
9227 break;
9228 case CEE_CONV_OVF_I4:
9229 case CEE_CONV_OVF_I1:
9230 case CEE_CONV_OVF_I2:
9231 case CEE_CONV_OVF_I:
9232 case CEE_CONV_OVF_U:
9233 CHECK_STACK (1);
/* From R8, first convert to I8 with overflow check, then narrow */
9235 if (sp [-1]->type == STACK_R8) {
9236 ADD_UNOP (CEE_CONV_OVF_I8);
9237 ADD_UNOP (*ip);
9238 } else {
9239 ADD_UNOP (*ip);
9241 ip++;
9242 break;
9243 case CEE_CONV_OVF_U1:
9244 case CEE_CONV_OVF_U2:
9245 case CEE_CONV_OVF_U4:
9246 CHECK_STACK (1);
/* From R8, first convert to U8 with overflow check, then narrow */
9248 if (sp [-1]->type == STACK_R8) {
9249 ADD_UNOP (CEE_CONV_OVF_U8);
9250 ADD_UNOP (*ip);
9251 } else {
9252 ADD_UNOP (*ip);
9254 ip++;
9255 break;
9256 case CEE_CONV_OVF_I1_UN:
9257 case CEE_CONV_OVF_I2_UN:
9258 case CEE_CONV_OVF_I4_UN:
9259 case CEE_CONV_OVF_I8_UN:
9260 case CEE_CONV_OVF_U1_UN:
9261 case CEE_CONV_OVF_U2_UN:
9262 case CEE_CONV_OVF_U4_UN:
9263 case CEE_CONV_OVF_U8_UN:
9264 case CEE_CONV_OVF_I_UN:
9265 case CEE_CONV_OVF_U_UN:
9266 case CEE_CONV_U2:
9267 case CEE_CONV_U1:
9268 case CEE_CONV_I:
9269 case CEE_CONV_U:
9270 CHECK_STACK (1);
9271 ADD_UNOP (*ip);
9272 CHECK_CFG_EXCEPTION;
9273 ip++;
9274 break;
/* Overflow-checked arithmetic: ADD_BINOP emits the op plus the overflow check */
9275 case CEE_ADD_OVF:
9276 case CEE_ADD_OVF_UN:
9277 case CEE_MUL_OVF:
9278 case CEE_MUL_OVF_UN:
9279 case CEE_SUB_OVF:
9280 case CEE_SUB_OVF_UN:
9281 CHECK_STACK (2);
9282 ADD_BINOP (*ip);
9283 ip++;
9284 break;
/*
 * CEE_CPOBJ: copy a value of type 'klass' from address sp[1] to address sp[0].
 * Reference types become a load+store (with write barrier); value types use
 * mini_emit_stobj ().
 */
9285 case CEE_CPOBJ:
9286 GSHAREDVT_FAILURE (*ip);
9287 CHECK_OPSIZE (5);
9288 CHECK_STACK (2);
9289 token = read32 (ip + 1);
9290 klass = mini_get_class (method, token, generic_context);
9291 CHECK_TYPELOAD (klass);
9292 sp -= 2;
9293 if (generic_class_is_reference_type (cfg, klass)) {
9294 MonoInst *store, *load;
9295 int dreg = alloc_ireg_ref (cfg);
9297 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9298 load->flags |= ins_flag;
9299 MONO_ADD_INS (cfg->cbb, load);
9301 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9302 store->flags |= ins_flag;
9303 MONO_ADD_INS (cfg->cbb, store);
9305 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9306 emit_write_barrier (cfg, sp [0], sp [1]);
9307 } else {
9308 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9310 ins_flag = 0;
9311 ip += 5;
9312 break;
/*
 * CEE_LDOBJ: load a value of type 'klass' from the address on the stack.
 * Peephole-optimizes the common ldobj+stloc and ldobj+stobj sequences when
 * the follow-up opcode is in the same basic block.
 */
9313 case CEE_LDOBJ: {
9314 int loc_index = -1;
9315 int stloc_len = 0;
9317 CHECK_OPSIZE (5);
9318 CHECK_STACK (1);
9319 --sp;
9320 token = read32 (ip + 1);
9321 klass = mini_get_class (method, token, generic_context);
9322 CHECK_TYPELOAD (klass);
9324 /* Optimize the common ldobj+stloc combination */
9325 switch (ip [5]) {
9326 case CEE_STLOC_S:
9327 loc_index = ip [6];
9328 stloc_len = 2;
9329 break;
9330 case CEE_STLOC_0:
9331 case CEE_STLOC_1:
9332 case CEE_STLOC_2:
9333 case CEE_STLOC_3:
9334 loc_index = ip [5] - CEE_STLOC_0;
9335 stloc_len = 1;
9336 break;
9337 default:
9338 break;
/* Fused ldobj+stloc: load directly into the local's register and skip the stloc */
9341 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9342 CHECK_LOCAL (loc_index);
9344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9345 ins->dreg = cfg->locals [loc_index]->dreg;
9346 ins->flags |= ins_flag;
9347 ip += 5;
9348 ip += stloc_len;
9349 if (ins_flag & MONO_INST_VOLATILE) {
9350 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9351 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9352 emit_memory_barrier (cfg, FullBarrier);
9354 ins_flag = 0;
9355 break;
9358 /* Optimize the ldobj+stobj combination */
9359 /* The reference case ends up being a load+store anyway */
9360 /* Skip this if the operation is volatile. */
9361 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9362 CHECK_STACK (1);
9364 sp --;
9366 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9368 ip += 5 + 5;
9369 ins_flag = 0;
9370 break;
/* Plain ldobj: load the value and push it */
9373 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9374 ins->flags |= ins_flag;
9375 *sp++ = ins;
9377 if (ins_flag & MONO_INST_VOLATILE) {
9378 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9379 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9380 emit_memory_barrier (cfg, FullBarrier);
9383 ip += 5;
9384 ins_flag = 0;
9385 inline_costs += 1;
9386 break;
/*
 * CEE_LDSTR: push a string literal. Strategy depends on compilation mode:
 * dynamic methods and other wrappers embed a precomputed object/icall;
 * MONO_OPT_SHARED calls mono_ldstr at run time (one code image, many
 * domains); out-of-line bblocks defer string creation to helper icalls;
 * AOT emits a patchable constant; the plain JIT interns the string now and
 * embeds the pointer.
 */
9388 case CEE_LDSTR:
9389 CHECK_STACK_OVF (1);
9390 CHECK_OPSIZE (5);
9391 n = read32 (ip + 1);
9393 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
/* Dynamic methods store the string object itself in the wrapper data */
9394 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9395 ins->type = STACK_OBJ;
9396 *sp = ins;
9398 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9399 MonoInst *iargs [1];
9401 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9402 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9403 } else {
9404 if (cfg->opt & MONO_OPT_SHARED) {
9405 MonoInst *iargs [3];
9407 if (cfg->compile_aot) {
9408 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9410 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9411 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9412 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9413 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
/* Intern eagerly as well, so the runtime lookup at execution time is a hit */
9414 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9415 } else {
9416 if (bblock->out_of_line) {
9417 MonoInst *iargs [2];
9419 if (image == mono_defaults.corlib) {
9421 * Avoid relocations in AOT and save some space by using a
9422 * version of helper_ldstr specialized to mscorlib.
9424 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9425 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9426 } else {
9427 /* Avoid creating the string object */
9428 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9429 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9430 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9433 else
9434 if (cfg->compile_aot) {
9435 NEW_LDSTRCONST (cfg, ins, image, n);
9436 *sp = ins;
9437 MONO_ADD_INS (bblock, ins);
9439 else {
9440 NEW_PCONST (cfg, ins, NULL);
9441 ins->type = STACK_OBJ;
9442 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9443 if (!ins->inst_p0)
9444 OUT_OF_MEMORY_FAILURE;
9446 *sp = ins;
9447 MONO_ADD_INS (bblock, ins);
9452 sp++;
9453 ip += 5;
9454 break;
/*
 * CEE_NEWOBJ: allocate an object and invoke its constructor.
 * Handles many special shapes: corlib exception ctors in out-of-line code
 * (compact icall form), System.Array varargs ctors, string ctors (no
 * allocation, ctor returns the object), value types (stack temp, no heap
 * alloc), generic sharing (rgctx vtable/method lookups), ctor intrinsics,
 * inlining, and gsharedvt indirect calls.
 */
9455 case CEE_NEWOBJ: {
9456 MonoInst *iargs [2];
9457 MonoMethodSignature *fsig;
9458 MonoInst this_ins;
9459 MonoInst *alloc;
9460 MonoInst *vtable_arg = NULL;
9462 CHECK_OPSIZE (5);
9463 token = read32 (ip + 1);
9464 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9465 if (!cmethod || mono_loader_get_last_error ())
9466 LOAD_ERROR;
9467 fsig = mono_method_get_signature (cmethod, image, token);
9468 if (!fsig)
9469 LOAD_ERROR;
9471 mono_save_token_info (cfg, image, token, cmethod);
9473 if (!mono_class_init (cmethod->klass))
9474 TYPE_LOAD_ERROR (cmethod->klass);
9476 context_used = mini_method_check_context_used (cfg, cmethod);
9478 if (mono_security_cas_enabled ()) {
9479 if (check_linkdemand (cfg, method, cmethod))
9480 INLINE_FAILURE ("linkdemand");
9481 CHECK_CFG_EXCEPTION;
9482 } else if (mono_security_core_clr_enabled ()) {
9483 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
/* Shared generic code cannot rely on the caller having run the cctor; emit an explicit init */
9486 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9487 emit_generic_class_init (cfg, cmethod->klass);
9488 CHECK_TYPELOAD (cmethod->klass);
9492 if (cfg->gsharedvt) {
9493 if (mini_is_gsharedvt_variable_signature (sig))
9494 GSHAREDVT_FAILURE (*ip);
/* Shared valuetype ctors need an rgctx argument (method rgctx or vtable) */
9498 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9499 mono_method_is_generic_sharable (cmethod, TRUE)) {
9500 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9501 mono_class_vtable (cfg->domain, cmethod->klass);
9502 CHECK_TYPELOAD (cmethod->klass);
9504 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9505 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9506 } else {
9507 if (context_used) {
9508 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9509 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9510 } else {
9511 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9513 CHECK_TYPELOAD (cmethod->klass);
9514 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9519 n = fsig->param_count;
9520 CHECK_STACK (n);
9523 * Generate smaller code for the common newobj <exception> instruction in
9524 * argument checking code.
9526 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9527 is_exception_class (cmethod->klass) && n <= 2 &&
9528 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9529 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9530 MonoInst *iargs [3];
9532 g_assert (!vtable_arg);
9534 sp -= n;
9536 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9537 switch (n) {
9538 case 0:
9539 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9540 break;
9541 case 1:
9542 iargs [1] = sp [0];
9543 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9544 break;
9545 case 2:
9546 iargs [1] = sp [0];
9547 iargs [2] = sp [1];
9548 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9549 break;
9550 default:
9551 g_assert_not_reached ();
9554 ip += 5;
9555 inline_costs += 5;
9556 break;
9559 /* move the args to allow room for 'this' in the first position */
9560 while (n--) {
9561 --sp;
9562 sp [1] = sp [0];
9565 /* check_call_signature () requires sp[0] to be set */
9566 this_ins.type = STACK_OBJ;
9567 sp [0] = &this_ins;
9568 if (check_call_signature (cfg, fsig, sp))
9569 UNVERIFIED;
9571 iargs [0] = NULL;
9573 if (mini_class_is_system_array (cmethod->klass)) {
9574 g_assert (!vtable_arg);
9576 *sp = emit_get_rgctx_method (cfg, context_used,
9577 cmethod, MONO_RGCTX_INFO_METHOD);
9579 /* Avoid varargs in the common case */
9580 if (fsig->param_count == 1)
9581 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9582 else if (fsig->param_count == 2)
9583 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9584 else if (fsig->param_count == 3)
9585 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9586 else if (fsig->param_count == 4)
9587 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9588 else
9589 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9590 } else if (cmethod->string_ctor) {
9591 g_assert (!context_used);
9592 g_assert (!vtable_arg);
9593 /* we simply pass a null pointer */
9594 EMIT_NEW_PCONST (cfg, *sp, NULL);
9595 /* now call the string ctor */
9596 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9597 } else {
9598 MonoInst* callvirt_this_arg = NULL;
9600 if (cmethod->klass->valuetype) {
/* Valuetype newobj: zero-init a stack temp and pass its address as 'this'; no heap allocation */
9601 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9602 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9603 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9605 alloc = NULL;
9608 * The code generated by mini_emit_virtual_call () expects
9609 * iargs [0] to be a boxed instance, but luckily the vcall
9610 * will be transformed into a normal call there.
9612 } else if (context_used) {
9613 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9614 *sp = alloc;
9615 } else {
9616 MonoVTable *vtable = NULL;
9618 if (!cfg->compile_aot)
9619 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9620 CHECK_TYPELOAD (cmethod->klass);
9623 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9624 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9625 * As a workaround, we call class cctors before allocating objects.
9627 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9628 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9629 if (cfg->verbose_level > 2)
9630 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9631 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9634 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9635 *sp = alloc;
9637 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9639 if (alloc)
9640 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9642 /* Now call the actual ctor */
9643 /* Avoid virtual calls to ctors if possible */
9644 if (mono_class_is_marshalbyref (cmethod->klass))
9645 callvirt_this_arg = sp [0];
9648 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9649 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9650 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9651 *sp = ins;
9652 sp++;
9655 CHECK_CFG_EXCEPTION;
/* Ctors on exception classes are not inlined (mostly cold argument-checking code) */
9656 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9657 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9658 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9659 !g_list_find (dont_inline, cmethod)) {
9660 int costs;
9662 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9663 cfg->real_offset += 5;
9664 bblock = cfg->cbb;
9666 inline_costs += costs - 5;
9667 } else {
9668 INLINE_FAILURE ("inline failure");
9669 // FIXME-VT: Clean this up
9670 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9671 GSHAREDVT_FAILURE(*ip);
9672 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9674 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9675 MonoInst *addr;
9677 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9678 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9679 } else if (context_used &&
9680 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9681 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9682 MonoInst *cmethod_addr;
9684 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9686 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9687 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9689 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9690 } else {
9691 INLINE_FAILURE ("ctor call");
9692 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9693 callvirt_this_arg, NULL, vtable_arg);
/* Push the result: for value types reload the initialized stack temp, otherwise the allocation */
9697 if (alloc == NULL) {
9698 /* Valuetype */
9699 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9700 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9701 *sp++= ins;
9703 else
9704 *sp++ = alloc;
9706 ip += 5;
9707 inline_costs += 5;
9708 break;
/*
 * CEE_CASTCLASS: checked downcast of the object on top of the stack.
 * Three strategies: a cache-assisted wrapper call for classes with reference
 * variant generic arguments, an inlined managed castclass wrapper for
 * MarshalByRef/interface targets, and the generic handle_castclass () path
 * otherwise.
 */
9710 case CEE_CASTCLASS:
9711 CHECK_STACK (1);
9712 --sp;
9713 CHECK_OPSIZE (5);
9714 token = read32 (ip + 1);
9715 klass = mini_get_class (method, token, generic_context);
9716 CHECK_TYPELOAD (klass);
9717 if (sp [0]->type != STACK_OBJ)
9718 UNVERIFIED;
9720 context_used = mini_class_check_context_used (cfg, klass);
9722 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9723 MonoInst *args [3];
9725 /* obj */
9726 args [0] = *sp;
9728 /* klass */
9729 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9731 /* inline cache*/
9732 if (cfg->compile_aot)
9733 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9734 else
9735 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9737 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9739 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9740 ip += 5;
9741 inline_costs += 2;
9742 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9743 MonoMethod *mono_castclass;
9744 MonoInst *iargs [1];
9745 int costs;
9747 mono_castclass = mono_marshal_get_castclass (klass);
9748 iargs [0] = sp [0];
/* Record cast details so a failing cast can produce a precise InvalidCastException message */
9750 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9751 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9752 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9753 reset_cast_details (cfg);
9754 CHECK_CFG_EXCEPTION;
9755 g_assert (costs > 0);
9757 ip += 5;
9758 cfg->real_offset += 5;
9759 bblock = cfg->cbb;
9761 *sp++ = iargs [0];
9763 inline_costs += costs;
9765 else {
9766 ins = handle_castclass (cfg, klass, *sp, context_used);
9767 CHECK_CFG_EXCEPTION;
9768 bblock = cfg->cbb;
9769 *sp ++ = ins;
9770 ip += 5;
9772 break;
9773 case CEE_ISINST: {
9774 CHECK_STACK (1);
9775 --sp;
9776 CHECK_OPSIZE (5);
9777 token = read32 (ip + 1);
9778 klass = mini_get_class (method, token, generic_context);
9779 CHECK_TYPELOAD (klass);
9780 if (sp [0]->type != STACK_OBJ)
9781 UNVERIFIED;
9783 context_used = mini_class_check_context_used (cfg, klass);
9785 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9786 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9787 MonoInst *args [3];
9789 /* obj */
9790 args [0] = *sp;
9792 /* klass */
9793 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9795 /* inline cache*/
9796 if (cfg->compile_aot)
9797 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9798 else
9799 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9801 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9802 ip += 5;
9803 inline_costs += 2;
9804 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9805 MonoMethod *mono_isinst;
9806 MonoInst *iargs [1];
9807 int costs;
9809 mono_isinst = mono_marshal_get_isinst (klass);
9810 iargs [0] = sp [0];
9812 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9813 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9814 CHECK_CFG_EXCEPTION;
9815 g_assert (costs > 0);
9817 ip += 5;
9818 cfg->real_offset += 5;
9819 bblock = cfg->cbb;
9821 *sp++= iargs [0];
9823 inline_costs += costs;
9825 else {
9826 ins = handle_isinst (cfg, klass, *sp, context_used);
9827 CHECK_CFG_EXCEPTION;
9828 bblock = cfg->cbb;
9829 *sp ++ = ins;
9830 ip += 5;
9832 break;
9834 case CEE_UNBOX_ANY: {
9835 CHECK_STACK (1);
9836 --sp;
9837 CHECK_OPSIZE (5);
9838 token = read32 (ip + 1);
9839 klass = mini_get_class (method, token, generic_context);
9840 CHECK_TYPELOAD (klass);
9842 mono_save_token_info (cfg, image, token, klass);
9844 context_used = mini_class_check_context_used (cfg, klass);
9846 if (mini_is_gsharedvt_klass (cfg, klass)) {
9847 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9848 sp ++;
9850 ip += 5;
9851 inline_costs += 2;
9852 break;
9855 if (generic_class_is_reference_type (cfg, klass)) {
9856 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9857 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9858 MonoInst *args [3];
9860 /* obj */
9861 args [0] = *sp;
9863 /* klass */
9864 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9866 /* inline cache*/
9867 /*FIXME AOT support*/
9868 if (cfg->compile_aot)
9869 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9870 else
9871 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9873 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9874 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9875 ip += 5;
9876 inline_costs += 2;
9877 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9878 MonoMethod *mono_castclass;
9879 MonoInst *iargs [1];
9880 int costs;
9882 mono_castclass = mono_marshal_get_castclass (klass);
9883 iargs [0] = sp [0];
9885 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9886 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9887 CHECK_CFG_EXCEPTION;
9888 g_assert (costs > 0);
9890 ip += 5;
9891 cfg->real_offset += 5;
9892 bblock = cfg->cbb;
9894 *sp++ = iargs [0];
9895 inline_costs += costs;
9896 } else {
9897 ins = handle_castclass (cfg, klass, *sp, context_used);
9898 CHECK_CFG_EXCEPTION;
9899 bblock = cfg->cbb;
9900 *sp ++ = ins;
9901 ip += 5;
9903 break;
9906 if (mono_class_is_nullable (klass)) {
9907 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9908 *sp++= ins;
9909 ip += 5;
9910 break;
9913 /* UNBOX */
9914 ins = handle_unbox (cfg, klass, sp, context_used);
9915 *sp = ins;
9917 ip += 5;
9919 /* LDOBJ */
9920 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9921 *sp++ = ins;
9923 inline_costs += 2;
9924 break;
9926 case CEE_BOX: {
9927 MonoInst *val;
9929 CHECK_STACK (1);
9930 --sp;
9931 val = *sp;
9932 CHECK_OPSIZE (5);
9933 token = read32 (ip + 1);
9934 klass = mini_get_class (method, token, generic_context);
9935 CHECK_TYPELOAD (klass);
9937 mono_save_token_info (cfg, image, token, klass);
9939 context_used = mini_class_check_context_used (cfg, klass);
9941 if (generic_class_is_reference_type (cfg, klass)) {
9942 *sp++ = val;
9943 ip += 5;
9944 break;
9947 if (klass == mono_defaults.void_class)
9948 UNVERIFIED;
9949 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9950 UNVERIFIED;
9951 /* frequent check in generic code: box (struct), brtrue */
9953 // FIXME: LLVM can't handle the inconsistent bb linking
9954 if (!mono_class_is_nullable (klass) &&
9955 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9956 (ip [5] == CEE_BRTRUE ||
9957 ip [5] == CEE_BRTRUE_S ||
9958 ip [5] == CEE_BRFALSE ||
9959 ip [5] == CEE_BRFALSE_S)) {
9960 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9961 int dreg;
9962 MonoBasicBlock *true_bb, *false_bb;
9964 ip += 5;
9966 if (cfg->verbose_level > 3) {
9967 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9968 printf ("<box+brtrue opt>\n");
9971 switch (*ip) {
9972 case CEE_BRTRUE_S:
9973 case CEE_BRFALSE_S:
9974 CHECK_OPSIZE (2);
9975 ip++;
9976 target = ip + 1 + (signed char)(*ip);
9977 ip++;
9978 break;
9979 case CEE_BRTRUE:
9980 case CEE_BRFALSE:
9981 CHECK_OPSIZE (5);
9982 ip++;
9983 target = ip + 4 + (gint)(read32 (ip));
9984 ip += 4;
9985 break;
9986 default:
9987 g_assert_not_reached ();
9991 * We need to link both bblocks, since it is needed for handling stack
9992 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9993 * Branching to only one of them would lead to inconsistencies, so
9994 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9996 GET_BBLOCK (cfg, true_bb, target);
9997 GET_BBLOCK (cfg, false_bb, ip);
9999 mono_link_bblock (cfg, cfg->cbb, true_bb);
10000 mono_link_bblock (cfg, cfg->cbb, false_bb);
10002 if (sp != stack_start) {
10003 handle_stack_args (cfg, stack_start, sp - stack_start);
10004 sp = stack_start;
10005 CHECK_UNVERIFIABLE (cfg);
10008 if (COMPILE_LLVM (cfg)) {
10009 dreg = alloc_ireg (cfg);
10010 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10013 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10014 } else {
10015 /* The JIT can't eliminate the iconst+compare */
10016 MONO_INST_NEW (cfg, ins, OP_BR);
10017 ins->inst_target_bb = is_true ? true_bb : false_bb;
10018 MONO_ADD_INS (cfg->cbb, ins);
10021 start_new_bblock = 1;
10022 break;
10025 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10027 CHECK_CFG_EXCEPTION;
10028 ip += 5;
10029 inline_costs += 1;
10030 break;
10032 case CEE_UNBOX: {
10033 CHECK_STACK (1);
10034 --sp;
10035 CHECK_OPSIZE (5);
10036 token = read32 (ip + 1);
10037 klass = mini_get_class (method, token, generic_context);
10038 CHECK_TYPELOAD (klass);
10040 mono_save_token_info (cfg, image, token, klass);
10042 context_used = mini_class_check_context_used (cfg, klass);
10044 if (mono_class_is_nullable (klass)) {
10045 MonoInst *val;
10047 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10048 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10050 *sp++= ins;
10051 } else {
10052 ins = handle_unbox (cfg, klass, sp, context_used);
10053 *sp++ = ins;
10055 ip += 5;
10056 inline_costs += 2;
10057 break;
10059 case CEE_LDFLD:
10060 case CEE_LDFLDA:
10061 case CEE_STFLD:
10062 case CEE_LDSFLD:
10063 case CEE_LDSFLDA:
10064 case CEE_STSFLD: {
10065 MonoClassField *field;
10066 #ifndef DISABLE_REMOTING
10067 int costs;
10068 #endif
10069 guint foffset;
10070 gboolean is_instance;
10071 int op;
10072 gpointer addr = NULL;
10073 gboolean is_special_static;
10074 MonoType *ftype;
10075 MonoInst *store_val = NULL;
10076 MonoInst *thread_ins;
10078 op = *ip;
10079 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10080 if (is_instance) {
10081 if (op == CEE_STFLD) {
10082 CHECK_STACK (2);
10083 sp -= 2;
10084 store_val = sp [1];
10085 } else {
10086 CHECK_STACK (1);
10087 --sp;
10089 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10090 UNVERIFIED;
10091 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10092 UNVERIFIED;
10093 } else {
10094 if (op == CEE_STSFLD) {
10095 CHECK_STACK (1);
10096 sp--;
10097 store_val = sp [0];
10101 CHECK_OPSIZE (5);
10102 token = read32 (ip + 1);
10103 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10104 field = mono_method_get_wrapper_data (method, token);
10105 klass = field->parent;
10107 else {
10108 field = mono_field_from_token (image, token, &klass, generic_context);
10110 if (!field)
10111 LOAD_ERROR;
10112 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10113 FIELD_ACCESS_FAILURE;
10114 mono_class_init (klass);
10116 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10117 UNVERIFIED;
10119 /* if the class is Critical then transparent code cannot access it's fields */
10120 if (!is_instance && mono_security_core_clr_enabled ())
10121 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10123 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10124 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10125 if (mono_security_core_clr_enabled ())
10126 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10130 * LDFLD etc. is usable on static fields as well, so convert those cases to
10131 * the static case.
10133 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10134 switch (op) {
10135 case CEE_LDFLD:
10136 op = CEE_LDSFLD;
10137 break;
10138 case CEE_STFLD:
10139 op = CEE_STSFLD;
10140 break;
10141 case CEE_LDFLDA:
10142 op = CEE_LDSFLDA;
10143 break;
10144 default:
10145 g_assert_not_reached ();
10147 is_instance = FALSE;
10150 context_used = mini_class_check_context_used (cfg, klass);
10152 /* INSTANCE CASE */
10154 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10155 if (op == CEE_STFLD) {
10156 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10157 UNVERIFIED;
10158 #ifndef DISABLE_REMOTING
10159 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10160 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10161 MonoInst *iargs [5];
10163 GSHAREDVT_FAILURE (op);
10165 iargs [0] = sp [0];
10166 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10167 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10168 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10169 field->offset);
10170 iargs [4] = sp [1];
10172 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10173 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10174 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10175 CHECK_CFG_EXCEPTION;
10176 g_assert (costs > 0);
10178 cfg->real_offset += 5;
10179 bblock = cfg->cbb;
10181 inline_costs += costs;
10182 } else {
10183 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10185 } else
10186 #endif
10188 MonoInst *store;
10190 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10192 if (mini_is_gsharedvt_klass (cfg, klass)) {
10193 MonoInst *offset_ins;
10195 context_used = mini_class_check_context_used (cfg, klass);
10197 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10198 dreg = alloc_ireg_mp (cfg);
10199 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10200 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10201 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10202 } else {
10203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10205 if (sp [0]->opcode != OP_LDADDR)
10206 store->flags |= MONO_INST_FAULT;
10208 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10209 /* insert call to write barrier */
10210 MonoInst *ptr;
10211 int dreg;
10213 dreg = alloc_ireg_mp (cfg);
10214 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10215 emit_write_barrier (cfg, ptr, sp [1]);
10218 store->flags |= ins_flag;
10220 ins_flag = 0;
10221 ip += 5;
10222 break;
10225 #ifndef DISABLE_REMOTING
10226 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10227 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10228 MonoInst *iargs [4];
10230 GSHAREDVT_FAILURE (op);
10232 iargs [0] = sp [0];
10233 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10234 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10235 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10236 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10237 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10238 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10239 CHECK_CFG_EXCEPTION;
10240 bblock = cfg->cbb;
10241 g_assert (costs > 0);
10243 cfg->real_offset += 5;
10245 *sp++ = iargs [0];
10247 inline_costs += costs;
10248 } else {
10249 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10250 *sp++ = ins;
10252 } else
10253 #endif
10254 if (is_instance) {
10255 if (sp [0]->type == STACK_VTYPE) {
10256 MonoInst *var;
10258 /* Have to compute the address of the variable */
10260 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10261 if (!var)
10262 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10263 else
10264 g_assert (var->klass == klass);
10266 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10267 sp [0] = ins;
10270 if (op == CEE_LDFLDA) {
10271 if (is_magic_tls_access (field)) {
10272 GSHAREDVT_FAILURE (*ip);
10273 ins = sp [0];
10274 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10275 } else {
10276 if (sp [0]->type == STACK_OBJ) {
10277 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10278 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10281 dreg = alloc_ireg_mp (cfg);
10283 if (mini_is_gsharedvt_klass (cfg, klass)) {
10284 MonoInst *offset_ins;
10286 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10287 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10288 } else {
10289 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10291 ins->klass = mono_class_from_mono_type (field->type);
10292 ins->type = STACK_MP;
10293 *sp++ = ins;
10295 } else {
10296 MonoInst *load;
10298 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10300 if (mini_is_gsharedvt_klass (cfg, klass)) {
10301 MonoInst *offset_ins;
10303 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10304 dreg = alloc_ireg_mp (cfg);
10305 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10306 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10307 } else {
10308 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10310 load->flags |= ins_flag;
10311 if (sp [0]->opcode != OP_LDADDR)
10312 load->flags |= MONO_INST_FAULT;
10313 *sp++ = load;
10317 if (is_instance) {
10318 ins_flag = 0;
10319 ip += 5;
10320 break;
10323 /* STATIC CASE */
10326 * We can only support shared generic static
10327 * field access on architectures where the
10328 * trampoline code has been extended to handle
10329 * the generic class init.
10331 #ifndef MONO_ARCH_VTABLE_REG
10332 GENERIC_SHARING_FAILURE (op);
10333 #endif
10335 context_used = mini_class_check_context_used (cfg, klass);
10337 ftype = mono_field_get_type (field);
10339 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10340 UNVERIFIED;
10342 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10343 * to be called here.
10345 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10346 mono_class_vtable (cfg->domain, klass);
10347 CHECK_TYPELOAD (klass);
10349 mono_domain_lock (cfg->domain);
10350 if (cfg->domain->special_static_fields)
10351 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10352 mono_domain_unlock (cfg->domain);
10354 is_special_static = mono_class_field_is_special_static (field);
10356 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10357 thread_ins = mono_get_thread_intrinsic (cfg);
10358 else
10359 thread_ins = NULL;
10361 /* Generate IR to compute the field address */
10362 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10364 * Fast access to TLS data
10365 * Inline version of get_thread_static_data () in
10366 * threads.c.
10368 guint32 offset;
10369 int idx, static_data_reg, array_reg, dreg;
10371 GSHAREDVT_FAILURE (op);
10373 // offset &= 0x7fffffff;
10374 // idx = (offset >> 24) - 1;
10375 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10376 MONO_ADD_INS (cfg->cbb, thread_ins);
10377 static_data_reg = alloc_ireg (cfg);
10378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10380 if (cfg->compile_aot) {
10381 int offset_reg, offset2_reg, idx_reg;
10383 /* For TLS variables, this will return the TLS offset */
10384 EMIT_NEW_SFLDACONST (cfg, ins, field);
10385 offset_reg = ins->dreg;
10386 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10387 idx_reg = alloc_ireg (cfg);
10388 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10389 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10391 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10392 array_reg = alloc_ireg (cfg);
10393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10394 offset2_reg = alloc_ireg (cfg);
10395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10396 dreg = alloc_ireg (cfg);
10397 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10398 } else {
10399 offset = (gsize)addr & 0x7fffffff;
10400 idx = (offset >> 24) - 1;
10402 array_reg = alloc_ireg (cfg);
10403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10404 dreg = alloc_ireg (cfg);
10405 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10407 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10408 (cfg->compile_aot && is_special_static) ||
10409 (context_used && is_special_static)) {
10410 MonoInst *iargs [2];
10412 g_assert (field->parent);
10413 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10414 if (context_used) {
10415 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10416 field, MONO_RGCTX_INFO_CLASS_FIELD);
10417 } else {
10418 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10420 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10421 } else if (context_used) {
10422 MonoInst *static_data;
10425 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10426 method->klass->name_space, method->klass->name, method->name,
10427 depth, field->offset);
10430 if (mono_class_needs_cctor_run (klass, method))
10431 emit_generic_class_init (cfg, klass);
10434 * The pointer we're computing here is
10436 * super_info.static_data + field->offset
10438 static_data = emit_get_rgctx_klass (cfg, context_used,
10439 klass, MONO_RGCTX_INFO_STATIC_DATA);
10441 if (mini_is_gsharedvt_klass (cfg, klass)) {
10442 MonoInst *offset_ins;
10444 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10445 dreg = alloc_ireg_mp (cfg);
10446 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10447 } else if (field->offset == 0) {
10448 ins = static_data;
10449 } else {
10450 int addr_reg = mono_alloc_preg (cfg);
10451 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10453 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10454 MonoInst *iargs [2];
10456 g_assert (field->parent);
10457 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10458 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10459 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10460 } else {
10461 MonoVTable *vtable = NULL;
10463 if (!cfg->compile_aot)
10464 vtable = mono_class_vtable (cfg->domain, klass);
10465 CHECK_TYPELOAD (klass);
10467 if (!addr) {
10468 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10469 if (!(g_slist_find (class_inits, klass))) {
10470 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10471 if (cfg->verbose_level > 2)
10472 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10473 class_inits = g_slist_prepend (class_inits, klass);
10475 } else {
10476 if (cfg->run_cctors) {
10477 MonoException *ex;
10478 /* This makes so that inline cannot trigger */
10479 /* .cctors: too many apps depend on them */
10480 /* running with a specific order... */
10481 g_assert (vtable);
10482 if (! vtable->initialized)
10483 INLINE_FAILURE ("class init");
10484 ex = mono_runtime_class_init_full (vtable, FALSE);
10485 if (ex) {
10486 set_exception_object (cfg, ex);
10487 goto exception_exit;
10491 if (cfg->compile_aot)
10492 EMIT_NEW_SFLDACONST (cfg, ins, field);
10493 else {
10494 g_assert (vtable);
10495 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10496 g_assert (addr);
10497 EMIT_NEW_PCONST (cfg, ins, addr);
10499 } else {
10500 MonoInst *iargs [1];
10501 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10502 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10506 /* Generate IR to do the actual load/store operation */
10508 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10509 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10510 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10511 emit_memory_barrier (cfg, FullBarrier);
10514 if (op == CEE_LDSFLDA) {
10515 ins->klass = mono_class_from_mono_type (ftype);
10516 ins->type = STACK_PTR;
10517 *sp++ = ins;
10518 } else if (op == CEE_STSFLD) {
10519 MonoInst *store;
10521 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10522 store->flags |= ins_flag;
10523 } else {
10524 gboolean is_const = FALSE;
10525 MonoVTable *vtable = NULL;
10526 gpointer addr = NULL;
10528 if (!context_used) {
10529 vtable = mono_class_vtable (cfg->domain, klass);
10530 CHECK_TYPELOAD (klass);
10532 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10533 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10534 int ro_type = ftype->type;
10535 if (!addr)
10536 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10537 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10538 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10541 GSHAREDVT_FAILURE (op);
10543 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10544 is_const = TRUE;
10545 switch (ro_type) {
10546 case MONO_TYPE_BOOLEAN:
10547 case MONO_TYPE_U1:
10548 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10549 sp++;
10550 break;
10551 case MONO_TYPE_I1:
10552 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10553 sp++;
10554 break;
10555 case MONO_TYPE_CHAR:
10556 case MONO_TYPE_U2:
10557 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10558 sp++;
10559 break;
10560 case MONO_TYPE_I2:
10561 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10562 sp++;
10563 break;
10564 break;
10565 case MONO_TYPE_I4:
10566 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10567 sp++;
10568 break;
10569 case MONO_TYPE_U4:
10570 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10571 sp++;
10572 break;
10573 case MONO_TYPE_I:
10574 case MONO_TYPE_U:
10575 case MONO_TYPE_PTR:
10576 case MONO_TYPE_FNPTR:
10577 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10578 type_to_eval_stack_type ((cfg), field->type, *sp);
10579 sp++;
10580 break;
10581 case MONO_TYPE_STRING:
10582 case MONO_TYPE_OBJECT:
10583 case MONO_TYPE_CLASS:
10584 case MONO_TYPE_SZARRAY:
10585 case MONO_TYPE_ARRAY:
10586 if (!mono_gc_is_moving ()) {
10587 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10588 type_to_eval_stack_type ((cfg), field->type, *sp);
10589 sp++;
10590 } else {
10591 is_const = FALSE;
10593 break;
10594 case MONO_TYPE_I8:
10595 case MONO_TYPE_U8:
10596 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10597 sp++;
10598 break;
10599 case MONO_TYPE_R4:
10600 case MONO_TYPE_R8:
10601 case MONO_TYPE_VALUETYPE:
10602 default:
10603 is_const = FALSE;
10604 break;
10608 if (!is_const) {
10609 MonoInst *load;
10611 CHECK_STACK_OVF (1);
10613 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10614 load->flags |= ins_flag;
10615 ins_flag = 0;
10616 *sp++ = load;
10620 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10621 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10622 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10623 emit_memory_barrier (cfg, FullBarrier);
10626 ins_flag = 0;
10627 ip += 5;
10628 break;
10630 case CEE_STOBJ:
10631 CHECK_STACK (2);
10632 sp -= 2;
10633 CHECK_OPSIZE (5);
10634 token = read32 (ip + 1);
10635 klass = mini_get_class (method, token, generic_context);
10636 CHECK_TYPELOAD (klass);
10637 if (ins_flag & MONO_INST_VOLATILE) {
10638 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10639 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10640 emit_memory_barrier (cfg, FullBarrier);
10642 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10643 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10644 ins->flags |= ins_flag;
10645 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10646 generic_class_is_reference_type (cfg, klass)) {
10647 /* insert call to write barrier */
10648 emit_write_barrier (cfg, sp [0], sp [1]);
10650 ins_flag = 0;
10651 ip += 5;
10652 inline_costs += 1;
10653 break;
10656 * Array opcodes
10658 case CEE_NEWARR: {
10659 MonoInst *len_ins;
10660 const char *data_ptr;
10661 int data_size = 0;
10662 guint32 field_token;
10664 CHECK_STACK (1);
10665 --sp;
10667 CHECK_OPSIZE (5);
10668 token = read32 (ip + 1);
10670 klass = mini_get_class (method, token, generic_context);
10671 CHECK_TYPELOAD (klass);
10673 context_used = mini_class_check_context_used (cfg, klass);
10675 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10676 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10677 ins->sreg1 = sp [0]->dreg;
10678 ins->type = STACK_I4;
10679 ins->dreg = alloc_ireg (cfg);
10680 MONO_ADD_INS (cfg->cbb, ins);
10681 *sp = mono_decompose_opcode (cfg, ins);
10684 if (context_used) {
10685 MonoInst *args [3];
10686 MonoClass *array_class = mono_array_class_get (klass, 1);
10687 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10689 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10691 /* vtable */
10692 args [0] = emit_get_rgctx_klass (cfg, context_used,
10693 array_class, MONO_RGCTX_INFO_VTABLE);
10694 /* array len */
10695 args [1] = sp [0];
10697 if (managed_alloc)
10698 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10699 else
10700 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10701 } else {
10702 if (cfg->opt & MONO_OPT_SHARED) {
10703 /* Decompose now to avoid problems with references to the domainvar */
10704 MonoInst *iargs [3];
10706 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10707 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10708 iargs [2] = sp [0];
10710 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10711 } else {
10712 /* Decompose later since it is needed by abcrem */
10713 MonoClass *array_type = mono_array_class_get (klass, 1);
10714 mono_class_vtable (cfg->domain, array_type);
10715 CHECK_TYPELOAD (array_type);
10717 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10718 ins->dreg = alloc_ireg_ref (cfg);
10719 ins->sreg1 = sp [0]->dreg;
10720 ins->inst_newa_class = klass;
10721 ins->type = STACK_OBJ;
10722 ins->klass = array_type;
10723 MONO_ADD_INS (cfg->cbb, ins);
10724 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10725 cfg->cbb->has_array_access = TRUE;
10727 /* Needed so mono_emit_load_get_addr () gets called */
10728 mono_get_got_var (cfg);
10732 len_ins = sp [0];
10733 ip += 5;
10734 *sp++ = ins;
10735 inline_costs += 1;
10738 * we inline/optimize the initialization sequence if possible.
10739 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10740 * for small sizes open code the memcpy
10741 * ensure the rva field is big enough
10743 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10744 MonoMethod *memcpy_method = get_memcpy_method ();
10745 MonoInst *iargs [3];
10746 int add_reg = alloc_ireg_mp (cfg);
10748 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10749 if (cfg->compile_aot) {
10750 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10751 } else {
10752 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10754 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10755 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10756 ip += 11;
10759 break;
10761 case CEE_LDLEN:
10762 CHECK_STACK (1);
10763 --sp;
10764 if (sp [0]->type != STACK_OBJ)
10765 UNVERIFIED;
10767 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10768 ins->dreg = alloc_preg (cfg);
10769 ins->sreg1 = sp [0]->dreg;
10770 ins->type = STACK_I4;
10771 /* This flag will be inherited by the decomposition */
10772 ins->flags |= MONO_INST_FAULT;
10773 MONO_ADD_INS (cfg->cbb, ins);
10774 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10775 cfg->cbb->has_array_access = TRUE;
10776 ip ++;
10777 *sp++ = ins;
10778 break;
10779 case CEE_LDELEMA:
10780 CHECK_STACK (2);
10781 sp -= 2;
10782 CHECK_OPSIZE (5);
10783 if (sp [0]->type != STACK_OBJ)
10784 UNVERIFIED;
10786 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10788 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10789 CHECK_TYPELOAD (klass);
10790 /* we need to make sure that this array is exactly the type it needs
10791 * to be for correctness. the wrappers are lax with their usage
10792 * so we need to ignore them here
10794 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10795 MonoClass *array_class = mono_array_class_get (klass, 1);
10796 mini_emit_check_array_type (cfg, sp [0], array_class);
10797 CHECK_TYPELOAD (array_class);
10800 readonly = FALSE;
10801 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10802 *sp++ = ins;
10803 ip += 5;
10804 break;
10805 case CEE_LDELEM:
10806 case CEE_LDELEM_I1:
10807 case CEE_LDELEM_U1:
10808 case CEE_LDELEM_I2:
10809 case CEE_LDELEM_U2:
10810 case CEE_LDELEM_I4:
10811 case CEE_LDELEM_U4:
10812 case CEE_LDELEM_I8:
10813 case CEE_LDELEM_I:
10814 case CEE_LDELEM_R4:
10815 case CEE_LDELEM_R8:
10816 case CEE_LDELEM_REF: {
10817 MonoInst *addr;
10819 CHECK_STACK (2);
10820 sp -= 2;
10822 if (*ip == CEE_LDELEM) {
10823 CHECK_OPSIZE (5);
10824 token = read32 (ip + 1);
10825 klass = mini_get_class (method, token, generic_context);
10826 CHECK_TYPELOAD (klass);
10827 mono_class_init (klass);
10829 else
10830 klass = array_access_to_klass (*ip);
10832 if (sp [0]->type != STACK_OBJ)
10833 UNVERIFIED;
10835 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10837 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10838 // FIXME-VT: OP_ICONST optimization
10839 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10840 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10841 ins->opcode = OP_LOADV_MEMBASE;
10842 } else if (sp [1]->opcode == OP_ICONST) {
10843 int array_reg = sp [0]->dreg;
10844 int index_reg = sp [1]->dreg;
10845 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10847 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10848 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10849 } else {
10850 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10851 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10853 *sp++ = ins;
10854 if (*ip == CEE_LDELEM)
10855 ip += 5;
10856 else
10857 ++ip;
10858 break;
10860 case CEE_STELEM_I:
10861 case CEE_STELEM_I1:
10862 case CEE_STELEM_I2:
10863 case CEE_STELEM_I4:
10864 case CEE_STELEM_I8:
10865 case CEE_STELEM_R4:
10866 case CEE_STELEM_R8:
10867 case CEE_STELEM_REF:
10868 case CEE_STELEM: {
10869 CHECK_STACK (3);
10870 sp -= 3;
10872 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10874 if (*ip == CEE_STELEM) {
10875 CHECK_OPSIZE (5);
10876 token = read32 (ip + 1);
10877 klass = mini_get_class (method, token, generic_context);
10878 CHECK_TYPELOAD (klass);
10879 mono_class_init (klass);
10881 else
10882 klass = array_access_to_klass (*ip);
10884 if (sp [0]->type != STACK_OBJ)
10885 UNVERIFIED;
10887 emit_array_store (cfg, klass, sp, TRUE);
10889 if (*ip == CEE_STELEM)
10890 ip += 5;
10891 else
10892 ++ip;
10893 inline_costs += 1;
10894 break;
10896 case CEE_CKFINITE: {
10897 CHECK_STACK (1);
10898 --sp;
10900 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10901 ins->sreg1 = sp [0]->dreg;
10902 ins->dreg = alloc_freg (cfg);
10903 ins->type = STACK_R8;
10904 MONO_ADD_INS (bblock, ins);
10906 *sp++ = mono_decompose_opcode (cfg, ins);
10908 ++ip;
10909 break;
10911 case CEE_REFANYVAL: {
10912 MonoInst *src_var, *src;
10914 int klass_reg = alloc_preg (cfg);
10915 int dreg = alloc_preg (cfg);
10917 GSHAREDVT_FAILURE (*ip);
10919 CHECK_STACK (1);
10920 MONO_INST_NEW (cfg, ins, *ip);
10921 --sp;
10922 CHECK_OPSIZE (5);
10923 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10924 CHECK_TYPELOAD (klass);
10925 mono_class_init (klass);
10927 context_used = mini_class_check_context_used (cfg, klass);
10929 // FIXME:
10930 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10931 if (!src_var)
10932 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10933 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
10936 if (context_used) {
10937 MonoInst *klass_ins;
10939 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10940 klass, MONO_RGCTX_INFO_KLASS);
10942 // FIXME:
10943 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10944 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10945 } else {
10946 mini_emit_class_check (cfg, klass_reg, klass);
10948 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
10949 ins->type = STACK_MP;
10950 *sp++ = ins;
10951 ip += 5;
10952 break;
10954 case CEE_MKREFANY: {
10955 MonoInst *loc, *addr;
10957 GSHAREDVT_FAILURE (*ip);
10959 CHECK_STACK (1);
10960 MONO_INST_NEW (cfg, ins, *ip);
10961 --sp;
10962 CHECK_OPSIZE (5);
10963 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10964 CHECK_TYPELOAD (klass);
10965 mono_class_init (klass);
10967 context_used = mini_class_check_context_used (cfg, klass);
10969 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10970 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10972 if (context_used) {
10973 MonoInst *const_ins;
10974 int type_reg = alloc_preg (cfg);
10976 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10977 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
10979 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10980 } else if (cfg->compile_aot) {
10981 int const_reg = alloc_preg (cfg);
10982 int type_reg = alloc_preg (cfg);
10984 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10985 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
10987 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10988 } else {
10989 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10990 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10994 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10995 ins->type = STACK_VTYPE;
10996 ins->klass = mono_defaults.typed_reference_class;
10997 *sp++ = ins;
10998 ip += 5;
10999 break;
11001 case CEE_LDTOKEN: {
11002 gpointer handle;
11003 MonoClass *handle_class;
11005 CHECK_STACK_OVF (1);
11007 CHECK_OPSIZE (5);
11008 n = read32 (ip + 1);
11010 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11011 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11012 handle = mono_method_get_wrapper_data (method, n);
11013 handle_class = mono_method_get_wrapper_data (method, n + 1);
11014 if (handle_class == mono_defaults.typehandle_class)
11015 handle = &((MonoClass*)handle)->byval_arg;
11017 else {
11018 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11020 if (!handle)
11021 LOAD_ERROR;
11022 mono_class_init (handle_class);
11023 if (cfg->generic_sharing_context) {
11024 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11025 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11026 /* This case handles ldtoken
11027 of an open type, like for
11028 typeof(Gen<>). */
11029 context_used = 0;
11030 } else if (handle_class == mono_defaults.typehandle_class) {
11031 /* If we get a MONO_TYPE_CLASS
11032 then we need to provide the
11033 open type, not an
11034 instantiation of it. */
11035 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
11036 context_used = 0;
11037 else
11038 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11039 } else if (handle_class == mono_defaults.fieldhandle_class)
11040 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11041 else if (handle_class == mono_defaults.methodhandle_class)
11042 context_used = mini_method_check_context_used (cfg, handle);
11043 else
11044 g_assert_not_reached ();
11047 if ((cfg->opt & MONO_OPT_SHARED) &&
11048 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11049 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11050 MonoInst *addr, *vtvar, *iargs [3];
11051 int method_context_used;
11053 method_context_used = mini_method_check_context_used (cfg, method);
11055 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11057 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11058 EMIT_NEW_ICONST (cfg, iargs [1], n);
11059 if (method_context_used) {
11060 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11061 method, MONO_RGCTX_INFO_METHOD);
11062 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11063 } else {
11064 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11065 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11067 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11069 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11071 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11072 } else {
11073 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11074 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11075 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11076 (cmethod->klass == mono_defaults.systemtype_class) &&
11077 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11078 MonoClass *tclass = mono_class_from_mono_type (handle);
11080 mono_class_init (tclass);
11081 if (context_used) {
11082 ins = emit_get_rgctx_klass (cfg, context_used,
11083 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11084 } else if (cfg->compile_aot) {
11085 if (method->wrapper_type) {
11086 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11087 /* Special case for static synchronized wrappers */
11088 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11089 } else {
11090 /* FIXME: n is not a normal token */
11091 DISABLE_AOT (cfg);
11092 EMIT_NEW_PCONST (cfg, ins, NULL);
11094 } else {
11095 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11097 } else {
11098 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11100 ins->type = STACK_OBJ;
11101 ins->klass = cmethod->klass;
11102 ip += 5;
11103 } else {
11104 MonoInst *addr, *vtvar;
11106 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11108 if (context_used) {
11109 if (handle_class == mono_defaults.typehandle_class) {
11110 ins = emit_get_rgctx_klass (cfg, context_used,
11111 mono_class_from_mono_type (handle),
11112 MONO_RGCTX_INFO_TYPE);
11113 } else if (handle_class == mono_defaults.methodhandle_class) {
11114 ins = emit_get_rgctx_method (cfg, context_used,
11115 handle, MONO_RGCTX_INFO_METHOD);
11116 } else if (handle_class == mono_defaults.fieldhandle_class) {
11117 ins = emit_get_rgctx_field (cfg, context_used,
11118 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11119 } else {
11120 g_assert_not_reached ();
11122 } else if (cfg->compile_aot) {
11123 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11124 } else {
11125 EMIT_NEW_PCONST (cfg, ins, handle);
11127 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11129 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11133 *sp++ = ins;
11134 ip += 5;
11135 break;
11137 case CEE_THROW:
11138 CHECK_STACK (1);
11139 MONO_INST_NEW (cfg, ins, OP_THROW);
11140 --sp;
11141 ins->sreg1 = sp [0]->dreg;
11142 ip++;
11143 bblock->out_of_line = TRUE;
11144 MONO_ADD_INS (bblock, ins);
11145 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11146 MONO_ADD_INS (bblock, ins);
11147 sp = stack_start;
11149 link_bblock (cfg, bblock, end_bblock);
11150 start_new_bblock = 1;
11151 break;
11152 case CEE_ENDFINALLY:
11153 /* mono_save_seq_point_info () depends on this */
11154 if (sp != stack_start)
11155 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11156 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11157 MONO_ADD_INS (bblock, ins);
11158 ip++;
11159 start_new_bblock = 1;
11162 * Control will leave the method so empty the stack, otherwise
11163 * the next basic block will start with a nonempty stack.
11165 while (sp != stack_start) {
11166 sp--;
11168 break;
11169 case CEE_LEAVE:
11170 case CEE_LEAVE_S: {
11171 GList *handlers;
11173 if (*ip == CEE_LEAVE) {
11174 CHECK_OPSIZE (5);
11175 target = ip + 5 + (gint32)read32(ip + 1);
11176 } else {
11177 CHECK_OPSIZE (2);
11178 target = ip + 2 + (signed char)(ip [1]);
11181 /* empty the stack */
11182 while (sp != stack_start) {
11183 sp--;
11187 * If this leave statement is in a catch block, check for a
11188 * pending exception, and rethrow it if necessary.
11189 * We avoid doing this in runtime invoke wrappers, since those are called
11190 * by native code which expects the wrapper to catch all exceptions.
11192 for (i = 0; i < header->num_clauses; ++i) {
11193 MonoExceptionClause *clause = &header->clauses [i];
11196 * Use <= in the final comparison to handle clauses with multiple
11197 * leave statements, like in bug #78024.
11198 * The ordering of the exception clauses guarantees that we find the
11199 * innermost clause.
11201 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11202 MonoInst *exc_ins;
11203 MonoBasicBlock *dont_throw;
11206 MonoInst *load;
11208 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11211 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11213 NEW_BBLOCK (cfg, dont_throw);
11216 * Currently, we always rethrow the abort exception, despite the
11217 * fact that this is not correct. See thread6.cs for an example.
11218 * But propagating the abort exception is more important than
11219 * getting the semantics right.
11221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11223 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11225 MONO_START_BB (cfg, dont_throw);
11226 bblock = cfg->cbb;
11230 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11231 GList *tmp;
11232 MonoExceptionClause *clause;
11234 for (tmp = handlers; tmp; tmp = tmp->next) {
11235 clause = tmp->data;
11236 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11237 g_assert (tblock);
11238 link_bblock (cfg, bblock, tblock);
11239 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11240 ins->inst_target_bb = tblock;
11241 ins->inst_eh_block = clause;
11242 MONO_ADD_INS (bblock, ins);
11243 bblock->has_call_handler = 1;
11244 if (COMPILE_LLVM (cfg)) {
11245 MonoBasicBlock *target_bb;
11248 * Link the finally bblock with the target, since it will
11249 * conceptually branch there.
11250 * FIXME: Have to link the bblock containing the endfinally.
11252 GET_BBLOCK (cfg, target_bb, target);
11253 link_bblock (cfg, tblock, target_bb);
11256 g_list_free (handlers);
11259 MONO_INST_NEW (cfg, ins, OP_BR);
11260 MONO_ADD_INS (bblock, ins);
11261 GET_BBLOCK (cfg, tblock, target);
11262 link_bblock (cfg, bblock, tblock);
11263 ins->inst_target_bb = tblock;
11264 start_new_bblock = 1;
11266 if (*ip == CEE_LEAVE)
11267 ip += 5;
11268 else
11269 ip += 2;
11271 break;
11275 * Mono specific opcodes
11277 case MONO_CUSTOM_PREFIX: {
11279 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11281 CHECK_OPSIZE (2);
11282 switch (ip [1]) {
11283 case CEE_MONO_ICALL: {
11284 gpointer func;
11285 MonoJitICallInfo *info;
11287 token = read32 (ip + 2);
11288 func = mono_method_get_wrapper_data (method, token);
11289 info = mono_find_jit_icall_by_addr (func);
11290 if (!info)
11291 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11292 g_assert (info);
11294 CHECK_STACK (info->sig->param_count);
11295 sp -= info->sig->param_count;
11297 ins = mono_emit_jit_icall (cfg, info->func, sp);
11298 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11299 *sp++ = ins;
11301 ip += 6;
11302 inline_costs += 10 * num_calls++;
11304 break;
11306 case CEE_MONO_LDPTR: {
11307 gpointer ptr;
11309 CHECK_STACK_OVF (1);
11310 CHECK_OPSIZE (6);
11311 token = read32 (ip + 2);
11313 ptr = mono_method_get_wrapper_data (method, token);
11314 /* FIXME: Generalize this */
11315 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11316 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11317 *sp++ = ins;
11318 ip += 6;
11319 break;
11321 EMIT_NEW_PCONST (cfg, ins, ptr);
11322 *sp++ = ins;
11323 ip += 6;
11324 inline_costs += 10 * num_calls++;
11325 /* Can't embed random pointers into AOT code */
11326 DISABLE_AOT (cfg);
11327 break;
11329 case CEE_MONO_JIT_ICALL_ADDR: {
11330 MonoJitICallInfo *callinfo;
11331 gpointer ptr;
11333 CHECK_STACK_OVF (1);
11334 CHECK_OPSIZE (6);
11335 token = read32 (ip + 2);
11337 ptr = mono_method_get_wrapper_data (method, token);
11338 callinfo = mono_find_jit_icall_by_addr (ptr);
11339 g_assert (callinfo);
11340 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11341 *sp++ = ins;
11342 ip += 6;
11343 inline_costs += 10 * num_calls++;
11344 break;
11346 case CEE_MONO_ICALL_ADDR: {
11347 MonoMethod *cmethod;
11348 gpointer ptr;
11350 CHECK_STACK_OVF (1);
11351 CHECK_OPSIZE (6);
11352 token = read32 (ip + 2);
11354 cmethod = mono_method_get_wrapper_data (method, token);
11356 if (cfg->compile_aot) {
11357 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11358 } else {
11359 ptr = mono_lookup_internal_call (cmethod);
11360 g_assert (ptr);
11361 EMIT_NEW_PCONST (cfg, ins, ptr);
11363 *sp++ = ins;
11364 ip += 6;
11365 break;
11367 case CEE_MONO_VTADDR: {
11368 MonoInst *src_var, *src;
11370 CHECK_STACK (1);
11371 --sp;
11373 // FIXME:
11374 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11375 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11376 *sp++ = src;
11377 ip += 2;
11378 break;
11380 case CEE_MONO_NEWOBJ: {
11381 MonoInst *iargs [2];
11383 CHECK_STACK_OVF (1);
11384 CHECK_OPSIZE (6);
11385 token = read32 (ip + 2);
11386 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11387 mono_class_init (klass);
11388 NEW_DOMAINCONST (cfg, iargs [0]);
11389 MONO_ADD_INS (cfg->cbb, iargs [0]);
11390 NEW_CLASSCONST (cfg, iargs [1], klass);
11391 MONO_ADD_INS (cfg->cbb, iargs [1]);
11392 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11393 ip += 6;
11394 inline_costs += 10 * num_calls++;
11395 break;
11397 case CEE_MONO_OBJADDR:
11398 CHECK_STACK (1);
11399 --sp;
11400 MONO_INST_NEW (cfg, ins, OP_MOVE);
11401 ins->dreg = alloc_ireg_mp (cfg);
11402 ins->sreg1 = sp [0]->dreg;
11403 ins->type = STACK_MP;
11404 MONO_ADD_INS (cfg->cbb, ins);
11405 *sp++ = ins;
11406 ip += 2;
11407 break;
11408 case CEE_MONO_LDNATIVEOBJ:
11410 * Similar to LDOBJ, but instead load the unmanaged
11411 * representation of the vtype to the stack.
11413 CHECK_STACK (1);
11414 CHECK_OPSIZE (6);
11415 --sp;
11416 token = read32 (ip + 2);
11417 klass = mono_method_get_wrapper_data (method, token);
11418 g_assert (klass->valuetype);
11419 mono_class_init (klass);
11422 MonoInst *src, *dest, *temp;
11424 src = sp [0];
11425 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11426 temp->backend.is_pinvoke = 1;
11427 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11428 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11430 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11431 dest->type = STACK_VTYPE;
11432 dest->klass = klass;
11434 *sp ++ = dest;
11435 ip += 6;
11437 break;
11438 case CEE_MONO_RETOBJ: {
11440 * Same as RET, but return the native representation of a vtype
11441 * to the caller.
11443 g_assert (cfg->ret);
11444 g_assert (mono_method_signature (method)->pinvoke);
11445 CHECK_STACK (1);
11446 --sp;
11448 CHECK_OPSIZE (6);
11449 token = read32 (ip + 2);
11450 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11452 if (!cfg->vret_addr) {
11453 g_assert (cfg->ret_var_is_local);
11455 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11456 } else {
11457 EMIT_NEW_RETLOADA (cfg, ins);
11459 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11461 if (sp != stack_start)
11462 UNVERIFIED;
11464 MONO_INST_NEW (cfg, ins, OP_BR);
11465 ins->inst_target_bb = end_bblock;
11466 MONO_ADD_INS (bblock, ins);
11467 link_bblock (cfg, bblock, end_bblock);
11468 start_new_bblock = 1;
11469 ip += 6;
11470 break;
11472 case CEE_MONO_CISINST:
11473 case CEE_MONO_CCASTCLASS: {
11474 int token;
11475 CHECK_STACK (1);
11476 --sp;
11477 CHECK_OPSIZE (6);
11478 token = read32 (ip + 2);
11479 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11480 if (ip [1] == CEE_MONO_CISINST)
11481 ins = handle_cisinst (cfg, klass, sp [0]);
11482 else
11483 ins = handle_ccastclass (cfg, klass, sp [0]);
11484 bblock = cfg->cbb;
11485 *sp++ = ins;
11486 ip += 6;
11487 break;
11489 case CEE_MONO_SAVE_LMF:
11490 case CEE_MONO_RESTORE_LMF:
11491 #ifdef MONO_ARCH_HAVE_LMF_OPS
11492 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11493 MONO_ADD_INS (bblock, ins);
11494 cfg->need_lmf_area = TRUE;
11495 #endif
11496 ip += 2;
11497 break;
11498 case CEE_MONO_CLASSCONST:
11499 CHECK_STACK_OVF (1);
11500 CHECK_OPSIZE (6);
11501 token = read32 (ip + 2);
11502 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11503 *sp++ = ins;
11504 ip += 6;
11505 inline_costs += 10 * num_calls++;
11506 break;
11507 case CEE_MONO_NOT_TAKEN:
11508 bblock->out_of_line = TRUE;
11509 ip += 2;
11510 break;
11511 case CEE_MONO_TLS: {
11512 int key;
11514 CHECK_STACK_OVF (1);
11515 CHECK_OPSIZE (6);
11516 key = (gint32)read32 (ip + 2);
11517 g_assert (key < TLS_KEY_NUM);
11519 ins = mono_create_tls_get (cfg, key);
11520 if (!ins) {
11521 if (cfg->compile_aot) {
11522 DISABLE_AOT (cfg);
11523 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11524 ins->dreg = alloc_preg (cfg);
11525 ins->type = STACK_PTR;
11526 } else {
11527 g_assert_not_reached ();
11530 ins->type = STACK_PTR;
11531 MONO_ADD_INS (bblock, ins);
11532 *sp++ = ins;
11533 ip += 6;
11534 break;
11536 case CEE_MONO_DYN_CALL: {
11537 MonoCallInst *call;
11539 /* It would be easier to call a trampoline, but that would put an
11540 * extra frame on the stack, confusing exception handling. So
11541 * implement it inline using an opcode for now.
11544 if (!cfg->dyn_call_var) {
11545 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11546 /* prevent it from being register allocated */
11547 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11550 /* Has to use a call inst since it local regalloc expects it */
11551 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11552 ins = (MonoInst*)call;
11553 sp -= 2;
11554 ins->sreg1 = sp [0]->dreg;
11555 ins->sreg2 = sp [1]->dreg;
11556 MONO_ADD_INS (bblock, ins);
11558 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11560 ip += 2;
11561 inline_costs += 10 * num_calls++;
11563 break;
11565 case CEE_MONO_MEMORY_BARRIER: {
11566 CHECK_OPSIZE (5);
11567 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11568 ip += 5;
11569 break;
11571 case CEE_MONO_JIT_ATTACH: {
11572 MonoInst *args [16];
11573 MonoInst *ad_ins, *lmf_ins;
11574 MonoBasicBlock *next_bb = NULL;
11576 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11578 EMIT_NEW_PCONST (cfg, ins, NULL);
11579 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11581 #if TARGET_WIN32
11582 ad_ins = NULL;
11583 lmf_ins = NULL;
11584 #else
11585 ad_ins = mono_get_domain_intrinsic (cfg);
11586 lmf_ins = mono_get_lmf_intrinsic (cfg);
11587 #endif
11589 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11590 NEW_BBLOCK (cfg, next_bb);
11592 MONO_ADD_INS (cfg->cbb, ad_ins);
11593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11596 MONO_ADD_INS (cfg->cbb, lmf_ins);
11597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11598 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11601 if (cfg->compile_aot) {
11602 /* AOT code is only used in the root domain */
11603 EMIT_NEW_PCONST (cfg, args [0], NULL);
11604 } else {
11605 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11607 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11608 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11610 if (next_bb) {
11611 MONO_START_BB (cfg, next_bb);
11612 bblock = cfg->cbb;
11614 ip += 2;
11615 break;
11617 case CEE_MONO_JIT_DETACH: {
11618 MonoInst *args [16];
11620 /* Restore the original domain */
11621 dreg = alloc_ireg (cfg);
11622 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11623 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11624 ip += 2;
11625 break;
11627 default:
11628 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11629 break;
11631 break;
11634 case CEE_PREFIX1: {
11635 CHECK_OPSIZE (2);
11636 switch (ip [1]) {
11637 case CEE_ARGLIST: {
11638 /* somewhat similar to LDTOKEN */
11639 MonoInst *addr, *vtvar;
11640 CHECK_STACK_OVF (1);
11641 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11643 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11644 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11646 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11647 ins->type = STACK_VTYPE;
11648 ins->klass = mono_defaults.argumenthandle_class;
11649 *sp++ = ins;
11650 ip += 2;
11651 break;
11653 case CEE_CEQ:
11654 case CEE_CGT:
11655 case CEE_CGT_UN:
11656 case CEE_CLT:
11657 case CEE_CLT_UN: {
11658 MonoInst *cmp;
11659 CHECK_STACK (2);
11661 * The following transforms:
11662 * CEE_CEQ into OP_CEQ
11663 * CEE_CGT into OP_CGT
11664 * CEE_CGT_UN into OP_CGT_UN
11665 * CEE_CLT into OP_CLT
11666 * CEE_CLT_UN into OP_CLT_UN
11668 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11670 MONO_INST_NEW (cfg, ins, cmp->opcode);
11671 sp -= 2;
11672 cmp->sreg1 = sp [0]->dreg;
11673 cmp->sreg2 = sp [1]->dreg;
11674 type_from_op (cmp, sp [0], sp [1]);
11675 CHECK_TYPE (cmp);
11676 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11677 cmp->opcode = OP_LCOMPARE;
11678 else if (sp [0]->type == STACK_R8)
11679 cmp->opcode = OP_FCOMPARE;
11680 else
11681 cmp->opcode = OP_ICOMPARE;
11682 MONO_ADD_INS (bblock, cmp);
11683 ins->type = STACK_I4;
11684 ins->dreg = alloc_dreg (cfg, ins->type);
11685 type_from_op (ins, sp [0], sp [1]);
11687 if (cmp->opcode == OP_FCOMPARE) {
11689 * The backends expect the fceq opcodes to do the
11690 * comparison too.
11692 cmp->opcode = OP_NOP;
11693 ins->sreg1 = cmp->sreg1;
11694 ins->sreg2 = cmp->sreg2;
11696 MONO_ADD_INS (bblock, ins);
11697 *sp++ = ins;
11698 ip += 2;
11699 break;
11701 case CEE_LDFTN: {
11702 MonoInst *argconst;
11703 MonoMethod *cil_method;
11705 CHECK_STACK_OVF (1);
11706 CHECK_OPSIZE (6);
11707 n = read32 (ip + 2);
11708 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11709 if (!cmethod || mono_loader_get_last_error ())
11710 LOAD_ERROR;
11711 mono_class_init (cmethod->klass);
11713 mono_save_token_info (cfg, image, n, cmethod);
11715 context_used = mini_method_check_context_used (cfg, cmethod);
11717 cil_method = cmethod;
11718 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11719 METHOD_ACCESS_FAILURE;
11721 if (mono_security_cas_enabled ()) {
11722 if (check_linkdemand (cfg, method, cmethod))
11723 INLINE_FAILURE ("linkdemand");
11724 CHECK_CFG_EXCEPTION;
11725 } else if (mono_security_core_clr_enabled ()) {
11726 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11730 * Optimize the common case of ldftn+delegate creation
11732 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11733 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11734 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11735 MonoInst *target_ins, *handle_ins;
11736 MonoMethod *invoke;
11737 int invoke_context_used;
11739 invoke = mono_get_delegate_invoke (ctor_method->klass);
11740 if (!invoke || !mono_method_signature (invoke))
11741 LOAD_ERROR;
11743 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11745 target_ins = sp [-1];
11747 if (mono_security_core_clr_enabled ())
11748 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11750 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11751 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11752 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11754 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11758 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11759 /* FIXME: SGEN support */
11760 if (invoke_context_used == 0) {
11761 ip += 6;
11762 if (cfg->verbose_level > 3)
11763 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11764 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used))) {
11765 sp --;
11766 *sp = handle_ins;
11767 CHECK_CFG_EXCEPTION;
11768 ip += 5;
11769 sp ++;
11770 break;
11772 ip -= 6;
11774 #endif
11778 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11779 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11780 *sp++ = ins;
11782 ip += 6;
11783 inline_costs += 10 * num_calls++;
11784 break;
11786 case CEE_LDVIRTFTN: {
11787 MonoInst *args [2];
11789 CHECK_STACK (1);
11790 CHECK_OPSIZE (6);
11791 n = read32 (ip + 2);
11792 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11793 if (!cmethod || mono_loader_get_last_error ())
11794 LOAD_ERROR;
11795 mono_class_init (cmethod->klass);
11797 context_used = mini_method_check_context_used (cfg, cmethod);
11799 if (mono_security_cas_enabled ()) {
11800 if (check_linkdemand (cfg, method, cmethod))
11801 INLINE_FAILURE ("linkdemand");
11802 CHECK_CFG_EXCEPTION;
11803 } else if (mono_security_core_clr_enabled ()) {
11804 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11807 --sp;
11808 args [0] = *sp;
11810 args [1] = emit_get_rgctx_method (cfg, context_used,
11811 cmethod, MONO_RGCTX_INFO_METHOD);
11813 if (context_used)
11814 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11815 else
11816 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11818 ip += 6;
11819 inline_costs += 10 * num_calls++;
11820 break;
11822 case CEE_LDARG:
11823 CHECK_STACK_OVF (1);
11824 CHECK_OPSIZE (4);
11825 n = read16 (ip + 2);
11826 CHECK_ARG (n);
11827 EMIT_NEW_ARGLOAD (cfg, ins, n);
11828 *sp++ = ins;
11829 ip += 4;
11830 break;
11831 case CEE_LDARGA:
11832 CHECK_STACK_OVF (1);
11833 CHECK_OPSIZE (4);
11834 n = read16 (ip + 2);
11835 CHECK_ARG (n);
11836 NEW_ARGLOADA (cfg, ins, n);
11837 MONO_ADD_INS (cfg->cbb, ins);
11838 *sp++ = ins;
11839 ip += 4;
11840 break;
11841 case CEE_STARG:
11842 CHECK_STACK (1);
11843 --sp;
11844 CHECK_OPSIZE (4);
11845 n = read16 (ip + 2);
11846 CHECK_ARG (n);
11847 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11848 UNVERIFIED;
11849 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11850 ip += 4;
11851 break;
11852 case CEE_LDLOC:
11853 CHECK_STACK_OVF (1);
11854 CHECK_OPSIZE (4);
11855 n = read16 (ip + 2);
11856 CHECK_LOCAL (n);
11857 EMIT_NEW_LOCLOAD (cfg, ins, n);
11858 *sp++ = ins;
11859 ip += 4;
11860 break;
11861 case CEE_LDLOCA: {
11862 unsigned char *tmp_ip;
11863 CHECK_STACK_OVF (1);
11864 CHECK_OPSIZE (4);
11865 n = read16 (ip + 2);
11866 CHECK_LOCAL (n);
11868 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11869 ip = tmp_ip;
11870 inline_costs += 1;
11871 break;
11874 EMIT_NEW_LOCLOADA (cfg, ins, n);
11875 *sp++ = ins;
11876 ip += 4;
11877 break;
11879 case CEE_STLOC:
11880 CHECK_STACK (1);
11881 --sp;
11882 CHECK_OPSIZE (4);
11883 n = read16 (ip + 2);
11884 CHECK_LOCAL (n);
11885 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11886 UNVERIFIED;
11887 emit_stloc_ir (cfg, sp, header, n);
11888 ip += 4;
11889 inline_costs += 1;
11890 break;
11891 case CEE_LOCALLOC:
11892 CHECK_STACK (1);
11893 --sp;
11894 if (sp != stack_start)
11895 UNVERIFIED;
11896 if (cfg->method != method)
11898 * Inlining this into a loop in a parent could lead to
11899 * stack overflows which is different behavior than the
11900 * non-inlined case, thus disable inlining in this case.
11902 goto inline_failure;
11904 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11905 ins->dreg = alloc_preg (cfg);
11906 ins->sreg1 = sp [0]->dreg;
11907 ins->type = STACK_PTR;
11908 MONO_ADD_INS (cfg->cbb, ins);
11910 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11911 if (init_locals)
11912 ins->flags |= MONO_INST_INIT;
11914 *sp++ = ins;
11915 ip += 2;
11916 break;
11917 case CEE_ENDFILTER: {
11918 MonoExceptionClause *clause, *nearest;
11919 int cc, nearest_num;
11921 CHECK_STACK (1);
11922 --sp;
11923 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11924 UNVERIFIED;
11925 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11926 ins->sreg1 = (*sp)->dreg;
11927 MONO_ADD_INS (bblock, ins);
11928 start_new_bblock = 1;
11929 ip += 2;
11931 nearest = NULL;
11932 nearest_num = 0;
11933 for (cc = 0; cc < header->num_clauses; ++cc) {
11934 clause = &header->clauses [cc];
11935 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11936 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11937 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11938 nearest = clause;
11939 nearest_num = cc;
11942 g_assert (nearest);
11943 if ((ip - header->code) != nearest->handler_offset)
11944 UNVERIFIED;
11946 break;
11948 case CEE_UNALIGNED_:
11949 ins_flag |= MONO_INST_UNALIGNED;
11950 /* FIXME: record alignment? we can assume 1 for now */
11951 CHECK_OPSIZE (3);
11952 ip += 3;
11953 break;
11954 case CEE_VOLATILE_:
11955 ins_flag |= MONO_INST_VOLATILE;
11956 ip += 2;
11957 break;
11958 case CEE_TAIL_:
11959 ins_flag |= MONO_INST_TAILCALL;
11960 cfg->flags |= MONO_CFG_HAS_TAIL;
11961 /* Can't inline tail calls at this time */
11962 inline_costs += 100000;
11963 ip += 2;
11964 break;
11965 case CEE_INITOBJ:
11966 CHECK_STACK (1);
11967 --sp;
11968 CHECK_OPSIZE (6);
11969 token = read32 (ip + 2);
11970 klass = mini_get_class (method, token, generic_context);
11971 CHECK_TYPELOAD (klass);
11972 if (generic_class_is_reference_type (cfg, klass))
11973 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11974 else
11975 mini_emit_initobj (cfg, *sp, NULL, klass);
11976 ip += 6;
11977 inline_costs += 1;
11978 break;
11979 case CEE_CONSTRAINED_:
11980 CHECK_OPSIZE (6);
11981 token = read32 (ip + 2);
11982 constrained_call = mini_get_class (method, token, generic_context);
11983 CHECK_TYPELOAD (constrained_call);
11984 ip += 6;
11985 break;
11986 case CEE_CPBLK:
11987 case CEE_INITBLK: {
11988 MonoInst *iargs [3];
11989 CHECK_STACK (3);
11990 sp -= 3;
11992 /* Skip optimized paths for volatile operations. */
11993 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11994 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11995 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11996 /* emit_memset only works when val == 0 */
11997 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11998 } else {
11999 MonoInst *call;
12000 iargs [0] = sp [0];
12001 iargs [1] = sp [1];
12002 iargs [2] = sp [2];
12003 if (ip [1] == CEE_CPBLK) {
12005 * FIXME: It's unclear whether we should be emitting both the acquire
12006 * and release barriers for cpblk. It is technically both a load and
12007 * store operation, so it seems like that's the sensible thing to do.
12009 MonoMethod *memcpy_method = get_memcpy_method ();
12010 if (ins_flag & MONO_INST_VOLATILE) {
12011 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12012 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12013 emit_memory_barrier (cfg, FullBarrier);
12015 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12016 call->flags |= ins_flag;
12017 if (ins_flag & MONO_INST_VOLATILE) {
12018 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12019 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12020 emit_memory_barrier (cfg, FullBarrier);
12022 } else {
12023 MonoMethod *memset_method = get_memset_method ();
12024 if (ins_flag & MONO_INST_VOLATILE) {
12025 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12026 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12027 emit_memory_barrier (cfg, FullBarrier);
12029 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12030 call->flags |= ins_flag;
12033 ip += 2;
12034 ins_flag = 0;
12035 inline_costs += 1;
12036 break;
12038 case CEE_NO_:
12039 CHECK_OPSIZE (3);
12040 if (ip [2] & 0x1)
12041 ins_flag |= MONO_INST_NOTYPECHECK;
12042 if (ip [2] & 0x2)
12043 ins_flag |= MONO_INST_NORANGECHECK;
12044 /* we ignore the no-nullcheck for now since we
12045 * really do it explicitly only when doing callvirt->call
12047 ip += 3;
12048 break;
12049 case CEE_RETHROW: {
12050 MonoInst *load;
12051 int handler_offset = -1;
12053 for (i = 0; i < header->num_clauses; ++i) {
12054 MonoExceptionClause *clause = &header->clauses [i];
12055 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12056 handler_offset = clause->handler_offset;
12057 break;
12061 bblock->flags |= BB_EXCEPTION_UNSAFE;
12063 if (handler_offset == -1)
12064 UNVERIFIED;
12066 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12067 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12068 ins->sreg1 = load->dreg;
12069 MONO_ADD_INS (bblock, ins);
12071 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12072 MONO_ADD_INS (bblock, ins);
12074 sp = stack_start;
12075 link_bblock (cfg, bblock, end_bblock);
12076 start_new_bblock = 1;
12077 ip += 2;
12078 break;
12080 case CEE_SIZEOF: {
12081 guint32 val;
12082 int ialign;
12084 GSHAREDVT_FAILURE (*ip);
12086 CHECK_STACK_OVF (1);
12087 CHECK_OPSIZE (6);
12088 token = read32 (ip + 2);
12089 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12090 MonoType *type = mono_type_create_from_typespec (image, token);
12091 val = mono_type_size (type, &ialign);
12092 } else {
12093 MonoClass *klass = mono_class_get_full (image, token, generic_context);
12094 CHECK_TYPELOAD (klass);
12095 mono_class_init (klass);
12096 val = mono_type_size (&klass->byval_arg, &ialign);
12098 EMIT_NEW_ICONST (cfg, ins, val);
12099 *sp++= ins;
12100 ip += 6;
12101 break;
12103 case CEE_REFANYTYPE: {
12104 MonoInst *src_var, *src;
12106 GSHAREDVT_FAILURE (*ip);
12108 CHECK_STACK (1);
12109 --sp;
12111 // FIXME:
12112 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12113 if (!src_var)
12114 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12115 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12116 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12117 *sp++ = ins;
12118 ip += 2;
12119 break;
12121 case CEE_READONLY_:
12122 readonly = TRUE;
12123 ip += 2;
12124 break;
12126 case CEE_UNUSED56:
12127 case CEE_UNUSED57:
12128 case CEE_UNUSED70:
12129 case CEE_UNUSED:
12130 case CEE_UNUSED99:
12131 UNVERIFIED;
12133 default:
12134 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12135 UNVERIFIED;
12137 break;
12139 case CEE_UNUSED58:
12140 case CEE_UNUSED1:
12141 UNVERIFIED;
12143 default:
12144 g_warning ("opcode 0x%02x not handled", *ip);
12145 UNVERIFIED;
12148 if (start_new_bblock != 1)
12149 UNVERIFIED;
12151 bblock->cil_length = ip - bblock->cil_code;
12152 if (bblock->next_bb) {
12153 /* This could already be set because of inlining, #693905 */
12154 MonoBasicBlock *bb = bblock;
12156 while (bb->next_bb)
12157 bb = bb->next_bb;
12158 bb->next_bb = end_bblock;
12159 } else {
12160 bblock->next_bb = end_bblock;
12163 if (cfg->method == method && cfg->domainvar) {
12164 MonoInst *store;
12165 MonoInst *get_domain;
12167 cfg->cbb = init_localsbb;
12169 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12170 MONO_ADD_INS (cfg->cbb, get_domain);
12171 } else {
12172 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12174 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12175 MONO_ADD_INS (cfg->cbb, store);
12178 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12179 if (cfg->compile_aot)
12180 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12181 mono_get_got_var (cfg);
12182 #endif
12184 if (cfg->method == method && cfg->got_var)
12185 mono_emit_load_got_addr (cfg);
12187 if (init_localsbb) {
12188 cfg->cbb = init_localsbb;
12189 cfg->ip = NULL;
12190 for (i = 0; i < header->num_locals; ++i) {
12191 emit_init_local (cfg, i, header->locals [i], init_locals);
12195 if (cfg->init_ref_vars && cfg->method == method) {
12196 /* Emit initialization for ref vars */
12197 // FIXME: Avoid duplication initialization for IL locals.
12198 for (i = 0; i < cfg->num_varinfo; ++i) {
12199 MonoInst *ins = cfg->varinfo [i];
12201 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12202 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12206 if (cfg->lmf_var && cfg->method == method) {
12207 cfg->cbb = init_localsbb;
12208 emit_push_lmf (cfg);
12211 cfg->cbb = init_localsbb;
12212 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12214 if (seq_points) {
12215 MonoBasicBlock *bb;
12218 * Make seq points at backward branch targets interruptable.
12220 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12221 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12222 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12225 /* Add a sequence point for method entry/exit events */
12226 if (seq_points) {
12227 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12228 MONO_ADD_INS (init_localsbb, ins);
12229 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12230 MONO_ADD_INS (cfg->bb_exit, ins);
12234 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12235 * the code they refer to was dead (#11880).
12237 if (sym_seq_points) {
12238 for (i = 0; i < header->code_size; ++i) {
12239 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12240 MonoInst *ins;
12242 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12243 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12248 cfg->ip = NULL;
12250 if (cfg->method == method) {
12251 MonoBasicBlock *bb;
12252 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12253 bb->region = mono_find_block_region (cfg, bb->real_offset);
12254 if (cfg->spvars)
12255 mono_create_spvar_for_region (cfg, bb->region);
12256 if (cfg->verbose_level > 2)
12257 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12261 g_slist_free (class_inits);
12262 dont_inline = g_list_remove (dont_inline, method);
12264 if (inline_costs < 0) {
12265 char *mname;
12267 /* Method is too large */
12268 mname = mono_method_full_name (method, TRUE);
12269 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12270 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12271 g_free (mname);
12272 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12273 mono_basic_block_free (original_bb);
12274 return -1;
12277 if ((cfg->verbose_level > 2) && (cfg->method == method))
12278 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12280 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12281 mono_basic_block_free (original_bb);
12282 return inline_costs;
12284 exception_exit:
12285 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12286 goto cleanup;
12288 inline_failure:
12289 goto cleanup;
12291 load_error:
12292 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12293 goto cleanup;
12295 unverified:
12296 set_exception_type_from_invalid_il (cfg, method, ip);
12297 goto cleanup;
12299 cleanup:
12300 g_slist_free (class_inits);
12301 mono_basic_block_free (original_bb);
12302 dont_inline = g_list_remove (dont_inline, method);
12303 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12304 return -1;
12307 static int
12308 store_membase_reg_to_store_membase_imm (int opcode)
12310 switch (opcode) {
12311 case OP_STORE_MEMBASE_REG:
12312 return OP_STORE_MEMBASE_IMM;
12313 case OP_STOREI1_MEMBASE_REG:
12314 return OP_STOREI1_MEMBASE_IMM;
12315 case OP_STOREI2_MEMBASE_REG:
12316 return OP_STOREI2_MEMBASE_IMM;
12317 case OP_STOREI4_MEMBASE_REG:
12318 return OP_STOREI4_MEMBASE_IMM;
12319 case OP_STOREI8_MEMBASE_REG:
12320 return OP_STOREI8_MEMBASE_IMM;
12321 default:
12322 g_assert_not_reached ();
12325 return -1;
12329 mono_op_to_op_imm (int opcode)
12331 switch (opcode) {
12332 case OP_IADD:
12333 return OP_IADD_IMM;
12334 case OP_ISUB:
12335 return OP_ISUB_IMM;
12336 case OP_IDIV:
12337 return OP_IDIV_IMM;
12338 case OP_IDIV_UN:
12339 return OP_IDIV_UN_IMM;
12340 case OP_IREM:
12341 return OP_IREM_IMM;
12342 case OP_IREM_UN:
12343 return OP_IREM_UN_IMM;
12344 case OP_IMUL:
12345 return OP_IMUL_IMM;
12346 case OP_IAND:
12347 return OP_IAND_IMM;
12348 case OP_IOR:
12349 return OP_IOR_IMM;
12350 case OP_IXOR:
12351 return OP_IXOR_IMM;
12352 case OP_ISHL:
12353 return OP_ISHL_IMM;
12354 case OP_ISHR:
12355 return OP_ISHR_IMM;
12356 case OP_ISHR_UN:
12357 return OP_ISHR_UN_IMM;
12359 case OP_LADD:
12360 return OP_LADD_IMM;
12361 case OP_LSUB:
12362 return OP_LSUB_IMM;
12363 case OP_LAND:
12364 return OP_LAND_IMM;
12365 case OP_LOR:
12366 return OP_LOR_IMM;
12367 case OP_LXOR:
12368 return OP_LXOR_IMM;
12369 case OP_LSHL:
12370 return OP_LSHL_IMM;
12371 case OP_LSHR:
12372 return OP_LSHR_IMM;
12373 case OP_LSHR_UN:
12374 return OP_LSHR_UN_IMM;
12375 #if SIZEOF_REGISTER == 8
12376 case OP_LREM:
12377 return OP_LREM_IMM;
12378 #endif
12380 case OP_COMPARE:
12381 return OP_COMPARE_IMM;
12382 case OP_ICOMPARE:
12383 return OP_ICOMPARE_IMM;
12384 case OP_LCOMPARE:
12385 return OP_LCOMPARE_IMM;
12387 case OP_STORE_MEMBASE_REG:
12388 return OP_STORE_MEMBASE_IMM;
12389 case OP_STOREI1_MEMBASE_REG:
12390 return OP_STOREI1_MEMBASE_IMM;
12391 case OP_STOREI2_MEMBASE_REG:
12392 return OP_STOREI2_MEMBASE_IMM;
12393 case OP_STOREI4_MEMBASE_REG:
12394 return OP_STOREI4_MEMBASE_IMM;
12396 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12397 case OP_X86_PUSH:
12398 return OP_X86_PUSH_IMM;
12399 case OP_X86_COMPARE_MEMBASE_REG:
12400 return OP_X86_COMPARE_MEMBASE_IMM;
12401 #endif
12402 #if defined(TARGET_AMD64)
12403 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12404 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12405 #endif
12406 case OP_VOIDCALL_REG:
12407 return OP_VOIDCALL;
12408 case OP_CALL_REG:
12409 return OP_CALL;
12410 case OP_LCALL_REG:
12411 return OP_LCALL;
12412 case OP_FCALL_REG:
12413 return OP_FCALL;
12414 case OP_LOCALLOC:
12415 return OP_LOCALLOC_IMM;
12418 return -1;
12421 static int
12422 ldind_to_load_membase (int opcode)
12424 switch (opcode) {
12425 case CEE_LDIND_I1:
12426 return OP_LOADI1_MEMBASE;
12427 case CEE_LDIND_U1:
12428 return OP_LOADU1_MEMBASE;
12429 case CEE_LDIND_I2:
12430 return OP_LOADI2_MEMBASE;
12431 case CEE_LDIND_U2:
12432 return OP_LOADU2_MEMBASE;
12433 case CEE_LDIND_I4:
12434 return OP_LOADI4_MEMBASE;
12435 case CEE_LDIND_U4:
12436 return OP_LOADU4_MEMBASE;
12437 case CEE_LDIND_I:
12438 return OP_LOAD_MEMBASE;
12439 case CEE_LDIND_REF:
12440 return OP_LOAD_MEMBASE;
12441 case CEE_LDIND_I8:
12442 return OP_LOADI8_MEMBASE;
12443 case CEE_LDIND_R4:
12444 return OP_LOADR4_MEMBASE;
12445 case CEE_LDIND_R8:
12446 return OP_LOADR8_MEMBASE;
12447 default:
12448 g_assert_not_reached ();
12451 return -1;
12454 static int
12455 stind_to_store_membase (int opcode)
12457 switch (opcode) {
12458 case CEE_STIND_I1:
12459 return OP_STOREI1_MEMBASE_REG;
12460 case CEE_STIND_I2:
12461 return OP_STOREI2_MEMBASE_REG;
12462 case CEE_STIND_I4:
12463 return OP_STOREI4_MEMBASE_REG;
12464 case CEE_STIND_I:
12465 case CEE_STIND_REF:
12466 return OP_STORE_MEMBASE_REG;
12467 case CEE_STIND_I8:
12468 return OP_STOREI8_MEMBASE_REG;
12469 case CEE_STIND_R4:
12470 return OP_STORER4_MEMBASE_REG;
12471 case CEE_STIND_R8:
12472 return OP_STORER8_MEMBASE_REG;
12473 default:
12474 g_assert_not_reached ();
12477 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the OP_..._MEM variant of a membase load opcode, i.e. a load
 * from an absolute address instead of base + displacement. Returns -1
 * when the current target has no such opcode.
 */
static inline int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:   return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which performs OPCODE directly on a membase
 * destination, folding away a following STORE_OPCODE. Returns -1 when
 * the combination cannot be expressed on the current target.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only plain/int32 sized stores can be folded on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:  return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:  return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store already transfers the value; the move itself disappears */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	/* 32 bit ops */
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:  return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:  return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	/* 64 bit ops */
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:  return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:  return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The store already transfers the value; the move itself disappears */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the result of OPCODE (a setcc-style
 * compare result) directly to a membase destination, folding away a
 * following STORE_OPCODE. Returns -1 when no such folding exists on
 * the current target.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* Fix: this used to fall through into OP_CNE; harmless only
		 * because both guards were identical, but add an explicit
		 * break so the cases stay independent (CERT MSC17-C). */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which takes its first source operand
 * directly from memory, folding away a preceding LOAD_OPCODE. Returns
 * -1 if no such opcode exists on the current target.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only pointer sized/int32 loads can be folded */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* NOTE(review): under __mono_ilp32__ a plain OP_LOAD_MEMBASE is
		 * deliberately excluded here — presumably because it is not a
		 * full register sized load on that ABI; confirm before changing. */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return a variant of OPCODE which takes its second source operand
 * directly from memory, folding away a preceding LOAD_OPCODE. Returns
 * -1 if no such opcode exists on the current target.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE: return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD: return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB: return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND: return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:  return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR: return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	{
		int is_i4_load, is_i8_load;

		/* Which width class a plain OP_LOAD_MEMBASE falls into depends on the ABI */
#ifdef __mono_ilp32__
		is_i4_load = (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE);
		is_i8_load = (load_opcode == OP_LOADI8_MEMBASE);
#else
		is_i4_load = (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE);
		is_i8_load = (load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE);
#endif

		if (is_i4_load) {
			switch (opcode) {
			case OP_ICOMPARE: return OP_AMD64_ICOMPARE_REG_MEMBASE;
			case OP_IADD: return OP_X86_ADD_REG_MEMBASE;
			case OP_ISUB: return OP_X86_SUB_REG_MEMBASE;
			case OP_IAND: return OP_X86_AND_REG_MEMBASE;
			case OP_IOR:  return OP_X86_OR_REG_MEMBASE;
			case OP_IXOR: return OP_X86_XOR_REG_MEMBASE;
			}
		} else if (is_i8_load) {
			switch (opcode) {
			case OP_COMPARE:
			case OP_LCOMPARE: return OP_AMD64_COMPARE_REG_MEMBASE;
			case OP_LADD: return OP_AMD64_ADD_REG_MEMBASE;
			case OP_LSUB: return OP_AMD64_SUB_REG_MEMBASE;
			case OP_LAND: return OP_AMD64_AND_REG_MEMBASE;
			case OP_LOR:  return OP_AMD64_OR_REG_MEMBASE;
			case OP_LXOR: return OP_AMD64_XOR_REG_MEMBASE;
			}
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Variant of mono_op_to_op_imm () which returns -1 for opcodes that
 * are software-emulated on this target (see the MONO_ARCH_EMULATE_*
 * checks below), so no immediate form is offered for them.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	if (opcode == OP_IMUL)
		return -1;
#endif

	return mono_op_to_op_imm (opcode);
}
12788 * mono_handle_global_vregs:
12790 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12791 * for them.
12793 void
12794 mono_handle_global_vregs (MonoCompile *cfg)
12796 gint32 *vreg_to_bb;
12797 MonoBasicBlock *bb;
12798 int i, pos;
12800 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12802 #ifdef MONO_ARCH_SIMD_INTRINSICS
12803 if (cfg->uses_simd_intrinsics)
12804 mono_simd_simplify_indirection (cfg);
12805 #endif
12807 /* Find local vregs used in more than one bb */
12808 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12809 MonoInst *ins = bb->code;
12810 int block_num = bb->block_num;
12812 if (cfg->verbose_level > 2)
12813 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12815 cfg->cbb = bb;
12816 for (; ins; ins = ins->next) {
12817 const char *spec = INS_INFO (ins->opcode);
12818 int regtype = 0, regindex;
12819 gint32 prev_bb;
12821 if (G_UNLIKELY (cfg->verbose_level > 2))
12822 mono_print_ins (ins);
12824 g_assert (ins->opcode >= MONO_CEE_LAST);
12826 for (regindex = 0; regindex < 4; regindex ++) {
12827 int vreg = 0;
12829 if (regindex == 0) {
12830 regtype = spec [MONO_INST_DEST];
12831 if (regtype == ' ')
12832 continue;
12833 vreg = ins->dreg;
12834 } else if (regindex == 1) {
12835 regtype = spec [MONO_INST_SRC1];
12836 if (regtype == ' ')
12837 continue;
12838 vreg = ins->sreg1;
12839 } else if (regindex == 2) {
12840 regtype = spec [MONO_INST_SRC2];
12841 if (regtype == ' ')
12842 continue;
12843 vreg = ins->sreg2;
12844 } else if (regindex == 3) {
12845 regtype = spec [MONO_INST_SRC3];
12846 if (regtype == ' ')
12847 continue;
12848 vreg = ins->sreg3;
12851 #if SIZEOF_REGISTER == 4
12852 /* In the LLVM case, the long opcodes are not decomposed */
12853 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12855 * Since some instructions reference the original long vreg,
12856 * and some reference the two component vregs, it is quite hard
12857 * to determine when it needs to be global. So be conservative.
12859 if (!get_vreg_to_inst (cfg, vreg)) {
12860 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12862 if (cfg->verbose_level > 2)
12863 printf ("LONG VREG R%d made global.\n", vreg);
12867 * Make the component vregs volatile since the optimizations can
12868 * get confused otherwise.
12870 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12871 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12873 #endif
12875 g_assert (vreg != -1);
12877 prev_bb = vreg_to_bb [vreg];
12878 if (prev_bb == 0) {
12879 /* 0 is a valid block num */
12880 vreg_to_bb [vreg] = block_num + 1;
12881 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
12882 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12883 continue;
12885 if (!get_vreg_to_inst (cfg, vreg)) {
12886 if (G_UNLIKELY (cfg->verbose_level > 2))
12887 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
12889 switch (regtype) {
12890 case 'i':
12891 if (vreg_is_ref (cfg, vreg))
12892 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12893 else
12894 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12895 break;
12896 case 'l':
12897 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12898 break;
12899 case 'f':
12900 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12901 break;
12902 case 'v':
12903 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12904 break;
12905 default:
12906 g_assert_not_reached ();
12910 /* Flag as having been used in more than one bb */
12911 vreg_to_bb [vreg] = -1;
12917 /* If a variable is used in only one bblock, convert it into a local vreg */
12918 for (i = 0; i < cfg->num_varinfo; i++) {
12919 MonoInst *var = cfg->varinfo [i];
12920 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12922 switch (var->type) {
12923 case STACK_I4:
12924 case STACK_OBJ:
12925 case STACK_PTR:
12926 case STACK_MP:
12927 case STACK_VTYPE:
12928 #if SIZEOF_REGISTER == 8
12929 case STACK_I8:
12930 #endif
12931 #if !defined(TARGET_X86)
12932 /* Enabling this screws up the fp stack on x86 */
12933 case STACK_R8:
12934 #endif
12935 if (mono_arch_is_soft_float ())
12936 break;
12938 /* Arguments are implicitly global */
12939 /* Putting R4 vars into registers doesn't work currently */
12940 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12941 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12943 * Make sure that the variable's liveness interval doesn't contain a call, since
12944 * that would cause the lvreg to be spilled, making the whole optimization
12945 * useless.
12947 /* This is too slow for JIT compilation */
12948 #if 0
12949 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12950 MonoInst *ins;
12951 int def_index, call_index, ins_index;
12952 gboolean spilled = FALSE;
12954 def_index = -1;
12955 call_index = -1;
12956 ins_index = 0;
12957 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12958 const char *spec = INS_INFO (ins->opcode);
12960 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12961 def_index = ins_index;
12963 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12964 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12965 if (call_index > def_index) {
12966 spilled = TRUE;
12967 break;
12971 if (MONO_IS_CALL (ins))
12972 call_index = ins_index;
12974 ins_index ++;
12977 if (spilled)
12978 break;
12980 #endif
12982 if (G_UNLIKELY (cfg->verbose_level > 2))
12983 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12984 var->flags |= MONO_INST_IS_DEAD;
12985 cfg->vreg_to_inst [var->dreg] = NULL;
12987 break;
12992 * Compress the varinfo and vars tables so the liveness computation is faster and
12993 * takes up less space.
12995 pos = 0;
12996 for (i = 0; i < cfg->num_varinfo; ++i) {
12997 MonoInst *var = cfg->varinfo [i];
12998 if (pos < i && cfg->locals_start == i)
12999 cfg->locals_start = pos;
13000 if (!(var->flags & MONO_INST_IS_DEAD)) {
13001 if (pos < i) {
13002 cfg->varinfo [pos] = cfg->varinfo [i];
13003 cfg->varinfo [pos]->inst_c0 = pos;
13004 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13005 cfg->vars [pos].idx = pos;
13006 #if SIZEOF_REGISTER == 4
13007 if (cfg->varinfo [pos]->type == STACK_I8) {
13008 /* Modify the two component vars too */
13009 MonoInst *var1;
13011 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13012 var1->inst_c0 = pos;
13013 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13014 var1->inst_c0 = pos;
13016 #endif
13018 pos ++;
13021 cfg->num_varinfo = pos;
13022 if (cfg->locals_start > cfg->num_varinfo)
13023 cfg->locals_start = cfg->num_varinfo;
13027 * mono_spill_global_vars:
13029 * Generate spill code for variables which are not allocated to registers,
13030 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13031 * code is generated which could be optimized by the local optimization passes.
/*
 * NOTE(review): this listing appears to be an extraction that dropped blank
 * and brace-only lines (the embedded source line numbers are not contiguous),
 * so several closing braces are missing from view. Code lines below are kept
 * byte-identical; only review comments have been added. Verify structure
 * against the original method-to-ir.c before applying any code change.
 */
13033 void
13034 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13036 MonoBasicBlock *bb;
13037 char spec2 [16];
13038 int orig_next_vreg;
13039 guint32 *vreg_to_lvreg;
13040 guint32 *lvregs;
13041 guint32 i, lvregs_len;
13042 gboolean dest_has_lvreg = FALSE;
13043 guint32 stacktypes [128];
13044 MonoInst **live_range_start, **live_range_end;
13045 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13046 int *gsharedvt_vreg_to_idx = NULL;
13048 *need_local_opts = FALSE;
/* spec2 is a scratch ins-spec buffer, filled in below when a store's base
 * register is temporarily swapped from dreg into sreg2. */
13050 memset (spec2, 0, sizeof (spec2));
/* Map regtype spec characters ('i'/'l'/'f', and 'x' with SIMD) to the stack
 * type used when allocating a fresh lvreg of that kind. */
13052 /* FIXME: Move this function to mini.c */
13053 stacktypes ['i'] = STACK_PTR;
13054 stacktypes ['l'] = STACK_I8;
13055 stacktypes ['f'] = STACK_R8;
13056 #ifdef MONO_ARCH_SIMD_INTRINSICS
13057 stacktypes ['x'] = STACK_VTYPE;
13058 #endif
/* On 32-bit targets, the two component word vregs of each stack-allocated
 * long (and, under soft float, R8) are dreg+1 (LS word) and dreg+2 (MS word);
 * give each its own REGOFFSET slot derived from the parent's slot. */
13060 #if SIZEOF_REGISTER == 4
13061 /* Create MonoInsts for longs */
13062 for (i = 0; i < cfg->num_varinfo; i++) {
13063 MonoInst *ins = cfg->varinfo [i];
13065 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13066 switch (ins->type) {
13067 case STACK_R8:
13068 case STACK_I8: {
13069 MonoInst *tree;
13071 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13072 break;
13074 g_assert (ins->opcode == OP_REGOFFSET);
13076 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13077 g_assert (tree);
13078 tree->opcode = OP_REGOFFSET;
13079 tree->inst_basereg = ins->inst_basereg;
13080 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13082 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13083 g_assert (tree);
13084 tree->opcode = OP_REGOFFSET;
13085 tree->inst_basereg = ins->inst_basereg;
13086 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13087 break;
13089 default:
13090 break;
13094 #endif
13096 if (cfg->compute_gc_maps) {
13097 /* registers need liveness info even for !non refs */
13098 for (i = 0; i < cfg->num_varinfo; i++) {
13099 MonoInst *ins = cfg->varinfo [i];
13101 if (ins->opcode == OP_REGVAR)
13102 ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, map each variable-sized variable's dreg to an
 * info-slot index. Stored value is idx + 1 so that 0 means "no entry";
 * args passed by reference are marked with -1 and handled separately in
 * the LDADDR rewriting below. */
13106 if (cfg->gsharedvt) {
13107 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13109 for (i = 0; i < cfg->num_varinfo; ++i) {
13110 MonoInst *ins = cfg->varinfo [i];
13111 int idx;
13113 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13114 if (i >= cfg->locals_start) {
13115 /* Local */
13116 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13117 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13118 ins->opcode = OP_GSHAREDVT_LOCAL;
13119 ins->inst_imm = idx;
13120 } else {
13121 /* Arg */
13122 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13123 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13129 /* FIXME: widening and truncation */
13132 * As an optimization, when a variable allocated to the stack is first loaded into
13133 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13134 * the variable again.
/* lvregs has a fixed capacity of 1024 entries; overflow is caught by the
 * g_assert (lvregs_len < 1024) checks at each append site below. */
13136 orig_next_vreg = cfg->next_vreg;
13137 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13138 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13139 lvregs_len = 0;
13142 * These arrays contain the first and last instructions accessing a given
13143 * variable.
13144 * Since we emit bblocks in the same order we process them here, and we
13145 * don't split live ranges, these will precisely describe the live range of
13146 * the variable, i.e. the instruction range where a valid value can be found
13147 * in the variables location.
13148 * The live range is computed using the liveness info computed by the liveness pass.
13149 * We can't use vmv->range, since that is an abstract live range, and we need
13150 * one which is instruction precise.
13151 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13153 /* FIXME: Only do this if debugging info is requested */
13154 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13155 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13156 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13157 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13159 /* Add spill loads/stores */
13160 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13161 MonoInst *ins;
13163 if (cfg->verbose_level > 2)
13164 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: reset only the entries touched in the
 * previous block (tracked in lvregs) rather than the whole array. */
13166 /* Clear vreg_to_lvreg array */
13167 for (i = 0; i < lvregs_len; i++)
13168 vreg_to_lvreg [lvregs [i]] = 0;
13169 lvregs_len = 0;
13171 cfg->cbb = bb;
13172 MONO_BB_FOR_EACH_INS (bb, ins) {
13173 const char *spec = INS_INFO (ins->opcode);
13174 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13175 gboolean store, no_lvreg;
13176 int sregs [MONO_MAX_SRC_REGS];
13178 if (G_UNLIKELY (cfg->verbose_level > 2))
13179 mono_print_ins (ins);
13181 if (ins->opcode == OP_NOP)
13182 continue;
13185 * We handle LDADDR here as well, since it can only be decomposed
13186 * when variable addresses are known.
13188 if (ins->opcode == OP_LDADDR) {
13189 MonoInst *var = ins->inst_p0;
13191 if (var->opcode == OP_VTARG_ADDR) {
13192 /* Happens on SPARC/S390 where vtypes are passed by reference */
13193 MonoInst *vtaddr = var->inst_left;
13194 if (vtaddr->opcode == OP_REGVAR) {
13195 ins->opcode = OP_MOVE;
13196 ins->sreg1 = vtaddr->dreg;
13198 else if (var->inst_left->opcode == OP_REGOFFSET) {
13199 ins->opcode = OP_LOAD_MEMBASE;
13200 ins->inst_basereg = vtaddr->inst_basereg;
13201 ins->inst_offset = vtaddr->inst_offset;
13202 } else
13203 NOT_IMPLEMENTED;
13204 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13205 /* gsharedvt arg passed by ref */
13206 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13208 ins->opcode = OP_LOAD_MEMBASE;
13209 ins->inst_basereg = var->inst_basereg;
13210 ins->inst_offset = var->inst_offset;
13211 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13212 MonoInst *load, *load2, *load3;
13213 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13214 int reg1, reg2, reg3;
13215 MonoInst *info_var = cfg->gsharedvt_info_var;
13216 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13219 * gsharedvt local.
13220 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13223 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13225 g_assert (info_var);
13226 g_assert (locals_var);
13228 /* Mark the instruction used to compute the locals var as used */
13229 cfg->gsharedvt_locals_var_ins = NULL;
13231 /* Load the offset */
13232 if (info_var->opcode == OP_REGOFFSET) {
13233 reg1 = alloc_ireg (cfg);
13234 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13235 } else if (info_var->opcode == OP_REGVAR) {
13236 load = NULL;
13237 reg1 = info_var->dreg;
13238 } else {
13239 g_assert_not_reached ();
13241 reg2 = alloc_ireg (cfg);
13242 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13243 /* Load the locals area address */
13244 reg3 = alloc_ireg (cfg);
13245 if (locals_var->opcode == OP_REGOFFSET) {
13246 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13247 } else if (locals_var->opcode == OP_REGVAR) {
13248 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13249 } else {
13250 g_assert_not_reached ();
13252 /* Compute the address */
13253 ins->opcode = OP_PADD;
13254 ins->sreg1 = reg3;
13255 ins->sreg2 = reg2;
/* Insertion order places the (optional) info load first, then the
 * offset load, then the locals-area load, all before the PADD. */
13257 mono_bblock_insert_before_ins (bb, ins, load3);
13258 mono_bblock_insert_before_ins (bb, load3, load2);
13259 if (load)
13260 mono_bblock_insert_before_ins (bb, load2, load);
13261 } else {
13262 g_assert (var->opcode == OP_REGOFFSET);
13264 ins->opcode = OP_ADD_IMM;
13265 ins->sreg1 = var->inst_basereg;
13266 ins->inst_imm = var->inst_offset;
13269 *need_local_opts = TRUE;
13270 spec = INS_INFO (ins->opcode);
/* Opcodes numerically below MONO_CEE_LAST are IL-level; they should all
 * have been lowered before this pass runs. */
13273 if (ins->opcode < MONO_CEE_LAST) {
13274 mono_print_ins (ins);
13275 g_assert_not_reached ();
13279 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13280 * src register.
13281 * FIXME:
/* Swap dreg <-> sreg2 for the duration of this iteration and synthesize a
 * matching spec in spec2; the swap is undone at the bottom (if (store)). */
13283 if (MONO_IS_STORE_MEMBASE (ins)) {
13284 tmp_reg = ins->dreg;
13285 ins->dreg = ins->sreg2;
13286 ins->sreg2 = tmp_reg;
13287 store = TRUE;
13289 spec2 [MONO_INST_DEST] = ' ';
13290 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13291 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13292 spec2 [MONO_INST_SRC3] = ' ';
13293 spec = spec2;
13294 } else if (MONO_IS_STORE_MEMINDEX (ins))
13295 g_assert_not_reached ();
13296 else
13297 store = FALSE;
13298 no_lvreg = FALSE;
13300 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13301 printf ("\t %.3s %d", spec, ins->dreg);
13302 num_sregs = mono_inst_get_src_registers (ins, sregs);
13303 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13304 printf (" %d", sregs [srcindex]);
13305 printf ("\n");
13308 /***************/
13309 /* DREG */
13310 /***************/
13311 regtype = spec [MONO_INST_DEST];
13312 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13313 prev_dreg = -1;
13315 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13316 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13317 MonoInst *store_ins;
13318 int store_opcode;
13319 MonoInst *def_ins = ins;
13320 int dreg = ins->dreg; /* The original vreg */
13322 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13324 if (var->opcode == OP_REGVAR) {
13325 ins->dreg = var->dreg;
13326 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13328 * Instead of emitting a load+store, use a _membase opcode.
13330 g_assert (var->opcode == OP_REGOFFSET);
13331 if (ins->opcode == OP_MOVE) {
13332 NULLIFY_INS (ins);
13333 def_ins = NULL;
13334 } else {
13335 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13336 ins->inst_basereg = var->inst_basereg;
13337 ins->inst_offset = var->inst_offset;
13338 ins->dreg = -1;
13340 spec = INS_INFO (ins->opcode);
13341 } else {
13342 guint32 lvreg;
13344 g_assert (var->opcode == OP_REGOFFSET);
13346 prev_dreg = ins->dreg;
13348 /* Invalidate any previous lvreg for this vreg */
13349 vreg_to_lvreg [ins->dreg] = 0;
13351 lvreg = 0;
/* Under soft float an R8 store becomes an I8 store; regtype follows so
 * the replacement dreg gets an integer-pair stack type. */
13353 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13354 regtype = 'l';
13355 store_opcode = OP_STOREI8_MEMBASE_REG;
13358 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13360 #if SIZEOF_REGISTER != 8
13361 if (regtype == 'l') {
13362 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13363 mono_bblock_insert_after_ins (bb, ins, store_ins);
13364 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13365 mono_bblock_insert_after_ins (bb, ins, store_ins);
13366 def_ins = store_ins;
13368 else
13369 #endif
13371 g_assert (store_opcode != OP_STOREV_MEMBASE);
13373 /* Try to fuse the store into the instruction itself */
13374 /* FIXME: Add more instructions */
13375 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13376 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13377 ins->inst_imm = ins->inst_c0;
13378 ins->inst_destbasereg = var->inst_basereg;
13379 ins->inst_offset = var->inst_offset;
13380 spec = INS_INFO (ins->opcode);
13381 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13382 ins->opcode = store_opcode;
13383 ins->inst_destbasereg = var->inst_basereg;
13384 ins->inst_offset = var->inst_offset;
13386 no_lvreg = TRUE;
/* The move just became a store: redo the dreg/sreg2 swap and spec
 * rewrite so the SREGS phase treats it consistently. */
13388 tmp_reg = ins->dreg;
13389 ins->dreg = ins->sreg2;
13390 ins->sreg2 = tmp_reg;
13391 store = TRUE;
13393 spec2 [MONO_INST_DEST] = ' ';
13394 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13395 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13396 spec2 [MONO_INST_SRC3] = ' ';
13397 spec = spec2;
13398 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13399 // FIXME: The backends expect the base reg to be in inst_basereg
13400 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13401 ins->dreg = -1;
13402 ins->inst_basereg = var->inst_basereg;
13403 ins->inst_offset = var->inst_offset;
13404 spec = INS_INFO (ins->opcode);
13405 } else {
13406 /* printf ("INS: "); mono_print_ins (ins); */
13407 /* Create a store instruction */
13408 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13410 /* Insert it after the instruction */
13411 mono_bblock_insert_after_ins (bb, ins, store_ins);
13413 def_ins = store_ins;
13416 * We can't assign ins->dreg to var->dreg here, since the
13417 * sregs could use it. So set a flag, and do it after
13418 * the sregs.
13420 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13421 dest_has_lvreg = TRUE;
13426 if (def_ins && !live_range_start [dreg]) {
13427 live_range_start [dreg] = def_ins;
13428 live_range_start_bb [dreg] = bb;
13431 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13432 MonoInst *tmp;
13434 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13435 tmp->inst_c1 = dreg;
13436 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13440 /************/
13441 /* SREGS */
13442 /************/
/* Iterate all possible src slots; unused slots carry regtype ' ' and
 * sreg -1, which the assert below ties together. */
13443 num_sregs = mono_inst_get_src_registers (ins, sregs);
13444 for (srcindex = 0; srcindex < 3; ++srcindex) {
13445 regtype = spec [MONO_INST_SRC1 + srcindex];
13446 sreg = sregs [srcindex];
13448 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13449 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13450 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13451 MonoInst *use_ins = ins;
13452 MonoInst *load_ins;
13453 guint32 load_opcode;
13455 if (var->opcode == OP_REGVAR) {
13456 sregs [srcindex] = var->dreg;
13457 //mono_inst_set_src_registers (ins, sregs);
13458 live_range_end [sreg] = use_ins;
13459 live_range_end_bb [sreg] = bb;
13461 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13462 MonoInst *tmp;
13464 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13465 /* var->dreg is a hreg */
13466 tmp->inst_c1 = sreg;
13467 mono_bblock_insert_after_ins (bb, ins, tmp);
13470 continue;
13473 g_assert (var->opcode == OP_REGOFFSET);
13475 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13477 g_assert (load_opcode != OP_LOADV_MEMBASE);
13479 if (vreg_to_lvreg [sreg]) {
13480 g_assert (vreg_to_lvreg [sreg] != -1);
13482 /* The variable is already loaded to an lvreg */
13483 if (G_UNLIKELY (cfg->verbose_level > 2))
13484 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13485 sregs [srcindex] = vreg_to_lvreg [sreg];
13486 //mono_inst_set_src_registers (ins, sregs);
13487 continue;
13490 /* Try to fuse the load into the instruction */
13491 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13492 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13493 sregs [0] = var->inst_basereg;
13494 //mono_inst_set_src_registers (ins, sregs);
13495 ins->inst_offset = var->inst_offset;
13496 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13497 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13498 sregs [1] = var->inst_basereg;
13499 //mono_inst_set_src_registers (ins, sregs);
13500 ins->inst_offset = var->inst_offset;
13501 } else {
13502 if (MONO_IS_REAL_MOVE (ins)) {
13503 ins->opcode = OP_NOP;
13504 sreg = ins->dreg;
13505 } else {
13506 //printf ("%d ", srcindex); mono_print_ins (ins);
13508 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13510 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13511 if (var->dreg == prev_dreg) {
13513 * sreg refers to the value loaded by the load
13514 * emitted below, but we need to use ins->dreg
13515 * since it refers to the store emitted earlier.
13517 sreg = ins->dreg;
13519 g_assert (sreg != -1);
13520 vreg_to_lvreg [var->dreg] = sreg;
13521 g_assert (lvregs_len < 1024);
13522 lvregs [lvregs_len ++] = var->dreg;
13526 sregs [srcindex] = sreg;
13527 //mono_inst_set_src_registers (ins, sregs);
13529 #if SIZEOF_REGISTER != 8
13530 if (regtype == 'l') {
13531 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13532 mono_bblock_insert_before_ins (bb, ins, load_ins);
13533 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13534 mono_bblock_insert_before_ins (bb, ins, load_ins);
13535 use_ins = load_ins;
13537 else
13538 #endif
13540 #if SIZEOF_REGISTER == 4
13541 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13542 #endif
13543 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13544 mono_bblock_insert_before_ins (bb, ins, load_ins);
13545 use_ins = load_ins;
13549 if (var->dreg < orig_next_vreg) {
13550 live_range_end [var->dreg] = use_ins;
13551 live_range_end_bb [var->dreg] = bb;
13554 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13555 MonoInst *tmp;
13557 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13558 tmp->inst_c1 = var->dreg;
13559 mono_bblock_insert_after_ins (bb, ins, tmp);
13563 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG phase: now that the sregs have been processed it
 * is safe to record the lvreg holding the value that was just stored. */
13565 if (dest_has_lvreg) {
13566 g_assert (ins->dreg != -1);
13567 vreg_to_lvreg [prev_dreg] = ins->dreg;
13568 g_assert (lvregs_len < 1024);
13569 lvregs [lvregs_len ++] = prev_dreg;
13570 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
13573 if (store) {
13574 tmp_reg = ins->dreg;
13575 ins->dreg = ins->sreg2;
13576 ins->sreg2 = tmp_reg;
/* Calls clobber lvregs, so the whole per-bblock cache is flushed here. */
13579 if (MONO_IS_CALL (ins)) {
13580 /* Clear vreg_to_lvreg array */
13581 for (i = 0; i < lvregs_len; i++)
13582 vreg_to_lvreg [lvregs [i]] = 0;
13583 lvregs_len = 0;
13584 } else if (ins->opcode == OP_NOP) {
13585 ins->dreg = -1;
13586 MONO_INST_NULLIFY_SREGS (ins);
13589 if (cfg->verbose_level > 2)
13590 mono_print_ins_index (1, ins);
13593 /* Extend the live range based on the liveness info */
13594 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13595 for (i = 0; i < cfg->num_varinfo; i ++) {
13596 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13598 if (vreg_is_volatile (cfg, vi->vreg))
13599 /* The liveness info is incomplete */
13600 continue;
13602 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13603 /* Live from at least the first ins of this bb */
13604 live_range_start [vi->vreg] = bb->code;
13605 live_range_start_bb [vi->vreg] = bb;
13608 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13609 /* Live at least until the last ins of this bb */
13610 live_range_end [vi->vreg] = bb->last_ins;
13611 live_range_end_bb [vi->vreg] = bb;
13617 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13619 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13620 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13622 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13623 for (i = 0; i < cfg->num_varinfo; ++i) {
13624 int vreg = MONO_VARINFO (cfg, i)->vreg;
13625 MonoInst *ins;
13627 if (live_range_start [vreg]) {
13628 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13629 ins->inst_c0 = i;
13630 ins->inst_c1 = vreg;
13631 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13633 if (live_range_end [vreg]) {
13634 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13635 ins->inst_c0 = i;
13636 ins->inst_c1 = vreg;
13637 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13638 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13639 else
13640 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13644 #endif
13646 if (cfg->gsharedvt_locals_var_ins) {
13647 /* Nullify if unused */
13648 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13649 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13652 g_free (live_range_start);
13653 g_free (live_range_end);
13654 g_free (live_range_start_bb);
13655 g_free (live_range_end_bb);
13659 * FIXME:
13660 * - use 'iadd' instead of 'int_add'
13661 * - handling ovf opcodes: decompose in method_to_ir.
13662 * - unify iregs/fregs
13663 * -> partly done, the missing parts are:
13664 * - a more complete unification would involve unifying the hregs as well, so
13665 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13666 * would no longer map to the machine hregs, so the code generators would need to
13667 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13668 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13669 * fp/non-fp branches speeds it up by about 15%.
13670 * - use sext/zext opcodes instead of shifts
13671 * - add OP_ICALL
13672 * - get rid of TEMPLOADs if possible and use vregs instead
13673 * - clean up usage of OP_P/OP_ opcodes
13674 * - cleanup usage of DUMMY_USE
13675 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13676 * stack
13677 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13678 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13679 * - make sure handle_stack_args () is called before the branch is emitted
13680 * - when the new IR is done, get rid of all unused stuff
13681 * - COMPARE/BEQ as separate instructions or unify them ?
13682 * - keeping them separate allows specialized compare instructions like
13683 * compare_imm, compare_membase
13684 * - most back ends unify fp compare+branch, fp compare+ceq
13685 * - integrate mono_save_args into inline_method
13686 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13687 * - handle long shift opts on 32 bit platforms somehow: they require
13688 * 3 sregs (2 for arg1 and 1 for arg2)
13689 * - make byref a 'normal' type.
13690 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13691 * variable if needed.
13692 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13693 * like inline_method.
13694 * - remove inlining restrictions
13695 * - fix LNEG and enable cfold of INEG
13696 * - generalize x86 optimizations like ldelema as a peephole optimization
13697 * - add store_mem_imm for amd64
13698 * - optimize the loading of the interruption flag in the managed->native wrappers
13699 * - avoid special handling of OP_NOP in passes
13700 * - move code inserting instructions into one function/macro.
13701 * - try a coalescing phase after liveness analysis
13702 * - add float -> vreg conversion + local optimizations on !x86
13703 * - figure out how to handle decomposed branches during optimizations, ie.
13704 * compare+branch, op_jump_table+op_br etc.
13705 * - promote RuntimeXHandles to vregs
13706 * - vtype cleanups:
13707 * - add a NEW_VARLOADA_VREG macro
13708 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13709 * accessing vtype fields.
13710 * - get rid of I8CONST on 64 bit platforms
13711 * - dealing with the increase in code size due to branches created during opcode
13712 * decomposition:
13713 * - use extended basic blocks
13714 * - all parts of the JIT
13715 * - handle_global_vregs () && local regalloc
13716 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13717 * - sources of increase in code size:
13718 * - vtypes
13719 * - long compares
13720 * - isinst and castclass
13721 * - lvregs not allocated to global registers even if used multiple times
13722 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13723 * meaningful.
13724 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13725 * - add all micro optimizations from the old JIT
13726 * - put tree optimizations into the deadce pass
13727 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13728 * specific function.
13729 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13730 * fcompare + branchCC.
13731 * - create a helper function for allocating a stack slot, taking into account
13732 * MONO_CFG_HAS_SPILLUP.
13733 * - merge r68207.
13734 * - merge the ia64 switch changes.
13735 * - optimize mono_regstate2_alloc_int/float.
13736 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13737 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13738 * parts of the tree could be separated by other instructions, killing the tree
13739 * arguments, or stores killing loads etc. Also, should we fold loads into other
13740 * instructions if the result of the load is used multiple times ?
13741 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13742 * - LAST MERGE: 108395.
13743 * - when returning vtypes in registers, generate IR and append it to the end of the
13744 * last bb instead of doing it in the epilog.
13745 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13750 NOTES
13751 -----
13753 - When to decompose opcodes:
13754 - earlier: this makes some optimizations hard to implement, since the low level IR
13755 no longer contains the necessary information. But it is easier to do.
13756 - later: harder to implement, enables more optimizations.
13757 - Branches inside bblocks:
13758 - created when decomposing complex opcodes.
13759 - branches to another bblock: harmless, but not tracked by the branch
13760 optimizations, so need to branch to a label at the start of the bblock.
13761 - branches to inside the same bblock: very problematic, trips up the local
13762 reg allocator. Can be fixed by splitting the current bblock, but that is a
13763 complex operation, since some local vregs can become global vregs etc.
13764 - Local/global vregs:
13765 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13766 local register allocator.
13767 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13768 structure, created by mono_create_var (). Assigned to hregs or the stack by
13769 the global register allocator.
13770 - When to do optimizations like alu->alu_imm:
13771 - earlier -> saves work later on since the IR will be smaller/simpler
13772 - later -> can work on more instructions
13773 - Handling of valuetypes:
13774 - When a vtype is pushed on the stack, a new temporary is created, an
13775 instruction computing its address (LDADDR) is emitted and pushed on
13776 the stack. Need to optimize cases when the vtype is used immediately as in
13777 argument passing, stloc etc.
13778 - Instead of the to_end stuff in the old JIT, simply call the function handling
13779 the values on the stack before emitting the last instruction of the bb.
13782 #endif /* DISABLE_JIT */