/*
 * Source: mono/mini/method-to-ir.c
 * Commit subject: "Handle gsharing and marshal by ref."
 * Blob: f4f9ed33892d9572b0b05d6f054b90eb7f7356ab
 */
/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
#include <config.h>
#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#include <mono/utils/memcheck.h>

#include <mono/metadata/assembly.h>
#include <mono/metadata/attrdefs.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/profiler.h>
#include <mono/utils/mono-compiler.h>
#include <mono/metadata/mono-basic-block.h>

#include "mini.h"
#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#include "jit.h"
#include "debugger-agent.h"
/* Cost/limit knobs used by the inliner heuristics. */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining of the current callee (only when actually inlining). */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of compilation if a previous step recorded an exception on CFG. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException and abort compilation. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);		\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException and abort compilation. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);		\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/* Abort generic sharing of the method for OPCODE (only when sharing). */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);	\
			goto exception_exit;	\
		}			\
	} while (0)

/* Record an OutOfMemoryException and abort compilation. */
#define OUT_OF_MEMORY_FAILURE do {	\
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY);		\
		goto exception_exit;	\
	} while (0)

/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
130 #ifdef MINI_OP
131 #undef MINI_OP
132 #endif
133 #ifdef MINI_OP3
134 #undef MINI_OP3
135 #endif
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #define NONE ' '
139 #define IREG 'i'
140 #define FREG 'f'
141 #define VREG 'v'
142 #define XREG 'x'
143 #if SIZEOF_REGISTER == 8
144 #define LREG IREG
145 #else
146 #define LREG 'l'
147 #endif
148 /* keep in sync with the enum in mini.h */
149 const char
150 ins_info[] = {
151 #include "mini-ops.h"
153 #undef MINI_OP
154 #undef MINI_OP3
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
165 #undef MINI_OP
166 #undef MINI_OP3
/* Initialize a MonoMethodVar: no first use yet, no hard register, index ID. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
174 void
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
182 guint32
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
188 guint32
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
194 guint32
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
200 guint32
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
211 guint32
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
218 * mono_alloc_ireg_mp:
220 * Allocate an IREG, and mark it as holding a managed pointer.
222 guint32
223 mono_alloc_ireg_mp (MonoCompile *cfg)
225 return alloc_ireg_mp (cfg);
229 * mono_alloc_ireg_copy:
231 * Allocate an IREG with the same GC type as VREG.
233 guint32
234 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
236 if (vreg_is_ref (cfg, vreg))
237 return alloc_ireg_ref (cfg);
238 else if (vreg_is_mp (cfg, vreg))
239 return alloc_ireg_mp (cfg);
240 else
241 return alloc_ireg (cfg);
244 guint
245 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
247 if (type->byref)
248 return OP_MOVE;
250 handle_enum:
251 switch (type->type) {
252 case MONO_TYPE_I1:
253 case MONO_TYPE_U1:
254 case MONO_TYPE_BOOLEAN:
255 return OP_MOVE;
256 case MONO_TYPE_I2:
257 case MONO_TYPE_U2:
258 case MONO_TYPE_CHAR:
259 return OP_MOVE;
260 case MONO_TYPE_I4:
261 case MONO_TYPE_U4:
262 return OP_MOVE;
263 case MONO_TYPE_I:
264 case MONO_TYPE_U:
265 case MONO_TYPE_PTR:
266 case MONO_TYPE_FNPTR:
267 return OP_MOVE;
268 case MONO_TYPE_CLASS:
269 case MONO_TYPE_STRING:
270 case MONO_TYPE_OBJECT:
271 case MONO_TYPE_SZARRAY:
272 case MONO_TYPE_ARRAY:
273 return OP_MOVE;
274 case MONO_TYPE_I8:
275 case MONO_TYPE_U8:
276 #if SIZEOF_REGISTER == 8
277 return OP_MOVE;
278 #else
279 return OP_LMOVE;
280 #endif
281 case MONO_TYPE_R4:
282 return OP_FMOVE;
283 case MONO_TYPE_R8:
284 return OP_FMOVE;
285 case MONO_TYPE_VALUETYPE:
286 if (type->data.klass->enumtype) {
287 type = mono_class_enum_basetype (type->data.klass);
288 goto handle_enum;
290 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
291 return OP_XMOVE;
292 return OP_VMOVE;
293 case MONO_TYPE_TYPEDBYREF:
294 return OP_VMOVE;
295 case MONO_TYPE_GENERICINST:
296 type = &type->data.generic_class->container_class->byval_arg;
297 goto handle_enum;
298 case MONO_TYPE_VAR:
299 case MONO_TYPE_MVAR:
300 g_assert (cfg->generic_sharing_context);
301 return OP_MOVE;
302 default:
303 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
305 return -1;
308 void
309 mono_print_bb (MonoBasicBlock *bb, const char *msg)
311 int i;
312 MonoInst *tree;
314 printf ("\n%s %d: [IN: ", msg, bb->block_num);
315 for (i = 0; i < bb->in_count; ++i)
316 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
317 printf (", OUT: ");
318 for (i = 0; i < bb->out_count; ++i)
319 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
320 printf (" ]\n");
321 for (tree = bb->code; tree; tree = tree->next)
322 mono_print_ins_index (-1, tree);
325 void
326 mono_create_helper_signatures (void)
328 helper_sig_domain_get = mono_create_icall_signature ("ptr");
329 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
330 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
332 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
333 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
334 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
338 * Can't put this at the beginning, since other files reference stuff from this
339 * file.
341 #ifndef DISABLE_JIT
343 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
345 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
347 #define GET_BBLOCK(cfg,tblock,ip) do { \
348 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
349 if (!(tblock)) { \
350 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
351 NEW_BBLOCK (cfg, (tblock)); \
352 (tblock)->cil_code = (ip); \
353 ADD_BBLOCK (cfg, (tblock)); \
355 } while (0)
357 #if defined(TARGET_X86) || defined(TARGET_AMD64)
358 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
359 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
360 (dest)->dreg = alloc_ireg_mp ((cfg)); \
361 (dest)->sreg1 = (sr1); \
362 (dest)->sreg2 = (sr2); \
363 (dest)->inst_imm = (imm); \
364 (dest)->backend.shift_amount = (shift); \
365 MONO_ADD_INS ((cfg)->cbb, (dest)); \
366 } while (0)
367 #endif
369 #if SIZEOF_REGISTER == 8
370 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
371 /* FIXME: Need to add many more cases */ \
372 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
373 MonoInst *widen; \
374 int dr = alloc_preg (cfg); \
375 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
376 (ins)->sreg2 = widen->dreg; \
378 } while (0)
379 #else
380 #define ADD_WIDEN_OP(ins, arg1, arg2)
381 #endif
383 #define ADD_BINOP(op) do { \
384 MONO_INST_NEW (cfg, ins, (op)); \
385 sp -= 2; \
386 ins->sreg1 = sp [0]->dreg; \
387 ins->sreg2 = sp [1]->dreg; \
388 type_from_op (ins, sp [0], sp [1]); \
389 CHECK_TYPE (ins); \
390 /* Have to insert a widening op */ \
391 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
392 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
393 MONO_ADD_INS ((cfg)->cbb, (ins)); \
394 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
395 } while (0)
397 #define ADD_UNOP(op) do { \
398 MONO_INST_NEW (cfg, ins, (op)); \
399 sp--; \
400 ins->sreg1 = sp [0]->dreg; \
401 type_from_op (ins, sp [0], NULL); \
402 CHECK_TYPE (ins); \
403 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
404 MONO_ADD_INS ((cfg)->cbb, (ins)); \
405 *sp++ = mono_decompose_opcode (cfg, ins); \
406 } while (0)
408 #define ADD_BINCOND(next_block) do { \
409 MonoInst *cmp; \
410 sp -= 2; \
411 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
412 cmp->sreg1 = sp [0]->dreg; \
413 cmp->sreg2 = sp [1]->dreg; \
414 type_from_op (cmp, sp [0], sp [1]); \
415 CHECK_TYPE (cmp); \
416 type_from_op (ins, sp [0], sp [1]); \
417 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
418 GET_BBLOCK (cfg, tblock, target); \
419 link_bblock (cfg, bblock, tblock); \
420 ins->inst_true_bb = tblock; \
421 if ((next_block)) { \
422 link_bblock (cfg, bblock, (next_block)); \
423 ins->inst_false_bb = (next_block); \
424 start_new_bblock = 1; \
425 } else { \
426 GET_BBLOCK (cfg, tblock, ip); \
427 link_bblock (cfg, bblock, tblock); \
428 ins->inst_false_bb = tblock; \
429 start_new_bblock = 2; \
431 if (sp != stack_start) { \
432 handle_stack_args (cfg, stack_start, sp - stack_start); \
433 CHECK_UNVERIFIABLE (cfg); \
435 MONO_ADD_INS (bblock, cmp); \
436 MONO_ADD_INS (bblock, ins); \
437 } while (0)
439 /* *
440 * link_bblock: Links two basic blocks
442 * links two basic blocks in the control flow graph, the 'from'
443 * argument is the starting block and the 'to' argument is the block
444 * the control flow ends to after 'from'.
446 static void
447 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 MonoBasicBlock **newa;
450 int i, found;
452 #if 0
453 if (from->cil_code) {
454 if (to->cil_code)
455 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
456 else
457 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
458 } else {
459 if (to->cil_code)
460 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
461 else
462 printf ("edge from entry to exit\n");
464 #endif
466 found = FALSE;
467 for (i = 0; i < from->out_count; ++i) {
468 if (to == from->out_bb [i]) {
469 found = TRUE;
470 break;
473 if (!found) {
474 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
475 for (i = 0; i < from->out_count; ++i) {
476 newa [i] = from->out_bb [i];
478 newa [i] = to;
479 from->out_count++;
480 from->out_bb = newa;
483 found = FALSE;
484 for (i = 0; i < to->in_count; ++i) {
485 if (from == to->in_bb [i]) {
486 found = TRUE;
487 break;
490 if (!found) {
491 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
492 for (i = 0; i < to->in_count; ++i) {
493 newa [i] = to->in_bb [i];
495 newa [i] = from;
496 to->in_count++;
497 to->in_bb = newa;
501 void
502 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
504 link_bblock (cfg, from, to);
508 * mono_find_block_region:
510 * We mark each basic block with a region ID. We use that to avoid BB
511 * optimizations when blocks are in different regions.
513 * Returns:
514 * A region token that encodes where this region is, and information
515 * about the clause owner for this block.
517 * The region encodes the try/catch/filter clause that owns this block
518 * as well as the type. -1 is a special value that represents a block
519 * that is in none of try/catch/filter.
521 static int
522 mono_find_block_region (MonoCompile *cfg, int offset)
524 MonoMethodHeader *header = cfg->header;
525 MonoExceptionClause *clause;
526 int i;
528 for (i = 0; i < header->num_clauses; ++i) {
529 clause = &header->clauses [i];
530 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
531 (offset < (clause->handler_offset)))
532 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
534 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
535 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
536 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
537 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
538 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
539 else
540 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
543 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
544 return ((i + 1) << 8) | clause->flags;
547 return -1;
550 static GList*
551 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
553 MonoMethodHeader *header = cfg->header;
554 MonoExceptionClause *clause;
555 int i;
556 GList *res = NULL;
558 for (i = 0; i < header->num_clauses; ++i) {
559 clause = &header->clauses [i];
560 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
561 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
562 if (clause->flags == type)
563 res = g_list_append (res, clause);
566 return res;
569 static void
570 mono_create_spvar_for_region (MonoCompile *cfg, int region)
572 MonoInst *var;
574 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
575 if (var)
576 return;
578 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
579 /* prevent it from being register allocated */
580 var->flags |= MONO_INST_INDIRECT;
582 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
585 MonoInst *
586 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
588 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
591 static MonoInst*
592 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
594 MonoInst *var;
596 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
597 if (var)
598 return var;
600 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
601 /* prevent it from being register allocated */
602 var->flags |= MONO_INST_INDIRECT;
604 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
606 return var;
610 * Returns the type used in the eval stack when @type is loaded.
611 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
613 void
614 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
616 MonoClass *klass;
618 inst->klass = klass = mono_class_from_mono_type (type);
619 if (type->byref) {
620 inst->type = STACK_MP;
621 return;
624 handle_enum:
625 switch (type->type) {
626 case MONO_TYPE_VOID:
627 inst->type = STACK_INV;
628 return;
629 case MONO_TYPE_I1:
630 case MONO_TYPE_U1:
631 case MONO_TYPE_BOOLEAN:
632 case MONO_TYPE_I2:
633 case MONO_TYPE_U2:
634 case MONO_TYPE_CHAR:
635 case MONO_TYPE_I4:
636 case MONO_TYPE_U4:
637 inst->type = STACK_I4;
638 return;
639 case MONO_TYPE_I:
640 case MONO_TYPE_U:
641 case MONO_TYPE_PTR:
642 case MONO_TYPE_FNPTR:
643 inst->type = STACK_PTR;
644 return;
645 case MONO_TYPE_CLASS:
646 case MONO_TYPE_STRING:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 inst->type = STACK_OBJ;
651 return;
652 case MONO_TYPE_I8:
653 case MONO_TYPE_U8:
654 inst->type = STACK_I8;
655 return;
656 case MONO_TYPE_R4:
657 case MONO_TYPE_R8:
658 inst->type = STACK_R8;
659 return;
660 case MONO_TYPE_VALUETYPE:
661 if (type->data.klass->enumtype) {
662 type = mono_class_enum_basetype (type->data.klass);
663 goto handle_enum;
664 } else {
665 inst->klass = klass;
666 inst->type = STACK_VTYPE;
667 return;
669 case MONO_TYPE_TYPEDBYREF:
670 inst->klass = mono_defaults.typed_reference_class;
671 inst->type = STACK_VTYPE;
672 return;
673 case MONO_TYPE_GENERICINST:
674 type = &type->data.generic_class->container_class->byval_arg;
675 goto handle_enum;
676 case MONO_TYPE_VAR :
677 case MONO_TYPE_MVAR :
678 /* FIXME: all the arguments must be references for now,
679 * later look inside cfg and see if the arg num is
680 * really a reference
682 g_assert (cfg->generic_sharing_context);
683 inst->type = STACK_OBJ;
684 return;
685 default:
686 g_error ("unknown type 0x%02x in eval stack type", type->type);
691 * The following tables are used to quickly validate the IL code in type_from_op ().
693 static const char
694 bin_num_table [STACK_MAX] [STACK_MAX] = {
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
705 static const char
706 neg_table [] = {
707 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
710 /* reduce the size of this table */
711 static const char
712 bin_int_table [STACK_MAX] [STACK_MAX] = {
713 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
723 static const char
724 bin_comp_table [STACK_MAX] [STACK_MAX] = {
725 /* Inv i L p F & O vt */
726 {0},
727 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
728 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
729 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
730 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
731 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
732 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
733 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
736 /* reduce the size of this table */
737 static const char
738 shift_table [STACK_MAX] [STACK_MAX] = {
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
750 * Tables to map from the non-specific opcode to the matching
751 * type-specific opcode.
753 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
754 static const guint16
755 binops_op_map [STACK_MAX] = {
756 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
759 /* handles from CEE_NEG to CEE_CONV_U8 */
760 static const guint16
761 unops_op_map [STACK_MAX] = {
762 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
765 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
766 static const guint16
767 ovfops_op_map [STACK_MAX] = {
768 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
771 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
772 static const guint16
773 ovf2ops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
777 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
778 static const guint16
779 ovf3ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
783 /* handles from CEE_BEQ to CEE_BLT_UN */
784 static const guint16
785 beqops_op_map [STACK_MAX] = {
786 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
789 /* handles from CEE_CEQ to CEE_CLT_UN */
790 static const guint16
791 ceqops_op_map [STACK_MAX] = {
792 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
796 * Sets ins->type (the type on the eval stack) according to the
797 * type of the opcode and the arguments to it.
798 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
800 * FIXME: this function sets ins->type unconditionally in some cases, but
801 * it should set it to invalid for some types (a conv.x on an object)
803 static void
804 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
806 switch (ins->opcode) {
807 /* binops */
808 case CEE_ADD:
809 case CEE_SUB:
810 case CEE_MUL:
811 case CEE_DIV:
812 case CEE_REM:
813 /* FIXME: check unverifiable args for STACK_MP */
814 ins->type = bin_num_table [src1->type] [src2->type];
815 ins->opcode += binops_op_map [ins->type];
816 break;
817 case CEE_DIV_UN:
818 case CEE_REM_UN:
819 case CEE_AND:
820 case CEE_OR:
821 case CEE_XOR:
822 ins->type = bin_int_table [src1->type] [src2->type];
823 ins->opcode += binops_op_map [ins->type];
824 break;
825 case CEE_SHL:
826 case CEE_SHR:
827 case CEE_SHR_UN:
828 ins->type = shift_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
830 break;
831 case OP_COMPARE:
832 case OP_LCOMPARE:
833 case OP_ICOMPARE:
834 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
835 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
836 ins->opcode = OP_LCOMPARE;
837 else if (src1->type == STACK_R8)
838 ins->opcode = OP_FCOMPARE;
839 else
840 ins->opcode = OP_ICOMPARE;
841 break;
842 case OP_ICOMPARE_IMM:
843 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE_IMM;
846 break;
847 case CEE_BEQ:
848 case CEE_BGE:
849 case CEE_BGT:
850 case CEE_BLE:
851 case CEE_BLT:
852 case CEE_BNE_UN:
853 case CEE_BGE_UN:
854 case CEE_BGT_UN:
855 case CEE_BLE_UN:
856 case CEE_BLT_UN:
857 ins->opcode += beqops_op_map [src1->type];
858 break;
859 case OP_CEQ:
860 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
861 ins->opcode += ceqops_op_map [src1->type];
862 break;
863 case OP_CGT:
864 case OP_CGT_UN:
865 case OP_CLT:
866 case OP_CLT_UN:
867 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
868 ins->opcode += ceqops_op_map [src1->type];
869 break;
870 /* unops */
871 case CEE_NEG:
872 ins->type = neg_table [src1->type];
873 ins->opcode += unops_op_map [ins->type];
874 break;
875 case CEE_NOT:
876 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
877 ins->type = src1->type;
878 else
879 ins->type = STACK_INV;
880 ins->opcode += unops_op_map [ins->type];
881 break;
882 case CEE_CONV_I1:
883 case CEE_CONV_I2:
884 case CEE_CONV_I4:
885 case CEE_CONV_U4:
886 ins->type = STACK_I4;
887 ins->opcode += unops_op_map [src1->type];
888 break;
889 case CEE_CONV_R_UN:
890 ins->type = STACK_R8;
891 switch (src1->type) {
892 case STACK_I4:
893 case STACK_PTR:
894 ins->opcode = OP_ICONV_TO_R_UN;
895 break;
896 case STACK_I8:
897 ins->opcode = OP_LCONV_TO_R_UN;
898 break;
900 break;
901 case CEE_CONV_OVF_I1:
902 case CEE_CONV_OVF_U1:
903 case CEE_CONV_OVF_I2:
904 case CEE_CONV_OVF_U2:
905 case CEE_CONV_OVF_I4:
906 case CEE_CONV_OVF_U4:
907 ins->type = STACK_I4;
908 ins->opcode += ovf3ops_op_map [src1->type];
909 break;
910 case CEE_CONV_OVF_I_UN:
911 case CEE_CONV_OVF_U_UN:
912 ins->type = STACK_PTR;
913 ins->opcode += ovf2ops_op_map [src1->type];
914 break;
915 case CEE_CONV_OVF_I1_UN:
916 case CEE_CONV_OVF_I2_UN:
917 case CEE_CONV_OVF_I4_UN:
918 case CEE_CONV_OVF_U1_UN:
919 case CEE_CONV_OVF_U2_UN:
920 case CEE_CONV_OVF_U4_UN:
921 ins->type = STACK_I4;
922 ins->opcode += ovf2ops_op_map [src1->type];
923 break;
924 case CEE_CONV_U:
925 ins->type = STACK_PTR;
926 switch (src1->type) {
927 case STACK_I4:
928 ins->opcode = OP_ICONV_TO_U;
929 break;
930 case STACK_PTR:
931 case STACK_MP:
932 #if SIZEOF_VOID_P == 8
933 ins->opcode = OP_LCONV_TO_U;
934 #else
935 ins->opcode = OP_MOVE;
936 #endif
937 break;
938 case STACK_I8:
939 ins->opcode = OP_LCONV_TO_U;
940 break;
941 case STACK_R8:
942 ins->opcode = OP_FCONV_TO_U;
943 break;
945 break;
946 case CEE_CONV_I8:
947 case CEE_CONV_U8:
948 ins->type = STACK_I8;
949 ins->opcode += unops_op_map [src1->type];
950 break;
951 case CEE_CONV_OVF_I8:
952 case CEE_CONV_OVF_U8:
953 ins->type = STACK_I8;
954 ins->opcode += ovf3ops_op_map [src1->type];
955 break;
956 case CEE_CONV_OVF_U8_UN:
957 case CEE_CONV_OVF_I8_UN:
958 ins->type = STACK_I8;
959 ins->opcode += ovf2ops_op_map [src1->type];
960 break;
961 case CEE_CONV_R4:
962 case CEE_CONV_R8:
963 ins->type = STACK_R8;
964 ins->opcode += unops_op_map [src1->type];
965 break;
966 case OP_CKFINITE:
967 ins->type = STACK_R8;
968 break;
969 case CEE_CONV_U2:
970 case CEE_CONV_U1:
971 ins->type = STACK_I4;
972 ins->opcode += ovfops_op_map [src1->type];
973 break;
974 case CEE_CONV_I:
975 case CEE_CONV_OVF_I:
976 case CEE_CONV_OVF_U:
977 ins->type = STACK_PTR;
978 ins->opcode += ovfops_op_map [src1->type];
979 break;
980 case CEE_ADD_OVF:
981 case CEE_ADD_OVF_UN:
982 case CEE_MUL_OVF:
983 case CEE_MUL_OVF_UN:
984 case CEE_SUB_OVF:
985 case CEE_SUB_OVF_UN:
986 ins->type = bin_num_table [src1->type] [src2->type];
987 ins->opcode += ovfops_op_map [src1->type];
988 if (ins->type == STACK_R8)
989 ins->type = STACK_INV;
990 break;
991 case OP_LOAD_MEMBASE:
992 ins->type = STACK_PTR;
993 break;
994 case OP_LOADI1_MEMBASE:
995 case OP_LOADU1_MEMBASE:
996 case OP_LOADI2_MEMBASE:
997 case OP_LOADU2_MEMBASE:
998 case OP_LOADI4_MEMBASE:
999 case OP_LOADU4_MEMBASE:
1000 ins->type = STACK_PTR;
1001 break;
1002 case OP_LOADI8_MEMBASE:
1003 ins->type = STACK_I8;
1004 break;
1005 case OP_LOADR4_MEMBASE:
1006 case OP_LOADR8_MEMBASE:
1007 ins->type = STACK_R8;
1008 break;
1009 default:
1010 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1011 break;
1014 if (ins->type == STACK_MP)
1015 ins->klass = mono_defaults.object_class;
1018 static const char
1019 ldind_type [] = {
1020 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/* Disabled: rough compatibility check of eval-stack values against SIG. */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get () once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
1091 inline static MonoInst *
1092 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached local; native-int sized (int_class). */
1094 if (!cfg->domainvar)
1095 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1096 return cfg->domainvar;
/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling. Returns NULL when the architecture does not need a GOT var
 * or when not compiling AOT.
 */
1103 MonoInst *
1104 mono_get_got_var (MonoCompile *cfg)
1106 #ifdef MONO_ARCH_NEED_GOT_VAR
1107 if (!cfg->compile_aot)
1108 return NULL;
/* Lazily create the native-int local that will hold the GOT address. */
1109 if (!cfg->got_var) {
1110 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1112 return cfg->got_var;
1113 #else
1114 return NULL;
1115 #endif
/* Return (creating on first use) the local holding the runtime generic
 * context / vtable. Only valid under generic sharing. */
1118 static MonoInst *
1119 mono_get_vtable_var (MonoCompile *cfg)
1121 g_assert (cfg->generic_sharing_context);
1123 if (!cfg->rgctx_var) {
1124 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1125 /* force the var to be stack allocated */
1126 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1129 return cfg->rgctx_var;
/* Map an instruction's evaluation-stack type (ins->type) back to a MonoType*.
 * STACK_MP/STACK_VTYPE use ins->klass to recover the precise type; any other
 * stack type aborts via g_error. */
1132 static MonoType*
1133 type_from_stack_type (MonoInst *ins) {
1134 switch (ins->type) {
1135 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1136 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1137 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1138 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1139 case STACK_MP:
/* Managed pointer: the by-ref "this" form of the pointed-to class. */
1140 return &ins->klass->this_arg;
1141 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1142 case STACK_VTYPE: return &ins->klass->byval_arg;
1143 default:
1144 g_error ("stack type %d to monotype not handled\n", ins->type);
1146 return NULL;
/* Inverse of type_from_stack_type: map a MonoType* to the STACK_* category
 * it occupies on the evaluation stack. Enum types are unwrapped first via
 * mono_type_get_underlying_type (). Unknown types assert. */
1149 static G_GNUC_UNUSED int
1150 type_to_stack_type (MonoType *t)
1152 t = mono_type_get_underlying_type (t);
1153 switch (t->type) {
1154 case MONO_TYPE_I1:
1155 case MONO_TYPE_U1:
1156 case MONO_TYPE_BOOLEAN:
1157 case MONO_TYPE_I2:
1158 case MONO_TYPE_U2:
1159 case MONO_TYPE_CHAR:
1160 case MONO_TYPE_I4:
1161 case MONO_TYPE_U4:
1162 return STACK_I4;
1163 case MONO_TYPE_I:
1164 case MONO_TYPE_U:
1165 case MONO_TYPE_PTR:
1166 case MONO_TYPE_FNPTR:
1167 return STACK_PTR;
1168 case MONO_TYPE_CLASS:
1169 case MONO_TYPE_STRING:
1170 case MONO_TYPE_OBJECT:
1171 case MONO_TYPE_SZARRAY:
1172 case MONO_TYPE_ARRAY:
1173 return STACK_OBJ;
1174 case MONO_TYPE_I8:
1175 case MONO_TYPE_U8:
1176 return STACK_I8;
1177 case MONO_TYPE_R4:
1178 case MONO_TYPE_R8:
1179 return STACK_R8;
1180 case MONO_TYPE_VALUETYPE:
1181 case MONO_TYPE_TYPEDBYREF:
1182 return STACK_VTYPE;
1183 case MONO_TYPE_GENERICINST:
/* Generic instantiations are vtypes or references depending on the definition. */
1184 if (mono_type_generic_inst_is_valuetype (t))
1185 return STACK_VTYPE;
1186 else
1187 return STACK_OBJ;
1188 break;
1189 default:
1190 g_assert_not_reached ();
1193 return -1;
/* Return the element MonoClass implied by a ldelem.*/stelem.* opcode
 * (e.g. CEE_LDELEM_I4 -> int32_class). Asserts on unexpected opcodes. */
1196 static MonoClass*
1197 array_access_to_klass (int opcode)
1199 switch (opcode) {
1200 case CEE_LDELEM_U1:
1201 return mono_defaults.byte_class;
1202 case CEE_LDELEM_U2:
1203 return mono_defaults.uint16_class;
1204 case CEE_LDELEM_I:
1205 case CEE_STELEM_I:
1206 return mono_defaults.int_class;
1207 case CEE_LDELEM_I1:
1208 case CEE_STELEM_I1:
1209 return mono_defaults.sbyte_class;
1210 case CEE_LDELEM_I2:
1211 case CEE_STELEM_I2:
1212 return mono_defaults.int16_class;
1213 case CEE_LDELEM_I4:
1214 case CEE_STELEM_I4:
1215 return mono_defaults.int32_class;
1216 case CEE_LDELEM_U4:
1217 return mono_defaults.uint32_class;
1218 case CEE_LDELEM_I8:
1219 case CEE_STELEM_I8:
1220 return mono_defaults.int64_class;
1221 case CEE_LDELEM_R4:
1222 case CEE_STELEM_R4:
1223 return mono_defaults.single_class;
1224 case CEE_LDELEM_R8:
1225 case CEE_STELEM_R8:
1226 return mono_defaults.double_class;
1227 case CEE_LDELEM_REF:
1228 case CEE_STELEM_REF:
1229 return mono_defaults.object_class;
1230 default:
1231 g_assert_not_reached ();
1233 return NULL;
/*
 * We try to share variables when possible: returns the interface variable
 * for stack slot 'slot' holding a value of ins's stack type, reusing a
 * previously allocated one (cached in cfg->intvars) when types match.
 */
1239 static MonoInst *
1240 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1242 MonoInst *res;
1243 int pos, vnum;
1245 /* inlining can result in deeper stacks */
1246 if (slot >= cfg->header->max_stack)
1247 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type x slot. */
1249 pos = ins->type - 1 + slot * STACK_MAX;
1251 switch (ins->type) {
1252 case STACK_I4:
1253 case STACK_I8:
1254 case STACK_R8:
1255 case STACK_PTR:
1256 case STACK_MP:
1257 case STACK_OBJ:
1258 if ((vnum = cfg->intvars [pos]))
1259 return cfg->varinfo [vnum];
1260 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1261 cfg->intvars [pos] = res->inst_c0;
1262 break;
1263 default:
/* Vtypes etc. are not shared. */
1264 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1266 return res;
/* Record, for AOT compilation, the image+token pair under 'key' in
 * cfg->token_info_hash so the AOT compiler can resolve the reference later. */
1269 static void
1270 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
/*
 * Don't use this if a generic_context is set, since that means AOT can't
 * look up the method using just the image+token.
 * table == 0 means this is a reference made from a wrapper.
 */
1277 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1278 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1279 jump_info_token->image = image;
1280 jump_info_token->token = token;
1281 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
1297 static void
1298 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1300 int i, bindex;
1301 MonoBasicBlock *bb = cfg->cbb;
1302 MonoBasicBlock *outb;
1303 MonoInst *inst, **locals;
1304 gboolean found;
1306 if (!count)
1307 return;
1308 if (cfg->verbose_level > 3)
1309 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit with a non-empty stack: pick or allocate the out_stack vars. */
1310 if (!bb->out_scount) {
1311 bb->out_scount = count;
1312 //printf ("bblock %d has out:", bb->block_num);
1313 found = FALSE;
/* Reuse an existing in_stack of a successor when one is already set. */
1314 for (i = 0; i < bb->out_count; ++i) {
1315 outb = bb->out_bb [i];
1316 /* exception handlers are linked, but they should not be considered for stack args */
1317 if (outb->flags & BB_EXCEPTION_HANDLER)
1318 continue;
1319 //printf (" %d", outb->block_num);
1320 if (outb->in_stack) {
1321 found = TRUE;
1322 bb->out_stack = outb->in_stack;
1323 break;
1326 //printf ("\n");
1327 if (!found) {
1328 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1329 for (i = 0; i < count; ++i) {
/*
 * try to reuse temps already allocated for this purpouse, if they occupy the same
 * stack slot and if they are of the same type.
 * This won't cause conflicts since if 'local' is used to
 * store one of the values in the in_stack of a bblock, then
 * the same variable will be used for the same outgoing stack
 * slot as well.
 * This doesn't work when inlining methods, since the bblocks
 * in the inlined methods do not inherit their in_stack from
 * the bblock they are inlined to. See bug #58863 for an
 * example.
 */
1342 if (cfg->inlined_method)
1343 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1344 else
1345 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen out_stack to all successors; mismatched stack depth
 * at a join point makes the method unverifiable. */
1350 for (i = 0; i < bb->out_count; ++i) {
1351 outb = bb->out_bb [i];
1352 /* exception handlers are linked, but they should not be considered for stack args */
1353 if (outb->flags & BB_EXCEPTION_HANDLER)
1354 continue;
1355 if (outb->in_scount) {
1356 if (outb->in_scount != bb->out_scount) {
1357 cfg->unverifiable = TRUE;
1358 return;
1360 continue; /* check they are the same locals */
1362 outb->in_scount = count;
1363 outb->in_stack = bb->out_stack;
/* Spill each stack item into its temp; sp[] then refers to the temps. */
1366 locals = bb->out_stack;
1367 cfg->cbb = bb;
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
/*
 * It is possible that the out bblocks already have in_stack assigned, and
 * the in_stacks differ. In this case, we will store to all the different
 * in_stacks.
 */
1382 found = TRUE;
1383 bindex = 0;
1384 while (found) {
1385 /* Find a bblock which has a different in_stack */
1386 found = FALSE;
1387 while (bindex < bb->out_count) {
1388 outb = bb->out_bb [bindex];
1389 /* exception handlers are linked, but they should not be considered for stack args */
1390 if (outb->flags & BB_EXCEPTION_HANDLER) {
1391 bindex++;
1392 continue;
1394 if (outb->in_stack != locals) {
1395 for (i = 0; i < count; ++i) {
1396 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1397 inst->cil_code = sp [i]->cil_code;
1398 sp [i] = locals [i];
1399 if (cfg->verbose_level > 3)
1400 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1402 locals = outb->in_stack;
1403 found = TRUE;
1404 break;
1406 bindex ++;
/* Emit code which loads interface_offsets [klass->interface_id].
 * The array is stored in memory before the vtable, hence the negative offset
 * in the non-AOT path. Under AOT the (adjusted) interface id comes from a
 * patchable constant. */
1414 static void
1415 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1417 if (cfg->compile_aot) {
1418 int ioffset_reg = alloc_preg (cfg);
1419 int iid_reg = alloc_preg (cfg);
1421 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1425 else {
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emit code leaving a nonzero value in intf_bit_reg iff the interface bitmap
 * at base_reg+offset has the bit for klass's interface_id set. Two layouts:
 * a compressed bitmap handled by a jit icall, or a plain byte array tested
 * with (bitmap[iid >> 3] & (1 << (iid & 7))). */
1430 static void
1431 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1433 int ibitmap_reg = alloc_preg (cfg);
1434 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed layout: delegate the match to mono_class_interface_match. */
1435 MonoInst *args [2];
1436 MonoInst *res, *ins;
1437 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1438 MONO_ADD_INS (cfg->cbb, ins);
1439 args [0] = ins;
1440 if (cfg->compile_aot)
1441 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1442 else
1443 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1444 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1445 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1446 #else
1447 int ibitmap_byte_reg = alloc_preg (cfg);
1449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1451 if (cfg->compile_aot) {
/* AOT: the interface id is a patchable constant, so compute byte index and
 * bit mask in registers. */
1452 int iid_reg = alloc_preg (cfg);
1453 int shifted_iid_reg = alloc_preg (cfg);
1454 int ibitmap_byte_address_reg = alloc_preg (cfg);
1455 int masked_iid_reg = alloc_preg (cfg);
1456 int iid_one_bit_reg = alloc_preg (cfg);
1457 int iid_bit_reg = alloc_preg (cfg);
1458 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1461 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1463 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1464 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1466 } else {
/* JIT: interface_id is known now, so byte index and mask are immediates. */
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1470 #endif
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
1477 static void
1478 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1480 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
1487 static void
1488 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1490 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * Emit code which checks whenever the interface id of @klass is smaller than
 * the value given by max_iid_reg. On failure, branch to false_target if given,
 * otherwise throw InvalidCastException.
 */
1497 static void
1498 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1499 MonoBasicBlock *false_target)
1501 if (cfg->compile_aot) {
1502 int iid_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1506 else
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1508 if (false_target)
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1510 else
1511 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
1515 static void
1516 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1517 MonoBasicBlock *false_target)
1519 int max_iid_reg = alloc_preg (cfg);
1521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1522 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same as above, but obtains max_iid from a klass */
1526 static void
1527 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1528 MonoBasicBlock *false_target)
1530 int max_iid_reg = alloc_preg (cfg);
1532 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1533 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style check: compare supertypes[klass->idepth-1] of the class
 * in klass_reg against klass (or against the dynamic class in klass_ins, for
 * generic sharing). Branch to true_target on match, false_target when the
 * inheritance depth is too small. */
1536 static void
1537 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1539 int idepth_reg = alloc_preg (cfg);
1540 int stypes_reg = alloc_preg (cfg);
1541 int stype = alloc_preg (cfg);
/* Depth check is only needed when the supertypes array may be shorter than
 * klass->idepth. */
1543 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1550 if (klass_ins) {
1551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1552 } else if (cfg->compile_aot) {
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1556 } else {
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check against a compile-time-known klass only. */
1562 static void
1563 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1565 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: branch to
 * true_target when klass's interface bit is set, otherwise (no true_target)
 * throw InvalidCastException. */
1568 static void
1569 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1571 int intf_reg = alloc_preg (cfg);
1573 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1574 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1576 if (true_target)
1577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1578 else
1579 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
1585 static void
1586 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1588 int intf_bit_reg = alloc_preg (cfg);
1590 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1591 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1593 if (true_target)
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1595 else
1596 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class identity check: compare klass_reg against klass_inst
 * (dynamic, for gsharing), an AOT class constant, or the klass pointer as an
 * immediate; throw InvalidCastException on mismatch. */
1599 static inline void
1600 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1602 if (klass_inst) {
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1604 } else if (cfg->compile_aot) {
1605 int const_reg = alloc_preg (cfg);
1606 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1607 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1608 } else {
1609 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1611 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1614 static inline void
1615 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1617 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against klass (AOT constant or immediate) and emit a
 * conditional branch with branch_op to target. */
1620 static inline void
1621 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1623 if (cfg->compile_aot) {
1624 int const_reg = alloc_preg (cfg);
1625 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1626 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1627 } else {
1628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass () and mini_emit_castclass_inst ()
 * are mutually recursive (for arrays of arrays). */
1633 static void
1634 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check for the class in klass_reg against klass, throwing
 * InvalidCastException on failure. Array classes check rank and element class
 * (recursively for arrays of arrays); non-arrays check the supertypes table.
 * klass_inst optionally supplies the dynamic class under generic sharing. */
1636 static void
1637 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1639 if (klass->rank) {
1640 int rank_reg = alloc_preg (cfg);
1641 int eclass_reg = alloc_preg (cfg);
1643 g_assert (!klass_inst);
1644 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1646 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1647 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1648 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes related to enums/object so enum arrays and
 * their underlying-type arrays cast per ECMA array covariance rules. */
1649 if (klass->cast_class == mono_defaults.object_class) {
1650 int parent_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1652 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1653 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1654 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1655 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1656 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1657 } else if (klass->cast_class == mono_defaults.enum_class) {
1658 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1659 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1660 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1661 } else {
1662 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1663 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1666 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1667 /* Check that the object is a vector too */
1668 int bounds_reg = alloc_preg (cfg);
1669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1671 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1673 } else {
/* Non-array: verify klass appears at its depth in the supertypes table. */
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1679 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1681 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1685 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1689 static void
1690 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1692 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline code to set 'size' bytes at destreg+offset to 'val' (currently
 * only val == 0 is supported, asserted below). Uses immediate stores for
 * small aligned sizes, otherwise widest-first register stores; byte stores
 * when alignment is below 4. */
1695 static void
1696 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1698 int val_reg;
1700 g_assert (val == 0);
1702 if (align == 0)
1703 align = 4;
/* Small, sufficiently aligned sizes: a single immediate store. */
1705 if ((size <= 4) && (size <= align)) {
1706 switch (size) {
1707 case 1:
1708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1709 return;
1710 case 2:
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1712 return;
1713 case 4:
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1715 return;
1716 #if SIZEOF_REGISTER == 8
1717 case 8:
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1719 return;
1720 #endif
1724 val_reg = alloc_preg (cfg);
1726 if (SIZEOF_REGISTER == 8)
1727 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1728 else
1729 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Low alignment: fall back to one byte at a time. */
1731 if (align < 4) {
1732 /* This could be optimized further if neccesary */
1733 while (size >= 1) {
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1735 offset += 1;
1736 size -= 1;
1738 return;
/* 64-bit targets with unaligned access: align to 8, then 8-byte stores. */
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
1743 if (offset % 8) {
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1745 offset += 4;
1746 size -= 4;
1748 while (size >= 8) {
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1750 offset += 8;
1751 size -= 8;
1754 #endif
/* Drain the remainder with progressively narrower stores. */
1756 while (size >= 4) {
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1758 offset += 4;
1759 size -= 4;
1761 while (size >= 2) {
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1763 offset += 2;
1764 size -= 2;
1766 while (size >= 1) {
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1768 offset += 1;
1769 size -= 1;
/* Emit inline code to copy 'size' bytes from srcreg+soffset to destreg+doffset,
 * using the widest loads/stores the alignment allows (byte copies when
 * align < 4). Size is asserted bounded to limit code expansion. */
1773 void
1774 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1776 int cur_reg;
1778 if (align == 0)
1779 align = 4;
1781 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1782 g_assert (size < 10000);
1784 if (align < 4) {
1785 /* This could be optimized further if neccesary */
1786 while (size >= 1) {
1787 cur_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1790 doffset += 1;
1791 soffset += 1;
1792 size -= 1;
/* 64-bit targets with unaligned access: copy 8 bytes at a time first. */
1796 #if !NO_UNALIGNED_ACCESS
1797 if (SIZEOF_REGISTER == 8) {
1798 while (size >= 8) {
1799 cur_reg = alloc_preg (cfg);
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1802 doffset += 8;
1803 soffset += 8;
1804 size -= 8;
1807 #endif
/* Drain the remainder with 4-, 2-, then 1-byte copies. */
1809 while (size >= 4) {
1810 cur_reg = alloc_preg (cfg);
1811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1813 doffset += 4;
1814 soffset += 4;
1815 size -= 4;
1817 while (size >= 2) {
1818 cur_reg = alloc_preg (cfg);
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1821 doffset += 2;
1822 soffset += 2;
1823 size -= 2;
1825 while (size >= 1) {
1826 cur_reg = alloc_preg (cfg);
1827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1829 doffset += 1;
1830 soffset += 1;
1831 size -= 1;
/* Select the call IR opcode family (OP_*CALL / *_REG / *VIRT) from the return
 * type: void, scalar, long (L), float (F) or valuetype (V) variants, crossed
 * with direct / indirect (calli) / virtual (virt). Enums and generic
 * instantiations are unwrapped via the handle_enum loop. */
1835 static int
1836 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1838 if (type->byref)
1839 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1841 handle_enum:
1842 type = mini_get_basic_type_from_generic (gsctx, type);
1843 switch (type->type) {
1844 case MONO_TYPE_VOID:
1845 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1846 case MONO_TYPE_I1:
1847 case MONO_TYPE_U1:
1848 case MONO_TYPE_BOOLEAN:
1849 case MONO_TYPE_I2:
1850 case MONO_TYPE_U2:
1851 case MONO_TYPE_CHAR:
1852 case MONO_TYPE_I4:
1853 case MONO_TYPE_U4:
1854 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1855 case MONO_TYPE_I:
1856 case MONO_TYPE_U:
1857 case MONO_TYPE_PTR:
1858 case MONO_TYPE_FNPTR:
1859 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1860 case MONO_TYPE_CLASS:
1861 case MONO_TYPE_STRING:
1862 case MONO_TYPE_OBJECT:
1863 case MONO_TYPE_SZARRAY:
1864 case MONO_TYPE_ARRAY:
1865 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1866 case MONO_TYPE_I8:
1867 case MONO_TYPE_U8:
1868 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1869 case MONO_TYPE_R4:
1870 case MONO_TYPE_R8:
1871 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1872 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
1873 if (type->data.klass->enumtype) {
1874 type = mono_class_enum_basetype (type->data.klass);
1875 goto handle_enum;
1876 } else
1877 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1878 case MONO_TYPE_TYPEDBYREF:
1879 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1880 case MONO_TYPE_GENERICINST:
1881 type = &type->data.generic_class->container_class->byval_arg;
1882 goto handle_enum;
1883 default:
1884 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1886 return -1;
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
1900 static int
1901 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1903 MonoType *simple_type;
1904 MonoClass *klass;
1906 if (target->byref) {
1907 /* FIXME: check that the pointed to types match */
1908 if (arg->type == STACK_MP)
1909 return arg->klass != mono_class_from_mono_type (target);
1910 if (arg->type == STACK_PTR)
1911 return 0;
1912 return 1;
/* Unwrap enums, then compare the stack category against the target type. */
1915 simple_type = mono_type_get_underlying_type (target);
1916 switch (simple_type->type) {
1917 case MONO_TYPE_VOID:
1918 return 1;
1919 case MONO_TYPE_I1:
1920 case MONO_TYPE_U1:
1921 case MONO_TYPE_BOOLEAN:
1922 case MONO_TYPE_I2:
1923 case MONO_TYPE_U2:
1924 case MONO_TYPE_CHAR:
1925 case MONO_TYPE_I4:
1926 case MONO_TYPE_U4:
1927 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1928 return 1;
1929 return 0;
1930 case MONO_TYPE_PTR:
1931 /* STACK_MP is needed when setting pinned locals */
1932 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1933 return 1;
1934 return 0;
1935 case MONO_TYPE_I:
1936 case MONO_TYPE_U:
1937 case MONO_TYPE_FNPTR:
1938 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1939 return 1;
1940 return 0;
1941 case MONO_TYPE_CLASS:
1942 case MONO_TYPE_STRING:
1943 case MONO_TYPE_OBJECT:
1944 case MONO_TYPE_SZARRAY:
1945 case MONO_TYPE_ARRAY:
1946 if (arg->type != STACK_OBJ)
1947 return 1;
1948 /* FIXME: check type compatibility */
1949 return 0;
1950 case MONO_TYPE_I8:
1951 case MONO_TYPE_U8:
1952 if (arg->type != STACK_I8)
1953 return 1;
1954 return 0;
1955 case MONO_TYPE_R4:
1956 case MONO_TYPE_R8:
1957 if (arg->type != STACK_R8)
1958 return 1;
1959 return 0;
1960 case MONO_TYPE_VALUETYPE:
/* Valuetypes additionally require the exact same class. */
1961 if (arg->type != STACK_VTYPE)
1962 return 1;
1963 klass = mono_class_from_mono_type (simple_type);
1964 if (klass != arg->klass)
1965 return 1;
1966 return 0;
1967 case MONO_TYPE_TYPEDBYREF:
1968 if (arg->type != STACK_VTYPE)
1969 return 1;
1970 klass = mono_class_from_mono_type (simple_type);
1971 if (klass != arg->klass)
1972 return 1;
1973 return 0;
1974 case MONO_TYPE_GENERICINST:
1975 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1976 if (arg->type != STACK_VTYPE)
1977 return 1;
1978 klass = mono_class_from_mono_type (simple_type);
1979 if (klass != arg->klass)
1980 return 1;
1981 return 0;
1982 } else {
1983 if (arg->type != STACK_OBJ)
1984 return 1;
1985 /* FIXME: check type compatibility */
1986 return 0;
1988 case MONO_TYPE_VAR:
1989 case MONO_TYPE_MVAR:
/* FIXME: all the arguments must be references for now,
 * later look inside cfg and see if the arg num is
 * really a reference
 */
1994 g_assert (cfg->generic_sharing_context);
1995 if (arg->type != STACK_OBJ)
1996 return 1;
1997 return 0;
1998 default:
1999 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2001 return 1;
2005 * Prepare arguments for passing to a function call.
2006 * Return a non-zero value if the arguments can't be passed to the given
2007 * signature.
2008 * The type checks are not yet complete and some conversions may need
2009 * casts on 32 or 64 bit architectures.
2011 * FIXME: implement this using target_type_is_incompatible ()
2013 static int
2014 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2016 MonoType *simple_type;
2017 int i;
2019 if (sig->hasthis) {
2020 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2021 return 1;
2022 args++;
2024 for (i = 0; i < sig->param_count; ++i) {
2025 if (sig->params [i]->byref) {
2026 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2027 return 1;
2028 continue;
2030 simple_type = sig->params [i];
2031 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2032 handle_enum:
2033 switch (simple_type->type) {
2034 case MONO_TYPE_VOID:
2035 return 1;
2036 continue;
2037 case MONO_TYPE_I1:
2038 case MONO_TYPE_U1:
2039 case MONO_TYPE_BOOLEAN:
2040 case MONO_TYPE_I2:
2041 case MONO_TYPE_U2:
2042 case MONO_TYPE_CHAR:
2043 case MONO_TYPE_I4:
2044 case MONO_TYPE_U4:
2045 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2046 return 1;
2047 continue;
2048 case MONO_TYPE_I:
2049 case MONO_TYPE_U:
2050 case MONO_TYPE_PTR:
2051 case MONO_TYPE_FNPTR:
2052 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2053 return 1;
2054 continue;
2055 case MONO_TYPE_CLASS:
2056 case MONO_TYPE_STRING:
2057 case MONO_TYPE_OBJECT:
2058 case MONO_TYPE_SZARRAY:
2059 case MONO_TYPE_ARRAY:
2060 if (args [i]->type != STACK_OBJ)
2061 return 1;
2062 continue;
2063 case MONO_TYPE_I8:
2064 case MONO_TYPE_U8:
2065 if (args [i]->type != STACK_I8)
2066 return 1;
2067 continue;
2068 case MONO_TYPE_R4:
2069 case MONO_TYPE_R8:
2070 if (args [i]->type != STACK_R8)
2071 return 1;
2072 continue;
2073 case MONO_TYPE_VALUETYPE:
2074 if (simple_type->data.klass->enumtype) {
2075 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2076 goto handle_enum;
2078 if (args [i]->type != STACK_VTYPE)
2079 return 1;
2080 continue;
2081 case MONO_TYPE_TYPEDBYREF:
2082 if (args [i]->type != STACK_VTYPE)
2083 return 1;
2084 continue;
2085 case MONO_TYPE_GENERICINST:
2086 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2087 goto handle_enum;
2089 default:
2090 g_error ("unknown type 0x%02x in check_call_signature",
2091 simple_type->type);
2094 return 0;
2097 static int
2098 callvirt_to_call (int opcode)
2100 switch (opcode) {
2101 case OP_CALLVIRT:
2102 return OP_CALL;
2103 case OP_VOIDCALLVIRT:
2104 return OP_VOIDCALL;
2105 case OP_FCALLVIRT:
2106 return OP_FCALL;
2107 case OP_VCALLVIRT:
2108 return OP_VCALL;
2109 case OP_LCALLVIRT:
2110 return OP_LCALL;
2111 default:
2112 g_assert_not_reached ();
2115 return -1;
2118 static int
2119 callvirt_to_call_membase (int opcode)
2121 switch (opcode) {
2122 case OP_CALLVIRT:
2123 return OP_CALL_MEMBASE;
2124 case OP_VOIDCALLVIRT:
2125 return OP_VOIDCALL_MEMBASE;
2126 case OP_FCALLVIRT:
2127 return OP_FCALL_MEMBASE;
2128 case OP_LCALLVIRT:
2129 return OP_LCALL_MEMBASE;
2130 case OP_VCALLVIRT:
2131 return OP_VCALL_MEMBASE;
2132 default:
2133 g_assert_not_reached ();
2136 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_method_reg:
 *
 *   Emit IR which places the IMT identification value into a fresh pointer
 * register and return that register: IMT_ARG's register if given, otherwise
 * CALL->method as an AOT patch or as a direct pointer constant.
 * Factored out of emit_imt_argument () where this sequence was duplicated
 * in the LLVM and non-LLVM paths.
 */
static int
emit_imt_method_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	return method_reg;
}

/*
 * emit_imt_argument:
 *
 *   Attach the IMT identification value to CALL as an out-of-band argument,
 * either in MONO_ARCH_IMT_REG or through the arch-specific fallback.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	int method_reg;

	if (COMPILE_LLVM (cfg)) {
		method_reg = emit_imt_method_reg (cfg, call, imt_arg);

#ifdef ENABLE_LLVM
		call->imt_arg_reg = method_reg;
#endif
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#endif
		return;
	}

#ifdef MONO_ARCH_IMT_REG
	method_reg = emit_imt_method_reg (cfg, call, imt_arg);
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2194 static MonoJumpInfo *
2195 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2197 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2199 ji->ip.i = ip;
2200 ji->type = type;
2201 ji->data.target = target;
2203 return ji;
2206 inline static MonoCallInst *
2207 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2208 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2210 MonoCallInst *call;
2211 #ifdef MONO_ARCH_SOFT_FLOAT
2212 int i;
2213 #endif
2215 if (tail)
2216 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2217 else
2218 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2220 call->args = args;
2221 call->signature = sig;
2222 call->rgctx_reg = rgctx;
2224 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2226 if (tail) {
2227 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2228 call->vret_var = cfg->vret_addr;
2229 //g_assert_not_reached ();
2231 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2232 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2233 MonoInst *loada;
2235 temp->backend.is_pinvoke = sig->pinvoke;
2238 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2239 * address of return value to increase optimization opportunities.
2240 * Before vtype decomposition, the dreg of the call ins itself represents the
2241 * fact the call modifies the return value. After decomposition, the call will
2242 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2243 * will be transformed into an LDADDR.
2245 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2246 loada->dreg = alloc_preg (cfg);
2247 loada->inst_p0 = temp;
2248 /* We reference the call too since call->dreg could change during optimization */
2249 loada->inst_p1 = call;
2250 MONO_ADD_INS (cfg->cbb, loada);
2252 call->inst.dreg = temp->dreg;
2254 call->vret_var = loada;
2255 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2256 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2258 #ifdef MONO_ARCH_SOFT_FLOAT
2259 if (COMPILE_SOFT_FLOAT (cfg)) {
2261 * If the call has a float argument, we would need to do an r8->r4 conversion using
2262 * an icall, but that cannot be done during the call sequence since it would clobber
2263 * the call registers + the stack. So we do it before emitting the call.
2265 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2266 MonoType *t;
2267 MonoInst *in = call->args [i];
2269 if (i >= sig->hasthis)
2270 t = sig->params [i - sig->hasthis];
2271 else
2272 t = &mono_defaults.int_class->byval_arg;
2273 t = mono_type_get_underlying_type (t);
2275 if (!t->byref && t->type == MONO_TYPE_R4) {
2276 MonoInst *iargs [1];
2277 MonoInst *conv;
2279 iargs [0] = in;
2280 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2282 /* The result will be in an int vreg */
2283 call->args [i] = conv;
2287 #endif
2289 #ifdef ENABLE_LLVM
2290 if (COMPILE_LLVM (cfg))
2291 mono_llvm_emit_call (cfg, call);
2292 else
2293 mono_arch_emit_call (cfg, call);
2294 #else
2295 mono_arch_emit_call (cfg, call);
2296 #endif
2298 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2299 cfg->flags |= MONO_CFG_HAS_CALLS;
2301 return call;
2304 static void
2305 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2307 #ifdef MONO_ARCH_RGCTX_REG
2308 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2309 cfg->uses_rgctx_reg = TRUE;
2310 call->rgctx_reg = TRUE;
2311 #ifdef ENABLE_LLVM
2312 call->rgctx_arg_reg = rgctx_reg;
2313 #endif
2314 #else
2315 NOT_IMPLEMENTED;
2316 #endif
2319 inline static MonoInst*
2320 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2322 MonoCallInst *call;
2323 int rgctx_reg = -1;
2325 if (rgctx_arg) {
2326 rgctx_reg = mono_alloc_preg (cfg);
2327 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2330 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2332 call->inst.sreg1 = addr->dreg;
2334 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2336 if (rgctx_arg)
2337 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2339 return (MonoInst*)call;
2342 static MonoInst*
2343 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2344 static MonoInst*
2345 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2347 static MonoInst*
2348 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2349 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2351 gboolean might_be_remote;
2352 gboolean virtual = this != NULL;
2353 gboolean enable_for_aot = TRUE;
2354 int context_used;
2355 MonoCallInst *call;
2356 int rgctx_reg = 0;
2358 if (rgctx_arg) {
2359 rgctx_reg = mono_alloc_preg (cfg);
2360 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2363 if (method->string_ctor) {
2364 /* Create the real signature */
2365 /* FIXME: Cache these */
2366 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2367 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2369 sig = ctor_sig;
2372 context_used = mono_method_check_context_used (method);
2374 might_be_remote = this && sig->hasthis &&
2375 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2376 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2378 if (might_be_remote && context_used) {
2379 MonoInst *addr;
2381 g_assert (cfg->generic_sharing_context);
2383 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2385 return mono_emit_calli (cfg, sig, args, addr, NULL);
2388 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2390 if (might_be_remote)
2391 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2392 else
2393 call->method = method;
2394 call->inst.flags |= MONO_INST_HAS_METHOD;
2395 call->inst.inst_left = this;
2397 if (virtual) {
2398 int vtable_reg, slot_reg, this_reg;
2400 this_reg = this->dreg;
2402 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2403 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2404 MonoInst *dummy_use;
2406 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2408 /* Make a call to delegate->invoke_impl */
2409 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2410 call->inst.inst_basereg = this_reg;
2411 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2412 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2414 /* We must emit a dummy use here because the delegate trampoline will
2415 replace the 'this' argument with the delegate target making this activation
2416 no longer a root for the delegate.
2417 This is an issue for delegates that target collectible code such as dynamic
2418 methods of GC'able assemblies.
2420 For a test case look into #667921.
2422 FIXME: a dummy use is not the best way to do it as the local register allocator
2423 will put it on a caller save register and spil it around the call.
2424 Ideally, we would either put it on a callee save register or only do the store part.
2426 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2428 return (MonoInst*)call;
2430 #endif
2432 if ((!cfg->compile_aot || enable_for_aot) &&
2433 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2434 (MONO_METHOD_IS_FINAL (method) &&
2435 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2436 !(method->klass->marshalbyref && context_used)) {
2438 * the method is not virtual, we just need to ensure this is not null
2439 * and then we can call the method directly.
2441 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2443 * The check above ensures method is not gshared, this is needed since
2444 * gshared methods can't have wrappers.
2446 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2449 if (!method->string_ctor)
2450 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2452 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2454 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2456 return (MonoInst*)call;
2459 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2461 * the method is virtual, but we can statically dispatch since either
2462 * it's class or the method itself are sealed.
2463 * But first we need to ensure it's not a null reference.
2465 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2467 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2468 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2470 return (MonoInst*)call;
2473 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2475 vtable_reg = alloc_preg (cfg);
2476 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2477 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2478 slot_reg = -1;
2479 #ifdef MONO_ARCH_HAVE_IMT
2480 if (mono_use_imt) {
2481 guint32 imt_slot = mono_method_get_imt_slot (method);
2482 emit_imt_argument (cfg, call, imt_arg);
2483 slot_reg = vtable_reg;
2484 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2486 #endif
2487 if (slot_reg == -1) {
2488 slot_reg = alloc_preg (cfg);
2489 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2490 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2492 } else {
2493 slot_reg = vtable_reg;
2494 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2495 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2496 #ifdef MONO_ARCH_HAVE_IMT
2497 if (imt_arg) {
2498 g_assert (mono_method_signature (method)->generic_param_count);
2499 emit_imt_argument (cfg, call, imt_arg);
2501 #endif
2504 call->inst.sreg1 = slot_reg;
2505 call->virtual = TRUE;
2508 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2510 if (rgctx_arg)
2511 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2513 return (MonoInst*)call;
2516 MonoInst*
2517 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2519 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
2522 MonoInst*
2523 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2524 MonoInst **args)
2526 MonoCallInst *call;
2528 g_assert (sig);
2530 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2531 call->fptr = func;
2533 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2535 return (MonoInst*)call;
2538 MonoInst*
2539 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2541 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2543 g_assert (info);
2545 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2549 * mono_emit_abs_call:
2551 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2553 inline static MonoInst*
2554 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2555 MonoMethodSignature *sig, MonoInst **args)
2557 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2558 MonoInst *ins;
2561 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2562 * handle it.
2564 if (cfg->abs_patches == NULL)
2565 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2566 g_hash_table_insert (cfg->abs_patches, ji, ji);
2567 ins = mono_emit_native_call (cfg, ji, sig, args);
2568 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2569 return ins;
2572 static MonoInst*
2573 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2575 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2576 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2577 int widen_op = -1;
2580 * Native code might return non register sized integers
2581 * without initializing the upper bits.
2583 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2584 case OP_LOADI1_MEMBASE:
2585 widen_op = OP_ICONV_TO_I1;
2586 break;
2587 case OP_LOADU1_MEMBASE:
2588 widen_op = OP_ICONV_TO_U1;
2589 break;
2590 case OP_LOADI2_MEMBASE:
2591 widen_op = OP_ICONV_TO_I2;
2592 break;
2593 case OP_LOADU2_MEMBASE:
2594 widen_op = OP_ICONV_TO_U2;
2595 break;
2596 default:
2597 break;
2600 if (widen_op != -1) {
2601 int dreg = alloc_preg (cfg);
2602 MonoInst *widen;
2604 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2605 widen->type = ins->type;
2606 ins = widen;
2611 return ins;
2614 static MonoMethod*
2615 get_memcpy_method (void)
2617 static MonoMethod *memcpy_method = NULL;
2618 if (!memcpy_method) {
2619 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2620 if (!memcpy_method)
2621 g_error ("Old corlib found. Install a new one");
2623 return memcpy_method;
2626 static void
2627 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2629 MonoClassField *field;
2630 gpointer iter = NULL;
2632 while ((field = mono_class_get_fields (klass, &iter))) {
2633 int foffset;
2635 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2636 continue;
2637 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2638 if (mono_type_is_reference (field->type)) {
2639 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2640 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2641 } else {
2642 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2643 MonoClass *field_class = mono_class_from_mono_type (field->type);
2644 if (field_class->has_references)
2645 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
2650 static void
2651 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2653 int card_table_shift_bits;
2654 gpointer card_table_mask;
2655 guint8 *card_table;
2656 MonoInst *dummy_use;
2657 int nursery_shift_bits;
2658 size_t nursery_size;
2659 gboolean has_card_table_wb = FALSE;
2661 if (!cfg->gen_write_barriers)
2662 return;
2664 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2666 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2668 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2669 has_card_table_wb = TRUE;
2670 #endif
2672 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2673 MonoInst *wbarrier;
2675 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2676 wbarrier->sreg1 = ptr->dreg;
2677 if (value)
2678 wbarrier->sreg2 = value->dreg;
2679 else
2680 wbarrier->sreg2 = value_reg;
2681 MONO_ADD_INS (cfg->cbb, wbarrier);
2682 } else if (card_table) {
2683 int offset_reg = alloc_preg (cfg);
2684 int card_reg = alloc_preg (cfg);
2685 MonoInst *ins;
2687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2688 if (card_table_mask)
2689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2691 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2692 * IMM's larger than 32bits.
2694 if (cfg->compile_aot) {
2695 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2696 } else {
2697 MONO_INST_NEW (cfg, ins, OP_PCONST);
2698 ins->inst_p0 = card_table;
2699 ins->dreg = card_reg;
2700 MONO_ADD_INS (cfg->cbb, ins);
2703 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2704 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2705 } else {
2706 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2707 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2710 if (value) {
2711 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2712 } else {
2713 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2714 dummy_use->sreg1 = value_reg;
2715 MONO_ADD_INS (cfg->cbb, dummy_use);
2719 static gboolean
2720 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2722 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2723 unsigned need_wb = 0;
2725 if (align == 0)
2726 align = 4;
2728 /*types with references can't have alignment smaller than sizeof(void*) */
2729 if (align < SIZEOF_VOID_P)
2730 return FALSE;
2732 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2733 if (size > 32 * SIZEOF_VOID_P)
2734 return FALSE;
2736 create_write_barrier_bitmap (klass, &need_wb, 0);
2738 /* We don't unroll more than 5 stores to avoid code bloat. */
2739 if (size > 5 * SIZEOF_VOID_P) {
2740 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2741 size += (SIZEOF_VOID_P - 1);
2742 size &= ~(SIZEOF_VOID_P - 1);
2744 EMIT_NEW_ICONST (cfg, iargs [2], size);
2745 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2746 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2747 return TRUE;
2750 destreg = iargs [0]->dreg;
2751 srcreg = iargs [1]->dreg;
2752 offset = 0;
2754 dest_ptr_reg = alloc_preg (cfg);
2755 tmp_reg = alloc_preg (cfg);
2757 /*tmp = dreg*/
2758 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2760 while (size >= SIZEOF_VOID_P) {
2761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2764 if (need_wb & 0x1)
2765 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2767 offset += SIZEOF_VOID_P;
2768 size -= SIZEOF_VOID_P;
2769 need_wb >>= 1;
2771 /*tmp += sizeof (void*)*/
2772 if (size >= SIZEOF_VOID_P) {
2773 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2774 MONO_ADD_INS (cfg->cbb, iargs [0]);
2778 /* Those cannot be references since size < sizeof (void*) */
2779 while (size >= 4) {
2780 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2781 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2782 offset += 4;
2783 size -= 4;
2786 while (size >= 2) {
2787 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2789 offset += 2;
2790 size -= 2;
2793 while (size >= 1) {
2794 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2795 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2796 offset += 1;
2797 size -= 1;
2800 return TRUE;
2804 * Emit code to copy a valuetype of type @klass whose address is stored in
2805 * @src->dreg to memory whose address is stored at @dest->dreg.
2807 void
2808 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2810 MonoInst *iargs [4];
2811 int n;
2812 guint32 align = 0;
2813 MonoMethod *memcpy_method;
2815 g_assert (klass);
2817 * This check breaks with spilled vars... need to handle it during verification anyway.
2818 * g_assert (klass && klass == src->klass && klass == dest->klass);
2821 if (native)
2822 n = mono_class_native_size (klass, &align);
2823 else
2824 n = mono_class_value_size (klass, &align);
2826 /* if native is true there should be no references in the struct */
2827 if (cfg->gen_write_barriers && klass->has_references && !native) {
2828 /* Avoid barriers when storing to the stack */
2829 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2830 (dest->opcode == OP_LDADDR))) {
2831 int context_used = 0;
2833 iargs [0] = dest;
2834 iargs [1] = src;
2836 if (cfg->generic_sharing_context)
2837 context_used = mono_class_check_context_used (klass);
2839 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2840 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2841 return;
2842 } else if (context_used) {
2843 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2844 } else {
2845 if (cfg->compile_aot) {
2846 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2847 } else {
2848 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2849 mono_class_compute_gc_descriptor (klass);
2853 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2854 return;
2858 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2859 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2860 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2861 } else {
2862 iargs [0] = dest;
2863 iargs [1] = src;
2864 EMIT_NEW_ICONST (cfg, iargs [2], n);
2866 memcpy_method = get_memcpy_method ();
2867 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2871 static MonoMethod*
2872 get_memset_method (void)
2874 static MonoMethod *memset_method = NULL;
2875 if (!memset_method) {
2876 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2877 if (!memset_method)
2878 g_error ("Old corlib found. Install a new one");
2880 return memset_method;
2883 void
2884 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2886 MonoInst *iargs [3];
2887 int n;
2888 guint32 align;
2889 MonoMethod *memset_method;
2891 /* FIXME: Optimize this for the case when dest is an LDADDR */
2893 mono_class_init (klass);
2894 n = mono_class_value_size (klass, &align);
2896 if (n <= sizeof (gpointer) * 5) {
2897 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2899 else {
2900 memset_method = get_memset_method ();
2901 iargs [0] = dest;
2902 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2903 EMIT_NEW_ICONST (cfg, iargs [2], n);
2904 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2908 static MonoInst*
2909 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2911 MonoInst *this = NULL;
2913 g_assert (cfg->generic_sharing_context);
2915 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2916 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2917 !method->klass->valuetype)
2918 EMIT_NEW_ARGLOAD (cfg, this, 0);
2920 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2921 MonoInst *mrgctx_loc, *mrgctx_var;
2923 g_assert (!this);
2924 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2926 mrgctx_loc = mono_get_vtable_var (cfg);
2927 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2929 return mrgctx_var;
2930 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2931 MonoInst *vtable_loc, *vtable_var;
2933 g_assert (!this);
2935 vtable_loc = mono_get_vtable_var (cfg);
2936 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2938 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2939 MonoInst *mrgctx_var = vtable_var;
2940 int vtable_reg;
2942 vtable_reg = alloc_preg (cfg);
2943 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2944 vtable_var->type = STACK_PTR;
2947 return vtable_var;
2948 } else {
2949 MonoInst *ins;
2950 int vtable_reg;
2952 vtable_reg = alloc_preg (cfg);
2953 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2954 return ins;
2958 static MonoJumpInfoRgctxEntry *
2959 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2961 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2962 res->method = method;
2963 res->in_mrgctx = in_mrgctx;
2964 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2965 res->data->type = patch_type;
2966 res->data->data.target = patch_data;
2967 res->info_type = info_type;
2969 return res;
2972 static inline MonoInst*
2973 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2975 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2978 static MonoInst*
2979 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2980 MonoClass *klass, int rgctx_type)
2982 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2983 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2985 return emit_rgctx_fetch (cfg, rgctx, entry);
2989 * emit_get_rgctx_method:
2991 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2992 * normal constants, else emit a load from the rgctx.
2994 static MonoInst*
2995 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2996 MonoMethod *cmethod, int rgctx_type)
2998 if (!context_used) {
2999 MonoInst *ins;
3001 switch (rgctx_type) {
3002 case MONO_RGCTX_INFO_METHOD:
3003 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3004 return ins;
3005 case MONO_RGCTX_INFO_METHOD_RGCTX:
3006 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3007 return ins;
3008 default:
3009 g_assert_not_reached ();
3011 } else {
3012 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3013 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3015 return emit_rgctx_fetch (cfg, rgctx, entry);
3019 static MonoInst*
3020 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3021 MonoClassField *field, int rgctx_type)
3023 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3024 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3026 return emit_rgctx_fetch (cfg, rgctx, entry);
3030 * On return the caller must check @klass for load errors.
3032 static void
3033 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3035 MonoInst *vtable_arg;
3036 MonoCallInst *call;
3037 int context_used = 0;
3039 if (cfg->generic_sharing_context)
3040 context_used = mono_class_check_context_used (klass);
3042 if (context_used) {
3043 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3044 klass, MONO_RGCTX_INFO_VTABLE);
3045 } else {
3046 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3048 if (!vtable)
3049 return;
3050 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3053 if (COMPILE_LLVM (cfg))
3054 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3055 else
3056 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3057 #ifdef MONO_ARCH_VTABLE_REG
3058 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3059 cfg->uses_vtable_reg = TRUE;
3060 #else
3061 NOT_IMPLEMENTED;
3062 #endif
3065 static void
3066 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3068 if (mini_get_debug_options ()->better_cast_details) {
3069 int to_klass_reg = alloc_preg (cfg);
3070 int vtable_reg = alloc_preg (cfg);
3071 int klass_reg = alloc_preg (cfg);
3072 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3074 if (!tls_get) {
3075 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3076 exit (1);
3079 MONO_ADD_INS (cfg->cbb, tls_get);
3080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3083 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3084 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3085 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3089 static void
3090 reset_cast_details (MonoCompile *cfg)
3092 /* Reset the variables holding the cast details */
3093 if (mini_get_debug_options ()->better_cast_details) {
3094 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3096 MONO_ADD_INS (cfg->cbb, tls_get);
3097 /* It is enough to reset the from field */
3098 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3103 * On return the caller must check @array_class for load errors
3105 static void
3106 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3108 int vtable_reg = alloc_preg (cfg);
3109 int context_used = 0;
3111 if (cfg->generic_sharing_context)
3112 context_used = mono_class_check_context_used (array_class);
3114 save_cast_details (cfg, array_class, obj->dreg);
3116 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3118 if (cfg->opt & MONO_OPT_SHARED) {
3119 int class_reg = alloc_preg (cfg);
3120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3121 if (cfg->compile_aot) {
3122 int klass_reg = alloc_preg (cfg);
3123 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3124 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3125 } else {
3126 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3128 } else if (context_used) {
3129 MonoInst *vtable_ins;
3131 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3132 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3133 } else {
3134 if (cfg->compile_aot) {
3135 int vt_reg;
3136 MonoVTable *vtable;
3138 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3139 return;
3140 vt_reg = alloc_preg (cfg);
3141 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3142 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3143 } else {
3144 MonoVTable *vtable;
3145 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3146 return;
3147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3151 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3153 reset_cast_details (cfg);
3157 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3158 * generic code is generated.
3160 static MonoInst*
3161 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3163 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3165 if (context_used) {
3166 MonoInst *rgctx, *addr;
3168 /* FIXME: What if the class is shared? We might not
3169 have to get the address of the method from the
3170 RGCTX. */
3171 addr = emit_get_rgctx_method (cfg, context_used, method,
3172 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3174 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3176 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3177 } else {
3178 return mono_emit_method_call (cfg, method, &val, NULL);
3182 static MonoInst*
3183 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3185 MonoInst *add;
3186 int obj_reg;
3187 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3188 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3189 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3190 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3192 obj_reg = sp [0]->dreg;
3193 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3194 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3196 /* FIXME: generics */
3197 g_assert (klass->rank == 0);
3199 // Check rank == 0
3200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3201 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3206 if (context_used) {
3207 MonoInst *element_class;
3209 /* This assertion is from the unboxcast insn */
3210 g_assert (klass->rank == 0);
3212 element_class = emit_get_rgctx_klass (cfg, context_used,
3213 klass->element_class, MONO_RGCTX_INFO_KLASS);
3215 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3216 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3217 } else {
3218 save_cast_details (cfg, klass->element_class, obj_reg);
3219 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3220 reset_cast_details (cfg);
3223 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3224 MONO_ADD_INS (cfg->cbb, add);
3225 add->type = STACK_MP;
3226 add->klass = klass;
3228 return add;
3232 * Returns NULL and set the cfg exception on error.
3234 static MonoInst*
3235 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3237 MonoInst *iargs [2];
3238 void *alloc_ftn;
3240 if (context_used) {
3241 MonoInst *data;
3242 int rgctx_info;
3243 MonoInst *iargs [2];
3246 FIXME: we cannot get managed_alloc here because we can't get
3247 the class's vtable (because it's not a closed class)
3249 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3250 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3253 if (cfg->opt & MONO_OPT_SHARED)
3254 rgctx_info = MONO_RGCTX_INFO_KLASS;
3255 else
3256 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3257 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3259 if (cfg->opt & MONO_OPT_SHARED) {
3260 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3261 iargs [1] = data;
3262 alloc_ftn = mono_object_new;
3263 } else {
3264 iargs [0] = data;
3265 alloc_ftn = mono_object_new_specific;
3268 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3271 if (cfg->opt & MONO_OPT_SHARED) {
3272 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3273 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3275 alloc_ftn = mono_object_new;
3276 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3277 /* This happens often in argument checking code, eg. throw new FooException... */
3278 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3279 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3280 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3281 } else {
3282 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3283 MonoMethod *managed_alloc = NULL;
3284 gboolean pass_lw;
3286 if (!vtable) {
3287 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3288 cfg->exception_ptr = klass;
3289 return NULL;
3292 #ifndef MONO_CROSS_COMPILE
3293 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3294 #endif
3296 if (managed_alloc) {
3297 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3298 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3300 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3301 if (pass_lw) {
3302 guint32 lw = vtable->klass->instance_size;
3303 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3304 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3305 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3307 else {
3308 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3312 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3316 * Returns NULL and set the cfg exception on error.
3318 static MonoInst*
3319 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3321 MonoInst *alloc, *ins;
3323 if (mono_class_is_nullable (klass)) {
3324 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3326 if (context_used) {
3327 /* FIXME: What if the class is shared? We might not
3328 have to get the method address from the RGCTX. */
3329 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3330 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3331 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3333 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3334 } else {
3335 return mono_emit_method_call (cfg, method, &val, NULL);
3339 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3340 if (!alloc)
3341 return NULL;
3343 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3345 return alloc;
3349 static gboolean
3350 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3352 int i;
3353 MonoGenericContainer *container;
3354 MonoGenericInst *ginst;
3356 if (klass->generic_class) {
3357 container = klass->generic_class->container_class->generic_container;
3358 ginst = klass->generic_class->context.class_inst;
3359 } else if (klass->generic_container && context_used) {
3360 container = klass->generic_container;
3361 ginst = container->context.class_inst;
3362 } else {
3363 return FALSE;
3366 for (i = 0; i < container->type_argc; ++i) {
3367 MonoType *type;
3368 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3369 continue;
3370 type = ginst->type_argv [i];
3371 if (MONO_TYPE_IS_REFERENCE (type))
3372 return TRUE;
3374 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3375 return TRUE;
3377 return FALSE;
/*
 * is_complex_isinst:
 *
 *   TRUE if an isinst/castclass against KLASS cannot be handled by the
 * simple inline check sequence and must go through an icall.
 * FIXME: This doesn't work yet (class libs tests fail?), so the macro is
 * currently hardwired to TRUE by the leading 'TRUE ||'.
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3384 * Returns NULL and set the cfg exception on error.
3386 static MonoInst*
3387 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3389 MonoBasicBlock *is_null_bb;
3390 int obj_reg = src->dreg;
3391 int vtable_reg = alloc_preg (cfg);
3392 MonoInst *klass_inst = NULL;
3394 if (context_used) {
3395 MonoInst *args [3];
3397 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3398 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3399 MonoInst *cache_ins;
3401 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3403 /* obj */
3404 args [0] = src;
3406 /* klass - it's the second element of the cache entry*/
3407 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3409 /* cache */
3410 args [2] = cache_ins;
3412 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3415 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3417 if (is_complex_isinst (klass)) {
3418 /* Complex case, handle by an icall */
3420 /* obj */
3421 args [0] = src;
3423 /* klass */
3424 args [1] = klass_inst;
3426 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3427 } else {
3428 /* Simple case, handled by the code below */
3432 NEW_BBLOCK (cfg, is_null_bb);
3434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3435 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3437 save_cast_details (cfg, klass, obj_reg);
3439 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3440 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3441 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3442 } else {
3443 int klass_reg = alloc_preg (cfg);
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3447 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3448 /* the remoting code is broken, access the class for now */
3449 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3450 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3451 if (!vt) {
3452 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3453 cfg->exception_ptr = klass;
3454 return NULL;
3456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3457 } else {
3458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3461 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3462 } else {
3463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3464 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3468 MONO_START_BB (cfg, is_null_bb);
3470 reset_cast_details (cfg);
3472 return src;
3476 * Returns NULL and set the cfg exception on error.
3478 static MonoInst*
3479 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3481 MonoInst *ins;
3482 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3483 int obj_reg = src->dreg;
3484 int vtable_reg = alloc_preg (cfg);
3485 int res_reg = alloc_ireg_ref (cfg);
3486 MonoInst *klass_inst = NULL;
3488 if (context_used) {
3489 MonoInst *args [3];
3491 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3492 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3493 MonoInst *cache_ins;
3495 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3497 /* obj */
3498 args [0] = src;
3500 /* klass - it's the second element of the cache entry*/
3501 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3503 /* cache */
3504 args [2] = cache_ins;
3506 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3509 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3511 if (is_complex_isinst (klass)) {
3512 /* Complex case, handle by an icall */
3514 /* obj */
3515 args [0] = src;
3517 /* klass */
3518 args [1] = klass_inst;
3520 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3521 } else {
3522 /* Simple case, the code below can handle it */
3526 NEW_BBLOCK (cfg, is_null_bb);
3527 NEW_BBLOCK (cfg, false_bb);
3528 NEW_BBLOCK (cfg, end_bb);
3530 /* Do the assignment at the beginning, so the other assignment can be if converted */
3531 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3532 ins->type = STACK_OBJ;
3533 ins->klass = klass;
3535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3540 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3541 g_assert (!context_used);
3542 /* the is_null_bb target simply copies the input register to the output */
3543 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3544 } else {
3545 int klass_reg = alloc_preg (cfg);
3547 if (klass->rank) {
3548 int rank_reg = alloc_preg (cfg);
3549 int eclass_reg = alloc_preg (cfg);
3551 g_assert (!context_used);
3552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3557 if (klass->cast_class == mono_defaults.object_class) {
3558 int parent_reg = alloc_preg (cfg);
3559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3560 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3561 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3563 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3564 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3565 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3567 } else if (klass->cast_class == mono_defaults.enum_class) {
3568 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3570 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3571 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3572 } else {
3573 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3574 /* Check that the object is a vector too */
3575 int bounds_reg = alloc_preg (cfg);
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3581 /* the is_null_bb target simply copies the input register to the output */
3582 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3584 } else if (mono_class_is_nullable (klass)) {
3585 g_assert (!context_used);
3586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3587 /* the is_null_bb target simply copies the input register to the output */
3588 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3589 } else {
3590 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3591 g_assert (!context_used);
3592 /* the remoting code is broken, access the class for now */
3593 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3594 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3595 if (!vt) {
3596 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3597 cfg->exception_ptr = klass;
3598 return NULL;
3600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3601 } else {
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3607 } else {
3608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3609 /* the is_null_bb target simply copies the input register to the output */
3610 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3615 MONO_START_BB (cfg, false_bb);
3617 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3620 MONO_START_BB (cfg, is_null_bb);
3622 MONO_START_BB (cfg, end_bb);
3624 return ins;
3627 static MonoInst*
3628 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3630 /* This opcode takes as input an object reference and a class, and returns:
3631 0) if the object is an instance of the class,
3632 1) if the object is not instance of the class,
3633 2) if the object is a proxy whose type cannot be determined */
3635 MonoInst *ins;
3636 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3637 int obj_reg = src->dreg;
3638 int dreg = alloc_ireg (cfg);
3639 int tmp_reg;
3640 int klass_reg = alloc_preg (cfg);
3642 NEW_BBLOCK (cfg, true_bb);
3643 NEW_BBLOCK (cfg, false_bb);
3644 NEW_BBLOCK (cfg, false2_bb);
3645 NEW_BBLOCK (cfg, end_bb);
3646 NEW_BBLOCK (cfg, no_proxy_bb);
3648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3649 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3651 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3652 NEW_BBLOCK (cfg, interface_fail_bb);
3654 tmp_reg = alloc_preg (cfg);
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3656 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3657 MONO_START_BB (cfg, interface_fail_bb);
3658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3660 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3662 tmp_reg = alloc_preg (cfg);
3663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3666 } else {
3667 tmp_reg = alloc_preg (cfg);
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3671 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3672 tmp_reg = alloc_preg (cfg);
3673 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3674 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3676 tmp_reg = alloc_preg (cfg);
3677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3681 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3684 MONO_START_BB (cfg, no_proxy_bb);
3686 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3689 MONO_START_BB (cfg, false_bb);
3691 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3694 MONO_START_BB (cfg, false2_bb);
3696 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3697 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3699 MONO_START_BB (cfg, true_bb);
3701 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3703 MONO_START_BB (cfg, end_bb);
3705 /* FIXME: */
3706 MONO_INST_NEW (cfg, ins, OP_ICONST);
3707 ins->dreg = dreg;
3708 ins->type = STACK_I4;
3710 return ins;
3713 static MonoInst*
3714 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3716 /* This opcode takes as input an object reference and a class, and returns:
3717 0) if the object is an instance of the class,
3718 1) if the object is a proxy whose type cannot be determined
3719 an InvalidCastException exception is thrown otherwhise*/
3721 MonoInst *ins;
3722 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3723 int obj_reg = src->dreg;
3724 int dreg = alloc_ireg (cfg);
3725 int tmp_reg = alloc_preg (cfg);
3726 int klass_reg = alloc_preg (cfg);
3728 NEW_BBLOCK (cfg, end_bb);
3729 NEW_BBLOCK (cfg, ok_result_bb);
3731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3732 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3734 save_cast_details (cfg, klass, obj_reg);
3736 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3737 NEW_BBLOCK (cfg, interface_fail_bb);
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3740 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3741 MONO_START_BB (cfg, interface_fail_bb);
3742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3744 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3746 tmp_reg = alloc_preg (cfg);
3747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3749 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3751 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3752 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3754 } else {
3755 NEW_BBLOCK (cfg, no_proxy_bb);
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3759 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3761 tmp_reg = alloc_preg (cfg);
3762 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3765 tmp_reg = alloc_preg (cfg);
3766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3770 NEW_BBLOCK (cfg, fail_1_bb);
3772 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3774 MONO_START_BB (cfg, fail_1_bb);
3776 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3777 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3779 MONO_START_BB (cfg, no_proxy_bb);
3781 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3784 MONO_START_BB (cfg, ok_result_bb);
3786 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3788 MONO_START_BB (cfg, end_bb);
3790 /* FIXME: */
3791 MONO_INST_NEW (cfg, ins, OP_ICONST);
3792 ins->dreg = dreg;
3793 ins->type = STACK_I4;
3795 return ins;
3799 * Returns NULL and set the cfg exception on error.
3801 static G_GNUC_UNUSED MonoInst*
3802 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3804 MonoInst *ptr;
3805 int dreg;
3806 gpointer *trampoline;
3807 MonoInst *obj, *method_ins, *tramp_ins;
3808 MonoDomain *domain;
3809 guint8 **code_slot;
3811 obj = handle_alloc (cfg, klass, FALSE, 0);
3812 if (!obj)
3813 return NULL;
3815 /* Inline the contents of mono_delegate_ctor */
3817 /* Set target field */
3818 /* Optimize away setting of NULL target */
3819 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3821 if (cfg->gen_write_barriers) {
3822 dreg = alloc_preg (cfg);
3823 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3824 emit_write_barrier (cfg, ptr, target, 0);
3828 /* Set method field */
3829 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3831 if (cfg->gen_write_barriers) {
3832 dreg = alloc_preg (cfg);
3833 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3834 emit_write_barrier (cfg, ptr, method_ins, 0);
3837 * To avoid looking up the compiled code belonging to the target method
3838 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3839 * store it, and we fill it after the method has been compiled.
3841 if (!cfg->compile_aot && !method->dynamic) {
3842 MonoInst *code_slot_ins;
3844 if (context_used) {
3845 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3846 } else {
3847 domain = mono_domain_get ();
3848 mono_domain_lock (domain);
3849 if (!domain_jit_info (domain)->method_code_hash)
3850 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3851 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3852 if (!code_slot) {
3853 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3854 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3856 mono_domain_unlock (domain);
3858 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3860 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3863 /* Set invoke_impl field */
3864 if (cfg->compile_aot) {
3865 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3866 } else {
3867 trampoline = mono_create_delegate_trampoline (klass);
3868 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3872 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3874 return obj;
3877 static MonoInst*
3878 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3880 MonoJitICallInfo *info;
3882 /* Need to register the icall so it gets an icall wrapper */
3883 info = mono_get_array_new_va_icall (rank);
3885 cfg->flags |= MONO_CFG_HAS_VARARGS;
3887 /* mono_array_new_va () needs a vararg calling convention */
3888 cfg->disable_llvm = TRUE;
3890 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3891 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3894 static void
3895 mono_emit_load_got_addr (MonoCompile *cfg)
3897 MonoInst *getaddr, *dummy_use;
3899 if (!cfg->got_var || cfg->got_var_allocated)
3900 return;
3902 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3903 getaddr->dreg = cfg->got_var->dreg;
3905 /* Add it to the start of the first bblock */
3906 if (cfg->bb_entry->code) {
3907 getaddr->next = cfg->bb_entry->code;
3908 cfg->bb_entry->code = getaddr;
3910 else
3911 MONO_ADD_INS (cfg->bb_entry, getaddr);
3913 cfg->got_var_allocated = TRUE;
3916 * Add a dummy use to keep the got_var alive, since real uses might
3917 * only be generated by the back ends.
3918 * Add it to end_bblock, so the variable's lifetime covers the whole
3919 * method.
3920 * It would be better to make the usage of the got var explicit in all
3921 * cases when the backend needs it (i.e. calls, throw etc.), so this
3922 * wouldn't be needed.
3924 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3925 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3928 static int inline_limit;
3929 static gboolean inline_limit_inited;
3931 static gboolean
3932 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3934 MonoMethodHeaderSummary header;
3935 MonoVTable *vtable;
3936 #ifdef MONO_ARCH_SOFT_FLOAT
3937 MonoMethodSignature *sig = mono_method_signature (method);
3938 int i;
3939 #endif
3941 if (cfg->generic_sharing_context)
3942 return FALSE;
3944 if (cfg->inline_depth > 10)
3945 return FALSE;
3947 #ifdef MONO_ARCH_HAVE_LMF_OPS
3948 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3949 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3950 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3951 return TRUE;
3952 #endif
3955 if (!mono_method_get_header_summary (method, &header))
3956 return FALSE;
3958 /*runtime, icall and pinvoke are checked by summary call*/
3959 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3960 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3961 (method->klass->marshalbyref) ||
3962 header.has_clauses)
3963 return FALSE;
3965 /* also consider num_locals? */
3966 /* Do the size check early to avoid creating vtables */
3967 if (!inline_limit_inited) {
3968 if (getenv ("MONO_INLINELIMIT"))
3969 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3970 else
3971 inline_limit = INLINE_LENGTH_LIMIT;
3972 inline_limit_inited = TRUE;
3974 if (header.code_size >= inline_limit)
3975 return FALSE;
3978 * if we can initialize the class of the method right away, we do,
3979 * otherwise we don't allow inlining if the class needs initialization,
3980 * since it would mean inserting a call to mono_runtime_class_init()
3981 * inside the inlined code
3983 if (!(cfg->opt & MONO_OPT_SHARED)) {
3984 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3985 if (cfg->run_cctors && method->klass->has_cctor) {
3986 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3987 if (!method->klass->runtime_info)
3988 /* No vtable created yet */
3989 return FALSE;
3990 vtable = mono_class_vtable (cfg->domain, method->klass);
3991 if (!vtable)
3992 return FALSE;
3993 /* This makes so that inline cannot trigger */
3994 /* .cctors: too many apps depend on them */
3995 /* running with a specific order... */
3996 if (! vtable->initialized)
3997 return FALSE;
3998 mono_runtime_class_init (vtable);
4000 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4001 if (!method->klass->runtime_info)
4002 /* No vtable created yet */
4003 return FALSE;
4004 vtable = mono_class_vtable (cfg->domain, method->klass);
4005 if (!vtable)
4006 return FALSE;
4007 if (!vtable->initialized)
4008 return FALSE;
4010 } else {
4012 * If we're compiling for shared code
4013 * the cctor will need to be run at aot method load time, for example,
4014 * or at the end of the compilation of the inlining method.
4016 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4017 return FALSE;
4021 * CAS - do not inline methods with declarative security
4022 * Note: this has to be before any possible return TRUE;
4024 if (mono_method_has_declsec (method))
4025 return FALSE;
4027 #ifdef MONO_ARCH_SOFT_FLOAT
4028 /* FIXME: */
4029 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4030 return FALSE;
4031 for (i = 0; i < sig->param_count; ++i)
4032 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4033 return FALSE;
4034 #endif
4036 return TRUE;
4039 static gboolean
4040 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4042 if (vtable->initialized && !cfg->compile_aot)
4043 return FALSE;
4045 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4046 return FALSE;
4048 if (!mono_class_needs_cctor_run (vtable->klass, method))
4049 return FALSE;
4051 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4052 /* The initialization is already done before the method is called */
4053 return FALSE;
4055 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS.  If BCHECK is set, a bounds check
 * against the array length is emitted first.  Returns the address
 * instruction (stack type STACK_MP).
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
#else
	if (index->type == STACK_I8) {
		/* Narrow a 64 bit index to the native 32 bit pointer size. */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
#endif

	if (bcheck)
		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes can use a single LEA on x86/amd64. */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

		return ins;
#endif

	/* Generic path: addr = arr + index * size + offsetof (vector). */
	add_reg = alloc_ireg_mp (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element (INDEX_INS1, INDEX_INS2) of the
 * two-dimensional array ARR, adjusting both indexes by the per-dimension
 * lower bound and range-checking them against the per-dimension length.
 * Depends on OP_LMUL, so it is only built when mul/div are not emulated.
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx1 = index1 - lower_bound; require realidx1 < length. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * len2) + realidx2) * size + offsetof (vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
#endif
4176 static MonoInst*
4177 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4179 int rank;
4180 MonoInst *addr;
4181 MonoMethod *addr_method;
4182 int element_size;
4184 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4186 if (rank == 1)
4187 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4189 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4190 /* emit_ldelema_2 depends on OP_LMUL */
4191 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4192 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4194 #endif
4196 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4197 addr_method = mono_marshal_get_array_address (rank, element_size);
4198 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4200 return addr;
4203 static MonoBreakPolicy
4204 always_insert_breakpoint (MonoMethod *method)
4206 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; replaced via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4212 * mono_set_break_policy:
4213 * policy_callback: the new callback function
4215 * Allow embedders to decide wherther to actually obey breakpoint instructions
4216 * (both break IL instructions and Debugger.Break () method calls), for example
4217 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4218 * untrusted or semi-trusted code.
4220 * @policy_callback will be called every time a break point instruction needs to
4221 * be inserted with the method argument being the method that calls Debugger.Break()
4222 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4223 * if it wants the breakpoint to not be effective in the given method.
4224 * #MONO_BREAK_POLICY_ALWAYS is the default.
4226 void
4227 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4229 if (policy_callback)
4230 break_policy_func = policy_callback;
4231 else
4232 break_policy_func = always_insert_breakpoint;
4235 static gboolean
4236 should_insert_brekpoint (MonoMethod *method) {
4237 switch (break_policy_func (method)) {
4238 case MONO_BREAK_POLICY_ALWAYS:
4239 return TRUE;
4240 case MONO_BREAK_POLICY_NEVER:
4241 return FALSE;
4242 case MONO_BREAK_POLICY_ON_DBG:
4243 return mono_debug_using_mono_debugger ();
4244 default:
4245 g_warning ("Incorrect value returned from break policy callback");
4246 return FALSE;
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
static MonoInst*
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
	MonoInst *addr, *store, *load;
	/* args: [0] = array, [1] = index, [2] = byref value; element type is
	 * taken from the third parameter of the icall signature. */
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	if (is_set) {
		/* SetGenericValueImpl: copy *args [2] into the array element. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	} else {
		/* GetGenericValueImpl: copy the array element into *args [2]. */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
	return store;
4269 static MonoInst*
4270 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4272 MonoInst *ins = NULL;
4273 #ifdef MONO_ARCH_SIMD_INTRINSICS
4274 if (cfg->opt & MONO_OPT_SIMD) {
4275 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4276 if (ins)
4277 return ins;
4279 #endif
4281 return ins;
/*
 * mini_emit_inst_for_method:
 *
 *   Try to emit an inline IR sequence (an intrinsic) for a call to CMETHOD
 * with signature FSIG and argument instructions ARGS.  Returns the resulting
 * instruction, or NULL if the call is not recognized here and must be
 * emitted as a normal call.  Handles well-known corlib methods (String,
 * Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger,
 * Environment, Math) plus SIMD and arch-specific intrinsics.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			/* String indexer: bounds check + 16 bit load from the chars array. */
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
					       add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
					       add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			ins->flags |= MONO_INST_FAULT;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;

			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {

		if (strcmp (cmethod->name, "GetType") == 0) {
			/* obj->vtable->type */
			int dreg = alloc_ireg_ref (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);

			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
			/* Hash the object address (only valid with a non-moving GC). */
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;

			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor () is a no-op. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Matches both GetGenericValueImpl and SetGenericValueImpl. */
		if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');

#ifndef MONO_BIG_ARRAYS
		/*
		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
		 * Array methods.
		 */
		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
			int dreg = alloc_ireg (cfg);
			int bounds_reg = alloc_ireg_mp (cfg);
			MonoBasicBlock *end_bb, *szarray_bb;
			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;

			NEW_BBLOCK (cfg, end_bb);
			NEW_BBLOCK (cfg, szarray_bb);

			/* A NULL bounds pointer means a szarray (rank 1, zero-based). */
			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
						     args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
			/* Non-szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
						       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
			else
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
						       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
			MONO_START_BB (cfg, szarray_bb);
			/* Szarray case */
			if (get_length)
				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
						       args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			else
				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
			MONO_START_BB (cfg, end_bb);

			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
			ins->type = STACK_I4;

			return ins;
#endif

		/* Only the get_Rank/get_Length getters are handled below. */
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
							     args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
					       vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
						     args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);

			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {

		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		/* Call the Monitor trampolines, passing the object in a fixed register. */
		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
							       MONO_ARCH_MONITOR_OBJECT_REG, FALSE);

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
							       MONO_ARCH_MONITOR_OBJECT_REG, FALSE);

			return (MonoInst*)call;
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			/* Atomic add of the constant 1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			/* Atomic add of the constant -1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					 (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();

			/* Storing a reference needs a write barrier with some GCs. */
			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */

			if (cfg->gen_write_barriers && is_ref)
				emit_write_barrier (cfg, args [0], args [1], -1);
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* Honor the embedder-provided break policy. */
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
#endif

	/* Fall back to the architecture-specific intrinsics. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * This entry point could be used later for arbitrary method
 * redirection.
 *
 * Currently it only redirects String.InternalAllocateStr () to the GC's
 * managed allocator, when allocation profiling is off and shared code is
 * not being compiled.  Returns the replacement call, or NULL if the call
 * is not redirected.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /*Should not fail since it System.String*/
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
#endif
			if (!managed_alloc)
				return NULL;
			/* Call the managed allocator with (vtable, length). */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
	return NULL;
/*
 * mono_save_args:
 *
 *   When inlining, store the caller's argument instructions SP into newly
 * created local variables which become the inlinee's arguments (cfg->args).
 * For instance methods, the type of the 'this' argument is derived from its
 * stack type.
 */
static void
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
	MonoInst *store, *temp;
	int i;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
		sp++;
/* Compile-time switches enabling name-based inline filtering through the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables
 * (debugging aid for narrowing down inlining-related failures). */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging filter: only allow inlining of methods whose full name starts
 * with the prefix in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  An unset or
 * empty variable allows everything.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	/* Read the environment variable once and cache it. */
	if (limit == NULL) {
		char *env = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
		limit = env ? env : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (called_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);
	return matches;
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging filter: only allow inlining into methods whose full name starts
 * with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  An unset or
 * empty variable allows everything.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *full_name;
	gboolean matches;

	/* Read the environment variable once and cache it. */
	if (limit == NULL) {
		char *env = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		limit = env ? env : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	full_name = mono_method_full_name (caller_method, TRUE);
	matches = strncmp (full_name, limit, strlen (limit)) == 0;
	g_free (full_name);
	return matches;
}
#endif
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the current compile at the call site IP.
 * Returns 0 if inlining was aborted (the caller falls back to a normal
 * call), otherwise the inline cost plus one.  On success the inlined IR
 * is merged into the current bblock chain and, for non-void methods, the
 * return value is pushed on *SP.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	       guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
{
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;
	}

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		MonoLoaderError *error = mono_loader_get_last_error ();

		if (cheader)
			mono_metadata_free_mh (cheader);
		/* Only surface the loader error when inlining was mandatory. */
		if (inline_always && error)
			mono_cfg_set_exception (cfg, error->exception_type);

		mono_loader_clear_error ();
		return 0;
	}

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);
		return 0;
	}

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
	}

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/*
	 * Save the parts of CFG that mono_method_to_ir () overwrites while
	 * compiling the callee; they are restored right after the recursive
	 * call below.
	 */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
		virtual = TRUE;

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);

	ret_var_set = cfg->ret_var_set;

	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* costs < 0 means mono_method_to_ir () aborted; 60 is the inline cost cap. */
	if ((costs >= 0 && costs < 60) || inline_always) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
			}
		} else {
			cfg->cbb = ebblock;
		}

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();
				}
			}

			/* Push the return value on the evaluation stack of the caller. */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;
		}
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	}
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
	return 0;
}
5037 * Some of these comments may well be out-of-date.
5038 * Design decisions: we do a single pass over the IL code (and we do bblock
5039 * splitting/merging in the few cases when it's required: a back jump to an IL
5040 * address that was not already seen as bblock starting point).
5041 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5042 * Complex operations are decomposed in simpler ones right away. We need to let the
5043 * arch-specific code peek and poke inside this process somehow (except when the
5044 * optimizations can take advantage of the full semantic info of coarse opcodes).
5045 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5046 * MonoInst->opcode initially is the IL opcode or some simplification of that
5047 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5048 * opcode with value bigger than OP_LAST.
5049 * At this point the IR can be handed over to an interpreter, a dumb code generator
5050 * or to the optimizing code generator that will translate it to SSA form.
5052 * Profiling directed optimizations.
5053 * We may compile by default with few or no optimizations and instrument the code
5054 * or the user may indicate what methods to optimize the most either in a config file
5055 * or through repeated runs where the compiler applies offline the optimizations to
5056 * each method and then decides if it was worth it.
/*
 * IL validation helpers used throughout mono_method_to_ir ().  Each one
 * bails out through the UNVERIFIED label when the IL being compiled is
 * malformed; CHECK_TYPELOAD bails out through LOAD_ERROR instead, after
 * recording the failing class in cfg->exception_ptr.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
5071 static gboolean
5072 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5074 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5076 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL between START and END: decode each opcode just far
 * enough to find every branch target and create a basic block there (via
 * GET_BBLOCK), so the main translation pass can assume block boundaries are
 * already known.  Returns 0 on success; on invalid IL, stores the failing ip
 * in *POS and returns 1.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
{
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance ip past the operand; only branch operands create bblocks. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			/* 1 byte opcode + 1 byte signed displacement */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* switch targets are relative to the end of the whole instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			}
			break;
		}
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();
		}

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			}
			if (bblock)
				bblock->out_of_line = 1;
		}
	}
	return 0;
unverified:
	*pos = ip;
	return 1;
}
5172 static inline MonoMethod *
5173 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5175 MonoMethod *method;
5177 if (m->wrapper_type != MONO_WRAPPER_NONE)
5178 return mono_method_get_wrapper_data (m, token);
5180 method = mono_get_method_full (m->klass->image, token, klass, context);
5182 return method;
5185 static inline MonoMethod *
5186 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5188 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5190 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
5191 return NULL;
5193 return method;
5196 static inline MonoClass*
5197 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5199 MonoClass *klass;
5201 if (method->wrapper_type != MONO_WRAPPER_NONE)
5202 klass = mono_method_get_wrapper_data (method, token);
5203 else
5204 klass = mono_class_get_full (method->klass->image, token, context);
5205 if (klass)
5206 mono_class_init (klass);
5207 return klass;
5211 * Returns TRUE if the JIT should abort inlining because "callee"
5212 * is influenced by security attributes.
5214 static
5215 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5217 guint32 result;
5219 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5220 return TRUE;
5223 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5224 if (result == MONO_JIT_SECURITY_OK)
5225 return FALSE;
5227 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5228 /* Generate code to throw a SecurityException before the actual call/link */
5229 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5230 MonoInst *args [2];
5232 NEW_ICONST (cfg, args [0], 4);
5233 NEW_METHODCONST (cfg, args [1], caller);
5234 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5235 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5236 /* don't hide previous results */
5237 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5238 cfg->exception_data = result;
5239 return TRUE;
5242 return FALSE;
5245 static MonoMethod*
5246 throw_exception (void)
5248 static MonoMethod *method = NULL;
5250 if (!method) {
5251 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5252 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
5254 g_assert (method);
5255 return method;
5258 static void
5259 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5261 MonoMethod *thrower = throw_exception ();
5262 MonoInst *args [1];
5264 EMIT_NEW_PCONST (cfg, args [0], ex);
5265 mono_emit_method_call (cfg, thrower, args, NULL);
5269 * Return the original method is a wrapper is specified. We can only access
5270 * the custom attributes from the original method.
5272 static MonoMethod*
5273 get_original_method (MonoMethod *method)
5275 if (method->wrapper_type == MONO_WRAPPER_NONE)
5276 return method;
5278 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5279 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5280 return NULL;
5282 /* in other cases we need to find the original method */
5283 return mono_marshal_method_from_wrapper (method);
5286 static void
5287 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5288 MonoBasicBlock *bblock, unsigned char *ip)
5290 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5291 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5292 if (ex)
5293 emit_throw_exception (cfg, ex);
5296 static void
5297 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5298 MonoBasicBlock *bblock, unsigned char *ip)
5300 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5301 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5302 if (ex)
5303 emit_throw_exception (cfg, ex);
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		/* The call target must really be RuntimeHelpers.InitializeArray. */
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* The RVA field must be at least as large as the array data. */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5390 static void
5391 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5393 char *method_fname = mono_method_full_name (method, TRUE);
5394 char *method_code;
5395 MonoMethodHeader *header = mono_method_get_header (method);
5397 if (header->code_size == 0)
5398 method_code = g_strdup ("method body is empty.");
5399 else
5400 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5402 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5403 g_free (method_fname);
5404 g_free (method_code);
5405 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object to be raised when the
 * compile finishes.  exception_ptr holds a managed object here, so it is
 * registered as a GC root before the store.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
	MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
5416 static gboolean
5417 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5419 MonoType *type;
5421 if (cfg->generic_sharing_context)
5422 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5423 else
5424 type = &klass->byval_arg;
5425 return MONO_TYPE_IS_REFERENCE (type);
5428 static void
5429 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5431 MonoInst *ins;
5432 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5433 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5434 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5435 /* Optimize reg-reg moves away */
5437 * Can't optimize other opcodes, since sp[0] might point to
5438 * the last ins of a decomposed opcode.
5440 sp [0]->dreg = (cfg)->locals [n]->dreg;
5441 } else {
5442 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* SIZE selects between the short (ldloca.s) and long encodings. */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* ldloca followed by initobj in the same bblock: zero the local directly. */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			skip = TRUE;
		}

		/* Return the ip past the initobj so the caller skips both opcodes. */
		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5488 static gboolean
5489 is_exception_class (MonoClass *class)
5491 while (class) {
5492 if (class == mono_defaults.exception_class)
5493 return TRUE;
5494 class = class->parent;
5496 return FALSE;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.  The result is cached on the assembly.
 */
static gboolean
is_jit_optimizer_disabled (MonoMethod *m)
{
	MonoAssembly *ass = m->klass->image->assembly;
	MonoCustomAttrInfo* attrs;
	static MonoClass *klass;
	int i;
	gboolean val = FALSE;

	g_assert (ass);
	if (ass->jit_optimizer_disabled_inited)
		return ass->jit_optimizer_disabled;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
	if (!klass) {
		/* Linked away */
		ass->jit_optimizer_disabled = FALSE;
		/* barrier so other threads never see _inited set before the value */
		mono_memory_barrier ();
		ass->jit_optimizer_disabled_inited = TRUE;
		return FALSE;
	}

	attrs = mono_custom_attrs_from_assembly (ass);
	if (attrs) {
		for (i = 0; i < attrs->num_attrs; ++i) {
			MonoCustomAttrEntry *attr = &attrs->attrs [i];
			const gchar *p;
			int len;
			MonoMethodSignature *sig;

			if (!attr->ctor || attr->ctor->klass != klass)
				continue;
			/* Decode the attribute. See reflection.c */
			len = attr->data_size;
			p = (const char*)attr->data;
			/* blob starts with the 0x0001 custom attribute prolog */
			g_assert (read16 (p) == 0x0001);
			p += 2;

			// FIXME: Support named parameters
			sig = mono_method_signature (attr->ctor);
			if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
				continue;
			/* Two boolean arguments */
			p ++;
			val = *p;
		}
		mono_custom_attrs_free (attrs);
	}

	ass->jit_optimizer_disabled = val;
	mono_memory_barrier ();
	ass->jit_optimizer_disabled_inited = TRUE;

	return val;
}
/*
 * is_supported_tail_call:
 *
 *   Return TRUE if the call from METHOD to CMETHOD with signature FSIG can be
 * compiled as a real tail call on this configuration.  Anything that could
 * leave a pointer into the caller's (about to be unwound) stack frame, or
 * that needs the caller's LMF, disqualifies the call.
 */
static gboolean
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
{
	gboolean supported_tail_call;
	int i;

#ifdef MONO_ARCH_USE_OP_TAIL_CALL
	/* the backend decides which signature pairs it can handle */
	supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
#else
	supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
#endif

	for (i = 0; i < fsig->param_count; ++i) {
		if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
			/* These can point to the current method's stack */
			supported_tail_call = FALSE;
	}
	if (fsig->hasthis && cmethod->klass->valuetype)
		/* this might point to the current method's stack */
		supported_tail_call = FALSE;
	if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
		supported_tail_call = FALSE;
	if (cfg->method->save_lmf)
		supported_tail_call = FALSE;
	if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
		supported_tail_call = FALSE;

	/* Debugging support */
#if 0
	if (supported_tail_call) {
		static int count = 0;
		count ++;
		if (getenv ("COUNT")) {
			if (count == atoi (getenv ("COUNT")))
				printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
			if (count > atoi (getenv ("COUNT")))
				supported_tail_call = FALSE;
		}
	}
#endif

	return supported_tail_call;
}
5607 * mono_method_to_ir:
5609 * Translate the .net IL into linear IR.
5612 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5613 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5614 guint inline_offset, gboolean is_virtual_call)
5616 MonoError error;
5617 MonoInst *ins, **sp, **stack_start;
5618 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5619 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5620 MonoMethod *cmethod, *method_definition;
5621 MonoInst **arg_array;
5622 MonoMethodHeader *header;
5623 MonoImage *image;
5624 guint32 token, ins_flag;
5625 MonoClass *klass;
5626 MonoClass *constrained_call = NULL;
5627 unsigned char *ip, *end, *target, *err_pos;
5628 static double r8_0 = 0.0;
5629 MonoMethodSignature *sig;
5630 MonoGenericContext *generic_context = NULL;
5631 MonoGenericContainer *generic_container = NULL;
5632 MonoType **param_types;
5633 int i, n, start_new_bblock, dreg;
5634 int num_calls = 0, inline_costs = 0;
5635 int breakpoint_id = 0;
5636 guint num_args;
5637 MonoBoolean security, pinvoke;
5638 MonoSecurityManager* secman = NULL;
5639 MonoDeclSecurityActions actions;
5640 GSList *class_inits = NULL;
5641 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5642 int context_used;
5643 gboolean init_locals, seq_points, skip_dead_blocks;
5644 gboolean disable_inline;
5646 disable_inline = is_jit_optimizer_disabled (method);
5648 /* serialization and xdomain stuff may need access to private fields and methods */
5649 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5650 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5651 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5652 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5653 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5654 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5656 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5658 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5659 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5660 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5661 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5662 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5664 image = method->klass->image;
5665 header = mono_method_get_header (method);
5666 if (!header) {
5667 MonoLoaderError *error;
5669 if ((error = mono_loader_get_last_error ())) {
5670 mono_cfg_set_exception (cfg, error->exception_type);
5671 } else {
5672 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5673 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5675 goto exception_exit;
5677 generic_container = mono_method_get_generic_container (method);
5678 sig = mono_method_signature (method);
5679 num_args = sig->hasthis + sig->param_count;
5680 ip = (unsigned char*)header->code;
5681 cfg->cil_start = ip;
5682 end = ip + header->code_size;
5683 mono_jit_stats.cil_code_size += header->code_size;
5684 init_locals = header->init_locals;
5686 seq_points = cfg->gen_seq_points && cfg->method == method;
5689 * Methods without init_locals set could cause asserts in various passes
5690 * (#497220).
5692 init_locals = TRUE;
5694 method_definition = method;
5695 while (method_definition->is_inflated) {
5696 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5697 method_definition = imethod->declaring;
5700 /* SkipVerification is not allowed if core-clr is enabled */
5701 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5702 dont_verify = TRUE;
5703 dont_verify_stloc = TRUE;
5706 if (mono_debug_using_mono_debugger ())
5707 cfg->keep_cil_nops = TRUE;
5709 if (sig->is_inflated)
5710 generic_context = mono_method_get_context (method);
5711 else if (generic_container)
5712 generic_context = &generic_container->context;
5713 cfg->generic_context = generic_context;
5715 if (!cfg->generic_sharing_context)
5716 g_assert (!sig->has_type_parameters);
5718 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5719 g_assert (method->is_inflated);
5720 g_assert (mono_method_get_context (method)->method_inst);
5722 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5723 g_assert (sig->generic_param_count);
5725 if (cfg->method == method) {
5726 cfg->real_offset = 0;
5727 } else {
5728 cfg->real_offset = inline_offset;
5731 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5732 cfg->cil_offset_to_bb_len = header->code_size;
5734 cfg->current_method = method;
5736 if (cfg->verbose_level > 2)
5737 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5739 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5740 if (sig->hasthis)
5741 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5742 for (n = 0; n < sig->param_count; ++n)
5743 param_types [n + sig->hasthis] = sig->params [n];
5744 cfg->arg_types = param_types;
5746 dont_inline = g_list_prepend (dont_inline, method);
5747 if (cfg->method == method) {
5749 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5750 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5752 /* ENTRY BLOCK */
5753 NEW_BBLOCK (cfg, start_bblock);
5754 cfg->bb_entry = start_bblock;
5755 start_bblock->cil_code = NULL;
5756 start_bblock->cil_length = 0;
5757 #if defined(__native_client_codegen__)
5758 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5759 ins->dreg = alloc_dreg (cfg, STACK_I4);
5760 MONO_ADD_INS (start_bblock, ins);
5761 #endif
5763 /* EXIT BLOCK */
5764 NEW_BBLOCK (cfg, end_bblock);
5765 cfg->bb_exit = end_bblock;
5766 end_bblock->cil_code = NULL;
5767 end_bblock->cil_length = 0;
5768 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5769 g_assert (cfg->num_bblocks == 2);
5771 arg_array = cfg->args;
5773 if (header->num_clauses) {
5774 cfg->spvars = g_hash_table_new (NULL, NULL);
5775 cfg->exvars = g_hash_table_new (NULL, NULL);
5777 /* handle exception clauses */
5778 for (i = 0; i < header->num_clauses; ++i) {
5779 MonoBasicBlock *try_bb;
5780 MonoExceptionClause *clause = &header->clauses [i];
5781 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5782 try_bb->real_offset = clause->try_offset;
5783 try_bb->try_start = TRUE;
5784 try_bb->region = ((i + 1) << 8) | clause->flags;
5785 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5786 tblock->real_offset = clause->handler_offset;
5787 tblock->flags |= BB_EXCEPTION_HANDLER;
5789 link_bblock (cfg, try_bb, tblock);
5791 if (*(ip + clause->handler_offset) == CEE_POP)
5792 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5794 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5795 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5796 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5797 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5798 MONO_ADD_INS (tblock, ins);
5800 if (seq_points) {
5801 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5802 MONO_ADD_INS (tblock, ins);
5805 /* todo: is a fault block unsafe to optimize? */
5806 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5807 tblock->flags |= BB_EXCEPTION_UNSAFE;
5811 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5812 while (p < end) {
5813 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5815 /* catch and filter blocks get the exception object on the stack */
5816 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5817 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5818 MonoInst *dummy_use;
5820 /* mostly like handle_stack_args (), but just sets the input args */
5821 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5822 tblock->in_scount = 1;
5823 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5824 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5827 * Add a dummy use for the exvar so its liveness info will be
5828 * correct.
5830 cfg->cbb = tblock;
5831 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5833 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5834 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5835 tblock->flags |= BB_EXCEPTION_HANDLER;
5836 tblock->real_offset = clause->data.filter_offset;
5837 tblock->in_scount = 1;
5838 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5839 /* The filter block shares the exvar with the handler block */
5840 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5841 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5842 MONO_ADD_INS (tblock, ins);
5846 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5847 clause->data.catch_class &&
5848 cfg->generic_sharing_context &&
5849 mono_class_check_context_used (clause->data.catch_class)) {
5851 * In shared generic code with catch
5852 * clauses containing type variables
5853 * the exception handling code has to
5854 * be able to get to the rgctx.
5855 * Therefore we have to make sure that
5856 * the vtable/mrgctx argument (for
5857 * static or generic methods) or the
5858 * "this" argument (for non-static
5859 * methods) are live.
5861 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5862 mini_method_get_context (method)->method_inst ||
5863 method->klass->valuetype) {
5864 mono_get_vtable_var (cfg);
5865 } else {
5866 MonoInst *dummy_use;
5868 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5872 } else {
5873 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5874 cfg->cbb = start_bblock;
5875 cfg->args = arg_array;
5876 mono_save_args (cfg, sig, inline_args);
5879 /* FIRST CODE BLOCK */
5880 NEW_BBLOCK (cfg, bblock);
5881 bblock->cil_code = ip;
5882 cfg->cbb = bblock;
5883 cfg->ip = ip;
5885 ADD_BBLOCK (cfg, bblock);
5887 if (cfg->method == method) {
5888 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5889 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5890 MONO_INST_NEW (cfg, ins, OP_BREAK);
5891 MONO_ADD_INS (bblock, ins);
5895 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5896 secman = mono_security_manager_get_methods ();
5898 security = (secman && mono_method_has_declsec (method));
5899 /* at this point having security doesn't mean we have any code to generate */
5900 if (security && (cfg->method == method)) {
5901 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5902 * And we do not want to enter the next section (with allocation) if we
5903 * have nothing to generate */
5904 security = mono_declsec_get_demands (method, &actions);
5907 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5908 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5909 if (pinvoke) {
5910 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5911 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5912 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5914 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5915 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5916 pinvoke = FALSE;
5918 if (custom)
5919 mono_custom_attrs_free (custom);
5921 if (pinvoke) {
5922 custom = mono_custom_attrs_from_class (wrapped->klass);
5923 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5924 pinvoke = FALSE;
5926 if (custom)
5927 mono_custom_attrs_free (custom);
5929 } else {
5930 /* not a P/Invoke after all */
5931 pinvoke = FALSE;
5935 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5936 /* we use a separate basic block for the initialization code */
5937 NEW_BBLOCK (cfg, init_localsbb);
5938 cfg->bb_init = init_localsbb;
5939 init_localsbb->real_offset = cfg->real_offset;
5940 start_bblock->next_bb = init_localsbb;
5941 init_localsbb->next_bb = bblock;
5942 link_bblock (cfg, start_bblock, init_localsbb);
5943 link_bblock (cfg, init_localsbb, bblock);
5945 cfg->cbb = init_localsbb;
5946 } else {
5947 start_bblock->next_bb = bblock;
5948 link_bblock (cfg, start_bblock, bblock);
5951 /* at this point we know, if security is TRUE, that some code needs to be generated */
5952 if (security && (cfg->method == method)) {
5953 MonoInst *args [2];
5955 mono_jit_stats.cas_demand_generation++;
5957 if (actions.demand.blob) {
5958 /* Add code for SecurityAction.Demand */
5959 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5960 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5961 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5962 mono_emit_method_call (cfg, secman->demand, args, NULL);
5964 if (actions.noncasdemand.blob) {
5965 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5966 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5967 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5968 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5969 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5970 mono_emit_method_call (cfg, secman->demand, args, NULL);
5972 if (actions.demandchoice.blob) {
5973 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5974 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5975 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5976 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5977 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5981 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5982 if (pinvoke) {
5983 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5986 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5987 /* check if this is native code, e.g. an icall or a p/invoke */
5988 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5989 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5990 if (wrapped) {
5991 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5992 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5994 /* if this ia a native call then it can only be JITted from platform code */
5995 if ((icall || pinvk) && method->klass && method->klass->image) {
5996 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5997 MonoException *ex = icall ? mono_get_exception_security () :
5998 mono_get_exception_method_access ();
5999 emit_throw_exception (cfg, ex);
6006 if (header->code_size == 0)
6007 UNVERIFIED;
6009 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6010 ip = err_pos;
6011 UNVERIFIED;
6014 if (cfg->method == method)
6015 mono_debug_init_method (cfg, bblock, breakpoint_id);
6017 for (n = 0; n < header->num_locals; ++n) {
6018 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6019 UNVERIFIED;
6021 class_inits = NULL;
6023 /* We force the vtable variable here for all shared methods
6024 for the possibility that they might show up in a stack
6025 trace where their exact instantiation is needed. */
6026 if (cfg->generic_sharing_context && method == cfg->method) {
6027 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6028 mini_method_get_context (method)->method_inst ||
6029 method->klass->valuetype) {
6030 mono_get_vtable_var (cfg);
6031 } else {
6032 /* FIXME: Is there a better way to do this?
6033 We need the variable live for the duration
6034 of the whole method. */
6035 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6039 /* add a check for this != NULL to inlined methods */
6040 if (is_virtual_call) {
6041 MonoInst *arg_ins;
6043 NEW_ARGLOAD (cfg, arg_ins, 0);
6044 MONO_ADD_INS (cfg->cbb, arg_ins);
6045 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6048 skip_dead_blocks = !dont_verify;
6049 if (skip_dead_blocks) {
6050 original_bb = bb = mono_basic_block_split (method, &error);
6051 if (!mono_error_ok (&error)) {
6052 mono_error_cleanup (&error);
6053 UNVERIFIED;
6055 g_assert (bb);
6058 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6059 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6061 ins_flag = 0;
6062 start_new_bblock = 0;
6063 cfg->cbb = bblock;
6064 while (ip < end) {
6065 if (cfg->method == method)
6066 cfg->real_offset = ip - header->code;
6067 else
6068 cfg->real_offset = inline_offset;
6069 cfg->ip = ip;
6071 context_used = 0;
6073 if (start_new_bblock) {
6074 bblock->cil_length = ip - bblock->cil_code;
6075 if (start_new_bblock == 2) {
6076 g_assert (ip == tblock->cil_code);
6077 } else {
6078 GET_BBLOCK (cfg, tblock, ip);
6080 bblock->next_bb = tblock;
6081 bblock = tblock;
6082 cfg->cbb = bblock;
6083 start_new_bblock = 0;
6084 for (i = 0; i < bblock->in_scount; ++i) {
6085 if (cfg->verbose_level > 3)
6086 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6087 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6088 *sp++ = ins;
6090 if (class_inits)
6091 g_slist_free (class_inits);
6092 class_inits = NULL;
6093 } else {
6094 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6095 link_bblock (cfg, bblock, tblock);
6096 if (sp != stack_start) {
6097 handle_stack_args (cfg, stack_start, sp - stack_start);
6098 sp = stack_start;
6099 CHECK_UNVERIFIABLE (cfg);
6101 bblock->next_bb = tblock;
6102 bblock = tblock;
6103 cfg->cbb = bblock;
6104 for (i = 0; i < bblock->in_scount; ++i) {
6105 if (cfg->verbose_level > 3)
6106 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6107 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6108 *sp++ = ins;
6110 g_slist_free (class_inits);
6111 class_inits = NULL;
6115 if (skip_dead_blocks) {
6116 int ip_offset = ip - header->code;
6118 if (ip_offset == bb->end)
6119 bb = bb->next;
6121 if (bb->dead) {
6122 int op_size = mono_opcode_size (ip, end);
6123 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6125 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6127 if (ip_offset + op_size == bb->end) {
6128 MONO_INST_NEW (cfg, ins, OP_NOP);
6129 MONO_ADD_INS (bblock, ins);
6130 start_new_bblock = 1;
6133 ip += op_size;
6134 continue;
6138 * Sequence points are points where the debugger can place a breakpoint.
6139 * Currently, we generate these automatically at points where the IL
6140 * stack is empty.
6142 if (seq_points && sp == stack_start) {
6143 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6144 MONO_ADD_INS (cfg->cbb, ins);
6147 bblock->real_offset = cfg->real_offset;
6149 if ((cfg->method == method) && cfg->coverage_info) {
6150 guint32 cil_offset = ip - header->code;
6151 cfg->coverage_info->data [cil_offset].cil_code = ip;
6153 /* TODO: Use an increment here */
6154 #if defined(TARGET_X86)
6155 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6156 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6157 ins->inst_imm = 1;
6158 MONO_ADD_INS (cfg->cbb, ins);
6159 #else
6160 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6161 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6162 #endif
6165 if (cfg->verbose_level > 3)
6166 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6168 switch (*ip) {
6169 case CEE_NOP:
6170 if (cfg->keep_cil_nops)
6171 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6172 else
6173 MONO_INST_NEW (cfg, ins, OP_NOP);
6174 ip++;
6175 MONO_ADD_INS (bblock, ins);
6176 break;
6177 case CEE_BREAK:
6178 if (should_insert_brekpoint (cfg->method))
6179 MONO_INST_NEW (cfg, ins, OP_BREAK);
6180 else
6181 MONO_INST_NEW (cfg, ins, OP_NOP);
6182 ip++;
6183 MONO_ADD_INS (bblock, ins);
6184 break;
6185 case CEE_LDARG_0:
6186 case CEE_LDARG_1:
6187 case CEE_LDARG_2:
6188 case CEE_LDARG_3:
6189 CHECK_STACK_OVF (1);
6190 n = (*ip)-CEE_LDARG_0;
6191 CHECK_ARG (n);
6192 EMIT_NEW_ARGLOAD (cfg, ins, n);
6193 ip++;
6194 *sp++ = ins;
6195 break;
6196 case CEE_LDLOC_0:
6197 case CEE_LDLOC_1:
6198 case CEE_LDLOC_2:
6199 case CEE_LDLOC_3:
6200 CHECK_STACK_OVF (1);
6201 n = (*ip)-CEE_LDLOC_0;
6202 CHECK_LOCAL (n);
6203 EMIT_NEW_LOCLOAD (cfg, ins, n);
6204 ip++;
6205 *sp++ = ins;
6206 break;
6207 case CEE_STLOC_0:
6208 case CEE_STLOC_1:
6209 case CEE_STLOC_2:
6210 case CEE_STLOC_3: {
6211 CHECK_STACK (1);
6212 n = (*ip)-CEE_STLOC_0;
6213 CHECK_LOCAL (n);
6214 --sp;
6215 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6216 UNVERIFIED;
6217 emit_stloc_ir (cfg, sp, header, n);
6218 ++ip;
6219 inline_costs += 1;
6220 break;
6222 case CEE_LDARG_S:
6223 CHECK_OPSIZE (2);
6224 CHECK_STACK_OVF (1);
6225 n = ip [1];
6226 CHECK_ARG (n);
6227 EMIT_NEW_ARGLOAD (cfg, ins, n);
6228 *sp++ = ins;
6229 ip += 2;
6230 break;
6231 case CEE_LDARGA_S:
6232 CHECK_OPSIZE (2);
6233 CHECK_STACK_OVF (1);
6234 n = ip [1];
6235 CHECK_ARG (n);
6236 NEW_ARGLOADA (cfg, ins, n);
6237 MONO_ADD_INS (cfg->cbb, ins);
6238 *sp++ = ins;
6239 ip += 2;
6240 break;
6241 case CEE_STARG_S:
6242 CHECK_OPSIZE (2);
6243 CHECK_STACK (1);
6244 --sp;
6245 n = ip [1];
6246 CHECK_ARG (n);
6247 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6248 UNVERIFIED;
6249 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6250 ip += 2;
6251 break;
6252 case CEE_LDLOC_S:
6253 CHECK_OPSIZE (2);
6254 CHECK_STACK_OVF (1);
6255 n = ip [1];
6256 CHECK_LOCAL (n);
6257 EMIT_NEW_LOCLOAD (cfg, ins, n);
6258 *sp++ = ins;
6259 ip += 2;
6260 break;
6261 case CEE_LDLOCA_S: {
6262 unsigned char *tmp_ip;
6263 CHECK_OPSIZE (2);
6264 CHECK_STACK_OVF (1);
6265 CHECK_LOCAL (ip [1]);
6267 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6268 ip = tmp_ip;
6269 inline_costs += 1;
6270 break;
6273 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6274 *sp++ = ins;
6275 ip += 2;
6276 break;
6278 case CEE_STLOC_S:
6279 CHECK_OPSIZE (2);
6280 CHECK_STACK (1);
6281 --sp;
6282 CHECK_LOCAL (ip [1]);
6283 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6284 UNVERIFIED;
6285 emit_stloc_ir (cfg, sp, header, ip [1]);
6286 ip += 2;
6287 inline_costs += 1;
6288 break;
6289 case CEE_LDNULL:
6290 CHECK_STACK_OVF (1);
6291 EMIT_NEW_PCONST (cfg, ins, NULL);
6292 ins->type = STACK_OBJ;
6293 ++ip;
6294 *sp++ = ins;
6295 break;
6296 case CEE_LDC_I4_M1:
6297 CHECK_STACK_OVF (1);
6298 EMIT_NEW_ICONST (cfg, ins, -1);
6299 ++ip;
6300 *sp++ = ins;
6301 break;
6302 case CEE_LDC_I4_0:
6303 case CEE_LDC_I4_1:
6304 case CEE_LDC_I4_2:
6305 case CEE_LDC_I4_3:
6306 case CEE_LDC_I4_4:
6307 case CEE_LDC_I4_5:
6308 case CEE_LDC_I4_6:
6309 case CEE_LDC_I4_7:
6310 case CEE_LDC_I4_8:
6311 CHECK_STACK_OVF (1);
6312 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6313 ++ip;
6314 *sp++ = ins;
6315 break;
6316 case CEE_LDC_I4_S:
6317 CHECK_OPSIZE (2);
6318 CHECK_STACK_OVF (1);
6319 ++ip;
6320 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6321 ++ip;
6322 *sp++ = ins;
6323 break;
6324 case CEE_LDC_I4:
6325 CHECK_OPSIZE (5);
6326 CHECK_STACK_OVF (1);
6327 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6328 ip += 5;
6329 *sp++ = ins;
6330 break;
6331 case CEE_LDC_I8:
6332 CHECK_OPSIZE (9);
6333 CHECK_STACK_OVF (1);
6334 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6335 ins->type = STACK_I8;
6336 ins->dreg = alloc_dreg (cfg, STACK_I8);
6337 ++ip;
6338 ins->inst_l = (gint64)read64 (ip);
6339 MONO_ADD_INS (bblock, ins);
6340 ip += 8;
6341 *sp++ = ins;
6342 break;
6343 case CEE_LDC_R4: {
6344 float *f;
6345 gboolean use_aotconst = FALSE;
6347 #ifdef TARGET_POWERPC
6348 /* FIXME: Clean this up */
6349 if (cfg->compile_aot)
6350 use_aotconst = TRUE;
6351 #endif
6353 /* FIXME: we should really allocate this only late in the compilation process */
6354 f = mono_domain_alloc (cfg->domain, sizeof (float));
6355 CHECK_OPSIZE (5);
6356 CHECK_STACK_OVF (1);
6358 if (use_aotconst) {
6359 MonoInst *cons;
6360 int dreg;
6362 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6364 dreg = alloc_freg (cfg);
6365 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6366 ins->type = STACK_R8;
6367 } else {
6368 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6369 ins->type = STACK_R8;
6370 ins->dreg = alloc_dreg (cfg, STACK_R8);
6371 ins->inst_p0 = f;
6372 MONO_ADD_INS (bblock, ins);
6374 ++ip;
6375 readr4 (ip, f);
6376 ip += 4;
6377 *sp++ = ins;
6378 break;
6380 case CEE_LDC_R8: {
6381 double *d;
6382 gboolean use_aotconst = FALSE;
6384 #ifdef TARGET_POWERPC
6385 /* FIXME: Clean this up */
6386 if (cfg->compile_aot)
6387 use_aotconst = TRUE;
6388 #endif
6390 /* FIXME: we should really allocate this only late in the compilation process */
6391 d = mono_domain_alloc (cfg->domain, sizeof (double));
6392 CHECK_OPSIZE (9);
6393 CHECK_STACK_OVF (1);
6395 if (use_aotconst) {
6396 MonoInst *cons;
6397 int dreg;
6399 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6401 dreg = alloc_freg (cfg);
6402 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6403 ins->type = STACK_R8;
6404 } else {
6405 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6406 ins->type = STACK_R8;
6407 ins->dreg = alloc_dreg (cfg, STACK_R8);
6408 ins->inst_p0 = d;
6409 MONO_ADD_INS (bblock, ins);
6411 ++ip;
6412 readr8 (ip, d);
6413 ip += 8;
6414 *sp++ = ins;
6415 break;
6417 case CEE_DUP: {
6418 MonoInst *temp, *store;
6419 CHECK_STACK (1);
6420 CHECK_STACK_OVF (1);
6421 sp--;
6422 ins = *sp;
6424 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6425 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6427 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6428 *sp++ = ins;
6430 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6431 *sp++ = ins;
6433 ++ip;
6434 inline_costs += 2;
6435 break;
6437 case CEE_POP:
6438 CHECK_STACK (1);
6439 ip++;
6440 --sp;
6442 #ifdef TARGET_X86
6443 if (sp [0]->type == STACK_R8)
6444 /* we need to pop the value from the x86 FP stack */
6445 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6446 #endif
6447 break;
6448 case CEE_JMP: {
6449 MonoCallInst *call;
6451 INLINE_FAILURE;
6453 CHECK_OPSIZE (5);
6454 if (stack_start != sp)
6455 UNVERIFIED;
6456 token = read32 (ip + 1);
6457 /* FIXME: check the signature matches */
6458 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6460 if (!cmethod || mono_loader_get_last_error ())
6461 LOAD_ERROR;
6463 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6464 GENERIC_SHARING_FAILURE (CEE_JMP);
6466 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6467 CHECK_CFG_EXCEPTION;
6469 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6471 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6472 int i, n;
6474 /* Handle tail calls similarly to calls */
6475 n = fsig->param_count + fsig->hasthis;
6477 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6478 call->method = cmethod;
6479 call->tail_call = TRUE;
6480 call->signature = mono_method_signature (cmethod);
6481 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6482 call->inst.inst_p0 = cmethod;
6483 for (i = 0; i < n; ++i)
6484 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6486 mono_arch_emit_call (cfg, call);
6487 MONO_ADD_INS (bblock, (MonoInst*)call);
6489 #else
6490 for (i = 0; i < num_args; ++i)
6491 /* Prevent arguments from being optimized away */
6492 arg_array [i]->flags |= MONO_INST_VOLATILE;
6494 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6495 ins = (MonoInst*)call;
6496 ins->inst_p0 = cmethod;
6497 MONO_ADD_INS (bblock, ins);
6498 #endif
6500 ip += 5;
6501 start_new_bblock = 1;
6502 break;
6504 case CEE_CALLI:
6505 case CEE_CALL:
6506 case CEE_CALLVIRT: {
6507 MonoInst *addr = NULL;
6508 MonoMethodSignature *fsig = NULL;
6509 int array_rank = 0;
6510 int virtual = *ip == CEE_CALLVIRT;
6511 int calli = *ip == CEE_CALLI;
6512 gboolean pass_imt_from_rgctx = FALSE;
6513 MonoInst *imt_arg = NULL;
6514 gboolean pass_vtable = FALSE;
6515 gboolean pass_mrgctx = FALSE;
6516 MonoInst *vtable_arg = NULL;
6517 gboolean check_this = FALSE;
6518 gboolean supported_tail_call = FALSE;
6520 CHECK_OPSIZE (5);
6521 token = read32 (ip + 1);
6523 if (calli) {
6524 cmethod = NULL;
6525 CHECK_STACK (1);
6526 --sp;
6527 addr = *sp;
6528 if (method->wrapper_type != MONO_WRAPPER_NONE)
6529 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6530 else
6531 fsig = mono_metadata_parse_signature (image, token);
6533 n = fsig->param_count + fsig->hasthis;
6535 if (method->dynamic && fsig->pinvoke) {
6536 MonoInst *args [3];
6539 * This is a call through a function pointer using a pinvoke
6540 * signature. Have to create a wrapper and call that instead.
6541 * FIXME: This is very slow, need to create a wrapper at JIT time
6542 * instead based on the signature.
6544 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6545 EMIT_NEW_PCONST (cfg, args [1], fsig);
6546 args [2] = addr;
6547 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6549 } else {
6550 MonoMethod *cil_method;
6552 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6553 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6554 cil_method = cmethod;
6555 } else if (constrained_call) {
6556 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6558 * This is needed since get_method_constrained can't find
6559 * the method in klass representing a type var.
6560 * The type var is guaranteed to be a reference type in this
6561 * case.
6563 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6564 cil_method = cmethod;
6565 g_assert (!cmethod->klass->valuetype);
6566 } else {
6567 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6569 } else {
6570 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6571 cil_method = cmethod;
6574 if (!cmethod || mono_loader_get_last_error ())
6575 LOAD_ERROR;
6576 if (!dont_verify && !cfg->skip_visibility) {
6577 MonoMethod *target_method = cil_method;
6578 if (method->is_inflated) {
6579 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6581 if (!mono_method_can_access_method (method_definition, target_method) &&
6582 !mono_method_can_access_method (method, cil_method))
6583 METHOD_ACCESS_FAILURE;
6586 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6587 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6589 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6590 /* MS.NET seems to silently convert this to a callvirt */
6591 virtual = 1;
6595 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6596 * converts to a callvirt.
6598 * tests/bug-515884.il is an example of this behavior
6600 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6601 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6602 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6603 virtual = 1;
6606 if (!cmethod->klass->inited)
6607 if (!mono_class_init (cmethod->klass))
6608 LOAD_ERROR;
6610 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6611 mini_class_is_system_array (cmethod->klass)) {
6612 array_rank = cmethod->klass->rank;
6613 fsig = mono_method_signature (cmethod);
6614 } else {
6615 fsig = mono_method_signature (cmethod);
6617 if (!fsig)
6618 LOAD_ERROR;
6620 if (fsig->pinvoke) {
6621 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6622 check_for_pending_exc, FALSE);
6623 fsig = mono_method_signature (wrapper);
6624 } else if (constrained_call) {
6625 fsig = mono_method_signature (cmethod);
6626 } else {
6627 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6631 mono_save_token_info (cfg, image, token, cil_method);
6633 n = fsig->param_count + fsig->hasthis;
6635 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6636 if (check_linkdemand (cfg, method, cmethod))
6637 INLINE_FAILURE;
6638 CHECK_CFG_EXCEPTION;
6641 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6642 g_assert_not_reached ();
6645 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6646 UNVERIFIED;
6648 if (!cfg->generic_sharing_context && cmethod)
6649 g_assert (!mono_method_check_context_used (cmethod));
6651 CHECK_STACK (n);
6653 //g_assert (!virtual || fsig->hasthis);
6655 sp -= n;
6657 if (constrained_call) {
6659 * We have the `constrained.' prefix opcode.
6661 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6663 * The type parameter is instantiated as a valuetype,
6664 * but that type doesn't override the method we're
6665 * calling, so we need to box `this'.
6667 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6668 ins->klass = constrained_call;
6669 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6670 CHECK_CFG_EXCEPTION;
6671 } else if (!constrained_call->valuetype) {
6672 int dreg = alloc_ireg_ref (cfg);
6675 * The type parameter is instantiated as a reference
6676 * type. We have a managed pointer on the stack, so
6677 * we need to dereference it here.
6679 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6680 ins->type = STACK_OBJ;
6681 sp [0] = ins;
6682 } else if (cmethod->klass->valuetype)
6683 virtual = 0;
6684 constrained_call = NULL;
6687 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6688 UNVERIFIED;
6691 * If the callee is a shared method, then its static cctor
6692 * might not get called after the call was patched.
6694 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6695 emit_generic_class_init (cfg, cmethod->klass);
6696 CHECK_TYPELOAD (cmethod->klass);
6699 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6700 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6701 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6702 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6703 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6706 * Pass vtable iff target method might
6707 * be shared, which means that sharing
6708 * is enabled for its class and its
6709 * context is sharable (and it's not a
6710 * generic method).
6712 if (sharing_enabled && context_sharable &&
6713 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6714 pass_vtable = TRUE;
6717 if (cmethod && mini_method_get_context (cmethod) &&
6718 mini_method_get_context (cmethod)->method_inst) {
6719 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6720 MonoGenericContext *context = mini_method_get_context (cmethod);
6721 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6723 g_assert (!pass_vtable);
6725 if (sharing_enabled && context_sharable)
6726 pass_mrgctx = TRUE;
6729 if (cfg->generic_sharing_context && cmethod) {
6730 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6732 context_used = mono_method_check_context_used (cmethod);
6734 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6735 /* Generic method interface
6736 calls are resolved via a
6737 helper function and don't
6738 need an imt. */
6739 if (!cmethod_context || !cmethod_context->method_inst)
6740 pass_imt_from_rgctx = TRUE;
6744 * If a shared method calls another
6745 * shared method then the caller must
6746 * have a generic sharing context
6747 * because the magic trampoline
6748 * requires it. FIXME: We shouldn't
6749 * have to force the vtable/mrgctx
6750 * variable here. Instead there
6751 * should be a flag in the cfg to
6752 * request a generic sharing context.
6754 if (context_used &&
6755 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6756 mono_get_vtable_var (cfg);
6759 if (pass_vtable) {
6760 if (context_used) {
6761 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6762 } else {
6763 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6765 CHECK_TYPELOAD (cmethod->klass);
6766 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6770 if (pass_mrgctx) {
6771 g_assert (!vtable_arg);
6773 if (!cfg->compile_aot) {
6775 * emit_get_rgctx_method () calls mono_class_vtable () so check
6776 * for type load errors before.
6778 mono_class_setup_vtable (cmethod->klass);
6779 CHECK_TYPELOAD (cmethod->klass);
6782 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6784 /* !marshalbyref is needed to properly handle generic methods + remoting */
6785 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6786 MONO_METHOD_IS_FINAL (cmethod)) &&
6787 !cmethod->klass->marshalbyref) {
6788 if (virtual)
6789 check_this = TRUE;
6790 virtual = 0;
6794 if (pass_imt_from_rgctx) {
6795 g_assert (!pass_vtable);
6796 g_assert (cmethod);
6798 imt_arg = emit_get_rgctx_method (cfg, context_used,
6799 cmethod, MONO_RGCTX_INFO_METHOD);
6802 if (check_this)
6803 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6805 /* Calling virtual generic methods */
6806 if (cmethod && virtual &&
6807 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6808 !(MONO_METHOD_IS_FINAL (cmethod) &&
6809 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6810 mono_method_signature (cmethod)->generic_param_count) {
6811 MonoInst *this_temp, *this_arg_temp, *store;
6812 MonoInst *iargs [4];
6814 g_assert (mono_method_signature (cmethod)->is_inflated);
6816 /* Prevent inlining of methods that contain indirect calls */
6817 INLINE_FAILURE;
6819 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6820 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6821 g_assert (!imt_arg);
6822 if (!context_used)
6823 g_assert (cmethod->is_inflated);
6824 imt_arg = emit_get_rgctx_method (cfg, context_used,
6825 cmethod, MONO_RGCTX_INFO_METHOD);
6826 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6827 } else
6828 #endif
6830 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6831 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6832 MONO_ADD_INS (bblock, store);
6834 /* FIXME: This should be a managed pointer */
6835 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6837 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6838 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6839 cmethod, MONO_RGCTX_INFO_METHOD);
6840 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6841 addr = mono_emit_jit_icall (cfg,
6842 mono_helper_compile_generic_method, iargs);
6844 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6846 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6849 if (!MONO_TYPE_IS_VOID (fsig->ret))
6850 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6852 CHECK_CFG_EXCEPTION;
6854 ip += 5;
6855 ins_flag = 0;
6856 break;
6860 * Implement a workaround for the inherent races involved in locking:
6861 * Monitor.Enter ()
6862 * try {
6863 * } finally {
6864 * Monitor.Exit ()
6866 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6867 * try block, the Exit () won't be executed, see:
6868 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6869 * To work around this, we extend such try blocks to include the last x bytes
6870 * of the Monitor.Enter () call.
6872 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6873 MonoBasicBlock *tbb;
6875 GET_BBLOCK (cfg, tbb, ip + 5);
6877 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6878 * from Monitor.Enter like ArgumentNullException.
6880 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6881 /* Mark this bblock as needing to be extended */
6882 tbb->extend_try_block = TRUE;
6886 /* Conversion to a JIT intrinsic */
6887 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6888 bblock = cfg->cbb;
6889 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6890 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6891 *sp = ins;
6892 sp++;
6895 CHECK_CFG_EXCEPTION;
6897 ip += 5;
6898 ins_flag = 0;
6899 break;
6902 /* Inlining */
6903 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6904 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6905 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6906 !g_list_find (dont_inline, cmethod)) {
6907 int costs;
6908 gboolean always = FALSE;
6910 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6911 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6912 /* Prevent inlining of methods that call wrappers */
6913 INLINE_FAILURE;
6914 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6915 always = TRUE;
6918 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6919 ip += 5;
6920 cfg->real_offset += 5;
6921 bblock = cfg->cbb;
6923 if (!MONO_TYPE_IS_VOID (fsig->ret))
6924 /* *sp is already set by inline_method */
6925 sp++;
6927 inline_costs += costs;
6928 ins_flag = 0;
6929 break;
6933 inline_costs += 10 * num_calls++;
6935 /* Tail recursion elimination */
6936 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6937 gboolean has_vtargs = FALSE;
6938 int i;
6940 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6941 INLINE_FAILURE;
6943 /* keep it simple */
6944 for (i = fsig->param_count - 1; i >= 0; i--) {
6945 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6946 has_vtargs = TRUE;
6949 if (!has_vtargs) {
6950 for (i = 0; i < n; ++i)
6951 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6952 MONO_INST_NEW (cfg, ins, OP_BR);
6953 MONO_ADD_INS (bblock, ins);
6954 tblock = start_bblock->out_bb [0];
6955 link_bblock (cfg, bblock, tblock);
6956 ins->inst_target_bb = tblock;
6957 start_new_bblock = 1;
6959 /* skip the CEE_RET, too */
6960 if (ip_in_bb (cfg, bblock, ip + 5))
6961 ip += 6;
6962 else
6963 ip += 5;
6965 ins_flag = 0;
6966 break;
6970 /* Generic sharing */
6971 /* FIXME: only do this for generic methods if
6972 they are not shared! */
6973 if (context_used && !imt_arg && !array_rank &&
6974 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6975 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6976 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6977 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6978 INLINE_FAILURE;
6980 g_assert (cfg->generic_sharing_context && cmethod);
6981 g_assert (!addr);
6984 * We are compiling a call to a
6985 * generic method from shared code,
6986 * which means that we have to look up
6987 * the method in the rgctx and do an
6988 * indirect call.
6990 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6993 /* Indirect calls */
6994 if (addr) {
6995 g_assert (!imt_arg);
6997 if (*ip == CEE_CALL)
6998 g_assert (context_used);
6999 else if (*ip == CEE_CALLI)
7000 g_assert (!vtable_arg);
7001 else
7002 /* FIXME: what the hell is this??? */
7003 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7004 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7006 /* Prevent inlining of methods with indirect calls */
7007 INLINE_FAILURE;
7009 if (vtable_arg) {
7010 MonoCallInst *call;
7012 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7013 call = (MonoCallInst*)ins;
7014 } else {
7015 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7017 * Instead of emitting an indirect call, emit a direct call
7018 * with the contents of the aotconst as the patch info.
7020 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7021 NULLIFY_INS (addr);
7022 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7023 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7024 NULLIFY_INS (addr);
7025 } else {
7026 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7029 if (!MONO_TYPE_IS_VOID (fsig->ret))
7030 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7032 CHECK_CFG_EXCEPTION;
7034 ip += 5;
7035 ins_flag = 0;
7036 break;
7039 /* Array methods */
7040 if (array_rank) {
7041 MonoInst *addr;
7043 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7044 MonoInst *val = sp [fsig->param_count];
7046 if (val->type == STACK_OBJ) {
7047 MonoInst *iargs [2];
7049 iargs [0] = sp [0];
7050 iargs [1] = val;
7052 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7055 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7056 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7057 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7058 emit_write_barrier (cfg, addr, val, 0);
7059 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7060 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7064 *sp++ = ins;
7065 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7066 if (!cmethod->klass->element_class->valuetype && !readonly)
7067 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7068 CHECK_TYPELOAD (cmethod->klass);
7070 readonly = FALSE;
7071 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7072 *sp++ = addr;
7073 } else {
7074 g_assert_not_reached ();
7077 CHECK_CFG_EXCEPTION;
7079 ip += 5;
7080 ins_flag = 0;
7081 break;
7084 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7085 if (ins) {
7086 if (!MONO_TYPE_IS_VOID (fsig->ret))
7087 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7089 CHECK_CFG_EXCEPTION;
7091 ip += 5;
7092 ins_flag = 0;
7093 break;
7096 /* Tail prefix / tail call optimization */
7098 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7099 /* FIXME: runtime generic context pointer for jumps? */
7100 /* FIXME: handle this for generic sharing eventually */
7101 supported_tail_call = cmethod &&
7102 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7103 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7104 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7106 if (supported_tail_call) {
7107 MonoCallInst *call;
7109 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7110 INLINE_FAILURE;
7112 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7114 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7115 /* Handle tail calls similarly to calls */
7116 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7117 #else
7118 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7119 call->tail_call = TRUE;
7120 call->method = cmethod;
7121 call->signature = mono_method_signature (cmethod);
7124 * We implement tail calls by storing the actual arguments into the
7125 * argument variables, then emitting a CEE_JMP.
7127 for (i = 0; i < n; ++i) {
7128 /* Prevent argument from being register allocated */
7129 arg_array [i]->flags |= MONO_INST_VOLATILE;
7130 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7132 #endif
7134 ins = (MonoInst*)call;
7135 ins->inst_p0 = cmethod;
7136 ins->inst_p1 = arg_array [0];
7137 MONO_ADD_INS (bblock, ins);
7138 link_bblock (cfg, bblock, end_bblock);
7139 start_new_bblock = 1;
7141 CHECK_CFG_EXCEPTION;
7143 ip += 5;
7144 ins_flag = 0;
7146 // FIXME: Eliminate unreachable epilogs
7149 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7150 * only reachable from this call.
7152 GET_BBLOCK (cfg, tblock, ip);
7153 if (tblock == bblock || tblock->in_count == 0)
7154 ip += 1;
7155 break;
7158 /* Common call */
7159 INLINE_FAILURE;
7160 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7161 imt_arg, vtable_arg);
7163 if (!MONO_TYPE_IS_VOID (fsig->ret))
7164 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7166 CHECK_CFG_EXCEPTION;
7168 ip += 5;
7169 ins_flag = 0;
7170 break;
7172 case CEE_RET:
7173 if (cfg->method != method) {
7174 /* return from inlined method */
7176 * If in_count == 0, that means the ret is unreachable due to
7177 * being preceded by a throw. In that case, inline_method () will
7178 * handle setting the return value
7179 * (test case: test_0_inline_throw ()).
7181 if (return_var && cfg->cbb->in_count) {
7182 MonoInst *store;
7183 CHECK_STACK (1);
7184 --sp;
7185 //g_assert (returnvar != -1);
7186 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7187 cfg->ret_var_set = TRUE;
7189 } else {
7190 if (cfg->ret) {
7191 MonoType *ret_type = mono_method_signature (method)->ret;
7193 if (seq_points) {
7195 * Place a seq point here too even though the IL stack is not
7196 * empty, so a step over on
7197 * call <FOO>
7198 * ret
7199 * will work correctly.
7201 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7202 MONO_ADD_INS (cfg->cbb, ins);
7205 g_assert (!return_var);
7206 CHECK_STACK (1);
7207 --sp;
7209 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7210 UNVERIFIED;
7212 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7213 MonoInst *ret_addr;
7215 if (!cfg->vret_addr) {
7216 MonoInst *ins;
7218 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7219 } else {
7220 EMIT_NEW_RETLOADA (cfg, ret_addr);
7222 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7223 ins->klass = mono_class_from_mono_type (ret_type);
7225 } else {
7226 #ifdef MONO_ARCH_SOFT_FLOAT
7227 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7228 MonoInst *iargs [1];
7229 MonoInst *conv;
7231 iargs [0] = *sp;
7232 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7233 mono_arch_emit_setret (cfg, method, conv);
7234 } else {
7235 mono_arch_emit_setret (cfg, method, *sp);
7237 #else
7238 mono_arch_emit_setret (cfg, method, *sp);
7239 #endif
7243 if (sp != stack_start)
7244 UNVERIFIED;
7245 MONO_INST_NEW (cfg, ins, OP_BR);
7246 ip++;
7247 ins->inst_target_bb = end_bblock;
7248 MONO_ADD_INS (bblock, ins);
7249 link_bblock (cfg, bblock, end_bblock);
7250 start_new_bblock = 1;
7251 break;
7252 case CEE_BR_S:
7253 CHECK_OPSIZE (2);
7254 MONO_INST_NEW (cfg, ins, OP_BR);
7255 ip++;
7256 target = ip + 1 + (signed char)(*ip);
7257 ++ip;
7258 GET_BBLOCK (cfg, tblock, target);
7259 link_bblock (cfg, bblock, tblock);
7260 ins->inst_target_bb = tblock;
7261 if (sp != stack_start) {
7262 handle_stack_args (cfg, stack_start, sp - stack_start);
7263 sp = stack_start;
7264 CHECK_UNVERIFIABLE (cfg);
7266 MONO_ADD_INS (bblock, ins);
7267 start_new_bblock = 1;
7268 inline_costs += BRANCH_COST;
7269 break;
7270 case CEE_BEQ_S:
7271 case CEE_BGE_S:
7272 case CEE_BGT_S:
7273 case CEE_BLE_S:
7274 case CEE_BLT_S:
7275 case CEE_BNE_UN_S:
7276 case CEE_BGE_UN_S:
7277 case CEE_BGT_UN_S:
7278 case CEE_BLE_UN_S:
7279 case CEE_BLT_UN_S:
7280 CHECK_OPSIZE (2);
7281 CHECK_STACK (2);
7282 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7283 ip++;
7284 target = ip + 1 + *(signed char*)ip;
7285 ip++;
7287 ADD_BINCOND (NULL);
7289 sp = stack_start;
7290 inline_costs += BRANCH_COST;
7291 break;
7292 case CEE_BR:
7293 CHECK_OPSIZE (5);
7294 MONO_INST_NEW (cfg, ins, OP_BR);
7295 ip++;
7297 target = ip + 4 + (gint32)read32(ip);
7298 ip += 4;
7299 GET_BBLOCK (cfg, tblock, target);
7300 link_bblock (cfg, bblock, tblock);
7301 ins->inst_target_bb = tblock;
7302 if (sp != stack_start) {
7303 handle_stack_args (cfg, stack_start, sp - stack_start);
7304 sp = stack_start;
7305 CHECK_UNVERIFIABLE (cfg);
7308 MONO_ADD_INS (bblock, ins);
7310 start_new_bblock = 1;
7311 inline_costs += BRANCH_COST;
7312 break;
7313 case CEE_BRFALSE_S:
7314 case CEE_BRTRUE_S:
7315 case CEE_BRFALSE:
7316 case CEE_BRTRUE: {
7317 MonoInst *cmp;
7318 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7319 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7320 guint32 opsize = is_short ? 1 : 4;
7322 CHECK_OPSIZE (opsize);
7323 CHECK_STACK (1);
7324 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7325 UNVERIFIED;
7326 ip ++;
7327 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7328 ip += opsize;
7330 sp--;
7332 GET_BBLOCK (cfg, tblock, target);
7333 link_bblock (cfg, bblock, tblock);
7334 GET_BBLOCK (cfg, tblock, ip);
7335 link_bblock (cfg, bblock, tblock);
7337 if (sp != stack_start) {
7338 handle_stack_args (cfg, stack_start, sp - stack_start);
7339 CHECK_UNVERIFIABLE (cfg);
7342 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7343 cmp->sreg1 = sp [0]->dreg;
7344 type_from_op (cmp, sp [0], NULL);
7345 CHECK_TYPE (cmp);
7347 #if SIZEOF_REGISTER == 4
7348 if (cmp->opcode == OP_LCOMPARE_IMM) {
7349 /* Convert it to OP_LCOMPARE */
7350 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7351 ins->type = STACK_I8;
7352 ins->dreg = alloc_dreg (cfg, STACK_I8);
7353 ins->inst_l = 0;
7354 MONO_ADD_INS (bblock, ins);
7355 cmp->opcode = OP_LCOMPARE;
7356 cmp->sreg2 = ins->dreg;
7358 #endif
7359 MONO_ADD_INS (bblock, cmp);
7361 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7362 type_from_op (ins, sp [0], NULL);
7363 MONO_ADD_INS (bblock, ins);
7364 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7365 GET_BBLOCK (cfg, tblock, target);
7366 ins->inst_true_bb = tblock;
7367 GET_BBLOCK (cfg, tblock, ip);
7368 ins->inst_false_bb = tblock;
7369 start_new_bblock = 2;
7371 sp = stack_start;
7372 inline_costs += BRANCH_COST;
7373 break;
7375 case CEE_BEQ:
7376 case CEE_BGE:
7377 case CEE_BGT:
7378 case CEE_BLE:
7379 case CEE_BLT:
7380 case CEE_BNE_UN:
7381 case CEE_BGE_UN:
7382 case CEE_BGT_UN:
7383 case CEE_BLE_UN:
7384 case CEE_BLT_UN:
7385 CHECK_OPSIZE (5);
7386 CHECK_STACK (2);
7387 MONO_INST_NEW (cfg, ins, *ip);
7388 ip++;
7389 target = ip + 4 + (gint32)read32(ip);
7390 ip += 4;
7392 ADD_BINCOND (NULL);
7394 sp = stack_start;
7395 inline_costs += BRANCH_COST;
7396 break;
7397 case CEE_SWITCH: {
7398 MonoInst *src1;
7399 MonoBasicBlock **targets;
7400 MonoBasicBlock *default_bblock;
7401 MonoJumpInfoBBTable *table;
7402 int offset_reg = alloc_preg (cfg);
7403 int target_reg = alloc_preg (cfg);
7404 int table_reg = alloc_preg (cfg);
7405 int sum_reg = alloc_preg (cfg);
7406 gboolean use_op_switch;
7408 CHECK_OPSIZE (5);
7409 CHECK_STACK (1);
7410 n = read32 (ip + 1);
7411 --sp;
7412 src1 = sp [0];
7413 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7414 UNVERIFIED;
7416 ip += 5;
7417 CHECK_OPSIZE (n * sizeof (guint32));
7418 target = ip + n * sizeof (guint32);
7420 GET_BBLOCK (cfg, default_bblock, target);
7421 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7423 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7424 for (i = 0; i < n; ++i) {
7425 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7426 targets [i] = tblock;
7427 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7428 ip += 4;
7431 if (sp != stack_start) {
7433 * Link the current bb with the targets as well, so handle_stack_args
7434 * will set their in_stack correctly.
7436 link_bblock (cfg, bblock, default_bblock);
7437 for (i = 0; i < n; ++i)
7438 link_bblock (cfg, bblock, targets [i]);
7440 handle_stack_args (cfg, stack_start, sp - stack_start);
7441 sp = stack_start;
7442 CHECK_UNVERIFIABLE (cfg);
7445 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7447 bblock = cfg->cbb;
7449 for (i = 0; i < n; ++i)
7450 link_bblock (cfg, bblock, targets [i]);
7452 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7453 table->table = targets;
7454 table->table_size = n;
7456 use_op_switch = FALSE;
7457 #ifdef TARGET_ARM
7458 /* ARM implements SWITCH statements differently */
7459 /* FIXME: Make it use the generic implementation */
7460 if (!cfg->compile_aot)
7461 use_op_switch = TRUE;
7462 #endif
7464 if (COMPILE_LLVM (cfg))
7465 use_op_switch = TRUE;
7467 cfg->cbb->has_jump_table = 1;
7469 if (use_op_switch) {
7470 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7471 ins->sreg1 = src1->dreg;
7472 ins->inst_p0 = table;
7473 ins->inst_many_bb = targets;
7474 ins->klass = GUINT_TO_POINTER (n);
7475 MONO_ADD_INS (cfg->cbb, ins);
7476 } else {
7477 if (sizeof (gpointer) == 8)
7478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7479 else
7480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7482 #if SIZEOF_REGISTER == 8
7483 /* The upper word might not be zero, and we add it to a 64 bit address later */
7484 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7485 #endif
7487 if (cfg->compile_aot) {
7488 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7489 } else {
7490 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7491 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7492 ins->inst_p0 = table;
7493 ins->dreg = table_reg;
7494 MONO_ADD_INS (cfg->cbb, ins);
7497 /* FIXME: Use load_memindex */
7498 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7500 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7502 start_new_bblock = 1;
7503 inline_costs += (BRANCH_COST * 2);
7504 break;
7506 case CEE_LDIND_I1:
7507 case CEE_LDIND_U1:
7508 case CEE_LDIND_I2:
7509 case CEE_LDIND_U2:
7510 case CEE_LDIND_I4:
7511 case CEE_LDIND_U4:
7512 case CEE_LDIND_I8:
7513 case CEE_LDIND_I:
7514 case CEE_LDIND_R4:
7515 case CEE_LDIND_R8:
7516 case CEE_LDIND_REF:
7517 CHECK_STACK (1);
7518 --sp;
7520 switch (*ip) {
7521 case CEE_LDIND_R4:
7522 case CEE_LDIND_R8:
7523 dreg = alloc_freg (cfg);
7524 break;
7525 case CEE_LDIND_I8:
7526 dreg = alloc_lreg (cfg);
7527 break;
7528 case CEE_LDIND_REF:
7529 dreg = alloc_ireg_ref (cfg);
7530 break;
7531 default:
7532 dreg = alloc_preg (cfg);
7535 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7536 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7537 ins->flags |= ins_flag;
7538 ins_flag = 0;
7539 MONO_ADD_INS (bblock, ins);
7540 *sp++ = ins;
7541 ++ip;
7542 break;
7543 case CEE_STIND_REF:
7544 case CEE_STIND_I1:
7545 case CEE_STIND_I2:
7546 case CEE_STIND_I4:
7547 case CEE_STIND_I8:
7548 case CEE_STIND_R4:
7549 case CEE_STIND_R8:
7550 case CEE_STIND_I:
7551 CHECK_STACK (2);
7552 sp -= 2;
7554 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7555 ins->flags |= ins_flag;
7556 ins_flag = 0;
7557 MONO_ADD_INS (bblock, ins);
7559 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7560 emit_write_barrier (cfg, sp [0], sp [1], -1);
7562 inline_costs += 1;
7563 ++ip;
7564 break;
7566 case CEE_MUL:
7567 CHECK_STACK (2);
7569 MONO_INST_NEW (cfg, ins, (*ip));
7570 sp -= 2;
7571 ins->sreg1 = sp [0]->dreg;
7572 ins->sreg2 = sp [1]->dreg;
7573 type_from_op (ins, sp [0], sp [1]);
7574 CHECK_TYPE (ins);
7575 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7577 /* Use the immediate opcodes if possible */
7578 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7579 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7580 if (imm_opcode != -1) {
7581 ins->opcode = imm_opcode;
7582 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7583 ins->sreg2 = -1;
7585 sp [1]->opcode = OP_NOP;
7589 MONO_ADD_INS ((cfg)->cbb, (ins));
7591 *sp++ = mono_decompose_opcode (cfg, ins);
7592 ip++;
7593 break;
7594 case CEE_ADD:
7595 case CEE_SUB:
7596 case CEE_DIV:
7597 case CEE_DIV_UN:
7598 case CEE_REM:
7599 case CEE_REM_UN:
7600 case CEE_AND:
7601 case CEE_OR:
7602 case CEE_XOR:
7603 case CEE_SHL:
7604 case CEE_SHR:
7605 case CEE_SHR_UN:
7606 CHECK_STACK (2);
7608 MONO_INST_NEW (cfg, ins, (*ip));
7609 sp -= 2;
7610 ins->sreg1 = sp [0]->dreg;
7611 ins->sreg2 = sp [1]->dreg;
7612 type_from_op (ins, sp [0], sp [1]);
7613 CHECK_TYPE (ins);
7614 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7615 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7617 /* FIXME: Pass opcode to is_inst_imm */
7619 /* Use the immediate opcodes if possible */
7620 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7621 int imm_opcode;
7623 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7624 if (imm_opcode != -1) {
7625 ins->opcode = imm_opcode;
7626 if (sp [1]->opcode == OP_I8CONST) {
7627 #if SIZEOF_REGISTER == 8
7628 ins->inst_imm = sp [1]->inst_l;
7629 #else
7630 ins->inst_ls_word = sp [1]->inst_ls_word;
7631 ins->inst_ms_word = sp [1]->inst_ms_word;
7632 #endif
7634 else
7635 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7636 ins->sreg2 = -1;
7638 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7639 if (sp [1]->next == NULL)
7640 sp [1]->opcode = OP_NOP;
7643 MONO_ADD_INS ((cfg)->cbb, (ins));
7645 *sp++ = mono_decompose_opcode (cfg, ins);
7646 ip++;
7647 break;
7648 case CEE_NEG:
7649 case CEE_NOT:
7650 case CEE_CONV_I1:
7651 case CEE_CONV_I2:
7652 case CEE_CONV_I4:
7653 case CEE_CONV_R4:
7654 case CEE_CONV_R8:
7655 case CEE_CONV_U4:
7656 case CEE_CONV_I8:
7657 case CEE_CONV_U8:
7658 case CEE_CONV_OVF_I8:
7659 case CEE_CONV_OVF_U8:
7660 case CEE_CONV_R_UN:
7661 CHECK_STACK (1);
7663 /* Special case this earlier so we have long constants in the IR */
7664 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7665 int data = sp [-1]->inst_c0;
7666 sp [-1]->opcode = OP_I8CONST;
7667 sp [-1]->type = STACK_I8;
7668 #if SIZEOF_REGISTER == 8
7669 if ((*ip) == CEE_CONV_U8)
7670 sp [-1]->inst_c0 = (guint32)data;
7671 else
7672 sp [-1]->inst_c0 = data;
7673 #else
7674 sp [-1]->inst_ls_word = data;
7675 if ((*ip) == CEE_CONV_U8)
7676 sp [-1]->inst_ms_word = 0;
7677 else
7678 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7679 #endif
7680 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7682 else {
7683 ADD_UNOP (*ip);
7685 ip++;
7686 break;
7687 case CEE_CONV_OVF_I4:
7688 case CEE_CONV_OVF_I1:
7689 case CEE_CONV_OVF_I2:
7690 case CEE_CONV_OVF_I:
7691 case CEE_CONV_OVF_U:
7692 CHECK_STACK (1);
7694 if (sp [-1]->type == STACK_R8) {
7695 ADD_UNOP (CEE_CONV_OVF_I8);
7696 ADD_UNOP (*ip);
7697 } else {
7698 ADD_UNOP (*ip);
7700 ip++;
7701 break;
7702 case CEE_CONV_OVF_U1:
7703 case CEE_CONV_OVF_U2:
7704 case CEE_CONV_OVF_U4:
7705 CHECK_STACK (1);
7707 if (sp [-1]->type == STACK_R8) {
7708 ADD_UNOP (CEE_CONV_OVF_U8);
7709 ADD_UNOP (*ip);
7710 } else {
7711 ADD_UNOP (*ip);
7713 ip++;
7714 break;
7715 case CEE_CONV_OVF_I1_UN:
7716 case CEE_CONV_OVF_I2_UN:
7717 case CEE_CONV_OVF_I4_UN:
7718 case CEE_CONV_OVF_I8_UN:
7719 case CEE_CONV_OVF_U1_UN:
7720 case CEE_CONV_OVF_U2_UN:
7721 case CEE_CONV_OVF_U4_UN:
7722 case CEE_CONV_OVF_U8_UN:
7723 case CEE_CONV_OVF_I_UN:
7724 case CEE_CONV_OVF_U_UN:
7725 case CEE_CONV_U2:
7726 case CEE_CONV_U1:
7727 case CEE_CONV_I:
7728 case CEE_CONV_U:
7729 CHECK_STACK (1);
7730 ADD_UNOP (*ip);
7731 CHECK_CFG_EXCEPTION;
7732 ip++;
7733 break;
7734 case CEE_ADD_OVF:
7735 case CEE_ADD_OVF_UN:
7736 case CEE_MUL_OVF:
7737 case CEE_MUL_OVF_UN:
7738 case CEE_SUB_OVF:
7739 case CEE_SUB_OVF_UN:
7740 CHECK_STACK (2);
7741 ADD_BINOP (*ip);
7742 ip++;
7743 break;
7744 case CEE_CPOBJ:
7745 CHECK_OPSIZE (5);
7746 CHECK_STACK (2);
7747 token = read32 (ip + 1);
7748 klass = mini_get_class (method, token, generic_context);
7749 CHECK_TYPELOAD (klass);
7750 sp -= 2;
7751 if (generic_class_is_reference_type (cfg, klass)) {
7752 MonoInst *store, *load;
7753 int dreg = alloc_ireg_ref (cfg);
7755 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7756 load->flags |= ins_flag;
7757 MONO_ADD_INS (cfg->cbb, load);
7759 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7760 store->flags |= ins_flag;
7761 MONO_ADD_INS (cfg->cbb, store);
7763 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7764 emit_write_barrier (cfg, sp [0], sp [1], -1);
7765 } else {
7766 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7768 ins_flag = 0;
7769 ip += 5;
7770 break;
7771 case CEE_LDOBJ: {
7772 int loc_index = -1;
7773 int stloc_len = 0;
7775 CHECK_OPSIZE (5);
7776 CHECK_STACK (1);
7777 --sp;
7778 token = read32 (ip + 1);
7779 klass = mini_get_class (method, token, generic_context);
7780 CHECK_TYPELOAD (klass);
7782 /* Optimize the common ldobj+stloc combination */
7783 switch (ip [5]) {
7784 case CEE_STLOC_S:
7785 loc_index = ip [6];
7786 stloc_len = 2;
7787 break;
7788 case CEE_STLOC_0:
7789 case CEE_STLOC_1:
7790 case CEE_STLOC_2:
7791 case CEE_STLOC_3:
7792 loc_index = ip [5] - CEE_STLOC_0;
7793 stloc_len = 1;
7794 break;
7795 default:
7796 break;
7799 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7800 CHECK_LOCAL (loc_index);
7802 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7803 ins->dreg = cfg->locals [loc_index]->dreg;
7804 ip += 5;
7805 ip += stloc_len;
7806 break;
7809 /* Optimize the ldobj+stobj combination */
7810 /* The reference case ends up being a load+store anyway */
7811 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7812 CHECK_STACK (1);
7814 sp --;
7816 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7818 ip += 5 + 5;
7819 ins_flag = 0;
7820 break;
7823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7824 *sp++ = ins;
7826 ip += 5;
7827 ins_flag = 0;
7828 inline_costs += 1;
7829 break;
7831 case CEE_LDSTR:
7832 CHECK_STACK_OVF (1);
7833 CHECK_OPSIZE (5);
7834 n = read32 (ip + 1);
7836 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7837 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7838 ins->type = STACK_OBJ;
7839 *sp = ins;
7841 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7842 MonoInst *iargs [1];
7844 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7845 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7846 } else {
7847 if (cfg->opt & MONO_OPT_SHARED) {
7848 MonoInst *iargs [3];
7850 if (cfg->compile_aot) {
7851 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7853 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7854 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7855 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7856 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7857 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7858 } else {
7859 if (bblock->out_of_line) {
7860 MonoInst *iargs [2];
7862 if (image == mono_defaults.corlib) {
7864 * Avoid relocations in AOT and save some space by using a
7865 * version of helper_ldstr specialized to mscorlib.
7867 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7868 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7869 } else {
7870 /* Avoid creating the string object */
7871 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7872 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7873 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7876 else
7877 if (cfg->compile_aot) {
7878 NEW_LDSTRCONST (cfg, ins, image, n);
7879 *sp = ins;
7880 MONO_ADD_INS (bblock, ins);
7882 else {
7883 NEW_PCONST (cfg, ins, NULL);
7884 ins->type = STACK_OBJ;
7885 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7886 if (!ins->inst_p0)
7887 OUT_OF_MEMORY_FAILURE;
7889 *sp = ins;
7890 MONO_ADD_INS (bblock, ins);
7895 sp++;
7896 ip += 5;
7897 break;
7898 case CEE_NEWOBJ: {
7899 MonoInst *iargs [2];
7900 MonoMethodSignature *fsig;
7901 MonoInst this_ins;
7902 MonoInst *alloc;
7903 MonoInst *vtable_arg = NULL;
7905 CHECK_OPSIZE (5);
7906 token = read32 (ip + 1);
7907 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7908 if (!cmethod || mono_loader_get_last_error ())
7909 LOAD_ERROR;
7910 fsig = mono_method_get_signature (cmethod, image, token);
7911 if (!fsig)
7912 LOAD_ERROR;
7914 mono_save_token_info (cfg, image, token, cmethod);
7916 if (!mono_class_init (cmethod->klass))
7917 LOAD_ERROR;
7919 if (cfg->generic_sharing_context)
7920 context_used = mono_method_check_context_used (cmethod);
7922 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7923 if (check_linkdemand (cfg, method, cmethod))
7924 INLINE_FAILURE;
7925 CHECK_CFG_EXCEPTION;
7926 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7927 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7930 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7931 emit_generic_class_init (cfg, cmethod->klass);
7932 CHECK_TYPELOAD (cmethod->klass);
7935 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7936 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7937 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7938 mono_class_vtable (cfg->domain, cmethod->klass);
7939 CHECK_TYPELOAD (cmethod->klass);
7941 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7942 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7943 } else {
7944 if (context_used) {
7945 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7946 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7947 } else {
7948 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7950 CHECK_TYPELOAD (cmethod->klass);
7951 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7956 n = fsig->param_count;
7957 CHECK_STACK (n);
7960 * Generate smaller code for the common newobj <exception> instruction in
7961 * argument checking code.
7963 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7964 is_exception_class (cmethod->klass) && n <= 2 &&
7965 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7966 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7967 MonoInst *iargs [3];
7969 g_assert (!vtable_arg);
7971 sp -= n;
7973 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7974 switch (n) {
7975 case 0:
7976 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7977 break;
7978 case 1:
7979 iargs [1] = sp [0];
7980 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7981 break;
7982 case 2:
7983 iargs [1] = sp [0];
7984 iargs [2] = sp [1];
7985 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7986 break;
7987 default:
7988 g_assert_not_reached ();
7991 ip += 5;
7992 inline_costs += 5;
7993 break;
7996 /* move the args to allow room for 'this' in the first position */
7997 while (n--) {
7998 --sp;
7999 sp [1] = sp [0];
8002 /* check_call_signature () requires sp[0] to be set */
8003 this_ins.type = STACK_OBJ;
8004 sp [0] = &this_ins;
8005 if (check_call_signature (cfg, fsig, sp))
8006 UNVERIFIED;
8008 iargs [0] = NULL;
8010 if (mini_class_is_system_array (cmethod->klass)) {
8011 g_assert (!vtable_arg);
8013 *sp = emit_get_rgctx_method (cfg, context_used,
8014 cmethod, MONO_RGCTX_INFO_METHOD);
8016 /* Avoid varargs in the common case */
8017 if (fsig->param_count == 1)
8018 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8019 else if (fsig->param_count == 2)
8020 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8021 else if (fsig->param_count == 3)
8022 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8023 else
8024 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8025 } else if (cmethod->string_ctor) {
8026 g_assert (!context_used);
8027 g_assert (!vtable_arg);
8028 /* we simply pass a null pointer */
8029 EMIT_NEW_PCONST (cfg, *sp, NULL);
8030 /* now call the string ctor */
8031 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8032 } else {
8033 MonoInst* callvirt_this_arg = NULL;
8035 if (cmethod->klass->valuetype) {
8036 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8037 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8038 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8040 alloc = NULL;
8043 * The code generated by mini_emit_virtual_call () expects
8044 * iargs [0] to be a boxed instance, but luckily the vcall
8045 * will be transformed into a normal call there.
8047 } else if (context_used) {
8048 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8049 *sp = alloc;
8050 } else {
8051 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8053 CHECK_TYPELOAD (cmethod->klass);
8056 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8057 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8058 * As a workaround, we call class cctors before allocating objects.
8060 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8061 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8062 if (cfg->verbose_level > 2)
8063 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8064 class_inits = g_slist_prepend (class_inits, vtable);
8067 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8068 *sp = alloc;
8070 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8072 if (alloc)
8073 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8075 /* Now call the actual ctor */
8076 /* Avoid virtual calls to ctors if possible */
8077 if (cmethod->klass->marshalbyref)
8078 callvirt_this_arg = sp [0];
8081 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8082 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8083 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8084 *sp = ins;
8085 sp++;
8088 CHECK_CFG_EXCEPTION;
8089 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8090 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8091 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8092 !g_list_find (dont_inline, cmethod)) {
8093 int costs;
8095 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8096 cfg->real_offset += 5;
8097 bblock = cfg->cbb;
8099 inline_costs += costs - 5;
8100 } else {
8101 INLINE_FAILURE;
8102 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8104 } else if (context_used &&
8105 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8106 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8107 MonoInst *cmethod_addr;
8109 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8110 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8112 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8113 } else {
8114 INLINE_FAILURE;
8115 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8116 callvirt_this_arg, NULL, vtable_arg);
8120 if (alloc == NULL) {
8121 /* Valuetype */
8122 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8123 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8124 *sp++= ins;
8126 else
8127 *sp++ = alloc;
8129 ip += 5;
8130 inline_costs += 5;
8131 break;
8133 case CEE_CASTCLASS:
8134 CHECK_STACK (1);
8135 --sp;
8136 CHECK_OPSIZE (5);
8137 token = read32 (ip + 1);
8138 klass = mini_get_class (method, token, generic_context);
8139 CHECK_TYPELOAD (klass);
8140 if (sp [0]->type != STACK_OBJ)
8141 UNVERIFIED;
8143 if (cfg->generic_sharing_context)
8144 context_used = mono_class_check_context_used (klass);
8146 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8147 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8148 MonoInst *args [3];
8150 /* obj */
8151 args [0] = *sp;
8153 /* klass */
8154 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8156 /* inline cache*/
8157 /*FIXME AOT support*/
8158 if (cfg->compile_aot)
8159 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8160 else
8161 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8163 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8164 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8165 ip += 5;
8166 inline_costs += 2;
8167 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8168 MonoMethod *mono_castclass;
8169 MonoInst *iargs [1];
8170 int costs;
8172 mono_castclass = mono_marshal_get_castclass (klass);
8173 iargs [0] = sp [0];
8175 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8176 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8177 CHECK_CFG_EXCEPTION;
8178 g_assert (costs > 0);
8180 ip += 5;
8181 cfg->real_offset += 5;
8182 bblock = cfg->cbb;
8184 *sp++ = iargs [0];
8186 inline_costs += costs;
8188 else {
8189 ins = handle_castclass (cfg, klass, *sp, context_used);
8190 CHECK_CFG_EXCEPTION;
8191 bblock = cfg->cbb;
8192 *sp ++ = ins;
8193 ip += 5;
8195 break;
8196 case CEE_ISINST: {
8197 CHECK_STACK (1);
8198 --sp;
8199 CHECK_OPSIZE (5);
8200 token = read32 (ip + 1);
8201 klass = mini_get_class (method, token, generic_context);
8202 CHECK_TYPELOAD (klass);
8203 if (sp [0]->type != STACK_OBJ)
8204 UNVERIFIED;
8206 if (cfg->generic_sharing_context)
8207 context_used = mono_class_check_context_used (klass);
8209 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8210 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8211 MonoInst *args [3];
8213 /* obj */
8214 args [0] = *sp;
8216 /* klass */
8217 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8219 /* inline cache*/
8220 /*FIXME AOT support*/
8221 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8223 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8224 ip += 5;
8225 inline_costs += 2;
8226 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8227 MonoMethod *mono_isinst;
8228 MonoInst *iargs [1];
8229 int costs;
8231 mono_isinst = mono_marshal_get_isinst (klass);
8232 iargs [0] = sp [0];
8234 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8235 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8236 CHECK_CFG_EXCEPTION;
8237 g_assert (costs > 0);
8239 ip += 5;
8240 cfg->real_offset += 5;
8241 bblock = cfg->cbb;
8243 *sp++= iargs [0];
8245 inline_costs += costs;
8247 else {
8248 ins = handle_isinst (cfg, klass, *sp, context_used);
8249 CHECK_CFG_EXCEPTION;
8250 bblock = cfg->cbb;
8251 *sp ++ = ins;
8252 ip += 5;
8254 break;
8256 case CEE_UNBOX_ANY: {
8257 CHECK_STACK (1);
8258 --sp;
8259 CHECK_OPSIZE (5);
8260 token = read32 (ip + 1);
8261 klass = mini_get_class (method, token, generic_context);
8262 CHECK_TYPELOAD (klass);
8264 mono_save_token_info (cfg, image, token, klass);
8266 if (cfg->generic_sharing_context)
8267 context_used = mono_class_check_context_used (klass);
8269 if (generic_class_is_reference_type (cfg, klass)) {
8270 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8271 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8272 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8273 MonoInst *args [3];
8275 /* obj */
8276 args [0] = *sp;
8278 /* klass */
8279 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8281 /* inline cache*/
8282 /*FIXME AOT support*/
8283 if (cfg->compile_aot)
8284 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8285 else
8286 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8288 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8289 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8290 ip += 5;
8291 inline_costs += 2;
8292 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8293 MonoMethod *mono_castclass;
8294 MonoInst *iargs [1];
8295 int costs;
8297 mono_castclass = mono_marshal_get_castclass (klass);
8298 iargs [0] = sp [0];
8300 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8301 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8302 CHECK_CFG_EXCEPTION;
8303 g_assert (costs > 0);
8305 ip += 5;
8306 cfg->real_offset += 5;
8307 bblock = cfg->cbb;
8309 *sp++ = iargs [0];
8310 inline_costs += costs;
8311 } else {
8312 ins = handle_castclass (cfg, klass, *sp, context_used);
8313 CHECK_CFG_EXCEPTION;
8314 bblock = cfg->cbb;
8315 *sp ++ = ins;
8316 ip += 5;
8318 break;
8321 if (mono_class_is_nullable (klass)) {
8322 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8323 *sp++= ins;
8324 ip += 5;
8325 break;
8328 /* UNBOX */
8329 ins = handle_unbox (cfg, klass, sp, context_used);
8330 *sp = ins;
8332 ip += 5;
8334 /* LDOBJ */
8335 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8336 *sp++ = ins;
8338 inline_costs += 2;
8339 break;
8341 case CEE_BOX: {
8342 MonoInst *val;
8344 CHECK_STACK (1);
8345 --sp;
8346 val = *sp;
8347 CHECK_OPSIZE (5);
8348 token = read32 (ip + 1);
8349 klass = mini_get_class (method, token, generic_context);
8350 CHECK_TYPELOAD (klass);
8352 mono_save_token_info (cfg, image, token, klass);
8354 if (cfg->generic_sharing_context)
8355 context_used = mono_class_check_context_used (klass);
8357 if (generic_class_is_reference_type (cfg, klass)) {
8358 *sp++ = val;
8359 ip += 5;
8360 break;
8363 if (klass == mono_defaults.void_class)
8364 UNVERIFIED;
8365 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8366 UNVERIFIED;
8367 /* frequent check in generic code: box (struct), brtrue */
8369 // FIXME: LLVM can't handle the inconsistent bb linking
8370 if (!mono_class_is_nullable (klass) &&
8371 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8372 (ip [5] == CEE_BRTRUE ||
8373 ip [5] == CEE_BRTRUE_S ||
8374 ip [5] == CEE_BRFALSE ||
8375 ip [5] == CEE_BRFALSE_S)) {
8376 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8377 int dreg;
8378 MonoBasicBlock *true_bb, *false_bb;
8380 ip += 5;
8382 if (cfg->verbose_level > 3) {
8383 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8384 printf ("<box+brtrue opt>\n");
8387 switch (*ip) {
8388 case CEE_BRTRUE_S:
8389 case CEE_BRFALSE_S:
8390 CHECK_OPSIZE (2);
8391 ip++;
8392 target = ip + 1 + (signed char)(*ip);
8393 ip++;
8394 break;
8395 case CEE_BRTRUE:
8396 case CEE_BRFALSE:
8397 CHECK_OPSIZE (5);
8398 ip++;
8399 target = ip + 4 + (gint)(read32 (ip));
8400 ip += 4;
8401 break;
8402 default:
8403 g_assert_not_reached ();
8407 * We need to link both bblocks, since it is needed for handling stack
8408 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8409 * Branching to only one of them would lead to inconsistencies, so
8410 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8412 GET_BBLOCK (cfg, true_bb, target);
8413 GET_BBLOCK (cfg, false_bb, ip);
8415 mono_link_bblock (cfg, cfg->cbb, true_bb);
8416 mono_link_bblock (cfg, cfg->cbb, false_bb);
8418 if (sp != stack_start) {
8419 handle_stack_args (cfg, stack_start, sp - stack_start);
8420 sp = stack_start;
8421 CHECK_UNVERIFIABLE (cfg);
8424 if (COMPILE_LLVM (cfg)) {
8425 dreg = alloc_ireg (cfg);
8426 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8427 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8429 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8430 } else {
8431 /* The JIT can't eliminate the iconst+compare */
8432 MONO_INST_NEW (cfg, ins, OP_BR);
8433 ins->inst_target_bb = is_true ? true_bb : false_bb;
8434 MONO_ADD_INS (cfg->cbb, ins);
8437 start_new_bblock = 1;
8438 break;
8441 *sp++ = handle_box (cfg, val, klass, context_used);
8443 CHECK_CFG_EXCEPTION;
8444 ip += 5;
8445 inline_costs += 1;
8446 break;
8448 case CEE_UNBOX: {
8449 CHECK_STACK (1);
8450 --sp;
8451 CHECK_OPSIZE (5);
8452 token = read32 (ip + 1);
8453 klass = mini_get_class (method, token, generic_context);
8454 CHECK_TYPELOAD (klass);
8456 mono_save_token_info (cfg, image, token, klass);
8458 if (cfg->generic_sharing_context)
8459 context_used = mono_class_check_context_used (klass);
8461 if (mono_class_is_nullable (klass)) {
8462 MonoInst *val;
8464 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8465 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8467 *sp++= ins;
8468 } else {
8469 ins = handle_unbox (cfg, klass, sp, context_used);
8470 *sp++ = ins;
8472 ip += 5;
8473 inline_costs += 2;
8474 break;
8476 case CEE_LDFLD:
8477 case CEE_LDFLDA:
8478 case CEE_STFLD: {
8479 MonoClassField *field;
8480 int costs;
8481 guint foffset;
8483 if (*ip == CEE_STFLD) {
8484 CHECK_STACK (2);
8485 sp -= 2;
8486 } else {
8487 CHECK_STACK (1);
8488 --sp;
8490 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8491 UNVERIFIED;
8492 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8493 UNVERIFIED;
8494 CHECK_OPSIZE (5);
8495 token = read32 (ip + 1);
8496 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8497 field = mono_method_get_wrapper_data (method, token);
8498 klass = field->parent;
8500 else {
8501 field = mono_field_from_token (image, token, &klass, generic_context);
8503 if (!field)
8504 LOAD_ERROR;
8505 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8506 FIELD_ACCESS_FAILURE;
8507 mono_class_init (klass);
8509 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8510 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8511 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8512 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8515 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8516 if (*ip == CEE_STFLD) {
8517 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8518 UNVERIFIED;
8519 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8520 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8521 MonoInst *iargs [5];
8523 iargs [0] = sp [0];
8524 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8525 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8526 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8527 field->offset);
8528 iargs [4] = sp [1];
8530 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8531 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8532 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8533 CHECK_CFG_EXCEPTION;
8534 g_assert (costs > 0);
8536 cfg->real_offset += 5;
8537 bblock = cfg->cbb;
8539 inline_costs += costs;
8540 } else {
8541 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8543 } else {
8544 MonoInst *store;
8546 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8548 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8549 if (sp [0]->opcode != OP_LDADDR)
8550 store->flags |= MONO_INST_FAULT;
8552 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8553 /* insert call to write barrier */
8554 MonoInst *ptr;
8555 int dreg;
8557 dreg = alloc_ireg_mp (cfg);
8558 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8559 emit_write_barrier (cfg, ptr, sp [1], -1);
8562 store->flags |= ins_flag;
8564 ins_flag = 0;
8565 ip += 5;
8566 break;
8569 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8570 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8571 MonoInst *iargs [4];
8573 iargs [0] = sp [0];
8574 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8575 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8576 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8577 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8578 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8579 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8580 CHECK_CFG_EXCEPTION;
8581 bblock = cfg->cbb;
8582 g_assert (costs > 0);
8584 cfg->real_offset += 5;
8586 *sp++ = iargs [0];
8588 inline_costs += costs;
8589 } else {
8590 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8591 *sp++ = ins;
8593 } else {
8594 if (sp [0]->type == STACK_VTYPE) {
8595 MonoInst *var;
8597 /* Have to compute the address of the variable */
8599 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8600 if (!var)
8601 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8602 else
8603 g_assert (var->klass == klass);
8605 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8606 sp [0] = ins;
8609 if (*ip == CEE_LDFLDA) {
8610 if (sp [0]->type == STACK_OBJ) {
8611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8612 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8615 dreg = alloc_ireg_mp (cfg);
8617 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8618 ins->klass = mono_class_from_mono_type (field->type);
8619 ins->type = STACK_MP;
8620 *sp++ = ins;
8621 } else {
8622 MonoInst *load;
8624 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8627 load->flags |= ins_flag;
8628 if (sp [0]->opcode != OP_LDADDR)
8629 load->flags |= MONO_INST_FAULT;
8630 *sp++ = load;
8633 ins_flag = 0;
8634 ip += 5;
8635 break;
8637 case CEE_LDSFLD:
8638 case CEE_LDSFLDA:
8639 case CEE_STSFLD: {
8640 MonoClassField *field;
8641 gpointer addr = NULL;
8642 gboolean is_special_static;
8643 MonoType *ftype;
8645 CHECK_OPSIZE (5);
8646 token = read32 (ip + 1);
8648 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8649 field = mono_method_get_wrapper_data (method, token);
8650 klass = field->parent;
8652 else
8653 field = mono_field_from_token (image, token, &klass, generic_context);
8654 if (!field)
8655 LOAD_ERROR;
8656 mono_class_init (klass);
8657 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8658 FIELD_ACCESS_FAILURE;
8660 /* if the class is Critical then transparent code cannot access it's fields */
8661 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8662 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8665 * We can only support shared generic static
8666 * field access on architectures where the
8667 * trampoline code has been extended to handle
8668 * the generic class init.
8670 #ifndef MONO_ARCH_VTABLE_REG
8671 GENERIC_SHARING_FAILURE (*ip);
8672 #endif
8674 if (cfg->generic_sharing_context)
8675 context_used = mono_class_check_context_used (klass);
8677 ftype = mono_field_get_type (field);
8679 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8681 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8682 * to be called here.
8684 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8685 mono_class_vtable (cfg->domain, klass);
8686 CHECK_TYPELOAD (klass);
8688 mono_domain_lock (cfg->domain);
8689 if (cfg->domain->special_static_fields)
8690 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8691 mono_domain_unlock (cfg->domain);
8693 is_special_static = mono_class_field_is_special_static (field);
8695 /* Generate IR to compute the field address */
8696 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8698 * Fast access to TLS data
8699 * Inline version of get_thread_static_data () in
8700 * threads.c.
8702 guint32 offset;
8703 int idx, static_data_reg, array_reg, dreg;
8704 MonoInst *thread_ins;
8706 // offset &= 0x7fffffff;
8707 // idx = (offset >> 24) - 1;
8708 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8710 thread_ins = mono_get_thread_intrinsic (cfg);
8711 MONO_ADD_INS (cfg->cbb, thread_ins);
8712 static_data_reg = alloc_ireg (cfg);
8713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8715 if (cfg->compile_aot) {
8716 int offset_reg, offset2_reg, idx_reg;
8718 /* For TLS variables, this will return the TLS offset */
8719 EMIT_NEW_SFLDACONST (cfg, ins, field);
8720 offset_reg = ins->dreg;
8721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8722 idx_reg = alloc_ireg (cfg);
8723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8726 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8727 array_reg = alloc_ireg (cfg);
8728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8729 offset2_reg = alloc_ireg (cfg);
8730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8731 dreg = alloc_ireg (cfg);
8732 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8733 } else {
8734 offset = (gsize)addr & 0x7fffffff;
8735 idx = (offset >> 24) - 1;
8737 array_reg = alloc_ireg (cfg);
8738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8739 dreg = alloc_ireg (cfg);
8740 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8742 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8743 (cfg->compile_aot && is_special_static) ||
8744 (context_used && is_special_static)) {
8745 MonoInst *iargs [2];
8747 g_assert (field->parent);
8748 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8749 if (context_used) {
8750 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8751 field, MONO_RGCTX_INFO_CLASS_FIELD);
8752 } else {
8753 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8755 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8756 } else if (context_used) {
8757 MonoInst *static_data;
8760 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8761 method->klass->name_space, method->klass->name, method->name,
8762 depth, field->offset);
8765 if (mono_class_needs_cctor_run (klass, method))
8766 emit_generic_class_init (cfg, klass);
8769 * The pointer we're computing here is
8771 * super_info.static_data + field->offset
8773 static_data = emit_get_rgctx_klass (cfg, context_used,
8774 klass, MONO_RGCTX_INFO_STATIC_DATA);
8776 if (field->offset == 0) {
8777 ins = static_data;
8778 } else {
8779 int addr_reg = mono_alloc_preg (cfg);
8780 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8782 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8783 MonoInst *iargs [2];
8785 g_assert (field->parent);
8786 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8787 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8788 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8789 } else {
8790 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8792 CHECK_TYPELOAD (klass);
8793 if (!addr) {
8794 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8795 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8796 if (cfg->verbose_level > 2)
8797 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8798 class_inits = g_slist_prepend (class_inits, vtable);
8799 } else {
8800 if (cfg->run_cctors) {
8801 MonoException *ex;
8802 /* This makes so that inline cannot trigger */
8803 /* .cctors: too many apps depend on them */
8804 /* running with a specific order... */
8805 if (! vtable->initialized)
8806 INLINE_FAILURE;
8807 ex = mono_runtime_class_init_full (vtable, FALSE);
8808 if (ex) {
8809 set_exception_object (cfg, ex);
8810 goto exception_exit;
8814 addr = (char*)vtable->data + field->offset;
8816 if (cfg->compile_aot)
8817 EMIT_NEW_SFLDACONST (cfg, ins, field);
8818 else
8819 EMIT_NEW_PCONST (cfg, ins, addr);
8820 } else {
8821 MonoInst *iargs [1];
8822 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8823 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8827 /* Generate IR to do the actual load/store operation */
8829 if (*ip == CEE_LDSFLDA) {
8830 ins->klass = mono_class_from_mono_type (ftype);
8831 ins->type = STACK_PTR;
8832 *sp++ = ins;
8833 } else if (*ip == CEE_STSFLD) {
8834 MonoInst *store;
8835 CHECK_STACK (1);
8836 sp--;
8838 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8839 store->flags |= ins_flag;
8840 } else {
8841 gboolean is_const = FALSE;
8842 MonoVTable *vtable = NULL;
8844 if (!context_used) {
8845 vtable = mono_class_vtable (cfg->domain, klass);
8846 CHECK_TYPELOAD (klass);
8848 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8849 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8850 gpointer addr = (char*)vtable->data + field->offset;
8851 int ro_type = ftype->type;
8852 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8853 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8855 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8856 is_const = TRUE;
8857 switch (ro_type) {
8858 case MONO_TYPE_BOOLEAN:
8859 case MONO_TYPE_U1:
8860 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8861 sp++;
8862 break;
8863 case MONO_TYPE_I1:
8864 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8865 sp++;
8866 break;
8867 case MONO_TYPE_CHAR:
8868 case MONO_TYPE_U2:
8869 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8870 sp++;
8871 break;
8872 case MONO_TYPE_I2:
8873 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8874 sp++;
8875 break;
8876 break;
8877 case MONO_TYPE_I4:
8878 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8879 sp++;
8880 break;
8881 case MONO_TYPE_U4:
8882 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8883 sp++;
8884 break;
8885 case MONO_TYPE_I:
8886 case MONO_TYPE_U:
8887 case MONO_TYPE_PTR:
8888 case MONO_TYPE_FNPTR:
8889 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8890 type_to_eval_stack_type ((cfg), field->type, *sp);
8891 sp++;
8892 break;
8893 case MONO_TYPE_STRING:
8894 case MONO_TYPE_OBJECT:
8895 case MONO_TYPE_CLASS:
8896 case MONO_TYPE_SZARRAY:
8897 case MONO_TYPE_ARRAY:
8898 if (!mono_gc_is_moving ()) {
8899 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8900 type_to_eval_stack_type ((cfg), field->type, *sp);
8901 sp++;
8902 } else {
8903 is_const = FALSE;
8905 break;
8906 case MONO_TYPE_I8:
8907 case MONO_TYPE_U8:
8908 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8909 sp++;
8910 break;
8911 case MONO_TYPE_R4:
8912 case MONO_TYPE_R8:
8913 case MONO_TYPE_VALUETYPE:
8914 default:
8915 is_const = FALSE;
8916 break;
8920 if (!is_const) {
8921 MonoInst *load;
8923 CHECK_STACK_OVF (1);
8925 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8926 load->flags |= ins_flag;
8927 ins_flag = 0;
8928 *sp++ = load;
8931 ins_flag = 0;
8932 ip += 5;
8933 break;
8935 case CEE_STOBJ:
8936 CHECK_STACK (2);
8937 sp -= 2;
8938 CHECK_OPSIZE (5);
8939 token = read32 (ip + 1);
8940 klass = mini_get_class (method, token, generic_context);
8941 CHECK_TYPELOAD (klass);
8942 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8944 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8945 generic_class_is_reference_type (cfg, klass)) {
8946 /* insert call to write barrier */
8947 emit_write_barrier (cfg, sp [0], sp [1], -1);
8949 ins_flag = 0;
8950 ip += 5;
8951 inline_costs += 1;
8952 break;
8955 * Array opcodes
8957 case CEE_NEWARR: {
8958 MonoInst *len_ins;
8959 const char *data_ptr;
8960 int data_size = 0;
8961 guint32 field_token;
8963 CHECK_STACK (1);
8964 --sp;
8966 CHECK_OPSIZE (5);
8967 token = read32 (ip + 1);
8969 klass = mini_get_class (method, token, generic_context);
8970 CHECK_TYPELOAD (klass);
8972 if (cfg->generic_sharing_context)
8973 context_used = mono_class_check_context_used (klass);
8975 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8976 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8977 ins->sreg1 = sp [0]->dreg;
8978 ins->type = STACK_I4;
8979 ins->dreg = alloc_ireg (cfg);
8980 MONO_ADD_INS (cfg->cbb, ins);
8981 *sp = mono_decompose_opcode (cfg, ins);
8984 if (context_used) {
8985 MonoInst *args [3];
8986 MonoClass *array_class = mono_array_class_get (klass, 1);
8987 /* FIXME: we cannot get a managed
8988 allocator because we can't get the
8989 open generic class's vtable. We
8990 have the same problem in
8991 handle_alloc(). This
8992 needs to be solved so that we can
8993 have managed allocs of shared
8994 generic classes. */
8996 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8997 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8999 MonoMethod *managed_alloc = NULL;
9001 /* FIXME: Decompose later to help abcrem */
9003 /* vtable */
9004 args [0] = emit_get_rgctx_klass (cfg, context_used,
9005 array_class, MONO_RGCTX_INFO_VTABLE);
9006 /* array len */
9007 args [1] = sp [0];
9009 if (managed_alloc)
9010 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9011 else
9012 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9013 } else {
9014 if (cfg->opt & MONO_OPT_SHARED) {
9015 /* Decompose now to avoid problems with references to the domainvar */
9016 MonoInst *iargs [3];
9018 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9019 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9020 iargs [2] = sp [0];
9022 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9023 } else {
9024 /* Decompose later since it is needed by abcrem */
9025 MonoClass *array_type = mono_array_class_get (klass, 1);
9026 mono_class_vtable (cfg->domain, array_type);
9027 CHECK_TYPELOAD (array_type);
9029 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9030 ins->dreg = alloc_ireg_ref (cfg);
9031 ins->sreg1 = sp [0]->dreg;
9032 ins->inst_newa_class = klass;
9033 ins->type = STACK_OBJ;
9034 ins->klass = klass;
9035 MONO_ADD_INS (cfg->cbb, ins);
9036 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9037 cfg->cbb->has_array_access = TRUE;
9039 /* Needed so mono_emit_load_get_addr () gets called */
9040 mono_get_got_var (cfg);
9044 len_ins = sp [0];
9045 ip += 5;
9046 *sp++ = ins;
9047 inline_costs += 1;
9050 * we inline/optimize the initialization sequence if possible.
9051 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9052 * for small sizes open code the memcpy
9053 * ensure the rva field is big enough
9055 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9056 MonoMethod *memcpy_method = get_memcpy_method ();
9057 MonoInst *iargs [3];
9058 int add_reg = alloc_ireg_mp (cfg);
9060 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9061 if (cfg->compile_aot) {
9062 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9063 } else {
9064 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9066 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9067 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9068 ip += 11;
9071 break;
9073 case CEE_LDLEN:
9074 CHECK_STACK (1);
9075 --sp;
9076 if (sp [0]->type != STACK_OBJ)
9077 UNVERIFIED;
9079 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9080 ins->dreg = alloc_preg (cfg);
9081 ins->sreg1 = sp [0]->dreg;
9082 ins->type = STACK_I4;
9083 /* This flag will be inherited by the decomposition */
9084 ins->flags |= MONO_INST_FAULT;
9085 MONO_ADD_INS (cfg->cbb, ins);
9086 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9087 cfg->cbb->has_array_access = TRUE;
9088 ip ++;
9089 *sp++ = ins;
9090 break;
9091 case CEE_LDELEMA:
9092 CHECK_STACK (2);
9093 sp -= 2;
9094 CHECK_OPSIZE (5);
9095 if (sp [0]->type != STACK_OBJ)
9096 UNVERIFIED;
9098 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9100 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9101 CHECK_TYPELOAD (klass);
9102 /* we need to make sure that this array is exactly the type it needs
9103 * to be for correctness. the wrappers are lax with their usage
9104 * so we need to ignore them here
9106 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9107 MonoClass *array_class = mono_array_class_get (klass, 1);
9108 mini_emit_check_array_type (cfg, sp [0], array_class);
9109 CHECK_TYPELOAD (array_class);
9112 readonly = FALSE;
9113 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9114 *sp++ = ins;
9115 ip += 5;
9116 break;
9117 case CEE_LDELEM:
9118 case CEE_LDELEM_I1:
9119 case CEE_LDELEM_U1:
9120 case CEE_LDELEM_I2:
9121 case CEE_LDELEM_U2:
9122 case CEE_LDELEM_I4:
9123 case CEE_LDELEM_U4:
9124 case CEE_LDELEM_I8:
9125 case CEE_LDELEM_I:
9126 case CEE_LDELEM_R4:
9127 case CEE_LDELEM_R8:
9128 case CEE_LDELEM_REF: {
9129 MonoInst *addr;
9131 CHECK_STACK (2);
9132 sp -= 2;
9134 if (*ip == CEE_LDELEM) {
9135 CHECK_OPSIZE (5);
9136 token = read32 (ip + 1);
9137 klass = mini_get_class (method, token, generic_context);
9138 CHECK_TYPELOAD (klass);
9139 mono_class_init (klass);
9141 else
9142 klass = array_access_to_klass (*ip);
9144 if (sp [0]->type != STACK_OBJ)
9145 UNVERIFIED;
9147 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9149 if (sp [1]->opcode == OP_ICONST) {
9150 int array_reg = sp [0]->dreg;
9151 int index_reg = sp [1]->dreg;
9152 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9154 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9155 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9156 } else {
9157 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9158 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9160 *sp++ = ins;
9161 if (*ip == CEE_LDELEM)
9162 ip += 5;
9163 else
9164 ++ip;
9165 break;
9167 case CEE_STELEM_I:
9168 case CEE_STELEM_I1:
9169 case CEE_STELEM_I2:
9170 case CEE_STELEM_I4:
9171 case CEE_STELEM_I8:
9172 case CEE_STELEM_R4:
9173 case CEE_STELEM_R8:
9174 case CEE_STELEM_REF:
9175 case CEE_STELEM: {
9176 MonoInst *addr;
9178 CHECK_STACK (3);
9179 sp -= 3;
9181 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9183 if (*ip == CEE_STELEM) {
9184 CHECK_OPSIZE (5);
9185 token = read32 (ip + 1);
9186 klass = mini_get_class (method, token, generic_context);
9187 CHECK_TYPELOAD (klass);
9188 mono_class_init (klass);
9190 else
9191 klass = array_access_to_klass (*ip);
9193 if (sp [0]->type != STACK_OBJ)
9194 UNVERIFIED;
9196 /* storing a NULL doesn't need any of the complex checks in stelemref */
9197 if (generic_class_is_reference_type (cfg, klass) &&
9198 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9199 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9200 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9201 MonoInst *iargs [3];
9203 if (!helper->slot)
9204 mono_class_setup_vtable (obj_array);
9205 g_assert (helper->slot);
9207 if (sp [0]->type != STACK_OBJ)
9208 UNVERIFIED;
9209 if (sp [2]->type != STACK_OBJ)
9210 UNVERIFIED;
9212 iargs [2] = sp [2];
9213 iargs [1] = sp [1];
9214 iargs [0] = sp [0];
9216 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9217 } else {
9218 if (sp [1]->opcode == OP_ICONST) {
9219 int array_reg = sp [0]->dreg;
9220 int index_reg = sp [1]->dreg;
9221 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9223 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9224 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9225 } else {
9226 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9227 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9231 if (*ip == CEE_STELEM)
9232 ip += 5;
9233 else
9234 ++ip;
9235 inline_costs += 1;
9236 break;
9238 case CEE_CKFINITE: {
9239 CHECK_STACK (1);
9240 --sp;
9242 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9243 ins->sreg1 = sp [0]->dreg;
9244 ins->dreg = alloc_freg (cfg);
9245 ins->type = STACK_R8;
9246 MONO_ADD_INS (bblock, ins);
9248 *sp++ = mono_decompose_opcode (cfg, ins);
9250 ++ip;
9251 break;
9253 case CEE_REFANYVAL: {
9254 MonoInst *src_var, *src;
9256 int klass_reg = alloc_preg (cfg);
9257 int dreg = alloc_preg (cfg);
9259 CHECK_STACK (1);
9260 MONO_INST_NEW (cfg, ins, *ip);
9261 --sp;
9262 CHECK_OPSIZE (5);
9263 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9264 CHECK_TYPELOAD (klass);
9265 mono_class_init (klass);
9267 if (cfg->generic_sharing_context)
9268 context_used = mono_class_check_context_used (klass);
9270 // FIXME:
9271 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9272 if (!src_var)
9273 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9274 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9277 if (context_used) {
9278 MonoInst *klass_ins;
9280 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9281 klass, MONO_RGCTX_INFO_KLASS);
9283 // FIXME:
9284 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9285 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9286 } else {
9287 mini_emit_class_check (cfg, klass_reg, klass);
9289 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9290 ins->type = STACK_MP;
9291 *sp++ = ins;
9292 ip += 5;
9293 break;
9295 case CEE_MKREFANY: {
9296 MonoInst *loc, *addr;
9298 CHECK_STACK (1);
9299 MONO_INST_NEW (cfg, ins, *ip);
9300 --sp;
9301 CHECK_OPSIZE (5);
9302 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9303 CHECK_TYPELOAD (klass);
9304 mono_class_init (klass);
9306 if (cfg->generic_sharing_context)
9307 context_used = mono_class_check_context_used (klass);
9309 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9310 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9312 if (context_used) {
9313 MonoInst *const_ins;
9314 int type_reg = alloc_preg (cfg);
9316 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9319 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9320 } else if (cfg->compile_aot) {
9321 int const_reg = alloc_preg (cfg);
9322 int type_reg = alloc_preg (cfg);
9324 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9327 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9328 } else {
9329 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9330 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9332 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9334 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9335 ins->type = STACK_VTYPE;
9336 ins->klass = mono_defaults.typed_reference_class;
9337 *sp++ = ins;
9338 ip += 5;
9339 break;
9341 case CEE_LDTOKEN: {
9342 gpointer handle;
9343 MonoClass *handle_class;
9345 CHECK_STACK_OVF (1);
9347 CHECK_OPSIZE (5);
9348 n = read32 (ip + 1);
9350 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9351 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9352 handle = mono_method_get_wrapper_data (method, n);
9353 handle_class = mono_method_get_wrapper_data (method, n + 1);
9354 if (handle_class == mono_defaults.typehandle_class)
9355 handle = &((MonoClass*)handle)->byval_arg;
9357 else {
9358 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9360 if (!handle)
9361 LOAD_ERROR;
9362 mono_class_init (handle_class);
9363 if (cfg->generic_sharing_context) {
9364 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9365 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9366 /* This case handles ldtoken
9367 of an open type, like for
9368 typeof(Gen<>). */
9369 context_used = 0;
9370 } else if (handle_class == mono_defaults.typehandle_class) {
9371 /* If we get a MONO_TYPE_CLASS
9372 then we need to provide the
9373 open type, not an
9374 instantiation of it. */
9375 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9376 context_used = 0;
9377 else
9378 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9379 } else if (handle_class == mono_defaults.fieldhandle_class)
9380 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9381 else if (handle_class == mono_defaults.methodhandle_class)
9382 context_used = mono_method_check_context_used (handle);
9383 else
9384 g_assert_not_reached ();
9387 if ((cfg->opt & MONO_OPT_SHARED) &&
9388 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9389 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9390 MonoInst *addr, *vtvar, *iargs [3];
9391 int method_context_used;
9393 if (cfg->generic_sharing_context)
9394 method_context_used = mono_method_check_context_used (method);
9395 else
9396 method_context_used = 0;
9398 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9400 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9401 EMIT_NEW_ICONST (cfg, iargs [1], n);
9402 if (method_context_used) {
9403 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9404 method, MONO_RGCTX_INFO_METHOD);
9405 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9406 } else {
9407 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9408 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9410 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9412 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9414 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9415 } else {
9416 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9417 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9418 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9419 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9420 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9421 MonoClass *tclass = mono_class_from_mono_type (handle);
9423 mono_class_init (tclass);
9424 if (context_used) {
9425 ins = emit_get_rgctx_klass (cfg, context_used,
9426 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9427 } else if (cfg->compile_aot) {
9428 if (method->wrapper_type) {
9429 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9430 /* Special case for static synchronized wrappers */
9431 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9432 } else {
9433 /* FIXME: n is not a normal token */
9434 cfg->disable_aot = TRUE;
9435 EMIT_NEW_PCONST (cfg, ins, NULL);
9437 } else {
9438 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9440 } else {
9441 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9443 ins->type = STACK_OBJ;
9444 ins->klass = cmethod->klass;
9445 ip += 5;
9446 } else {
9447 MonoInst *addr, *vtvar;
9449 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9451 if (context_used) {
9452 if (handle_class == mono_defaults.typehandle_class) {
9453 ins = emit_get_rgctx_klass (cfg, context_used,
9454 mono_class_from_mono_type (handle),
9455 MONO_RGCTX_INFO_TYPE);
9456 } else if (handle_class == mono_defaults.methodhandle_class) {
9457 ins = emit_get_rgctx_method (cfg, context_used,
9458 handle, MONO_RGCTX_INFO_METHOD);
9459 } else if (handle_class == mono_defaults.fieldhandle_class) {
9460 ins = emit_get_rgctx_field (cfg, context_used,
9461 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9462 } else {
9463 g_assert_not_reached ();
9465 } else if (cfg->compile_aot) {
9466 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9467 } else {
9468 EMIT_NEW_PCONST (cfg, ins, handle);
9470 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9471 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9472 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9476 *sp++ = ins;
9477 ip += 5;
9478 break;
9480 case CEE_THROW:
9481 CHECK_STACK (1);
9482 MONO_INST_NEW (cfg, ins, OP_THROW);
9483 --sp;
9484 ins->sreg1 = sp [0]->dreg;
9485 ip++;
9486 bblock->out_of_line = TRUE;
9487 MONO_ADD_INS (bblock, ins);
9488 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9489 MONO_ADD_INS (bblock, ins);
9490 sp = stack_start;
9492 link_bblock (cfg, bblock, end_bblock);
9493 start_new_bblock = 1;
9494 break;
9495 case CEE_ENDFINALLY:
9496 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9497 MONO_ADD_INS (bblock, ins);
9498 ip++;
9499 start_new_bblock = 1;
9502 * Control will leave the method so empty the stack, otherwise
9503 * the next basic block will start with a nonempty stack.
9505 while (sp != stack_start) {
9506 sp--;
9508 break;
9509 case CEE_LEAVE:
9510 case CEE_LEAVE_S: {
9511 GList *handlers;
9513 if (*ip == CEE_LEAVE) {
9514 CHECK_OPSIZE (5);
9515 target = ip + 5 + (gint32)read32(ip + 1);
9516 } else {
9517 CHECK_OPSIZE (2);
9518 target = ip + 2 + (signed char)(ip [1]);
9521 /* empty the stack */
9522 while (sp != stack_start) {
9523 sp--;
9527 * If this leave statement is in a catch block, check for a
9528 * pending exception, and rethrow it if necessary.
9529 * We avoid doing this in runtime invoke wrappers, since those are called
9530 * by native code which excepts the wrapper to catch all exceptions.
9532 for (i = 0; i < header->num_clauses; ++i) {
9533 MonoExceptionClause *clause = &header->clauses [i];
9536 * Use <= in the final comparison to handle clauses with multiple
9537 * leave statements, like in bug #78024.
9538 * The ordering of the exception clauses guarantees that we find the
9539 * innermost clause.
9541 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9542 MonoInst *exc_ins;
9543 MonoBasicBlock *dont_throw;
9546 MonoInst *load;
9548 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9551 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9553 NEW_BBLOCK (cfg, dont_throw);
9556 * Currently, we always rethrow the abort exception, despite the
9557 * fact that this is not correct. See thread6.cs for an example.
9558 * But propagating the abort exception is more important than
9559 * getting the sematics right.
9561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9563 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9565 MONO_START_BB (cfg, dont_throw);
9566 bblock = cfg->cbb;
9570 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9571 GList *tmp;
9572 MonoExceptionClause *clause;
9574 for (tmp = handlers; tmp; tmp = tmp->next) {
9575 clause = tmp->data;
9576 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9577 g_assert (tblock);
9578 link_bblock (cfg, bblock, tblock);
9579 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9580 ins->inst_target_bb = tblock;
9581 ins->inst_eh_block = clause;
9582 MONO_ADD_INS (bblock, ins);
9583 bblock->has_call_handler = 1;
9584 if (COMPILE_LLVM (cfg)) {
9585 MonoBasicBlock *target_bb;
9588 * Link the finally bblock with the target, since it will
9589 * conceptually branch there.
9590 * FIXME: Have to link the bblock containing the endfinally.
9592 GET_BBLOCK (cfg, target_bb, target);
9593 link_bblock (cfg, tblock, target_bb);
9596 g_list_free (handlers);
9599 MONO_INST_NEW (cfg, ins, OP_BR);
9600 MONO_ADD_INS (bblock, ins);
9601 GET_BBLOCK (cfg, tblock, target);
9602 link_bblock (cfg, bblock, tblock);
9603 ins->inst_target_bb = tblock;
9604 start_new_bblock = 1;
9606 if (*ip == CEE_LEAVE)
9607 ip += 5;
9608 else
9609 ip += 2;
9611 break;
9615 * Mono specific opcodes
9617 case MONO_CUSTOM_PREFIX: {
9619 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9621 CHECK_OPSIZE (2);
9622 switch (ip [1]) {
9623 case CEE_MONO_ICALL: {
9624 gpointer func;
9625 MonoJitICallInfo *info;
9627 token = read32 (ip + 2);
9628 func = mono_method_get_wrapper_data (method, token);
9629 info = mono_find_jit_icall_by_addr (func);
9630 g_assert (info);
9632 CHECK_STACK (info->sig->param_count);
9633 sp -= info->sig->param_count;
9635 ins = mono_emit_jit_icall (cfg, info->func, sp);
9636 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9637 *sp++ = ins;
9639 ip += 6;
9640 inline_costs += 10 * num_calls++;
9642 break;
9644 case CEE_MONO_LDPTR: {
9645 gpointer ptr;
9647 CHECK_STACK_OVF (1);
9648 CHECK_OPSIZE (6);
9649 token = read32 (ip + 2);
9651 ptr = mono_method_get_wrapper_data (method, token);
9652 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9653 MonoJitICallInfo *callinfo;
9654 const char *icall_name;
9656 icall_name = method->name + strlen ("__icall_wrapper_");
9657 g_assert (icall_name);
9658 callinfo = mono_find_jit_icall_by_name (icall_name);
9659 g_assert (callinfo);
9661 if (ptr == callinfo->func) {
9662 /* Will be transformed into an AOTCONST later */
9663 EMIT_NEW_PCONST (cfg, ins, ptr);
9664 *sp++ = ins;
9665 ip += 6;
9666 break;
9669 /* FIXME: Generalize this */
9670 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9671 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9672 *sp++ = ins;
9673 ip += 6;
9674 break;
9676 EMIT_NEW_PCONST (cfg, ins, ptr);
9677 *sp++ = ins;
9678 ip += 6;
9679 inline_costs += 10 * num_calls++;
9680 /* Can't embed random pointers into AOT code */
9681 cfg->disable_aot = 1;
9682 break;
9684 case CEE_MONO_ICALL_ADDR: {
9685 MonoMethod *cmethod;
9686 gpointer ptr;
9688 CHECK_STACK_OVF (1);
9689 CHECK_OPSIZE (6);
9690 token = read32 (ip + 2);
9692 cmethod = mono_method_get_wrapper_data (method, token);
9694 if (cfg->compile_aot) {
9695 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9696 } else {
9697 ptr = mono_lookup_internal_call (cmethod);
9698 g_assert (ptr);
9699 EMIT_NEW_PCONST (cfg, ins, ptr);
9701 *sp++ = ins;
9702 ip += 6;
9703 break;
9705 case CEE_MONO_VTADDR: {
9706 MonoInst *src_var, *src;
9708 CHECK_STACK (1);
9709 --sp;
9711 // FIXME:
9712 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9713 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9714 *sp++ = src;
9715 ip += 2;
9716 break;
9718 case CEE_MONO_NEWOBJ: {
9719 MonoInst *iargs [2];
9721 CHECK_STACK_OVF (1);
9722 CHECK_OPSIZE (6);
9723 token = read32 (ip + 2);
9724 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9725 mono_class_init (klass);
9726 NEW_DOMAINCONST (cfg, iargs [0]);
9727 MONO_ADD_INS (cfg->cbb, iargs [0]);
9728 NEW_CLASSCONST (cfg, iargs [1], klass);
9729 MONO_ADD_INS (cfg->cbb, iargs [1]);
9730 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9731 ip += 6;
9732 inline_costs += 10 * num_calls++;
9733 break;
9735 case CEE_MONO_OBJADDR:
9736 CHECK_STACK (1);
9737 --sp;
9738 MONO_INST_NEW (cfg, ins, OP_MOVE);
9739 ins->dreg = alloc_ireg_mp (cfg);
9740 ins->sreg1 = sp [0]->dreg;
9741 ins->type = STACK_MP;
9742 MONO_ADD_INS (cfg->cbb, ins);
9743 *sp++ = ins;
9744 ip += 2;
9745 break;
9746 case CEE_MONO_LDNATIVEOBJ:
9748 * Similar to LDOBJ, but instead load the unmanaged
9749 * representation of the vtype to the stack.
9751 CHECK_STACK (1);
9752 CHECK_OPSIZE (6);
9753 --sp;
9754 token = read32 (ip + 2);
9755 klass = mono_method_get_wrapper_data (method, token);
9756 g_assert (klass->valuetype);
9757 mono_class_init (klass);
9760 MonoInst *src, *dest, *temp;
9762 src = sp [0];
9763 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9764 temp->backend.is_pinvoke = 1;
9765 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9766 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9768 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9769 dest->type = STACK_VTYPE;
9770 dest->klass = klass;
9772 *sp ++ = dest;
9773 ip += 6;
9775 break;
9776 case CEE_MONO_RETOBJ: {
9778 * Same as RET, but return the native representation of a vtype
9779 * to the caller.
9781 g_assert (cfg->ret);
9782 g_assert (mono_method_signature (method)->pinvoke);
9783 CHECK_STACK (1);
9784 --sp;
9786 CHECK_OPSIZE (6);
9787 token = read32 (ip + 2);
9788 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9790 if (!cfg->vret_addr) {
9791 g_assert (cfg->ret_var_is_local);
9793 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9794 } else {
9795 EMIT_NEW_RETLOADA (cfg, ins);
9797 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9799 if (sp != stack_start)
9800 UNVERIFIED;
9802 MONO_INST_NEW (cfg, ins, OP_BR);
9803 ins->inst_target_bb = end_bblock;
9804 MONO_ADD_INS (bblock, ins);
9805 link_bblock (cfg, bblock, end_bblock);
9806 start_new_bblock = 1;
9807 ip += 6;
9808 break;
9810 case CEE_MONO_CISINST:
9811 case CEE_MONO_CCASTCLASS: {
9812 int token;
9813 CHECK_STACK (1);
9814 --sp;
9815 CHECK_OPSIZE (6);
9816 token = read32 (ip + 2);
9817 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9818 if (ip [1] == CEE_MONO_CISINST)
9819 ins = handle_cisinst (cfg, klass, sp [0]);
9820 else
9821 ins = handle_ccastclass (cfg, klass, sp [0]);
9822 bblock = cfg->cbb;
9823 *sp++ = ins;
9824 ip += 6;
9825 break;
9827 case CEE_MONO_SAVE_LMF:
9828 case CEE_MONO_RESTORE_LMF:
9829 #ifdef MONO_ARCH_HAVE_LMF_OPS
9830 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9831 MONO_ADD_INS (bblock, ins);
9832 cfg->need_lmf_area = TRUE;
9833 #endif
9834 ip += 2;
9835 break;
9836 case CEE_MONO_CLASSCONST:
9837 CHECK_STACK_OVF (1);
9838 CHECK_OPSIZE (6);
9839 token = read32 (ip + 2);
9840 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9841 *sp++ = ins;
9842 ip += 6;
9843 inline_costs += 10 * num_calls++;
9844 break;
9845 case CEE_MONO_NOT_TAKEN:
9846 bblock->out_of_line = TRUE;
9847 ip += 2;
9848 break;
9849 case CEE_MONO_TLS:
9850 CHECK_STACK_OVF (1);
9851 CHECK_OPSIZE (6);
9852 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9853 ins->dreg = alloc_preg (cfg);
9854 ins->inst_offset = (gint32)read32 (ip + 2);
9855 ins->type = STACK_PTR;
9856 MONO_ADD_INS (bblock, ins);
9857 *sp++ = ins;
9858 ip += 6;
9859 break;
9860 case CEE_MONO_DYN_CALL: {
9861 MonoCallInst *call;
9863 /* It would be easier to call a trampoline, but that would put an
9864 * extra frame on the stack, confusing exception handling. So
9865 * implement it inline using an opcode for now.
9868 if (!cfg->dyn_call_var) {
9869 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9870 /* prevent it from being register allocated */
9871 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9874 /* Has to use a call inst since it local regalloc expects it */
9875 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9876 ins = (MonoInst*)call;
9877 sp -= 2;
9878 ins->sreg1 = sp [0]->dreg;
9879 ins->sreg2 = sp [1]->dreg;
9880 MONO_ADD_INS (bblock, ins);
9882 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9883 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9884 #endif
9886 ip += 2;
9887 inline_costs += 10 * num_calls++;
9889 break;
9891 default:
9892 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9893 break;
9895 break;
9898 case CEE_PREFIX1: {
9899 CHECK_OPSIZE (2);
9900 switch (ip [1]) {
9901 case CEE_ARGLIST: {
9902 /* somewhat similar to LDTOKEN */
9903 MonoInst *addr, *vtvar;
9904 CHECK_STACK_OVF (1);
9905 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9907 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9908 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9910 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9911 ins->type = STACK_VTYPE;
9912 ins->klass = mono_defaults.argumenthandle_class;
9913 *sp++ = ins;
9914 ip += 2;
9915 break;
9917 case CEE_CEQ:
9918 case CEE_CGT:
9919 case CEE_CGT_UN:
9920 case CEE_CLT:
9921 case CEE_CLT_UN: {
9922 MonoInst *cmp;
9923 CHECK_STACK (2);
9925 * The following transforms:
9926 * CEE_CEQ into OP_CEQ
9927 * CEE_CGT into OP_CGT
9928 * CEE_CGT_UN into OP_CGT_UN
9929 * CEE_CLT into OP_CLT
9930 * CEE_CLT_UN into OP_CLT_UN
9932 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9934 MONO_INST_NEW (cfg, ins, cmp->opcode);
9935 sp -= 2;
9936 cmp->sreg1 = sp [0]->dreg;
9937 cmp->sreg2 = sp [1]->dreg;
9938 type_from_op (cmp, sp [0], sp [1]);
9939 CHECK_TYPE (cmp);
9940 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9941 cmp->opcode = OP_LCOMPARE;
9942 else if (sp [0]->type == STACK_R8)
9943 cmp->opcode = OP_FCOMPARE;
9944 else
9945 cmp->opcode = OP_ICOMPARE;
9946 MONO_ADD_INS (bblock, cmp);
9947 ins->type = STACK_I4;
9948 ins->dreg = alloc_dreg (cfg, ins->type);
9949 type_from_op (ins, sp [0], sp [1]);
9951 if (cmp->opcode == OP_FCOMPARE) {
9953 * The backends expect the fceq opcodes to do the
9954 * comparison too.
9956 cmp->opcode = OP_NOP;
9957 ins->sreg1 = cmp->sreg1;
9958 ins->sreg2 = cmp->sreg2;
9960 MONO_ADD_INS (bblock, ins);
9961 *sp++ = ins;
9962 ip += 2;
9963 break;
9965 case CEE_LDFTN: {
9966 MonoInst *argconst;
9967 MonoMethod *cil_method;
9968 gboolean needs_static_rgctx_invoke;
9970 CHECK_STACK_OVF (1);
9971 CHECK_OPSIZE (6);
9972 n = read32 (ip + 2);
9973 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9974 if (!cmethod || mono_loader_get_last_error ())
9975 LOAD_ERROR;
9976 mono_class_init (cmethod->klass);
9978 mono_save_token_info (cfg, image, n, cmethod);
9980 if (cfg->generic_sharing_context)
9981 context_used = mono_method_check_context_used (cmethod);
9983 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9985 cil_method = cmethod;
9986 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9987 METHOD_ACCESS_FAILURE;
9989 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9990 if (check_linkdemand (cfg, method, cmethod))
9991 INLINE_FAILURE;
9992 CHECK_CFG_EXCEPTION;
9993 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9994 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9998 * Optimize the common case of ldftn+delegate creation
10000 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10001 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10002 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10003 MonoInst *target_ins;
10004 MonoMethod *invoke;
10005 int invoke_context_used = 0;
10007 invoke = mono_get_delegate_invoke (ctor_method->klass);
10008 if (!invoke || !mono_method_signature (invoke))
10009 LOAD_ERROR;
10011 if (cfg->generic_sharing_context)
10012 invoke_context_used = mono_method_check_context_used (invoke);
10014 target_ins = sp [-1];
10016 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10017 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10018 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10019 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10020 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10024 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10025 /* FIXME: SGEN support */
10026 if (invoke_context_used == 0) {
10027 ip += 6;
10028 if (cfg->verbose_level > 3)
10029 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10030 sp --;
10031 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10032 CHECK_CFG_EXCEPTION;
10033 ip += 5;
10034 sp ++;
10035 break;
10037 #endif
10041 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10042 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10043 *sp++ = ins;
10045 ip += 6;
10046 inline_costs += 10 * num_calls++;
10047 break;
10049 case CEE_LDVIRTFTN: {
10050 MonoInst *args [2];
10052 CHECK_STACK (1);
10053 CHECK_OPSIZE (6);
10054 n = read32 (ip + 2);
10055 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10056 if (!cmethod || mono_loader_get_last_error ())
10057 LOAD_ERROR;
10058 mono_class_init (cmethod->klass);
10060 if (cfg->generic_sharing_context)
10061 context_used = mono_method_check_context_used (cmethod);
10063 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10064 if (check_linkdemand (cfg, method, cmethod))
10065 INLINE_FAILURE;
10066 CHECK_CFG_EXCEPTION;
10067 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10068 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10071 --sp;
10072 args [0] = *sp;
10074 args [1] = emit_get_rgctx_method (cfg, context_used,
10075 cmethod, MONO_RGCTX_INFO_METHOD);
10077 if (context_used)
10078 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10079 else
10080 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10082 ip += 6;
10083 inline_costs += 10 * num_calls++;
10084 break;
10086 case CEE_LDARG:
10087 CHECK_STACK_OVF (1);
10088 CHECK_OPSIZE (4);
10089 n = read16 (ip + 2);
10090 CHECK_ARG (n);
10091 EMIT_NEW_ARGLOAD (cfg, ins, n);
10092 *sp++ = ins;
10093 ip += 4;
10094 break;
10095 case CEE_LDARGA:
10096 CHECK_STACK_OVF (1);
10097 CHECK_OPSIZE (4);
10098 n = read16 (ip + 2);
10099 CHECK_ARG (n);
10100 NEW_ARGLOADA (cfg, ins, n);
10101 MONO_ADD_INS (cfg->cbb, ins);
10102 *sp++ = ins;
10103 ip += 4;
10104 break;
10105 case CEE_STARG:
10106 CHECK_STACK (1);
10107 --sp;
10108 CHECK_OPSIZE (4);
10109 n = read16 (ip + 2);
10110 CHECK_ARG (n);
10111 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10112 UNVERIFIED;
10113 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10114 ip += 4;
10115 break;
10116 case CEE_LDLOC:
10117 CHECK_STACK_OVF (1);
10118 CHECK_OPSIZE (4);
10119 n = read16 (ip + 2);
10120 CHECK_LOCAL (n);
10121 EMIT_NEW_LOCLOAD (cfg, ins, n);
10122 *sp++ = ins;
10123 ip += 4;
10124 break;
10125 case CEE_LDLOCA: {
10126 unsigned char *tmp_ip;
10127 CHECK_STACK_OVF (1);
10128 CHECK_OPSIZE (4);
10129 n = read16 (ip + 2);
10130 CHECK_LOCAL (n);
10132 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10133 ip = tmp_ip;
10134 inline_costs += 1;
10135 break;
10138 EMIT_NEW_LOCLOADA (cfg, ins, n);
10139 *sp++ = ins;
10140 ip += 4;
10141 break;
10143 case CEE_STLOC:
10144 CHECK_STACK (1);
10145 --sp;
10146 CHECK_OPSIZE (4);
10147 n = read16 (ip + 2);
10148 CHECK_LOCAL (n);
10149 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10150 UNVERIFIED;
10151 emit_stloc_ir (cfg, sp, header, n);
10152 ip += 4;
10153 inline_costs += 1;
10154 break;
10155 case CEE_LOCALLOC:
10156 CHECK_STACK (1);
10157 --sp;
10158 if (sp != stack_start)
10159 UNVERIFIED;
10160 if (cfg->method != method)
10162 * Inlining this into a loop in a parent could lead to
10163 * stack overflows which is different behavior than the
10164 * non-inlined case, thus disable inlining in this case.
10166 goto inline_failure;
10168 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10169 ins->dreg = alloc_preg (cfg);
10170 ins->sreg1 = sp [0]->dreg;
10171 ins->type = STACK_PTR;
10172 MONO_ADD_INS (cfg->cbb, ins);
10174 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10175 if (init_locals)
10176 ins->flags |= MONO_INST_INIT;
10178 *sp++ = ins;
10179 ip += 2;
10180 break;
10181 case CEE_ENDFILTER: {
10182 MonoExceptionClause *clause, *nearest;
10183 int cc, nearest_num;
10185 CHECK_STACK (1);
10186 --sp;
10187 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10188 UNVERIFIED;
10189 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10190 ins->sreg1 = (*sp)->dreg;
10191 MONO_ADD_INS (bblock, ins);
10192 start_new_bblock = 1;
10193 ip += 2;
10195 nearest = NULL;
10196 nearest_num = 0;
10197 for (cc = 0; cc < header->num_clauses; ++cc) {
10198 clause = &header->clauses [cc];
10199 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10200 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10201 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10202 nearest = clause;
10203 nearest_num = cc;
10206 g_assert (nearest);
10207 if ((ip - header->code) != nearest->handler_offset)
10208 UNVERIFIED;
10210 break;
10212 case CEE_UNALIGNED_:
10213 ins_flag |= MONO_INST_UNALIGNED;
10214 /* FIXME: record alignment? we can assume 1 for now */
10215 CHECK_OPSIZE (3);
10216 ip += 3;
10217 break;
10218 case CEE_VOLATILE_:
10219 ins_flag |= MONO_INST_VOLATILE;
10220 ip += 2;
10221 break;
10222 case CEE_TAIL_:
10223 ins_flag |= MONO_INST_TAILCALL;
10224 cfg->flags |= MONO_CFG_HAS_TAIL;
10225 /* Can't inline tail calls at this time */
10226 inline_costs += 100000;
10227 ip += 2;
10228 break;
10229 case CEE_INITOBJ:
10230 CHECK_STACK (1);
10231 --sp;
10232 CHECK_OPSIZE (6);
10233 token = read32 (ip + 2);
10234 klass = mini_get_class (method, token, generic_context);
10235 CHECK_TYPELOAD (klass);
10236 if (generic_class_is_reference_type (cfg, klass))
10237 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10238 else
10239 mini_emit_initobj (cfg, *sp, NULL, klass);
10240 ip += 6;
10241 inline_costs += 1;
10242 break;
10243 case CEE_CONSTRAINED_:
10244 CHECK_OPSIZE (6);
10245 token = read32 (ip + 2);
10246 if (method->wrapper_type != MONO_WRAPPER_NONE)
10247 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10248 else
10249 constrained_call = mono_class_get_full (image, token, generic_context);
10250 CHECK_TYPELOAD (constrained_call);
10251 ip += 6;
10252 break;
10253 case CEE_CPBLK:
10254 case CEE_INITBLK: {
10255 MonoInst *iargs [3];
10256 CHECK_STACK (3);
10257 sp -= 3;
10259 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10260 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10261 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10262 /* emit_memset only works when val == 0 */
10263 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10264 } else {
10265 iargs [0] = sp [0];
10266 iargs [1] = sp [1];
10267 iargs [2] = sp [2];
10268 if (ip [1] == CEE_CPBLK) {
10269 MonoMethod *memcpy_method = get_memcpy_method ();
10270 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10271 } else {
10272 MonoMethod *memset_method = get_memset_method ();
10273 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10276 ip += 2;
10277 inline_costs += 1;
10278 break;
10280 case CEE_NO_:
10281 CHECK_OPSIZE (3);
10282 if (ip [2] & 0x1)
10283 ins_flag |= MONO_INST_NOTYPECHECK;
10284 if (ip [2] & 0x2)
10285 ins_flag |= MONO_INST_NORANGECHECK;
10286 /* we ignore the no-nullcheck for now since we
10287 * really do it explicitly only when doing callvirt->call
10289 ip += 3;
10290 break;
10291 case CEE_RETHROW: {
10292 MonoInst *load;
10293 int handler_offset = -1;
10295 for (i = 0; i < header->num_clauses; ++i) {
10296 MonoExceptionClause *clause = &header->clauses [i];
10297 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10298 handler_offset = clause->handler_offset;
10299 break;
10303 bblock->flags |= BB_EXCEPTION_UNSAFE;
10305 g_assert (handler_offset != -1);
10307 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10308 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10309 ins->sreg1 = load->dreg;
10310 MONO_ADD_INS (bblock, ins);
10312 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10313 MONO_ADD_INS (bblock, ins);
10315 sp = stack_start;
10316 link_bblock (cfg, bblock, end_bblock);
10317 start_new_bblock = 1;
10318 ip += 2;
10319 break;
10321 case CEE_SIZEOF: {
10322 guint32 align;
10323 int ialign;
10325 CHECK_STACK_OVF (1);
10326 CHECK_OPSIZE (6);
10327 token = read32 (ip + 2);
10328 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10329 MonoType *type = mono_type_create_from_typespec (image, token);
10330 token = mono_type_size (type, &ialign);
10331 } else {
10332 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10333 CHECK_TYPELOAD (klass);
10334 mono_class_init (klass);
10335 token = mono_class_value_size (klass, &align);
10337 EMIT_NEW_ICONST (cfg, ins, token);
10338 *sp++= ins;
10339 ip += 6;
10340 break;
10342 case CEE_REFANYTYPE: {
10343 MonoInst *src_var, *src;
10345 CHECK_STACK (1);
10346 --sp;
10348 // FIXME:
10349 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10350 if (!src_var)
10351 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10352 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10353 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10354 *sp++ = ins;
10355 ip += 2;
10356 break;
10358 case CEE_READONLY_:
10359 readonly = TRUE;
10360 ip += 2;
10361 break;
10363 case CEE_UNUSED56:
10364 case CEE_UNUSED57:
10365 case CEE_UNUSED70:
10366 case CEE_UNUSED:
10367 case CEE_UNUSED99:
10368 UNVERIFIED;
10370 default:
10371 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10372 UNVERIFIED;
10374 break;
10376 case CEE_UNUSED58:
10377 case CEE_UNUSED1:
10378 UNVERIFIED;
10380 default:
10381 g_warning ("opcode 0x%02x not handled", *ip);
10382 UNVERIFIED;
10385 if (start_new_bblock != 1)
10386 UNVERIFIED;
10388 bblock->cil_length = ip - bblock->cil_code;
10389 bblock->next_bb = end_bblock;
10391 if (cfg->method == method && cfg->domainvar) {
10392 MonoInst *store;
10393 MonoInst *get_domain;
10395 cfg->cbb = init_localsbb;
10397 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10398 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10400 else {
10401 get_domain->dreg = alloc_preg (cfg);
10402 MONO_ADD_INS (cfg->cbb, get_domain);
10404 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10405 MONO_ADD_INS (cfg->cbb, store);
10408 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10409 if (cfg->compile_aot)
10410 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10411 mono_get_got_var (cfg);
10412 #endif
10414 if (cfg->method == method && cfg->got_var)
10415 mono_emit_load_got_addr (cfg);
10417 if (init_locals) {
10418 MonoInst *store;
10420 cfg->cbb = init_localsbb;
10421 cfg->ip = NULL;
10422 for (i = 0; i < header->num_locals; ++i) {
10423 MonoType *ptype = header->locals [i];
10424 int t = ptype->type;
10425 dreg = cfg->locals [i]->dreg;
10427 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10428 t = mono_class_enum_basetype (ptype->data.klass)->type;
10429 if (ptype->byref) {
10430 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10431 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10432 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10433 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10434 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10435 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10436 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10437 ins->type = STACK_R8;
10438 ins->inst_p0 = (void*)&r8_0;
10439 ins->dreg = alloc_dreg (cfg, STACK_R8);
10440 MONO_ADD_INS (init_localsbb, ins);
10441 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10442 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10443 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10444 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10445 } else {
10446 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10451 if (cfg->init_ref_vars && cfg->method == method) {
10452 /* Emit initialization for ref vars */
10453 // FIXME: Avoid duplication initialization for IL locals.
10454 for (i = 0; i < cfg->num_varinfo; ++i) {
10455 MonoInst *ins = cfg->varinfo [i];
10457 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10458 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10462 /* Add a sequence point for method entry/exit events */
10463 if (seq_points) {
10464 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10465 MONO_ADD_INS (init_localsbb, ins);
10466 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10467 MONO_ADD_INS (cfg->bb_exit, ins);
10470 cfg->ip = NULL;
10472 if (cfg->method == method) {
10473 MonoBasicBlock *bb;
10474 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10475 bb->region = mono_find_block_region (cfg, bb->real_offset);
10476 if (cfg->spvars)
10477 mono_create_spvar_for_region (cfg, bb->region);
10478 if (cfg->verbose_level > 2)
10479 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10483 g_slist_free (class_inits);
10484 dont_inline = g_list_remove (dont_inline, method);
10486 if (inline_costs < 0) {
10487 char *mname;
10489 /* Method is too large */
10490 mname = mono_method_full_name (method, TRUE);
10491 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10492 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10493 g_free (mname);
10494 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10495 mono_basic_block_free (original_bb);
10496 return -1;
10499 if ((cfg->verbose_level > 2) && (cfg->method == method))
10500 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10502 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10503 mono_basic_block_free (original_bb);
10504 return inline_costs;
10506 exception_exit:
10507 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10508 goto cleanup;
10510 inline_failure:
10511 goto cleanup;
10513 load_error:
10514 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10515 goto cleanup;
10517 unverified:
10518 set_exception_type_from_invalid_il (cfg, method, ip);
10519 goto cleanup;
10521 cleanup:
10522 g_slist_free (class_inits);
10523 mono_basic_block_free (original_bb);
10524 dont_inline = g_list_remove (dont_inline, method);
10525 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10526 return -1;
10529 static int
10530 store_membase_reg_to_store_membase_imm (int opcode)
10532 switch (opcode) {
10533 case OP_STORE_MEMBASE_REG:
10534 return OP_STORE_MEMBASE_IMM;
10535 case OP_STOREI1_MEMBASE_REG:
10536 return OP_STOREI1_MEMBASE_IMM;
10537 case OP_STOREI2_MEMBASE_REG:
10538 return OP_STOREI2_MEMBASE_IMM;
10539 case OP_STOREI4_MEMBASE_REG:
10540 return OP_STOREI4_MEMBASE_IMM;
10541 case OP_STOREI8_MEMBASE_REG:
10542 return OP_STOREI8_MEMBASE_IMM;
10543 default:
10544 g_assert_not_reached ();
10547 return -1;
10550 #endif /* DISABLE_JIT */
10553 mono_op_to_op_imm (int opcode)
10555 switch (opcode) {
10556 case OP_IADD:
10557 return OP_IADD_IMM;
10558 case OP_ISUB:
10559 return OP_ISUB_IMM;
10560 case OP_IDIV:
10561 return OP_IDIV_IMM;
10562 case OP_IDIV_UN:
10563 return OP_IDIV_UN_IMM;
10564 case OP_IREM:
10565 return OP_IREM_IMM;
10566 case OP_IREM_UN:
10567 return OP_IREM_UN_IMM;
10568 case OP_IMUL:
10569 return OP_IMUL_IMM;
10570 case OP_IAND:
10571 return OP_IAND_IMM;
10572 case OP_IOR:
10573 return OP_IOR_IMM;
10574 case OP_IXOR:
10575 return OP_IXOR_IMM;
10576 case OP_ISHL:
10577 return OP_ISHL_IMM;
10578 case OP_ISHR:
10579 return OP_ISHR_IMM;
10580 case OP_ISHR_UN:
10581 return OP_ISHR_UN_IMM;
10583 case OP_LADD:
10584 return OP_LADD_IMM;
10585 case OP_LSUB:
10586 return OP_LSUB_IMM;
10587 case OP_LAND:
10588 return OP_LAND_IMM;
10589 case OP_LOR:
10590 return OP_LOR_IMM;
10591 case OP_LXOR:
10592 return OP_LXOR_IMM;
10593 case OP_LSHL:
10594 return OP_LSHL_IMM;
10595 case OP_LSHR:
10596 return OP_LSHR_IMM;
10597 case OP_LSHR_UN:
10598 return OP_LSHR_UN_IMM;
10600 case OP_COMPARE:
10601 return OP_COMPARE_IMM;
10602 case OP_ICOMPARE:
10603 return OP_ICOMPARE_IMM;
10604 case OP_LCOMPARE:
10605 return OP_LCOMPARE_IMM;
10607 case OP_STORE_MEMBASE_REG:
10608 return OP_STORE_MEMBASE_IMM;
10609 case OP_STOREI1_MEMBASE_REG:
10610 return OP_STOREI1_MEMBASE_IMM;
10611 case OP_STOREI2_MEMBASE_REG:
10612 return OP_STOREI2_MEMBASE_IMM;
10613 case OP_STOREI4_MEMBASE_REG:
10614 return OP_STOREI4_MEMBASE_IMM;
10616 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10617 case OP_X86_PUSH:
10618 return OP_X86_PUSH_IMM;
10619 case OP_X86_COMPARE_MEMBASE_REG:
10620 return OP_X86_COMPARE_MEMBASE_IMM;
10621 #endif
10622 #if defined(TARGET_AMD64)
10623 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10624 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10625 #endif
10626 case OP_VOIDCALL_REG:
10627 return OP_VOIDCALL;
10628 case OP_CALL_REG:
10629 return OP_CALL;
10630 case OP_LCALL_REG:
10631 return OP_LCALL;
10632 case OP_FCALL_REG:
10633 return OP_FCALL;
10634 case OP_LOCALLOC:
10635 return OP_LOCALLOC_IMM;
10638 return -1;
10641 static int
10642 ldind_to_load_membase (int opcode)
10644 switch (opcode) {
10645 case CEE_LDIND_I1:
10646 return OP_LOADI1_MEMBASE;
10647 case CEE_LDIND_U1:
10648 return OP_LOADU1_MEMBASE;
10649 case CEE_LDIND_I2:
10650 return OP_LOADI2_MEMBASE;
10651 case CEE_LDIND_U2:
10652 return OP_LOADU2_MEMBASE;
10653 case CEE_LDIND_I4:
10654 return OP_LOADI4_MEMBASE;
10655 case CEE_LDIND_U4:
10656 return OP_LOADU4_MEMBASE;
10657 case CEE_LDIND_I:
10658 return OP_LOAD_MEMBASE;
10659 case CEE_LDIND_REF:
10660 return OP_LOAD_MEMBASE;
10661 case CEE_LDIND_I8:
10662 return OP_LOADI8_MEMBASE;
10663 case CEE_LDIND_R4:
10664 return OP_LOADR4_MEMBASE;
10665 case CEE_LDIND_R8:
10666 return OP_LOADR8_MEMBASE;
10667 default:
10668 g_assert_not_reached ();
10671 return -1;
10674 static int
10675 stind_to_store_membase (int opcode)
10677 switch (opcode) {
10678 case CEE_STIND_I1:
10679 return OP_STOREI1_MEMBASE_REG;
10680 case CEE_STIND_I2:
10681 return OP_STOREI2_MEMBASE_REG;
10682 case CEE_STIND_I4:
10683 return OP_STOREI4_MEMBASE_REG;
10684 case CEE_STIND_I:
10685 case CEE_STIND_REF:
10686 return OP_STORE_MEMBASE_REG;
10687 case CEE_STIND_I8:
10688 return OP_STOREI8_MEMBASE_REG;
10689 case CEE_STIND_R4:
10690 return OP_STORER4_MEMBASE_REG;
10691 case CEE_STIND_R8:
10692 return OP_STORER8_MEMBASE_REG;
10693 default:
10694 g_assert_not_reached ();
10697 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to its absolute-address LOAD*_MEM variant,
 * or return -1 when the current target has no such opcode.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (opcode == OP_LOAD_MEMBASE)
		return OP_LOAD_MEM;
	if (opcode == OP_LOADU1_MEMBASE)
		return OP_LOADU1_MEM;
	if (opcode == OP_LOADU2_MEMBASE)
		return OP_LOADU2_MEM;
	if (opcode == OP_LOADI4_MEMBASE)
		return OP_LOADI4_MEM;
	if (opcode == OP_LOADU4_MEMBASE)
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	if (opcode == OP_LOADI8_MEMBASE)
		return OP_LOADI8_MEM;
#endif
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which combines OPCODE with a following store of its
 * result into memory (STORE_OPCODE), or -1 if no such combined opcode
 * exists on the current target. Only implemented for x86/amd64.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only natural-word / 4 byte stores can be fused */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	if (opcode == OP_IADD)
		return OP_X86_ADD_MEMBASE_REG;
	if (opcode == OP_ISUB)
		return OP_X86_SUB_MEMBASE_REG;
	if (opcode == OP_IAND)
		return OP_X86_AND_MEMBASE_REG;
	if (opcode == OP_IOR)
		return OP_X86_OR_MEMBASE_REG;
	if (opcode == OP_IXOR)
		return OP_X86_XOR_MEMBASE_REG;
	if (opcode == OP_ADD_IMM || opcode == OP_IADD_IMM)
		return OP_X86_ADD_MEMBASE_IMM;
	if (opcode == OP_SUB_IMM || opcode == OP_ISUB_IMM)
		return OP_X86_SUB_MEMBASE_IMM;
	if (opcode == OP_AND_IMM || opcode == OP_IAND_IMM)
		return OP_X86_AND_MEMBASE_IMM;
	if (opcode == OP_OR_IMM || opcode == OP_IOR_IMM)
		return OP_X86_OR_MEMBASE_IMM;
	if (opcode == OP_XOR_IMM || opcode == OP_IXOR_IMM)
		return OP_X86_XOR_MEMBASE_IMM;
	/* A move whose result is immediately stored back degenerates to a nop */
	if (opcode == OP_MOVE)
		return OP_NOP;
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	if (opcode == OP_IADD)
		return OP_X86_ADD_MEMBASE_REG;
	if (opcode == OP_ISUB)
		return OP_X86_SUB_MEMBASE_REG;
	if (opcode == OP_IAND)
		return OP_X86_AND_MEMBASE_REG;
	if (opcode == OP_IOR)
		return OP_X86_OR_MEMBASE_REG;
	if (opcode == OP_IXOR)
		return OP_X86_XOR_MEMBASE_REG;
	if (opcode == OP_IADD_IMM)
		return OP_X86_ADD_MEMBASE_IMM;
	if (opcode == OP_ISUB_IMM)
		return OP_X86_SUB_MEMBASE_IMM;
	if (opcode == OP_IAND_IMM)
		return OP_X86_AND_MEMBASE_IMM;
	if (opcode == OP_IOR_IMM)
		return OP_X86_OR_MEMBASE_IMM;
	if (opcode == OP_IXOR_IMM)
		return OP_X86_XOR_MEMBASE_IMM;
	if (opcode == OP_LADD)
		return OP_AMD64_ADD_MEMBASE_REG;
	if (opcode == OP_LSUB)
		return OP_AMD64_SUB_MEMBASE_REG;
	if (opcode == OP_LAND)
		return OP_AMD64_AND_MEMBASE_REG;
	if (opcode == OP_LOR)
		return OP_AMD64_OR_MEMBASE_REG;
	if (opcode == OP_LXOR)
		return OP_AMD64_XOR_MEMBASE_REG;
	if (opcode == OP_ADD_IMM || opcode == OP_LADD_IMM)
		return OP_AMD64_ADD_MEMBASE_IMM;
	if (opcode == OP_SUB_IMM || opcode == OP_LSUB_IMM)
		return OP_AMD64_SUB_MEMBASE_IMM;
	if (opcode == OP_AND_IMM || opcode == OP_LAND_IMM)
		return OP_AMD64_AND_MEMBASE_IMM;
	if (opcode == OP_OR_IMM || opcode == OP_LOR_IMM)
		return OP_AMD64_OR_MEMBASE_IMM;
	if (opcode == OP_XOR_IMM || opcode == OP_LXOR_IMM)
		return OP_AMD64_XOR_MEMBASE_IMM;
	if (opcode == OP_MOVE)
		return OP_NOP;
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which combines OPCODE with the store of its result
 * into memory (STORE_OPCODE), or -1 if the combination is not supported
 * on the current target. Currently only the x86/amd64 SETcc-to-memory
 * forms are handled, and only for 1 byte stores.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/*
	 * The original code fell through from OP_ICEQ into OP_CNE when the
	 * store opcode didn't match; that happened to be harmless (both paths
	 * end at the final return -1) but was an implicit fallthrough.
	 * Terminate each case explicitly instead.
	 */
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which folds a memory load (LOAD_OPCODE) into OPCODE's
 * first source operand, or -1 if the current target does not support the
 * folding. Only implemented for x86/amd64.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only natural-word / 4 byte loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		/* NOTE(review): under __mono_ilp32__ OP_LOAD_MEMBASE is presumably a
		 * 4 byte load and so is excluded from the 8 byte push — confirm */
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode which folds a memory load (LOAD_OPCODE) into OPCODE's
 * second source operand, or -1 if the current target does not support the
 * folding. Only implemented for x86/amd64.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* Only natural-word / 4 byte loads can be folded on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	/* 4 byte loads feed the 32 bit (I*) opcodes; NOTE(review): under
	 * __mono_ilp32__ OP_LOAD_MEMBASE is presumably 4 bytes wide too — confirm */
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
	/* 8 byte loads feed the 64 bit (L*) opcodes */
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are
 * software-emulated on the current target, so they are never converted
 * to their immediate variants.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	if (opcode == OP_LSHR || opcode == OP_LSHL || opcode == OP_LSHR_UN)
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Integer division/remainder is emulated on some targets */
	if (opcode == OP_IDIV || opcode == OP_IDIV_UN || opcode == OP_IREM || opcode == OP_IREM_UN)
		return -1;
#endif
	return mono_op_to_op_imm (opcode);
}
11003 #ifndef DISABLE_JIT
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them. Conversely, variables which turn out to be used in only one
 * bblock are flagged dead as variables so they can live purely in a vreg,
 * and the varinfo/vars tables are compacted afterwards.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
{
	/* Per-vreg: 0 = unseen, (block_num + 1) = seen in exactly that bblock,
	 * -1 = seen in more than one bblock */
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/* NOTE(review): sizeof (gint32*) looks like it was meant to be
	 * sizeof (gint32), and the + 1 to be inside the multiplication; the
	 * current expression over-allocates on 64 bit rather than under-allocating,
	 * so it is wasteful but not unsafe — confirm and clean up. */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* CIL opcodes must already have been lowered to machine IR */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* Visit dreg, sreg1, sreg2 and sreg3 of the instruction in turn */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;
				}

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);
					}

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
				}
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Second distinct bblock for this vreg: make it global,
					 * except for hard registers which are global already */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						/* Create a variable of the matching stack type for the vreg */
						switch (regtype) {
						case 'i':
							if (vreg_is_ref (cfg, vreg))
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
							else
								mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();
						}
					}

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;
				}
			}
		}
	}

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
			/* Arguments are implicitly global */
			/* Putting R4 vars into registers doesn't work currently */
			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
				/*
				 * Make that the variable's liveness interval doesn't contain a call, since
				 * that would cause the lvreg to be spilled, making the whole optimization
				 * useless.
				 */
				/* This is too slow for JIT compilation */
#if 0
				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
					MonoInst *ins;
					int def_index, call_index, ins_index;
					gboolean spilled = FALSE;

					def_index = -1;
					call_index = -1;
					ins_index = 0;
					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
						const char *spec = INS_INFO (ins->opcode);

						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
							def_index = ins_index;

						/* NOTE(review): both sides of this || test SRC1/sreg1;
						 * the second was presumably meant to test SRC2/sreg2.
						 * Dead (#if 0) code, but fix before re-enabling. */
						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
							if (call_index > def_index) {
								spilled = TRUE;
								break;
							}
						}

						if (MONO_IS_CALL (ins))
							call_index = ins_index;

						ins_index ++;
					}

					if (spilled)
						break;
				}
#endif

				if (G_UNLIKELY (cfg->verbose_level > 2))
					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
				var->flags |= MONO_INST_IS_DEAD;
				cfg->vreg_to_inst [var->dreg] = NULL;
			}
			break;
		}
	}

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				/* Slide the live entry down over the dead ones */
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
				if (cfg->varinfo [pos]->type == STACK_I8) {
					/* Modify the two component vars too */
					MonoInst *var1;

					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
					var1->inst_c0 = pos;
					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
					var1->inst_c0 = pos;
				}
#endif
			}
			pos ++;
		}
	}
	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
}
11241 * mono_spill_global_vars:
11243 * Generate spill code for variables which are not allocated to registers,
11244 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11245 * code is generated which could be optimized by the local optimization passes.
/*
 * Runs after global register allocation, over every bblock and instruction.
 * For each def/use of a vreg whose variable lives on the stack (OP_REGOFFSET),
 * it either fuses the memory access into the instruction itself (the various
 * op_to_op_*_membase () conversions) or emits an explicit spill store/load
 * around the instruction.  It also records instruction-precise live ranges
 * into live_range_start/end for debugging info.
 */
11247 void
11248 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11250 MonoBasicBlock *bb;
/* Scratch spec string used when an instruction's dreg is swapped into sreg2
 * (store opcodes); always NUL-terminated by the memset below. */
11251 char spec2 [16];
11252 int orig_next_vreg;
/*
 * vreg_to_lvreg [vreg] caches the lvreg that currently holds the value of a
 * stack variable, so repeated uses inside a bblock avoid reloading it.
 * lvregs/lvregs_len remember which cache entries are live so they can be
 * cleared cheaply at bblock boundaries and across calls.
 */
11253 guint32 *vreg_to_lvreg;
11254 guint32 *lvregs;
11255 guint32 i, lvregs_len;
11256 gboolean dest_has_lvreg = FALSE;
/* Maps a spec regtype character ('i', 'l', 'f', 'x') to the stack type
 * passed to alloc_dreg (); only those slots are initialized below. */
11257 guint32 stacktypes [128];
11258 MonoInst **live_range_start, **live_range_end;
11259 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11261 *need_local_opts = FALSE;
11263 memset (spec2, 0, sizeof (spec2));
11265 /* FIXME: Move this function to mini.c */
11266 stacktypes ['i'] = STACK_PTR;
11267 stacktypes ['l'] = STACK_I8;
11268 stacktypes ['f'] = STACK_R8;
11269 #ifdef MONO_ARCH_SIMD_INTRINSICS
11270 stacktypes ['x'] = STACK_VTYPE;
11271 #endif
11273 #if SIZEOF_REGISTER == 4
11274 /* Create MonoInsts for longs */
/* On 32 bit targets each 64 bit (and soft-float R8) variable has two
 * component vregs (dreg + 1 = less significant word, dreg + 2 = more
 * significant word); give them REGOFFSET slots inside the parent's slot. */
11275 for (i = 0; i < cfg->num_varinfo; i++) {
11276 MonoInst *ins = cfg->varinfo [i];
11278 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11279 switch (ins->type) {
11280 case STACK_R8:
11281 case STACK_I8: {
11282 MonoInst *tree;
11284 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11285 break;
11287 g_assert (ins->opcode == OP_REGOFFSET);
11289 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11290 g_assert (tree);
11291 tree->opcode = OP_REGOFFSET;
11292 tree->inst_basereg = ins->inst_basereg;
11293 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11295 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11296 g_assert (tree);
11297 tree->opcode = OP_REGOFFSET;
11298 tree->inst_basereg = ins->inst_basereg;
11299 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11300 break;
11302 default:
11303 break;
11307 #endif
11309 if (cfg->compute_gc_maps) {
11310 /* registers need liveness info even for !non refs */
11311 for (i = 0; i < cfg->num_varinfo; i++) {
11312 MonoInst *ins = cfg->varinfo [i];
11314 if (ins->opcode == OP_REGVAR)
11315 ins->flags |= MONO_INST_GC_TRACK;
11319 /* FIXME: widening and truncation */
11322 * As an optimization, when a variable allocated to the stack is first loaded into
11323 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11324 * the variable again.
/* orig_next_vreg snapshots the vreg space before we allocate new lvregs,
 * so "vreg < orig_next_vreg" below distinguishes original variables from
 * lvregs created by this pass. */
11326 orig_next_vreg = cfg->next_vreg;
11327 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed capacity of 1024; guarded by the g_assert (lvregs_len < 1024) checks below. */
11328 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11329 lvregs_len = 0;
11332 * These arrays contain the first and last instructions accessing a given
11333 * variable.
11334 * Since we emit bblocks in the same order we process them here, and we
11335 * don't split live ranges, these will precisely describe the live range of
11336 * the variable, i.e. the instruction range where a valid value can be found
11337 * in the variables location.
11338 * The live range is computed using the liveness info computed by the liveness pass.
11339 * We can't use vmv->range, since that is an abstract live range, and we need
11340 * one which is instruction precise.
11341 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11343 /* FIXME: Only do this if debugging info is requested */
11344 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11345 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11346 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11347 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11349 /* Add spill loads/stores */
11350 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11351 MonoInst *ins;
11353 if (cfg->verbose_level > 2)
11354 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11356 /* Clear vreg_to_lvreg array */
/* lvregs do not survive bblock boundaries: only the used entries are reset. */
11357 for (i = 0; i < lvregs_len; i++)
11358 vreg_to_lvreg [lvregs [i]] = 0;
11359 lvregs_len = 0;
11361 cfg->cbb = bb;
11362 MONO_BB_FOR_EACH_INS (bb, ins) {
11363 const char *spec = INS_INFO (ins->opcode);
11364 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11365 gboolean store, no_lvreg;
11366 int sregs [MONO_MAX_SRC_REGS];
11368 if (G_UNLIKELY (cfg->verbose_level > 2))
11369 mono_print_ins (ins);
11371 if (ins->opcode == OP_NOP)
11372 continue;
11375 * We handle LDADDR here as well, since it can only be decomposed
11376 * when variable addresses are known.
11378 if (ins->opcode == OP_LDADDR) {
11379 MonoInst *var = ins->inst_p0;
11381 if (var->opcode == OP_VTARG_ADDR) {
11382 /* Happens on SPARC/S390 where vtypes are passed by reference */
11383 MonoInst *vtaddr = var->inst_left;
11384 if (vtaddr->opcode == OP_REGVAR) {
11385 ins->opcode = OP_MOVE;
11386 ins->sreg1 = vtaddr->dreg;
11388 else if (var->inst_left->opcode == OP_REGOFFSET) {
11389 ins->opcode = OP_LOAD_MEMBASE;
11390 ins->inst_basereg = vtaddr->inst_basereg;
11391 ins->inst_offset = vtaddr->inst_offset;
11392 } else
11393 NOT_IMPLEMENTED;
11394 } else {
11395 g_assert (var->opcode == OP_REGOFFSET);
/* The variable's address is simply frame base + offset. */
11397 ins->opcode = OP_ADD_IMM;
11398 ins->sreg1 = var->inst_basereg;
11399 ins->inst_imm = var->inst_offset;
11402 *need_local_opts = TRUE;
11403 spec = INS_INFO (ins->opcode);
/* Only low level opcodes are expected by now; any remaining CIL level
 * opcode indicates a decomposition bug upstream. */
11406 if (ins->opcode < MONO_CEE_LAST) {
11407 mono_print_ins (ins);
11408 g_assert_not_reached ();
11412 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11413 * src register.
11414 * FIXME:
/* Swap dreg and sreg2 (and build a matching spec in spec2) so the SREGS
 * loop below treats the base register as a source; the swap is undone
 * in the 'if (store)' block near the end of the loop body. */
11416 if (MONO_IS_STORE_MEMBASE (ins)) {
11417 tmp_reg = ins->dreg;
11418 ins->dreg = ins->sreg2;
11419 ins->sreg2 = tmp_reg;
11420 store = TRUE;
11422 spec2 [MONO_INST_DEST] = ' ';
11423 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11424 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11425 spec2 [MONO_INST_SRC3] = ' ';
11426 spec = spec2;
11427 } else if (MONO_IS_STORE_MEMINDEX (ins))
11428 g_assert_not_reached ();
11429 else
11430 store = FALSE;
11431 no_lvreg = FALSE;
11433 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11434 printf ("\t %.3s %d", spec, ins->dreg);
11435 num_sregs = mono_inst_get_src_registers (ins, sregs);
11436 for (srcindex = 0; srcindex < 3; ++srcindex)
11437 printf (" %d", sregs [srcindex]);
11438 printf ("\n");
11441 /***************/
11442 /* DREG */
11443 /***************/
11444 regtype = spec [MONO_INST_DEST];
11445 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11446 prev_dreg = -1;
11448 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11449 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11450 MonoInst *store_ins;
11451 int store_opcode;
11452 MonoInst *def_ins = ins;
11453 int dreg = ins->dreg; /* The original vreg */
11455 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11457 if (var->opcode == OP_REGVAR) {
/* Variable got a hardware register: just rename the dreg. */
11458 ins->dreg = var->dreg;
11459 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11461 * Instead of emitting a load+store, use a _membase opcode.
11463 g_assert (var->opcode == OP_REGOFFSET);
11464 if (ins->opcode == OP_MOVE) {
/* Read-modify-write move of a variable onto itself: a no-op. */
11465 NULLIFY_INS (ins);
11466 def_ins = NULL;
11467 } else {
11468 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11469 ins->inst_basereg = var->inst_basereg;
11470 ins->inst_offset = var->inst_offset;
11471 ins->dreg = -1;
11473 spec = INS_INFO (ins->opcode);
11474 } else {
11475 guint32 lvreg;
11477 g_assert (var->opcode == OP_REGOFFSET);
11479 prev_dreg = ins->dreg;
11481 /* Invalidate any previous lvreg for this vreg */
11482 vreg_to_lvreg [ins->dreg] = 0;
11484 lvreg = 0;
11486 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Under soft float an R8 is stored as two 32 bit words. */
11487 regtype = 'l';
11488 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg, then store it back to the variable. */
11491 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11493 if (regtype == 'l') {
11494 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11495 mono_bblock_insert_after_ins (bb, ins, store_ins);
11496 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11497 mono_bblock_insert_after_ins (bb, ins, store_ins);
11498 def_ins = store_ins;
11500 else {
11501 g_assert (store_opcode != OP_STOREV_MEMBASE);
11503 /* Try to fuse the store into the instruction itself */
11504 /* FIXME: Add more instructions */
11505 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant def: turn it into a store-immediate directly to the variable. */
11506 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11507 ins->inst_imm = ins->inst_c0;
11508 ins->inst_destbasereg = var->inst_basereg;
11509 ins->inst_offset = var->inst_offset;
11510 spec = INS_INFO (ins->opcode);
11511 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Plain move into the variable: becomes a single store of the source. */
11512 ins->opcode = store_opcode;
11513 ins->inst_destbasereg = var->inst_basereg;
11514 ins->inst_offset = var->inst_offset;
11516 no_lvreg = TRUE;
/* The instruction is now a store: swap dreg/sreg2 like above so the
 * SREGS loop processes the stored value; swapped back at the end. */
11518 tmp_reg = ins->dreg;
11519 ins->dreg = ins->sreg2;
11520 ins->sreg2 = tmp_reg;
11521 store = TRUE;
11523 spec2 [MONO_INST_DEST] = ' ';
11524 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11525 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11526 spec2 [MONO_INST_SRC3] = ' ';
11527 spec = spec2;
11528 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11529 // FIXME: The backends expect the base reg to be in inst_basereg
11530 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11531 ins->dreg = -1;
11532 ins->inst_basereg = var->inst_basereg;
11533 ins->inst_offset = var->inst_offset;
11534 spec = INS_INFO (ins->opcode);
11535 } else {
11536 /* printf ("INS: "); mono_print_ins (ins); */
11537 /* Create a store instruction */
11538 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11540 /* Insert it after the instruction */
11541 mono_bblock_insert_after_ins (bb, ins, store_ins);
11543 def_ins = store_ins;
11546 * We can't assign ins->dreg to var->dreg here, since the
11547 * sregs could use it. So set a flag, and do it after
11548 * the sregs.
11550 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11551 dest_has_lvreg = TRUE;
/* Record the first definition point for the live range of the original vreg. */
11556 if (def_ins && !live_range_start [dreg]) {
11557 live_range_start [dreg] = def_ins;
11558 live_range_start_bb [dreg] = bb;
11561 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11562 MonoInst *tmp;
11564 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11565 tmp->inst_c1 = dreg;
11566 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11570 /************/
11571 /* SREGS */
11572 /************/
11573 num_sregs = mono_inst_get_src_registers (ins, sregs);
11574 for (srcindex = 0; srcindex < 3; ++srcindex) {
11575 regtype = spec [MONO_INST_SRC1 + srcindex];
11576 sreg = sregs [srcindex];
11578 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11579 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11580 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11581 MonoInst *use_ins = ins;
11582 MonoInst *load_ins;
11583 guint32 load_opcode;
11585 if (var->opcode == OP_REGVAR) {
/* Variable is in a hardware register: rename the use and update its live range. */
11586 sregs [srcindex] = var->dreg;
11587 //mono_inst_set_src_registers (ins, sregs);
11588 live_range_end [sreg] = use_ins;
11589 live_range_end_bb [sreg] = bb;
11591 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11592 MonoInst *tmp;
11594 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11595 /* var->dreg is a hreg */
11596 tmp->inst_c1 = sreg;
11597 mono_bblock_insert_after_ins (bb, ins, tmp);
11600 continue;
11603 g_assert (var->opcode == OP_REGOFFSET);
11605 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11607 g_assert (load_opcode != OP_LOADV_MEMBASE);
11609 if (vreg_to_lvreg [sreg]) {
11610 g_assert (vreg_to_lvreg [sreg] != -1);
11612 /* The variable is already loaded to an lvreg */
11613 if (G_UNLIKELY (cfg->verbose_level > 2))
11614 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11615 sregs [srcindex] = vreg_to_lvreg [sreg];
11616 //mono_inst_set_src_registers (ins, sregs);
11617 continue;
11620 /* Try to fuse the load into the instruction */
11621 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11622 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11623 sregs [0] = var->inst_basereg;
11624 //mono_inst_set_src_registers (ins, sregs);
11625 ins->inst_offset = var->inst_offset;
11626 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11627 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11628 sregs [1] = var->inst_basereg;
11629 //mono_inst_set_src_registers (ins, sregs);
11630 ins->inst_offset = var->inst_offset;
11631 } else {
11632 if (MONO_IS_REAL_MOVE (ins)) {
/* A move from the variable: load straight into the move's dreg and
 * drop the move itself. */
11633 ins->opcode = OP_NOP;
11634 sreg = ins->dreg;
11635 } else {
11636 //printf ("%d ", srcindex); mono_print_ins (ins);
11638 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value as the variable's lvreg, unless the
 * variable is volatile/indirect or an fp-stack store would interfere. */
11640 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11641 if (var->dreg == prev_dreg) {
11643 * sreg refers to the value loaded by the load
11644 * emitted below, but we need to use ins->dreg
11645 * since it refers to the store emitted earlier.
11647 sreg = ins->dreg;
11649 g_assert (sreg != -1);
11650 vreg_to_lvreg [var->dreg] = sreg;
11651 g_assert (lvregs_len < 1024);
11652 lvregs [lvregs_len ++] = var->dreg;
11656 sregs [srcindex] = sreg;
11657 //mono_inst_set_src_registers (ins, sregs);
11659 if (regtype == 'l') {
/* 64 bit load on a 32 bit target: load the two component words
 * (MS word first so use_ins ends up as the first inserted load). */
11660 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11661 mono_bblock_insert_before_ins (bb, ins, load_ins);
11662 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11663 mono_bblock_insert_before_ins (bb, ins, load_ins);
11664 use_ins = load_ins;
11666 else {
11667 #if SIZEOF_REGISTER == 4
11668 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11669 #endif
11670 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11671 mono_bblock_insert_before_ins (bb, ins, load_ins);
11672 use_ins = load_ins;
/* Only original variables (not lvregs allocated by this pass) have
 * live-range slots; see orig_next_vreg above. */
11676 if (var->dreg < orig_next_vreg) {
11677 live_range_end [var->dreg] = use_ins;
11678 live_range_end_bb [var->dreg] = bb;
11681 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11682 MonoInst *tmp;
11684 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11685 tmp->inst_c1 = var->dreg;
11686 mono_bblock_insert_after_ins (bb, ins, tmp);
11690 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG phase: now that the sregs are processed it is
 * safe to record ins->dreg as the lvreg holding prev_dreg's value. */
11692 if (dest_has_lvreg) {
11693 g_assert (ins->dreg != -1);
11694 vreg_to_lvreg [prev_dreg] = ins->dreg;
11695 g_assert (lvregs_len < 1024);
11696 lvregs [lvregs_len ++] = prev_dreg;
11697 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
11700 if (store) {
11701 tmp_reg = ins->dreg;
11702 ins->dreg = ins->sreg2;
11703 ins->sreg2 = tmp_reg;
/* Calls invalidate every cached lvreg (their values don't survive the call). */
11706 if (MONO_IS_CALL (ins)) {
11707 /* Clear vreg_to_lvreg array */
11708 for (i = 0; i < lvregs_len; i++)
11709 vreg_to_lvreg [lvregs [i]] = 0;
11710 lvregs_len = 0;
11711 } else if (ins->opcode == OP_NOP) {
11712 ins->dreg = -1;
11713 MONO_INST_NULLIFY_SREGS (ins);
11716 if (cfg->verbose_level > 2)
11717 mono_print_ins_index (1, ins);
11720 /* Extend the live range based on the liveness info */
11721 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11722 for (i = 0; i < cfg->num_varinfo; i ++) {
11723 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11725 if (vreg_is_volatile (cfg, vi->vreg))
11726 /* The liveness info is incomplete */
11727 continue;
11729 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11730 /* Live from at least the first ins of this bb */
11731 live_range_start [vi->vreg] = bb->code;
11732 live_range_start_bb [vi->vreg] = bb;
11735 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11736 /* Live at least until the last ins of this bb */
11737 live_range_end [vi->vreg] = bb->last_ins;
11738 live_range_end_bb [vi->vreg] = bb;
11744 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11746 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11747 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11749 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11750 for (i = 0; i < cfg->num_varinfo; ++i) {
11751 int vreg = MONO_VARINFO (cfg, i)->vreg;
11752 MonoInst *ins;
11754 if (live_range_start [vreg]) {
11755 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11756 ins->inst_c0 = i;
11757 ins->inst_c1 = vreg;
11758 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11760 if (live_range_end [vreg]) {
11761 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11762 ins->inst_c0 = i;
11763 ins->inst_c1 = vreg;
11764 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11765 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11766 else
11767 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11771 #endif
/* The live-range scratch arrays are heap allocated (g_new0 above), unlike the
 * mempool-backed lvreg arrays, so they must be freed here. */
11773 g_free (live_range_start);
11774 g_free (live_range_end);
11775 g_free (live_range_start_bb);
11776 g_free (live_range_end_bb);
11780 * FIXME:
11781 * - use 'iadd' instead of 'int_add'
11782 * - handling ovf opcodes: decompose in method_to_ir.
11783 * - unify iregs/fregs
11784 * -> partly done, the missing parts are:
11785 * - a more complete unification would involve unifying the hregs as well, so
11786 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11787 * would no longer map to the machine hregs, so the code generators would need to
11788 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11789 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11790 * fp/non-fp branches speeds it up by about 15%.
11791 * - use sext/zext opcodes instead of shifts
11792 * - add OP_ICALL
11793 * - get rid of TEMPLOADs if possible and use vregs instead
11794 * - clean up usage of OP_P/OP_ opcodes
11795 * - cleanup usage of DUMMY_USE
11796 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11797 * stack
11798 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11799 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11800 * - make sure handle_stack_args () is called before the branch is emitted
11801 * - when the new IR is done, get rid of all unused stuff
11802 * - COMPARE/BEQ as separate instructions or unify them ?
11803 * - keeping them separate allows specialized compare instructions like
11804 * compare_imm, compare_membase
11805 * - most back ends unify fp compare+branch, fp compare+ceq
11806 * - integrate mono_save_args into inline_method
11807 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11808 * - handle long shift opts on 32 bit platforms somehow: they require
11809 * 3 sregs (2 for arg1 and 1 for arg2)
11810 * - make byref a 'normal' type.
11811 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11812 * variable if needed.
11813 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11814 * like inline_method.
11815 * - remove inlining restrictions
11816 * - fix LNEG and enable cfold of INEG
11817 * - generalize x86 optimizations like ldelema as a peephole optimization
11818 * - add store_mem_imm for amd64
11819 * - optimize the loading of the interruption flag in the managed->native wrappers
11820 * - avoid special handling of OP_NOP in passes
11821 * - move code inserting instructions into one function/macro.
11822 * - try a coalescing phase after liveness analysis
11823 * - add float -> vreg conversion + local optimizations on !x86
11824 * - figure out how to handle decomposed branches during optimizations, ie.
11825 * compare+branch, op_jump_table+op_br etc.
11826 * - promote RuntimeXHandles to vregs
11827 * - vtype cleanups:
11828 * - add a NEW_VARLOADA_VREG macro
11829 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11830 * accessing vtype fields.
11831 * - get rid of I8CONST on 64 bit platforms
11832 * - dealing with the increase in code size due to branches created during opcode
11833 * decomposition:
11834 * - use extended basic blocks
11835 * - all parts of the JIT
11836 * - handle_global_vregs () && local regalloc
11837 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11838 * - sources of increase in code size:
11839 * - vtypes
11840 * - long compares
11841 * - isinst and castclass
11842 * - lvregs not allocated to global registers even if used multiple times
11843 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11844 * meaningful.
11845 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11846 * - add all micro optimizations from the old JIT
11847 * - put tree optimizations into the deadce pass
11848 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11849 * specific function.
11850 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11851 * fcompare + branchCC.
11852 * - create a helper function for allocating a stack slot, taking into account
11853 * MONO_CFG_HAS_SPILLUP.
11854 * - merge r68207.
11855 * - merge the ia64 switch changes.
11856 * - optimize mono_regstate2_alloc_int/float.
11857 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11858 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11859 * parts of the tree could be separated by other instructions, killing the tree
11860 * arguments, or stores killing loads etc. Also, should we fold loads into other
11861 * instructions if the result of the load is used multiple times ?
11862 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11863 * - LAST MERGE: 108395.
11864 * - when returning vtypes in registers, generate IR and append it to the end of the
11865 * last bb instead of doing it in the epilog.
11866 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11871 NOTES
11872 -----
11874 - When to decompose opcodes:
11875 - earlier: this makes some optimizations hard to implement, since the low level IR
11876 no longer contains the necessary information. But it is easier to do.
11877 - later: harder to implement, enables more optimizations.
11878 - Branches inside bblocks:
11879 - created when decomposing complex opcodes.
11880 - branches to another bblock: harmless, but not tracked by the branch
11881 optimizations, so need to branch to a label at the start of the bblock.
11882 - branches to inside the same bblock: very problematic, trips up the local
11883 reg allocator. Can be fixed by splitting the current bblock, but that is a
11884 complex operation, since some local vregs can become global vregs etc.
11885 - Local/global vregs:
11886 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11887 local register allocator.
11888 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11889 structure, created by mono_create_var (). Assigned to hregs or the stack by
11890 the global register allocator.
11891 - When to do optimizations like alu->alu_imm:
11892 - earlier -> saves work later on since the IR will be smaller/simpler
11893 - later -> can work on more instructions
11894 - Handling of valuetypes:
11895 - When a vtype is pushed on the stack, a new temporary is created, an
11896 instruction computing its address (LDADDR) is emitted and pushed on
11897 the stack. Need to optimize cases when the vtype is used immediately as in
11898 argument passing, stloc etc.
11899 - Instead of the to_end stuff in the old JIT, simply call the function handling
11900 the values on the stack before emitting the last instruction of the bb.
11903 #endif /* DISABLE_JIT */