1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
32 #endif
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
54 #include "mini.h"
55 #include "trace.h"
57 #include "ir-emit.h"
59 #include "jit-icalls.h"
60 #include "debugger-agent.h"
62 #define BRANCH_COST 100
63 #define INLINE_LENGTH_LIMIT 20
64 #define INLINE_FAILURE do {\
65 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
66 goto inline_failure;\
67 } while (0)
68 #define CHECK_CFG_EXCEPTION do {\
69 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
70 goto exception_exit;\
71 } while (0)
72 #define METHOD_ACCESS_FAILURE do { \
73 char *method_fname = mono_method_full_name (method, TRUE); \
74 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
75 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
76 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
77 g_free (method_fname); \
78 g_free (cil_method_fname); \
79 goto exception_exit; \
80 } while (0)
81 #define FIELD_ACCESS_FAILURE do { \
82 char *method_fname = mono_method_full_name (method, TRUE); \
83 char *field_fname = mono_field_full_name (field); \
84 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
85 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
86 g_free (method_fname); \
87 g_free (field_fname); \
88 goto exception_exit; \
89 } while (0)
90 #define GENERIC_SHARING_FAILURE(opcode) do { \
91 if (cfg->generic_sharing_context) { \
92 if (cfg->verbose_level > 2) \
93 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
94 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
95 goto exception_exit; \
96 } \
97 } while (0)
99 /* Determine whether 'ins' represents a load of the 'this' argument */
100 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
102 static int ldind_to_load_membase (int opcode);
103 static int stind_to_store_membase (int opcode);
105 int mono_op_to_op_imm (int opcode);
106 int mono_op_to_op_imm_noemul (int opcode);
108 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
109 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
110 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
112 /* helper methods signature */
113 extern MonoMethodSignature *helper_sig_class_init_trampoline;
114 extern MonoMethodSignature *helper_sig_domain_get;
115 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
117 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 /*
120 * Instruction metadata
121 */
122 #ifdef MINI_OP
123 #undef MINI_OP
124 #endif
125 #ifdef MINI_OP3
126 #undef MINI_OP3
127 #endif
128 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
129 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
130 #define NONE ' '
131 #define IREG 'i'
132 #define FREG 'f'
133 #define VREG 'v'
134 #define XREG 'x'
135 #if SIZEOF_REGISTER == 8
136 #define LREG IREG
137 #else
138 #define LREG 'l'
139 #endif
140 /* keep in sync with the enum in mini.h */
141 const char
142 ins_info[] = {
143 #include "mini-ops.h"
145 #undef MINI_OP
146 #undef MINI_OP3
148 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
149 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
150 /*
151 * This should contain the index of the last sreg + 1. This is not the same
152 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
153 */
154 const gint8 ins_sreg_counts[] = {
155 #include "mini-ops.h"
157 #undef MINI_OP
158 #undef MINI_OP3
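/*
 * Illustrative sketch (not part of the original source): the two tables above
 * are built with the "X-macro" technique -- mini-ops.h is included twice, and
 * each time MINI_OP/MINI_OP3 expand to a different per-opcode datum.  A
 * hypothetical mini-ops.h entry and what it contributes to each table would
 * look roughly like this:
 */
#if 0
MINI_OP (OP_IADD, "int_add", IREG, IREG, IREG)
/* 1st inclusion (ins_info):        'i', 'i', 'i', ' ',   dreg/sreg1/sreg2 kinds */
/* 2nd inclusion (ins_sreg_counts): 2,                    two source registers   */
#endif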
160 extern GHashTable *jit_icall_name_hash;
162 #define MONO_INIT_VARINFO(vi,id) do { \
163 (vi)->range.first_use.pos.bid = 0xffff; \
164 (vi)->reg = -1; \
165 (vi)->idx = (id); \
166 } while (0)
168 void
169 mono_inst_set_src_registers (MonoInst *ins, int *regs)
171 ins->sreg1 = regs [0];
172 ins->sreg2 = regs [1];
173 ins->sreg3 = regs [2];
176 guint32
177 mono_alloc_ireg (MonoCompile *cfg)
179 return alloc_ireg (cfg);
182 guint32
183 mono_alloc_freg (MonoCompile *cfg)
185 return alloc_freg (cfg);
188 guint32
189 mono_alloc_preg (MonoCompile *cfg)
191 return alloc_preg (cfg);
194 guint32
195 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
197 return alloc_dreg (cfg, stack_type);
200 guint
201 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
203 if (type->byref)
204 return OP_MOVE;
206 handle_enum:
207 switch (type->type) {
208 case MONO_TYPE_I1:
209 case MONO_TYPE_U1:
210 case MONO_TYPE_BOOLEAN:
211 return OP_MOVE;
212 case MONO_TYPE_I2:
213 case MONO_TYPE_U2:
214 case MONO_TYPE_CHAR:
215 return OP_MOVE;
216 case MONO_TYPE_I4:
217 case MONO_TYPE_U4:
218 return OP_MOVE;
219 case MONO_TYPE_I:
220 case MONO_TYPE_U:
221 case MONO_TYPE_PTR:
222 case MONO_TYPE_FNPTR:
223 return OP_MOVE;
224 case MONO_TYPE_CLASS:
225 case MONO_TYPE_STRING:
226 case MONO_TYPE_OBJECT:
227 case MONO_TYPE_SZARRAY:
228 case MONO_TYPE_ARRAY:
229 return OP_MOVE;
230 case MONO_TYPE_I8:
231 case MONO_TYPE_U8:
232 #if SIZEOF_REGISTER == 8
233 return OP_MOVE;
234 #else
235 return OP_LMOVE;
236 #endif
237 case MONO_TYPE_R4:
238 return OP_FMOVE;
239 case MONO_TYPE_R8:
240 return OP_FMOVE;
241 case MONO_TYPE_VALUETYPE:
242 if (type->data.klass->enumtype) {
243 type = mono_class_enum_basetype (type->data.klass);
244 goto handle_enum;
246 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
247 return OP_XMOVE;
248 return OP_VMOVE;
249 case MONO_TYPE_TYPEDBYREF:
250 return OP_VMOVE;
251 case MONO_TYPE_GENERICINST:
252 type = &type->data.generic_class->container_class->byval_arg;
253 goto handle_enum;
254 case MONO_TYPE_VAR:
255 case MONO_TYPE_MVAR:
256 g_assert (cfg->generic_sharing_context);
257 return OP_MOVE;
258 default:
259 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
261 return -1;
264 void
265 mono_print_bb (MonoBasicBlock *bb, const char *msg)
267 int i;
268 MonoInst *tree;
270 printf ("\n%s %d: [IN: ", msg, bb->block_num);
271 for (i = 0; i < bb->in_count; ++i)
272 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
273 printf (", OUT: ");
274 for (i = 0; i < bb->out_count; ++i)
275 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
276 printf (" ]\n");
277 for (tree = bb->code; tree; tree = tree->next)
278 mono_print_ins_index (-1, tree);
281 /*
282 * Can't put this at the beginning, since other files reference stuff from this
283 * file.
284 */
285 #ifndef DISABLE_JIT
287 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
289 #define GET_BBLOCK(cfg,tblock,ip) do { \
290 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
291 if (!(tblock)) { \
292 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
293 NEW_BBLOCK (cfg, (tblock)); \
294 (tblock)->cil_code = (ip); \
295 ADD_BBLOCK (cfg, (tblock)); \
297 } while (0)
299 #if defined(TARGET_X86) || defined(TARGET_AMD64)
300 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
301 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
302 (dest)->dreg = alloc_preg ((cfg)); \
303 (dest)->sreg1 = (sr1); \
304 (dest)->sreg2 = (sr2); \
305 (dest)->inst_imm = (imm); \
306 (dest)->backend.shift_amount = (shift); \
307 MONO_ADD_INS ((cfg)->cbb, (dest)); \
308 } while (0)
309 #endif
311 #if SIZEOF_REGISTER == 8
312 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
313 /* FIXME: Need to add many more cases */ \
314 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
315 MonoInst *widen; \
316 int dr = alloc_preg (cfg); \
317 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
318 (ins)->sreg2 = widen->dreg; \
320 } while (0)
321 #else
322 #define ADD_WIDEN_OP(ins, arg1, arg2)
323 #endif
325 #define ADD_BINOP(op) do { \
326 MONO_INST_NEW (cfg, ins, (op)); \
327 sp -= 2; \
328 ins->sreg1 = sp [0]->dreg; \
329 ins->sreg2 = sp [1]->dreg; \
330 type_from_op (ins, sp [0], sp [1]); \
331 CHECK_TYPE (ins); \
332 /* Have to insert a widening op */ \
333 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
334 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
335 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
337 } while (0)
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
341 sp--; \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
344 CHECK_TYPE (ins); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
347 *sp++ = mono_decompose_opcode (cfg, ins); \
348 } while (0)
350 #define ADD_BINCOND(next_block) do { \
351 MonoInst *cmp; \
352 sp -= 2; \
353 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
354 cmp->sreg1 = sp [0]->dreg; \
355 cmp->sreg2 = sp [1]->dreg; \
356 type_from_op (cmp, sp [0], sp [1]); \
357 CHECK_TYPE (cmp); \
358 type_from_op (ins, sp [0], sp [1]); \
359 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
360 GET_BBLOCK (cfg, tblock, target); \
361 link_bblock (cfg, bblock, tblock); \
362 ins->inst_true_bb = tblock; \
363 if ((next_block)) { \
364 link_bblock (cfg, bblock, (next_block)); \
365 ins->inst_false_bb = (next_block); \
366 start_new_bblock = 1; \
367 } else { \
368 GET_BBLOCK (cfg, tblock, ip); \
369 link_bblock (cfg, bblock, tblock); \
370 ins->inst_false_bb = tblock; \
371 start_new_bblock = 2; \
373 if (sp != stack_start) { \
374 handle_stack_args (cfg, stack_start, sp - stack_start); \
375 CHECK_UNVERIFIABLE (cfg); \
377 MONO_ADD_INS (bblock, cmp); \
378 MONO_ADD_INS (bblock, ins); \
379 } while (0)
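/*
 * Illustrative sketch (an assumption, not copied from this file): ADD_BINOP and
 * ADD_UNOP are meant to be invoked from the CIL decode loop with the raw IL
 * opcode, roughly like this:
 */
#if 0
case CEE_ADD:
case CEE_SUB:
case CEE_MUL:
	ADD_BINOP (*ip);
	ip++;
	break;
#endif
/*
 * The macro pops the two operands from the evaluation stack (sp), lets
 * type_from_op () rewrite the generic CEE_* opcode into its type-specific
 * OP_IADD/OP_LADD/OP_FADD/... form, allocates a destination vreg and pushes
 * the (possibly decomposed) result back on the stack.
 */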
381 /**
382 * link_bblock: Links two basic blocks
383 *
384 * Links two basic blocks in the control flow graph: the 'from'
385 * argument is the starting block and the 'to' argument is the block
386 * control flow reaches after 'from'.
387 */
388 static void
389 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
391 MonoBasicBlock **newa;
392 int i, found;
394 #if 0
395 if (from->cil_code) {
396 if (to->cil_code)
397 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
398 else
399 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
400 } else {
401 if (to->cil_code)
402 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
403 else
404 printf ("edge from entry to exit\n");
406 #endif
408 found = FALSE;
409 for (i = 0; i < from->out_count; ++i) {
410 if (to == from->out_bb [i]) {
411 found = TRUE;
412 break;
415 if (!found) {
416 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
417 for (i = 0; i < from->out_count; ++i) {
418 newa [i] = from->out_bb [i];
420 newa [i] = to;
421 from->out_count++;
422 from->out_bb = newa;
425 found = FALSE;
426 for (i = 0; i < to->in_count; ++i) {
427 if (from == to->in_bb [i]) {
428 found = TRUE;
429 break;
432 if (!found) {
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
434 for (i = 0; i < to->in_count; ++i) {
435 newa [i] = to->in_bb [i];
437 newa [i] = from;
438 to->in_count++;
439 to->in_bb = newa;
443 void
444 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
446 link_bblock (cfg, from, to);
449 /**
450 * mono_find_block_region:
451 *
452 * We mark each basic block with a region ID. We use that to avoid BB
453 * optimizations when blocks are in different regions.
454 *
455 * Returns:
456 * A region token that encodes where this region is, and information
457 * about the clause owner for this block.
458 *
459 * The region encodes the try/catch/filter clause that owns this block
460 * as well as the type. -1 is a special value that represents a block
461 * that is in none of try/catch/filter.
462 */
463 static int
464 mono_find_block_region (MonoCompile *cfg, int offset)
466 MonoMethod *method = cfg->method;
467 MonoMethodHeader *header = mono_method_get_header (method);
468 MonoExceptionClause *clause;
469 int i;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
482 else
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
490 return -1;
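/*
 * Illustrative sketch (not part of the original source): decoding the region
 * token returned above.  The clause index is biased by one and kept in the
 * upper bits, while the low byte holds the MONO_REGION_* kind and clause
 * flags, so hypothetical helpers could recover both like this:
 */
#if 0
static int
example_region_clause_index (int region)
{
	/* -1 means the block is in none of try/catch/filter */
	return region == -1 ? -1 : (region >> 8) - 1;
}

static int
example_region_flags (int region)
{
	return region & 0xff;
}
#endif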
493 static GList*
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethod *method = cfg->method;
497 MonoMethodHeader *header = mono_method_get_header (method);
498 MonoExceptionClause *clause;
499 MonoBasicBlock *handler;
500 int i;
501 GList *res = NULL;
503 for (i = 0; i < header->num_clauses; ++i) {
504 clause = &header->clauses [i];
505 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
506 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
507 if (clause->flags == type) {
508 handler = cfg->cil_offset_to_bb [clause->handler_offset];
509 g_assert (handler);
510 res = g_list_append (res, handler);
514 return res;
517 static void
518 mono_create_spvar_for_region (MonoCompile *cfg, int region)
520 MonoInst *var;
522 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
523 if (var)
524 return;
526 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
527 /* prevent it from being register allocated */
528 var->flags |= MONO_INST_INDIRECT;
530 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
533 static MonoInst *
534 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
536 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
539 static MonoInst*
540 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
542 MonoInst *var;
544 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
545 if (var)
546 return var;
548 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
554 return var;
557 /*
558 * Returns the type used in the eval stack when @type is loaded.
559 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
560 */
561 void
562 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
564 MonoClass *klass;
566 inst->klass = klass = mono_class_from_mono_type (type);
567 if (type->byref) {
568 inst->type = STACK_MP;
569 return;
572 handle_enum:
573 switch (type->type) {
574 case MONO_TYPE_VOID:
575 inst->type = STACK_INV;
576 return;
577 case MONO_TYPE_I1:
578 case MONO_TYPE_U1:
579 case MONO_TYPE_BOOLEAN:
580 case MONO_TYPE_I2:
581 case MONO_TYPE_U2:
582 case MONO_TYPE_CHAR:
583 case MONO_TYPE_I4:
584 case MONO_TYPE_U4:
585 inst->type = STACK_I4;
586 return;
587 case MONO_TYPE_I:
588 case MONO_TYPE_U:
589 case MONO_TYPE_PTR:
590 case MONO_TYPE_FNPTR:
591 inst->type = STACK_PTR;
592 return;
593 case MONO_TYPE_CLASS:
594 case MONO_TYPE_STRING:
595 case MONO_TYPE_OBJECT:
596 case MONO_TYPE_SZARRAY:
597 case MONO_TYPE_ARRAY:
598 inst->type = STACK_OBJ;
599 return;
600 case MONO_TYPE_I8:
601 case MONO_TYPE_U8:
602 inst->type = STACK_I8;
603 return;
604 case MONO_TYPE_R4:
605 case MONO_TYPE_R8:
606 inst->type = STACK_R8;
607 return;
608 case MONO_TYPE_VALUETYPE:
609 if (type->data.klass->enumtype) {
610 type = mono_class_enum_basetype (type->data.klass);
611 goto handle_enum;
612 } else {
613 inst->klass = klass;
614 inst->type = STACK_VTYPE;
615 return;
617 case MONO_TYPE_TYPEDBYREF:
618 inst->klass = mono_defaults.typed_reference_class;
619 inst->type = STACK_VTYPE;
620 return;
621 case MONO_TYPE_GENERICINST:
622 type = &type->data.generic_class->container_class->byval_arg;
623 goto handle_enum;
624 case MONO_TYPE_VAR :
625 case MONO_TYPE_MVAR :
626 /* FIXME: all the arguments must be references for now,
627 * later look inside cfg and see if the arg num is
628 * really a reference
629 */
630 g_assert (cfg->generic_sharing_context);
631 inst->type = STACK_OBJ;
632 return;
633 default:
634 g_error ("unknown type 0x%02x in eval stack type", type->type);
638 /*
639 * The following tables are used to quickly validate the IL code in type_from_op ().
640 */
641 static const char
642 bin_num_table [STACK_MAX] [STACK_MAX] = {
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
653 static const char
654 neg_table [] = {
655 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
658 /* reduce the size of this table */
659 static const char
660 bin_int_table [STACK_MAX] [STACK_MAX] = {
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
671 static const char
672 bin_comp_table [STACK_MAX] [STACK_MAX] = {
673 /* Inv i L p F & O vt */
674 {0},
675 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
676 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
677 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
678 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
679 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
680 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
681 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
684 /* reduce the size of this table */
685 static const char
686 shift_table [STACK_MAX] [STACK_MAX] = {
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 /*
698 * Tables to map from the non-specific opcode to the matching
699 * type-specific opcode.
700 */
701 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
702 static const guint16
703 binops_op_map [STACK_MAX] = {
704 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
707 /* handles from CEE_NEG to CEE_CONV_U8 */
708 static const guint16
709 unops_op_map [STACK_MAX] = {
710 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
713 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
714 static const guint16
715 ovfops_op_map [STACK_MAX] = {
716 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
719 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
720 static const guint16
721 ovf2ops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
725 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
726 static const guint16
727 ovf3ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
731 /* handles from CEE_BEQ to CEE_BLT_UN */
732 static const guint16
733 beqops_op_map [STACK_MAX] = {
734 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
737 /* handles from CEE_CEQ to CEE_CLT_UN */
738 static const guint16
739 ceqops_op_map [STACK_MAX] = {
740 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
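/*
 * Illustrative note (a sketch, not from the original source): the tables above
 * hold opcode *offsets*, not opcodes.  type_from_op () below turns a generic
 * CIL opcode into its type-specific IR form simply by adding the offset picked
 * by the operand's stack type, e.g. for an i4 + i4 addition:
 */
#if 0
/* ins->opcode is CEE_ADD, src1->type == src2->type == STACK_I4 */
ins->type = bin_num_table [STACK_I4] [STACK_I4];   /* STACK_I4 */
ins->opcode += binops_op_map [STACK_I4];           /* CEE_ADD + (OP_IADD - CEE_ADD) == OP_IADD */
#endif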
743 /*
744 * Sets ins->type (the type on the eval stack) according to the
745 * type of the opcode and the arguments to it.
746 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
747 *
748 * FIXME: this function sets ins->type unconditionally in some cases, but
749 * it should set it to invalid for some types (a conv.x on an object)
750 */
751 static void
752 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
754 switch (ins->opcode) {
755 /* binops */
756 case CEE_ADD:
757 case CEE_SUB:
758 case CEE_MUL:
759 case CEE_DIV:
760 case CEE_REM:
761 /* FIXME: check unverifiable args for STACK_MP */
762 ins->type = bin_num_table [src1->type] [src2->type];
763 ins->opcode += binops_op_map [ins->type];
764 break;
765 case CEE_DIV_UN:
766 case CEE_REM_UN:
767 case CEE_AND:
768 case CEE_OR:
769 case CEE_XOR:
770 ins->type = bin_int_table [src1->type] [src2->type];
771 ins->opcode += binops_op_map [ins->type];
772 break;
773 case CEE_SHL:
774 case CEE_SHR:
775 case CEE_SHR_UN:
776 ins->type = shift_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
778 break;
779 case OP_COMPARE:
780 case OP_LCOMPARE:
781 case OP_ICOMPARE:
782 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
783 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
784 ins->opcode = OP_LCOMPARE;
785 else if (src1->type == STACK_R8)
786 ins->opcode = OP_FCOMPARE;
787 else
788 ins->opcode = OP_ICOMPARE;
789 break;
790 case OP_ICOMPARE_IMM:
791 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
792 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
793 ins->opcode = OP_LCOMPARE_IMM;
794 break;
795 case CEE_BEQ:
796 case CEE_BGE:
797 case CEE_BGT:
798 case CEE_BLE:
799 case CEE_BLT:
800 case CEE_BNE_UN:
801 case CEE_BGE_UN:
802 case CEE_BGT_UN:
803 case CEE_BLE_UN:
804 case CEE_BLT_UN:
805 ins->opcode += beqops_op_map [src1->type];
806 break;
807 case OP_CEQ:
808 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
809 ins->opcode += ceqops_op_map [src1->type];
810 break;
811 case OP_CGT:
812 case OP_CGT_UN:
813 case OP_CLT:
814 case OP_CLT_UN:
815 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
816 ins->opcode += ceqops_op_map [src1->type];
817 break;
818 /* unops */
819 case CEE_NEG:
820 ins->type = neg_table [src1->type];
821 ins->opcode += unops_op_map [ins->type];
822 break;
823 case CEE_NOT:
824 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
825 ins->type = src1->type;
826 else
827 ins->type = STACK_INV;
828 ins->opcode += unops_op_map [ins->type];
829 break;
830 case CEE_CONV_I1:
831 case CEE_CONV_I2:
832 case CEE_CONV_I4:
833 case CEE_CONV_U4:
834 ins->type = STACK_I4;
835 ins->opcode += unops_op_map [src1->type];
836 break;
837 case CEE_CONV_R_UN:
838 ins->type = STACK_R8;
839 switch (src1->type) {
840 case STACK_I4:
841 case STACK_PTR:
842 ins->opcode = OP_ICONV_TO_R_UN;
843 break;
844 case STACK_I8:
845 ins->opcode = OP_LCONV_TO_R_UN;
846 break;
848 break;
849 case CEE_CONV_OVF_I1:
850 case CEE_CONV_OVF_U1:
851 case CEE_CONV_OVF_I2:
852 case CEE_CONV_OVF_U2:
853 case CEE_CONV_OVF_I4:
854 case CEE_CONV_OVF_U4:
855 ins->type = STACK_I4;
856 ins->opcode += ovf3ops_op_map [src1->type];
857 break;
858 case CEE_CONV_OVF_I_UN:
859 case CEE_CONV_OVF_U_UN:
860 ins->type = STACK_PTR;
861 ins->opcode += ovf2ops_op_map [src1->type];
862 break;
863 case CEE_CONV_OVF_I1_UN:
864 case CEE_CONV_OVF_I2_UN:
865 case CEE_CONV_OVF_I4_UN:
866 case CEE_CONV_OVF_U1_UN:
867 case CEE_CONV_OVF_U2_UN:
868 case CEE_CONV_OVF_U4_UN:
869 ins->type = STACK_I4;
870 ins->opcode += ovf2ops_op_map [src1->type];
871 break;
872 case CEE_CONV_U:
873 ins->type = STACK_PTR;
874 switch (src1->type) {
875 case STACK_I4:
876 ins->opcode = OP_ICONV_TO_U;
877 break;
878 case STACK_PTR:
879 case STACK_MP:
880 #if SIZEOF_REGISTER == 8
881 ins->opcode = OP_LCONV_TO_U;
882 #else
883 ins->opcode = OP_MOVE;
884 #endif
885 break;
886 case STACK_I8:
887 ins->opcode = OP_LCONV_TO_U;
888 break;
889 case STACK_R8:
890 ins->opcode = OP_FCONV_TO_U;
891 break;
893 break;
894 case CEE_CONV_I8:
895 case CEE_CONV_U8:
896 ins->type = STACK_I8;
897 ins->opcode += unops_op_map [src1->type];
898 break;
899 case CEE_CONV_OVF_I8:
900 case CEE_CONV_OVF_U8:
901 ins->type = STACK_I8;
902 ins->opcode += ovf3ops_op_map [src1->type];
903 break;
904 case CEE_CONV_OVF_U8_UN:
905 case CEE_CONV_OVF_I8_UN:
906 ins->type = STACK_I8;
907 ins->opcode += ovf2ops_op_map [src1->type];
908 break;
909 case CEE_CONV_R4:
910 case CEE_CONV_R8:
911 ins->type = STACK_R8;
912 ins->opcode += unops_op_map [src1->type];
913 break;
914 case OP_CKFINITE:
915 ins->type = STACK_R8;
916 break;
917 case CEE_CONV_U2:
918 case CEE_CONV_U1:
919 ins->type = STACK_I4;
920 ins->opcode += ovfops_op_map [src1->type];
921 break;
922 case CEE_CONV_I:
923 case CEE_CONV_OVF_I:
924 case CEE_CONV_OVF_U:
925 ins->type = STACK_PTR;
926 ins->opcode += ovfops_op_map [src1->type];
927 break;
928 case CEE_ADD_OVF:
929 case CEE_ADD_OVF_UN:
930 case CEE_MUL_OVF:
931 case CEE_MUL_OVF_UN:
932 case CEE_SUB_OVF:
933 case CEE_SUB_OVF_UN:
934 ins->type = bin_num_table [src1->type] [src2->type];
935 ins->opcode += ovfops_op_map [src1->type];
936 if (ins->type == STACK_R8)
937 ins->type = STACK_INV;
938 break;
939 case OP_LOAD_MEMBASE:
940 ins->type = STACK_PTR;
941 break;
942 case OP_LOADI1_MEMBASE:
943 case OP_LOADU1_MEMBASE:
944 case OP_LOADI2_MEMBASE:
945 case OP_LOADU2_MEMBASE:
946 case OP_LOADI4_MEMBASE:
947 case OP_LOADU4_MEMBASE:
948 ins->type = STACK_PTR;
949 break;
950 case OP_LOADI8_MEMBASE:
951 ins->type = STACK_I8;
952 break;
953 case OP_LOADR4_MEMBASE:
954 case OP_LOADR8_MEMBASE:
955 ins->type = STACK_R8;
956 break;
957 default:
958 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
959 break;
962 if (ins->type == STACK_MP)
963 ins->klass = mono_defaults.object_class;
966 static const char
967 ldind_type [] = {
968 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
971 #if 0
973 static const char
974 param_table [STACK_MAX] [STACK_MAX] = {
975 {0},
978 static int
979 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
980 int i;
982 if (sig->hasthis) {
983 switch (args->type) {
984 case STACK_I4:
985 case STACK_I8:
986 case STACK_R8:
987 case STACK_VTYPE:
988 case STACK_INV:
989 return 0;
991 args++;
993 for (i = 0; i < sig->param_count; ++i) {
994 switch (args [i].type) {
995 case STACK_INV:
996 return 0;
997 case STACK_MP:
998 if (!sig->params [i]->byref)
999 return 0;
1000 continue;
1001 case STACK_OBJ:
1002 if (sig->params [i]->byref)
1003 return 0;
1004 switch (sig->params [i]->type) {
1005 case MONO_TYPE_CLASS:
1006 case MONO_TYPE_STRING:
1007 case MONO_TYPE_OBJECT:
1008 case MONO_TYPE_SZARRAY:
1009 case MONO_TYPE_ARRAY:
1010 break;
1011 default:
1012 return 0;
1014 continue;
1015 case STACK_R8:
1016 if (sig->params [i]->byref)
1017 return 0;
1018 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1019 return 0;
1020 continue;
1021 case STACK_PTR:
1022 case STACK_I4:
1023 case STACK_I8:
1024 case STACK_VTYPE:
1025 break;
1027 /*if (!param_table [args [i].type] [sig->params [i]->type])
1028 return 0;*/
1030 return 1;
1032 #endif
1034 /*
1035 * When we need a pointer to the current domain many times in a method, we
1036 * call mono_domain_get() once and we store the result in a local variable.
1037 * This function returns the variable that represents the MonoDomain*.
1038 */
1039 inline static MonoInst *
1040 mono_get_domainvar (MonoCompile *cfg)
1042 if (!cfg->domainvar)
1043 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1044 return cfg->domainvar;
1047 /*
1048 * The got_var contains the address of the Global Offset Table when AOT
1049 * compiling.
1050 */
1051 MonoInst *
1052 mono_get_got_var (MonoCompile *cfg)
1054 #ifdef MONO_ARCH_NEED_GOT_VAR
1055 if (!cfg->compile_aot)
1056 return NULL;
1057 if (!cfg->got_var) {
1058 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1060 return cfg->got_var;
1061 #else
1062 return NULL;
1063 #endif
1066 static MonoInst *
1067 mono_get_vtable_var (MonoCompile *cfg)
1069 g_assert (cfg->generic_sharing_context);
1071 if (!cfg->rgctx_var) {
1072 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1073 /* force the var to be stack allocated */
1074 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1077 return cfg->rgctx_var;
1080 static MonoType*
1081 type_from_stack_type (MonoInst *ins) {
1082 switch (ins->type) {
1083 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1084 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1085 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1086 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1087 case STACK_MP:
1088 return &ins->klass->this_arg;
1089 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1090 case STACK_VTYPE: return &ins->klass->byval_arg;
1091 default:
1092 g_error ("stack type %d to monotype not handled\n", ins->type);
1094 return NULL;
1097 static G_GNUC_UNUSED int
1098 type_to_stack_type (MonoType *t)
1100 switch (mono_type_get_underlying_type (t)->type) {
1101 case MONO_TYPE_I1:
1102 case MONO_TYPE_U1:
1103 case MONO_TYPE_BOOLEAN:
1104 case MONO_TYPE_I2:
1105 case MONO_TYPE_U2:
1106 case MONO_TYPE_CHAR:
1107 case MONO_TYPE_I4:
1108 case MONO_TYPE_U4:
1109 return STACK_I4;
1110 case MONO_TYPE_I:
1111 case MONO_TYPE_U:
1112 case MONO_TYPE_PTR:
1113 case MONO_TYPE_FNPTR:
1114 return STACK_PTR;
1115 case MONO_TYPE_CLASS:
1116 case MONO_TYPE_STRING:
1117 case MONO_TYPE_OBJECT:
1118 case MONO_TYPE_SZARRAY:
1119 case MONO_TYPE_ARRAY:
1120 return STACK_OBJ;
1121 case MONO_TYPE_I8:
1122 case MONO_TYPE_U8:
1123 return STACK_I8;
1124 case MONO_TYPE_R4:
1125 case MONO_TYPE_R8:
1126 return STACK_R8;
1127 case MONO_TYPE_VALUETYPE:
1128 case MONO_TYPE_TYPEDBYREF:
1129 return STACK_VTYPE;
1130 case MONO_TYPE_GENERICINST:
1131 if (mono_type_generic_inst_is_valuetype (t))
1132 return STACK_VTYPE;
1133 else
1134 return STACK_OBJ;
1135 break;
1136 default:
1137 g_assert_not_reached ();
1140 return -1;
1143 static MonoClass*
1144 array_access_to_klass (int opcode)
1146 switch (opcode) {
1147 case CEE_LDELEM_U1:
1148 return mono_defaults.byte_class;
1149 case CEE_LDELEM_U2:
1150 return mono_defaults.uint16_class;
1151 case CEE_LDELEM_I:
1152 case CEE_STELEM_I:
1153 return mono_defaults.int_class;
1154 case CEE_LDELEM_I1:
1155 case CEE_STELEM_I1:
1156 return mono_defaults.sbyte_class;
1157 case CEE_LDELEM_I2:
1158 case CEE_STELEM_I2:
1159 return mono_defaults.int16_class;
1160 case CEE_LDELEM_I4:
1161 case CEE_STELEM_I4:
1162 return mono_defaults.int32_class;
1163 case CEE_LDELEM_U4:
1164 return mono_defaults.uint32_class;
1165 case CEE_LDELEM_I8:
1166 case CEE_STELEM_I8:
1167 return mono_defaults.int64_class;
1168 case CEE_LDELEM_R4:
1169 case CEE_STELEM_R4:
1170 return mono_defaults.single_class;
1171 case CEE_LDELEM_R8:
1172 case CEE_STELEM_R8:
1173 return mono_defaults.double_class;
1174 case CEE_LDELEM_REF:
1175 case CEE_STELEM_REF:
1176 return mono_defaults.object_class;
1177 default:
1178 g_assert_not_reached ();
1180 return NULL;
1183 /*
1184 * We try to share variables when possible
1185 */
1186 static MonoInst *
1187 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 MonoInst *res;
1190 int pos, vnum;
1192 /* inlining can result in deeper stacks */
1193 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1194 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1196 pos = ins->type - 1 + slot * STACK_MAX;
1198 switch (ins->type) {
1199 case STACK_I4:
1200 case STACK_I8:
1201 case STACK_R8:
1202 case STACK_PTR:
1203 case STACK_MP:
1204 case STACK_OBJ:
1205 if ((vnum = cfg->intvars [pos]))
1206 return cfg->varinfo [vnum];
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 cfg->intvars [pos] = res->inst_c0;
1209 break;
1210 default:
1211 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1213 return res;
1216 static void
1217 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1219 /*
1220 * Don't use this if a generic_context is set, since that means AOT can't
1221 * look up the method using just the image+token.
1222 * table == 0 means this is a reference made from a wrapper.
1223 */
1224 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1225 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1226 jump_info_token->image = image;
1227 jump_info_token->token = token;
1228 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1232 /*
1233 * This function is called to handle items that are left on the evaluation stack
1234 * at basic block boundaries. What happens is that we save the values to local variables
1235 * and we reload them later when first entering the target basic block (with the
1236 * handle_loaded_temps () function).
1237 * A single join point will use the same variables (stored in the array bb->out_stack or
1238 * bb->in_stack, if the basic block is before or after the join point).
1239 *
1240 * This function needs to be called _before_ emitting the last instruction of
1241 * the bb (i.e. before emitting a branch).
1242 * If the stack merge fails at a join point, cfg->unverifiable is set.
1243 */
1244 static void
1245 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1247 int i, bindex;
1248 MonoBasicBlock *bb = cfg->cbb;
1249 MonoBasicBlock *outb;
1250 MonoInst *inst, **locals;
1251 gboolean found;
1253 if (!count)
1254 return;
1255 if (cfg->verbose_level > 3)
1256 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1257 if (!bb->out_scount) {
1258 bb->out_scount = count;
1259 //printf ("bblock %d has out:", bb->block_num);
1260 found = FALSE;
1261 for (i = 0; i < bb->out_count; ++i) {
1262 outb = bb->out_bb [i];
1263 /* exception handlers are linked, but they should not be considered for stack args */
1264 if (outb->flags & BB_EXCEPTION_HANDLER)
1265 continue;
1266 //printf (" %d", outb->block_num);
1267 if (outb->in_stack) {
1268 found = TRUE;
1269 bb->out_stack = outb->in_stack;
1270 break;
1273 //printf ("\n");
1274 if (!found) {
1275 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1276 for (i = 0; i < count; ++i) {
1277 /*
1278 * try to reuse temps already allocated for this purpose, if they occupy the same
1279 * stack slot and if they are of the same type.
1280 * This won't cause conflicts since if 'local' is used to
1281 * store one of the values in the in_stack of a bblock, then
1282 * the same variable will be used for the same outgoing stack
1283 * slot as well.
1284 * This doesn't work when inlining methods, since the bblocks
1285 * in the inlined methods do not inherit their in_stack from
1286 * the bblock they are inlined to. See bug #58863 for an
1287 * example.
1288 */
1289 if (cfg->inlined_method)
1290 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1291 else
1292 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1297 for (i = 0; i < bb->out_count; ++i) {
1298 outb = bb->out_bb [i];
1299 /* exception handlers are linked, but they should not be considered for stack args */
1300 if (outb->flags & BB_EXCEPTION_HANDLER)
1301 continue;
1302 if (outb->in_scount) {
1303 if (outb->in_scount != bb->out_scount) {
1304 cfg->unverifiable = TRUE;
1305 return;
1307 continue; /* check they are the same locals */
1309 outb->in_scount = count;
1310 outb->in_stack = bb->out_stack;
1313 locals = bb->out_stack;
1314 cfg->cbb = bb;
1315 for (i = 0; i < count; ++i) {
1316 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1317 inst->cil_code = sp [i]->cil_code;
1318 sp [i] = locals [i];
1319 if (cfg->verbose_level > 3)
1320 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1323 /*
1324 * It is possible that the out bblocks already have in_stack assigned, and
1325 * the in_stacks differ. In this case, we will store to all the different
1326 * in_stacks.
1327 */
1329 found = TRUE;
1330 bindex = 0;
1331 while (found) {
1332 /* Find a bblock which has a different in_stack */
1333 found = FALSE;
1334 while (bindex < bb->out_count) {
1335 outb = bb->out_bb [bindex];
1336 /* exception handlers are linked, but they should not be considered for stack args */
1337 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 bindex++;
1339 continue;
1341 if (outb->in_stack != locals) {
1342 for (i = 0; i < count; ++i) {
1343 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1344 inst->cil_code = sp [i]->cil_code;
1345 sp [i] = locals [i];
1346 if (cfg->verbose_level > 3)
1347 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1349 locals = outb->in_stack;
1350 found = TRUE;
1351 break;
1353 bindex ++;
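/*
 * Illustrative sketch (not part of the original source): the situation
 * handle_stack_args () deals with is a join point reached with a non-empty
 * evaluation stack, e.g. the IL for "x = cond ? a : b":
 */
#if 0
	ldarg.0
	brtrue.s L1
	ldloc.0		/* one item left on the stack ...            */
	br.s L2
L1:	ldloc.1		/* ... on both incoming edges                */
L2:	stloc.2		/* join point: the edges must agree on it    */
#endif
/*
 * Both predecessor bblocks store their single stack item into the same shared
 * temporary (bb->out_stack == successor->in_stack), and the successor reloads
 * it, so the merged value reaches the stloc regardless of the path taken.
 */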
1358 /* Emit code which loads interface_offsets [klass->interface_id].
1359 * The array is stored in memory before the vtable.
1360 */
1361 static void
1362 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1364 if (cfg->compile_aot) {
1365 int ioffset_reg = alloc_preg (cfg);
1366 int iid_reg = alloc_preg (cfg);
1368 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1369 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1372 else {
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1377 /*
1378 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1379 * stored in "klass_reg" implements the interface "klass".
1380 */
1381 static void
1382 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1384 int ibitmap_reg = alloc_preg (cfg);
1385 int ibitmap_byte_reg = alloc_preg (cfg);
1387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1389 if (cfg->compile_aot) {
1390 int iid_reg = alloc_preg (cfg);
1391 int shifted_iid_reg = alloc_preg (cfg);
1392 int ibitmap_byte_address_reg = alloc_preg (cfg);
1393 int masked_iid_reg = alloc_preg (cfg);
1394 int iid_one_bit_reg = alloc_preg (cfg);
1395 int iid_bit_reg = alloc_preg (cfg);
1396 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1398 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1399 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1401 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1402 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1404 } else {
1405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
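/*
 * Illustrative sketch (not part of the original source): the IR emitted above
 * is the moral equivalent of this plain-C interface-bitmap test, where every
 * class carries one bit per interface id:
 */
#if 0
static gboolean
example_class_implements_iface (MonoClass *klass, guint32 iid)
{
	return (klass->interface_bitmap [iid >> 3] & (1 << (iid & 7))) != 0;
}
#endif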
1410 /*
1411 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1412 * stored in "vtable_reg" implements the interface "klass".
1413 */
1414 static void
1415 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1417 int ibitmap_reg = alloc_preg (cfg);
1418 int ibitmap_byte_reg = alloc_preg (cfg);
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1422 if (cfg->compile_aot) {
1423 int iid_reg = alloc_preg (cfg);
1424 int shifted_iid_reg = alloc_preg (cfg);
1425 int ibitmap_byte_address_reg = alloc_preg (cfg);
1426 int masked_iid_reg = alloc_preg (cfg);
1427 int iid_one_bit_reg = alloc_preg (cfg);
1428 int iid_bit_reg = alloc_preg (cfg);
1429 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1432 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1434 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1437 } else {
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1443 /*
1444 * Emit code which checks whether the interface id of @klass is smaller
1445 * than the value given by max_iid_reg.
1446 */
1447 static void
1448 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1449 MonoBasicBlock *false_target)
1451 if (cfg->compile_aot) {
1452 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1456 else
1457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1458 if (false_target)
1459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1460 else
1461 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1464 /* Same as above, but obtains max_iid from a vtable */
1465 static void
1466 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1467 MonoBasicBlock *false_target)
1469 int max_iid_reg = alloc_preg (cfg);
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1472 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1475 /* Same as above, but obtains max_iid from a klass */
1476 static void
1477 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1486 static void
1487 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1489 int idepth_reg = alloc_preg (cfg);
1490 int stypes_reg = alloc_preg (cfg);
1491 int stype = alloc_preg (cfg);
1493 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1500 if (cfg->compile_aot) {
1501 int const_reg = alloc_preg (cfg);
1502 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1503 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1504 } else {
1505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
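/*
 * Illustrative sketch (not part of the original source): the idepth/supertypes
 * check emitted above corresponds roughly to this plain-C subclass test using
 * the precomputed inheritance-depth table:
 */
#if 0
static gboolean
example_is_subclass_of (MonoClass *candidate, MonoClass *parent)
{
	return candidate->idepth >= parent->idepth &&
		candidate->supertypes [parent->idepth - 1] == parent;
}
#endif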
1510 static void
1511 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1513 int intf_reg = alloc_preg (cfg);
1515 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1516 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1518 if (true_target)
1519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1520 else
1521 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1524 /*
1525 * Variant of the above that takes a register to the class, not the vtable.
1526 */
1527 static void
1528 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1530 int intf_bit_reg = alloc_preg (cfg);
1532 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1533 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1535 if (true_target)
1536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1537 else
1538 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1541 static inline void
1542 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1544 if (cfg->compile_aot) {
1545 int const_reg = alloc_preg (cfg);
1546 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1547 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1548 } else {
1549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1551 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1554 static inline void
1555 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1557 if (cfg->compile_aot) {
1558 int const_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1561 } else {
1562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1567 static void
1568 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1570 if (klass->rank) {
1571 int rank_reg = alloc_preg (cfg);
1572 int eclass_reg = alloc_preg (cfg);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1576 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1577 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1579 if (klass->cast_class == mono_defaults.object_class) {
1580 int parent_reg = alloc_preg (cfg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1582 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1583 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1584 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1585 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1586 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1587 } else if (klass->cast_class == mono_defaults.enum_class) {
1588 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1589 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1590 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1591 } else {
1592 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1593 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1596 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1597 /* Check that the object is a vector too */
1598 int bounds_reg = alloc_preg (cfg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1601 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1603 } else {
1604 int idepth_reg = alloc_preg (cfg);
1605 int stypes_reg = alloc_preg (cfg);
1606 int stype = alloc_preg (cfg);
1608 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1609 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1611 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1615 mini_emit_class_check (cfg, stype, klass);
1619 static void
1620 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1622 int val_reg;
1624 g_assert (val == 0);
1626 if (align == 0)
1627 align = 4;
1629 if ((size <= 4) && (size <= align)) {
1630 switch (size) {
1631 case 1:
1632 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1633 return;
1634 case 2:
1635 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1636 return;
1637 case 4:
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1639 return;
1640 #if SIZEOF_REGISTER == 8
1641 case 8:
1642 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1643 return;
1644 #endif
1648 val_reg = alloc_preg (cfg);
1650 if (SIZEOF_REGISTER == 8)
1651 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1652 else
1653 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1655 if (align < 4) {
1656 /* This could be optimized further if necessary */
1657 while (size >= 1) {
1658 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1659 offset += 1;
1660 size -= 1;
1662 return;
1665 #if !NO_UNALIGNED_ACCESS
1666 if (SIZEOF_REGISTER == 8) {
1667 if (offset % 8) {
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1669 offset += 4;
1670 size -= 4;
1672 while (size >= 8) {
1673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1674 offset += 8;
1675 size -= 8;
1678 #endif
1680 while (size >= 4) {
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1682 offset += 4;
1683 size -= 4;
1685 while (size >= 2) {
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1687 offset += 2;
1688 size -= 2;
1690 while (size >= 1) {
1691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1692 offset += 1;
1693 size -= 1;
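/*
 * Illustrative sketch (an assumption, not from the original source): zeroing a
 * 16-byte, 8-aligned value type on a 64-bit target with the helper above comes
 * out as two immediate stores of a zeroed register rather than a memset() call
 * (dest_reg is a hypothetical destination register):
 */
#if 0
mini_emit_memset (cfg, dest_reg, 0, 16, 0, 8);
/* =>  OP_STOREI8_MEMBASE_REG [dest_reg + 0], val_reg
 *     OP_STOREI8_MEMBASE_REG [dest_reg + 8], val_reg    (val_reg holds 0) */
#endif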
1697 #endif /* DISABLE_JIT */
1699 void
1700 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1702 int cur_reg;
1704 if (align == 0)
1705 align = 4;
1707 if (align < 4) {
1708 /* This could be optimized further if necessary */
1709 while (size >= 1) {
1710 cur_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1713 doffset += 1;
1714 soffset += 1;
1715 size -= 1;
1719 #if !NO_UNALIGNED_ACCESS
1720 if (SIZEOF_REGISTER == 8) {
1721 while (size >= 8) {
1722 cur_reg = alloc_preg (cfg);
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1725 doffset += 8;
1726 soffset += 8;
1727 size -= 8;
1730 #endif
1732 while (size >= 4) {
1733 cur_reg = alloc_preg (cfg);
1734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1736 doffset += 4;
1737 soffset += 4;
1738 size -= 4;
1740 while (size >= 2) {
1741 cur_reg = alloc_preg (cfg);
1742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1744 doffset += 2;
1745 soffset += 2;
1746 size -= 2;
1748 while (size >= 1) {
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1752 doffset += 1;
1753 soffset += 1;
1754 size -= 1;
1758 #ifndef DISABLE_JIT
1760 static int
1761 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1763 if (type->byref)
1764 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1766 handle_enum:
1767 type = mini_get_basic_type_from_generic (gsctx, type);
1768 switch (type->type) {
1769 case MONO_TYPE_VOID:
1770 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1771 case MONO_TYPE_I1:
1772 case MONO_TYPE_U1:
1773 case MONO_TYPE_BOOLEAN:
1774 case MONO_TYPE_I2:
1775 case MONO_TYPE_U2:
1776 case MONO_TYPE_CHAR:
1777 case MONO_TYPE_I4:
1778 case MONO_TYPE_U4:
1779 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1780 case MONO_TYPE_I:
1781 case MONO_TYPE_U:
1782 case MONO_TYPE_PTR:
1783 case MONO_TYPE_FNPTR:
1784 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1785 case MONO_TYPE_CLASS:
1786 case MONO_TYPE_STRING:
1787 case MONO_TYPE_OBJECT:
1788 case MONO_TYPE_SZARRAY:
1789 case MONO_TYPE_ARRAY:
1790 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1791 case MONO_TYPE_I8:
1792 case MONO_TYPE_U8:
1793 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1794 case MONO_TYPE_R4:
1795 case MONO_TYPE_R8:
1796 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1797 case MONO_TYPE_VALUETYPE:
1798 if (type->data.klass->enumtype) {
1799 type = mono_class_enum_basetype (type->data.klass);
1800 goto handle_enum;
1801 } else
1802 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1803 case MONO_TYPE_TYPEDBYREF:
1804 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1805 case MONO_TYPE_GENERICINST:
1806 type = &type->data.generic_class->container_class->byval_arg;
1807 goto handle_enum;
1808 default:
1809 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1811 return -1;
1815 * target_type_is_incompatible:
1816 * @cfg: MonoCompile context
1818 * Check that the item @arg on the evaluation stack can be stored
1819 * in the target type (can be a local, or field, etc).
1820 * The cfg arg can be used to check if we need verification or just
1821 * validity checks.
1823 * Returns: a non-zero value if @arg cannot be stored in the target type.
1825 static int
1826 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1828 MonoType *simple_type;
1829 MonoClass *klass;
1831 if (target->byref) {
1832 /* FIXME: check that the pointed to types match */
1833 if (arg->type == STACK_MP)
1834 return arg->klass != mono_class_from_mono_type (target);
1835 if (arg->type == STACK_PTR)
1836 return 0;
1837 return 1;
1840 simple_type = mono_type_get_underlying_type (target);
1841 switch (simple_type->type) {
1842 case MONO_TYPE_VOID:
1843 return 1;
1844 case MONO_TYPE_I1:
1845 case MONO_TYPE_U1:
1846 case MONO_TYPE_BOOLEAN:
1847 case MONO_TYPE_I2:
1848 case MONO_TYPE_U2:
1849 case MONO_TYPE_CHAR:
1850 case MONO_TYPE_I4:
1851 case MONO_TYPE_U4:
1852 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1853 return 1;
1854 return 0;
1855 case MONO_TYPE_PTR:
1856 /* STACK_MP is needed when setting pinned locals */
1857 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1858 return 1;
1859 return 0;
1860 case MONO_TYPE_I:
1861 case MONO_TYPE_U:
1862 case MONO_TYPE_FNPTR:
1863 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1864 return 1;
1865 return 0;
1866 case MONO_TYPE_CLASS:
1867 case MONO_TYPE_STRING:
1868 case MONO_TYPE_OBJECT:
1869 case MONO_TYPE_SZARRAY:
1870 case MONO_TYPE_ARRAY:
1871 if (arg->type != STACK_OBJ)
1872 return 1;
1873 /* FIXME: check type compatibility */
1874 return 0;
1875 case MONO_TYPE_I8:
1876 case MONO_TYPE_U8:
1877 if (arg->type != STACK_I8)
1878 return 1;
1879 return 0;
1880 case MONO_TYPE_R4:
1881 case MONO_TYPE_R8:
1882 if (arg->type != STACK_R8)
1883 return 1;
1884 return 0;
1885 case MONO_TYPE_VALUETYPE:
1886 if (arg->type != STACK_VTYPE)
1887 return 1;
1888 klass = mono_class_from_mono_type (simple_type);
1889 if (klass != arg->klass)
1890 return 1;
1891 return 0;
1892 case MONO_TYPE_TYPEDBYREF:
1893 if (arg->type != STACK_VTYPE)
1894 return 1;
1895 klass = mono_class_from_mono_type (simple_type);
1896 if (klass != arg->klass)
1897 return 1;
1898 return 0;
1899 case MONO_TYPE_GENERICINST:
1900 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1901 if (arg->type != STACK_VTYPE)
1902 return 1;
1903 klass = mono_class_from_mono_type (simple_type);
1904 if (klass != arg->klass)
1905 return 1;
1906 return 0;
1907 } else {
1908 if (arg->type != STACK_OBJ)
1909 return 1;
1910 /* FIXME: check type compatibility */
1911 return 0;
1913 case MONO_TYPE_VAR:
1914 case MONO_TYPE_MVAR:
1915 /* FIXME: all the arguments must be references for now;
1916 * later, look inside cfg and see if the arg num is
1917 * really a reference
1919 g_assert (cfg->generic_sharing_context);
1920 if (arg->type != STACK_OBJ)
1921 return 1;
1922 return 0;
1923 default:
1924 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1926 return 1;
1930 * Prepare arguments for passing to a function call.
1931 * Return a non-zero value if the arguments can't be passed to the given
1932 * signature.
1933 * The type checks are not yet complete and some conversions may need
1934 * casts on 32 or 64 bit architectures.
1936 * FIXME: implement this using target_type_is_incompatible ()
1938 static int
1939 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1941 MonoType *simple_type;
1942 int i;
1944 if (sig->hasthis) {
1945 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1946 return 1;
1947 args++;
1949 for (i = 0; i < sig->param_count; ++i) {
1950 if (sig->params [i]->byref) {
1951 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1952 return 1;
1953 continue;
1955 simple_type = sig->params [i];
1956 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1957 handle_enum:
1958 switch (simple_type->type) {
1959 case MONO_TYPE_VOID:
1960 return 1;
1961 continue;
1962 case MONO_TYPE_I1:
1963 case MONO_TYPE_U1:
1964 case MONO_TYPE_BOOLEAN:
1965 case MONO_TYPE_I2:
1966 case MONO_TYPE_U2:
1967 case MONO_TYPE_CHAR:
1968 case MONO_TYPE_I4:
1969 case MONO_TYPE_U4:
1970 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1971 return 1;
1972 continue;
1973 case MONO_TYPE_I:
1974 case MONO_TYPE_U:
1975 case MONO_TYPE_PTR:
1976 case MONO_TYPE_FNPTR:
1977 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1978 return 1;
1979 continue;
1980 case MONO_TYPE_CLASS:
1981 case MONO_TYPE_STRING:
1982 case MONO_TYPE_OBJECT:
1983 case MONO_TYPE_SZARRAY:
1984 case MONO_TYPE_ARRAY:
1985 if (args [i]->type != STACK_OBJ)
1986 return 1;
1987 continue;
1988 case MONO_TYPE_I8:
1989 case MONO_TYPE_U8:
1990 if (args [i]->type != STACK_I8)
1991 return 1;
1992 continue;
1993 case MONO_TYPE_R4:
1994 case MONO_TYPE_R8:
1995 if (args [i]->type != STACK_R8)
1996 return 1;
1997 continue;
1998 case MONO_TYPE_VALUETYPE:
1999 if (simple_type->data.klass->enumtype) {
2000 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2001 goto handle_enum;
2003 if (args [i]->type != STACK_VTYPE)
2004 return 1;
2005 continue;
2006 case MONO_TYPE_TYPEDBYREF:
2007 if (args [i]->type != STACK_VTYPE)
2008 return 1;
2009 continue;
2010 case MONO_TYPE_GENERICINST:
2011 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2012 goto handle_enum;
2014 default:
2015 g_error ("unknown type 0x%02x in check_call_signature",
2016 simple_type->type);
2019 return 0;
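/* Map an OP_*CALLVIRT opcode to the corresponding direct call opcode. */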
2022 static int
2023 callvirt_to_call (int opcode)
2025 switch (opcode) {
2026 case OP_CALLVIRT:
2027 return OP_CALL;
2028 case OP_VOIDCALLVIRT:
2029 return OP_VOIDCALL;
2030 case OP_FCALLVIRT:
2031 return OP_FCALL;
2032 case OP_VCALLVIRT:
2033 return OP_VCALL;
2034 case OP_LCALLVIRT:
2035 return OP_LCALL;
2036 default:
2037 g_assert_not_reached ();
2040 return -1;
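/* Map an OP_*CALLVIRT opcode to the *_MEMBASE opcode used for vtable/IMT dispatch. */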
2043 static int
2044 callvirt_to_call_membase (int opcode)
2046 switch (opcode) {
2047 case OP_CALLVIRT:
2048 return OP_CALL_MEMBASE;
2049 case OP_VOIDCALLVIRT:
2050 return OP_VOIDCALL_MEMBASE;
2051 case OP_FCALLVIRT:
2052 return OP_FCALL_MEMBASE;
2053 case OP_LCALLVIRT:
2054 return OP_LCALL_MEMBASE;
2055 case OP_VCALLVIRT:
2056 return OP_VCALL_MEMBASE;
2057 default:
2058 g_assert_not_reached ();
2061 return -1;
2064 #ifdef MONO_ARCH_HAVE_IMT
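/*
 * emit_imt_argument:
 *
 *   Pass the IMT argument (IMT_ARG, or the called method itself when it is NULL)
 * to CALL, either in the architecture's IMT register or through the arch specific
 * hook when no such register exists.
 */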
2065 static void
2066 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2068 #ifdef MONO_ARCH_IMT_REG
2069 int method_reg = alloc_preg (cfg);
2071 if (imt_arg) {
2072 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2073 } else if (cfg->compile_aot) {
2074 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2075 } else {
2076 MonoInst *ins;
2077 MONO_INST_NEW (cfg, ins, OP_PCONST);
2078 ins->inst_p0 = call->method;
2079 ins->dreg = method_reg;
2080 MONO_ADD_INS (cfg->cbb, ins);
2083 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2084 #else
2085 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2086 #endif
2088 #endif
2090 static MonoJumpInfo *
2091 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2093 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2095 ji->ip.i = ip;
2096 ji->type = type;
2097 ji->data.target = target;
2099 return ji;
2102 inline static MonoInst*
2103 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
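/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG: pick the opcode from
 * the return type, set up the return value (valuetype returns go through a
 * temporary and OP_OUTARG_VTRETADDR), convert R4 arguments on soft-float
 * targets, and let the backend lower the argument passing.
 */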
2105 inline static MonoCallInst *
2106 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2107 MonoInst **args, int calli, int virtual, int tail)
2109 MonoCallInst *call;
2110 #ifdef MONO_ARCH_SOFT_FLOAT
2111 int i;
2112 #endif
2114 if (tail)
2115 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2116 else
2117 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2119 call->args = args;
2120 call->signature = sig;
2122 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2124 if (tail) {
2125 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2126 call->vret_var = cfg->vret_addr;
2127 //g_assert_not_reached ();
2129 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2130 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2131 MonoInst *loada;
2133 temp->backend.is_pinvoke = sig->pinvoke;
2136 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2137 * address of the return value to increase optimization opportunities.
2138 * Before vtype decomposition, the dreg of the call ins itself represents the
2139 * fact the call modifies the return value. After decomposition, the call will
2140 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2141 * will be transformed into an LDADDR.
2143 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2144 loada->dreg = alloc_preg (cfg);
2145 loada->inst_p0 = temp;
2146 /* We reference the call too since call->dreg could change during optimization */
2147 loada->inst_p1 = call;
2148 MONO_ADD_INS (cfg->cbb, loada);
2150 call->inst.dreg = temp->dreg;
2152 call->vret_var = loada;
2153 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2154 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2156 #ifdef MONO_ARCH_SOFT_FLOAT
2158 * If the call has a float argument, we would need to do an r8->r4 conversion using
2159 * an icall, but that cannot be done during the call sequence since it would clobber
2160 * the call registers + the stack. So we do it before emitting the call.
2162 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2163 MonoType *t;
2164 MonoInst *in = call->args [i];
2166 if (i >= sig->hasthis)
2167 t = sig->params [i - sig->hasthis];
2168 else
2169 t = &mono_defaults.int_class->byval_arg;
2170 t = mono_type_get_underlying_type (t);
2172 if (!t->byref && t->type == MONO_TYPE_R4) {
2173 MonoInst *iargs [1];
2174 MonoInst *conv;
2176 iargs [0] = in;
2177 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2179 /* The result will be in an int vreg */
2180 call->args [i] = conv;
2183 #endif
2185 #ifdef ENABLE_LLVM
2186 if (COMPILE_LLVM (cfg))
2187 mono_llvm_emit_call (cfg, call);
2188 else
2189 mono_arch_emit_call (cfg, call);
2190 #else
2191 mono_arch_emit_call (cfg, call);
2192 #endif
2194 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2195 cfg->flags |= MONO_CFG_HAS_CALLS;
2197 return call;
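/* Emit an indirect call through the function pointer in ADDR. */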
2200 inline static MonoInst*
2201 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2203 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2205 call->inst.sreg1 = addr->dreg;
2207 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2209 return (MonoInst*)call;
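/* Same as mono_emit_calli, but also pass RGCTX_ARG in the RGCTX register when it is non-NULL. */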
2212 inline static MonoInst*
2213 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2215 #ifdef MONO_ARCH_RGCTX_REG
2216 MonoCallInst *call;
2217 int rgctx_reg = -1;
2219 if (rgctx_arg) {
2220 rgctx_reg = mono_alloc_preg (cfg);
2221 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2223 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2224 if (rgctx_arg) {
2225 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2226 cfg->uses_rgctx_reg = TRUE;
2227 call->rgctx_reg = TRUE;
2229 return (MonoInst*)call;
2230 #else
2231 g_assert_not_reached ();
2232 return NULL;
2233 #endif
2236 static MonoInst*
2237 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2238 static MonoInst*
2239 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
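/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD. This handles string ctors, remoting wrappers, the
 * delegate Invoke fast path, devirtualization of non-virtual and final methods
 * (with an explicit null check on THIS), and vtable/IMT based virtual dispatch.
 */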
2241 static MonoInst*
2242 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2243 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2245 gboolean might_be_remote;
2246 gboolean virtual = this != NULL;
2247 gboolean enable_for_aot = TRUE;
2248 int context_used;
2249 MonoCallInst *call;
2251 if (method->string_ctor) {
2252 /* Create the real signature */
2253 /* FIXME: Cache these */
2254 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2255 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2257 sig = ctor_sig;
2260 might_be_remote = this && sig->hasthis &&
2261 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2262 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2264 context_used = mono_method_check_context_used (method);
2265 if (might_be_remote && context_used) {
2266 MonoInst *addr;
2268 g_assert (cfg->generic_sharing_context);
2270 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2272 return mono_emit_calli (cfg, sig, args, addr);
2275 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2277 if (might_be_remote)
2278 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2279 else
2280 call->method = method;
2281 call->inst.flags |= MONO_INST_HAS_METHOD;
2282 call->inst.inst_left = this;
2284 if (virtual) {
2285 int vtable_reg, slot_reg, this_reg;
2287 this_reg = this->dreg;
2289 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2290 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2291 /* Make a call to delegate->invoke_impl */
2292 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2293 call->inst.inst_basereg = this_reg;
2294 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2295 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2297 return (MonoInst*)call;
2299 #endif
2301 if ((!cfg->compile_aot || enable_for_aot) &&
2302 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2303 (MONO_METHOD_IS_FINAL (method) &&
2304 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2306 * The method is not virtual, so we just need to ensure 'this' is not null
2307 * and then we can call the method directly.
2309 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2310 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2313 if (!method->string_ctor) {
2314 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2315 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2316 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2319 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2321 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2323 return (MonoInst*)call;
2326 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2328 * The method is virtual, but we can statically dispatch since either
2329 * its class or the method itself is sealed.
2330 * But first we need to ensure it's not a null reference.
2332 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2333 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2334 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2336 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2337 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2339 return (MonoInst*)call;
2342 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2344 vtable_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2346 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2347 slot_reg = -1;
2348 #ifdef MONO_ARCH_HAVE_IMT
2349 if (mono_use_imt) {
2350 guint32 imt_slot = mono_method_get_imt_slot (method);
2351 emit_imt_argument (cfg, call, imt_arg);
2352 slot_reg = vtable_reg;
2353 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2355 #endif
2356 if (slot_reg == -1) {
2357 slot_reg = alloc_preg (cfg);
2358 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2359 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2361 } else {
2362 slot_reg = vtable_reg;
2363 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2364 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2365 #ifdef MONO_ARCH_HAVE_IMT
2366 if (imt_arg) {
2367 g_assert (mono_method_signature (method)->generic_param_count);
2368 emit_imt_argument (cfg, call, imt_arg);
2370 #endif
2373 call->inst.sreg1 = slot_reg;
2374 call->virtual = TRUE;
2377 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2379 return (MonoInst*)call;
2382 static MonoInst*
2383 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2384 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2386 int rgctx_reg = 0;
2387 MonoInst *ins;
2388 MonoCallInst *call;
2390 if (vtable_arg) {
2391 #ifdef MONO_ARCH_RGCTX_REG
2392 rgctx_reg = mono_alloc_preg (cfg);
2393 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2394 #else
2395 NOT_IMPLEMENTED;
2396 #endif
2398 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2400 call = (MonoCallInst*)ins;
2401 if (vtable_arg) {
2402 #ifdef MONO_ARCH_RGCTX_REG
2403 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2404 cfg->uses_rgctx_reg = TRUE;
2405 call->rgctx_reg = TRUE;
2406 #else
2407 NOT_IMPLEMENTED;
2408 #endif
2411 return ins;
2414 static inline MonoInst*
2415 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2417 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2420 MonoInst*
2421 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2422 MonoInst **args)
2424 MonoCallInst *call;
2426 g_assert (sig);
2428 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2429 call->fptr = func;
2431 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2433 return (MonoInst*)call;
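/* Emit a call to the JIT icall FUNC, going through its registered icall wrapper. */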
2436 inline static MonoInst*
2437 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2439 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2441 g_assert (info);
2443 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2447 * mono_emit_abs_call:
2449 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2451 inline static MonoInst*
2452 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2453 MonoMethodSignature *sig, MonoInst **args)
2455 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2456 MonoInst *ins;
2459 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2460 * handle it.
2462 if (cfg->abs_patches == NULL)
2463 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2464 g_hash_table_insert (cfg->abs_patches, ji, ji);
2465 ins = mono_emit_native_call (cfg, ji, sig, args);
2466 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2467 return ins;
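/*
 * mono_emit_widen_call_res:
 *
 *   Widen small integer results of pinvoke (or LLVM) calls to register size,
 * since native code might leave the upper bits uninitialized.
 */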
2470 static MonoInst*
2471 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2473 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2474 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2475 int widen_op = -1;
2478 * Native code might return non-register-sized integers
2479 * without initializing the upper bits.
2481 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2482 case OP_LOADI1_MEMBASE:
2483 widen_op = OP_ICONV_TO_I1;
2484 break;
2485 case OP_LOADU1_MEMBASE:
2486 widen_op = OP_ICONV_TO_U1;
2487 break;
2488 case OP_LOADI2_MEMBASE:
2489 widen_op = OP_ICONV_TO_I2;
2490 break;
2491 case OP_LOADU2_MEMBASE:
2492 widen_op = OP_ICONV_TO_U2;
2493 break;
2494 default:
2495 break;
2498 if (widen_op != -1) {
2499 int dreg = alloc_preg (cfg);
2500 MonoInst *widen;
2502 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2503 widen->type = ins->type;
2504 ins = widen;
2509 return ins;
2512 static MonoMethod*
2513 get_memcpy_method (void)
2515 static MonoMethod *memcpy_method = NULL;
2516 if (!memcpy_method) {
2517 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2518 if (!memcpy_method)
2519 g_error ("Old corlib found. Install a new one");
2521 return memcpy_method;
2525 * Emit code to copy a valuetype of type @klass whose address is stored in
2526 * @src->dreg to memory whose address is stored at @dest->dreg.
2528 void
2529 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2531 MonoInst *iargs [3];
2532 int n;
2533 guint32 align = 0;
2534 MonoMethod *memcpy_method;
2536 g_assert (klass);
2538 * This check breaks with spilled vars... need to handle it during verification anyway.
2539 * g_assert (klass && klass == src->klass && klass == dest->klass);
2542 if (native)
2543 n = mono_class_native_size (klass, &align);
2544 else
2545 n = mono_class_value_size (klass, &align);
2547 #if HAVE_WRITE_BARRIERS
2548 /* if native is true there should be no references in the struct */
2549 if (klass->has_references && !native) {
2550 /* Avoid barriers when storing to the stack */
2551 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2552 (dest->opcode == OP_LDADDR))) {
2553 int context_used = 0;
2555 iargs [0] = dest;
2556 iargs [1] = src;
2558 if (cfg->generic_sharing_context)
2559 context_used = mono_class_check_context_used (klass);
2560 if (context_used) {
2561 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2562 } else {
2563 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2564 mono_class_compute_gc_descriptor (klass);
2567 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2570 #endif
2572 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2573 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2574 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2575 } else {
2576 iargs [0] = dest;
2577 iargs [1] = src;
2578 EMIT_NEW_ICONST (cfg, iargs [2], n);
2580 memcpy_method = get_memcpy_method ();
2581 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2585 static MonoMethod*
2586 get_memset_method (void)
2588 static MonoMethod *memset_method = NULL;
2589 if (!memset_method) {
2590 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2591 if (!memset_method)
2592 g_error ("Old corlib found. Install a new one");
2594 return memset_method;
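/*
 * mini_emit_initobj:
 *
 *   Emit IR which zero-initializes the valuetype of type KLASS stored at DEST,
 * inlining the memset for small sizes and calling the managed memset helper
 * otherwise.
 */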
2597 void
2598 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2600 MonoInst *iargs [3];
2601 int n;
2602 guint32 align;
2603 MonoMethod *memset_method;
2605 /* FIXME: Optimize this for the case when dest is an LDADDR */
2607 mono_class_init (klass);
2608 n = mono_class_value_size (klass, &align);
2610 if (n <= sizeof (gpointer) * 5) {
2611 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2613 else {
2614 memset_method = get_memset_method ();
2615 iargs [0] = dest;
2616 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2617 EMIT_NEW_ICONST (cfg, iargs [2], n);
2618 mono_emit_method_call (cfg, memset_method, iargs, NULL);
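/*
 * emit_get_rgctx:
 *
 *   Emit IR which computes the runtime generic context for the shared method
 * METHOD: the mrgctx variable, the vtable variable, or the vtable loaded from
 * the 'this' argument, depending on how the method is shared.
 */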
2622 static MonoInst*
2623 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2625 MonoInst *this = NULL;
2627 g_assert (cfg->generic_sharing_context);
2629 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2630 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2631 !method->klass->valuetype)
2632 EMIT_NEW_ARGLOAD (cfg, this, 0);
2634 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2635 MonoInst *mrgctx_loc, *mrgctx_var;
2637 g_assert (!this);
2638 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2640 mrgctx_loc = mono_get_vtable_var (cfg);
2641 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2643 return mrgctx_var;
2644 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2645 MonoInst *vtable_loc, *vtable_var;
2647 g_assert (!this);
2649 vtable_loc = mono_get_vtable_var (cfg);
2650 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2652 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2653 MonoInst *mrgctx_var = vtable_var;
2654 int vtable_reg;
2656 vtable_reg = alloc_preg (cfg);
2657 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2658 vtable_var->type = STACK_PTR;
2661 return vtable_var;
2662 } else {
2663 MonoInst *ins;
2664 int vtable_reg, res_reg;
2666 vtable_reg = alloc_preg (cfg);
2667 res_reg = alloc_preg (cfg);
2668 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2669 return ins;
2673 static MonoJumpInfoRgctxEntry *
2674 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2676 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2677 res->method = method;
2678 res->in_mrgctx = in_mrgctx;
2679 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2680 res->data->type = patch_type;
2681 res->data->data.target = patch_data;
2682 res->info_type = info_type;
2684 return res;
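/* Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY using RGCTX. */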
2687 static inline MonoInst*
2688 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2690 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2693 static MonoInst*
2694 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2695 MonoClass *klass, int rgctx_type)
2697 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2698 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2700 return emit_rgctx_fetch (cfg, rgctx, entry);
2703 static MonoInst*
2704 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2705 MonoMethod *cmethod, int rgctx_type)
2707 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2708 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2710 return emit_rgctx_fetch (cfg, rgctx, entry);
2713 static MonoInst*
2714 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2715 MonoClassField *field, int rgctx_type)
2717 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2718 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2720 return emit_rgctx_fetch (cfg, rgctx, entry);
2724 * On return the caller must check @klass for load errors.
2726 static void
2727 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2729 MonoInst *vtable_arg;
2730 MonoCallInst *call;
2731 int context_used = 0;
2733 if (cfg->generic_sharing_context)
2734 context_used = mono_class_check_context_used (klass);
2736 if (context_used) {
2737 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2738 klass, MONO_RGCTX_INFO_VTABLE);
2739 } else {
2740 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2742 if (!vtable)
2743 return;
2744 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2747 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2748 #ifdef MONO_ARCH_VTABLE_REG
2749 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2750 cfg->uses_vtable_reg = TRUE;
2751 #else
2752 NOT_IMPLEMENTED;
2753 #endif
2757 * On return the caller must check @array_class for load errors
2759 static void
2760 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2762 int vtable_reg = alloc_preg (cfg);
2763 int context_used = 0;
2765 if (cfg->generic_sharing_context)
2766 context_used = mono_class_check_context_used (array_class);
2768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2770 if (cfg->opt & MONO_OPT_SHARED) {
2771 int class_reg = alloc_preg (cfg);
2772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2773 if (cfg->compile_aot) {
2774 int klass_reg = alloc_preg (cfg);
2775 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2776 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2777 } else {
2778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2780 } else if (context_used) {
2781 MonoInst *vtable_ins;
2783 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2784 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2785 } else {
2786 if (cfg->compile_aot) {
2787 int vt_reg;
2788 MonoVTable *vtable;
2790 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2791 return;
2792 vt_reg = alloc_preg (cfg);
2793 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2794 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2795 } else {
2796 MonoVTable *vtable;
2797 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2798 return;
2799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2803 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
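/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source and target classes of a
 * cast in the JIT TLS data so a failing cast can report both types.
 */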
2806 static void
2807 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2809 if (mini_get_debug_options ()->better_cast_details) {
2810 int to_klass_reg = alloc_preg (cfg);
2811 int vtable_reg = alloc_preg (cfg);
2812 int klass_reg = alloc_preg (cfg);
2813 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2815 if (!tls_get) {
2816 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2817 exit (1);
2820 MONO_ADD_INS (cfg->cbb, tls_get);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2825 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2826 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2830 static void
2831 reset_cast_details (MonoCompile *cfg)
2833 /* Reset the variables holding the cast details */
2834 if (mini_get_debug_options ()->better_cast_details) {
2835 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2837 MONO_ADD_INS (cfg->cbb, tls_get);
2838 /* It is enough to reset the from field */
2839 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2844 * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
2845 * generic code is generated.
2847 static MonoInst*
2848 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2850 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2852 if (context_used) {
2853 MonoInst *rgctx, *addr;
2855 /* FIXME: What if the class is shared? We might not
2856 have to get the address of the method from the
2857 RGCTX. */
2858 addr = emit_get_rgctx_method (cfg, context_used, method,
2859 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2861 rgctx = emit_get_rgctx (cfg, method, context_used);
2863 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2864 } else {
2865 return mono_emit_method_call (cfg, method, &val, NULL);
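/*
 * handle_unbox:
 *
 *   Emit the unbox sequence: check that the object on the stack has rank 0 and
 * that its element class matches KLASS (fetched from the rgctx in shared code),
 * then return the address of the value just past the MonoObject header.
 */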
2869 static MonoInst*
2870 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2872 MonoInst *add;
2873 int obj_reg;
2874 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2875 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2876 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2877 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2879 obj_reg = sp [0]->dreg;
2880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2881 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2883 /* FIXME: generics */
2884 g_assert (klass->rank == 0);
2886 // Check rank == 0
2887 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2888 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2891 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2893 if (context_used) {
2894 MonoInst *element_class;
2896 /* This assertion is from the unboxcast insn */
2897 g_assert (klass->rank == 0);
2899 element_class = emit_get_rgctx_klass (cfg, context_used,
2900 klass->element_class, MONO_RGCTX_INFO_KLASS);
2902 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2903 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2904 } else {
2905 save_cast_details (cfg, klass->element_class, obj_reg);
2906 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2907 reset_cast_details (cfg);
2910 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2911 MONO_ADD_INS (cfg->cbb, add);
2912 add->type = STACK_MP;
2913 add->klass = klass;
2915 return add;
2919 * Returns NULL and sets the cfg exception on error.
2921 static MonoInst*
2922 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2924 MonoInst *iargs [2];
2925 void *alloc_ftn;
2927 if (cfg->opt & MONO_OPT_SHARED) {
2928 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2929 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2931 alloc_ftn = mono_object_new;
2932 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2933 /* This happens often in argument checking code, eg. throw new FooException... */
2934 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2935 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2936 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2937 } else {
2938 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2939 MonoMethod *managed_alloc = NULL;
2940 gboolean pass_lw;
2942 if (!vtable) {
2943 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2944 cfg->exception_ptr = klass;
2945 return NULL;
2948 #ifndef MONO_CROSS_COMPILE
2949 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2950 #endif
2952 if (managed_alloc) {
2953 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2954 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2956 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2957 if (pass_lw) {
2958 guint32 lw = vtable->klass->instance_size;
2959 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2960 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2961 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2963 else {
2964 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2968 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
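/*
 * handle_alloc_from_inst:
 *
 *   Same as handle_alloc, but the vtable/class is only known at runtime and is
 * given by DATA_INST, so the allocation goes through mono_object_new or
 * mono_object_new_specific.
 */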
2971 static MonoInst*
2972 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2973 gboolean for_box)
2975 MonoInst *iargs [2];
2976 MonoMethod *managed_alloc = NULL;
2977 void *alloc_ftn;
2980 FIXME: we cannot get managed_alloc here because we can't get
2981 the class's vtable (because it's not a closed class)
2983 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2984 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2987 if (cfg->opt & MONO_OPT_SHARED) {
2988 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2989 iargs [1] = data_inst;
2990 alloc_ftn = mono_object_new;
2991 } else {
2992 if (managed_alloc) {
2993 iargs [0] = data_inst;
2994 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2997 iargs [0] = data_inst;
2998 alloc_ftn = mono_object_new_specific;
3001 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3005 * Returns NULL and sets the cfg exception on error.
3007 static MonoInst*
3008 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3010 MonoInst *alloc, *ins;
3012 if (mono_class_is_nullable (klass)) {
3013 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3014 return mono_emit_method_call (cfg, method, &val, NULL);
3017 alloc = handle_alloc (cfg, klass, TRUE);
3018 if (!alloc)
3019 return NULL;
3021 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3023 return alloc;
3026 static MonoInst *
3027 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3029 MonoInst *alloc, *ins;
3031 if (mono_class_is_nullable (klass)) {
3032 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3033 /* FIXME: What if the class is shared? We might not
3034 have to get the method address from the RGCTX. */
3035 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3036 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3037 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3039 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3040 } else {
3041 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3043 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3045 return alloc;
3050 * Returns NULL and sets the cfg exception on error.
3052 static MonoInst*
3053 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3055 MonoBasicBlock *is_null_bb;
3056 int obj_reg = src->dreg;
3057 int vtable_reg = alloc_preg (cfg);
3059 NEW_BBLOCK (cfg, is_null_bb);
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3064 save_cast_details (cfg, klass, obj_reg);
3066 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3068 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3069 } else {
3070 int klass_reg = alloc_preg (cfg);
3072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3074 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3075 /* the remoting code is broken, access the class for now */
3076 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions that some remoting fixes were due.*/
3077 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3078 if (!vt) {
3079 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3080 cfg->exception_ptr = klass;
3081 return NULL;
3083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3084 } else {
3085 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3088 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3089 } else {
3090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3091 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3095 MONO_START_BB (cfg, is_null_bb);
3097 reset_cast_details (cfg);
3099 return src;
3103 * Returns NULL and sets the cfg exception on error.
3105 static MonoInst*
3106 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3108 MonoInst *ins;
3109 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3110 int obj_reg = src->dreg;
3111 int vtable_reg = alloc_preg (cfg);
3112 int res_reg = alloc_preg (cfg);
3114 NEW_BBLOCK (cfg, is_null_bb);
3115 NEW_BBLOCK (cfg, false_bb);
3116 NEW_BBLOCK (cfg, end_bb);
3118 /* Do the assignment at the beginning, so the other assignment can be if-converted */
3119 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3120 ins->type = STACK_OBJ;
3121 ins->klass = klass;
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3124 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3126 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3128 /* the is_null_bb target simply copies the input register to the output */
3129 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3130 } else {
3131 int klass_reg = alloc_preg (cfg);
3133 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3135 if (klass->rank) {
3136 int rank_reg = alloc_preg (cfg);
3137 int eclass_reg = alloc_preg (cfg);
3139 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3142 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3144 if (klass->cast_class == mono_defaults.object_class) {
3145 int parent_reg = alloc_preg (cfg);
3146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3147 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3148 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3149 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3150 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3151 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3152 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3153 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3154 } else if (klass->cast_class == mono_defaults.enum_class) {
3155 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3157 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3158 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3159 } else {
3160 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3161 /* Check that the object is a vector too */
3162 int bounds_reg = alloc_preg (cfg);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3168 /* the is_null_bb target simply copies the input register to the output */
3169 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3171 } else if (mono_class_is_nullable (klass)) {
3172 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3173 /* the is_null_bb target simply copies the input register to the output */
3174 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3175 } else {
3176 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3177 /* the remoting code is broken, access the class for now */
3178 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions that some remoting fixes were due.*/
3179 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3180 if (!vt) {
3181 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3182 cfg->exception_ptr = klass;
3183 return NULL;
3185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3186 } else {
3187 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3188 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3191 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3192 } else {
3193 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3194 /* the is_null_bb target simply copies the input register to the output */
3195 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3200 MONO_START_BB (cfg, false_bb);
3202 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3203 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3205 MONO_START_BB (cfg, is_null_bb);
3207 MONO_START_BB (cfg, end_bb);
3209 return ins;
3212 static MonoInst*
3213 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3215 /* This opcode takes as input an object reference and a class, and returns:
3216 0) if the object is an instance of the class,
3217 1) if the object is not an instance of the class,
3218 2) if the object is a proxy whose type cannot be determined */
3220 MonoInst *ins;
3221 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3222 int obj_reg = src->dreg;
3223 int dreg = alloc_ireg (cfg);
3224 int tmp_reg;
3225 int klass_reg = alloc_preg (cfg);
3227 NEW_BBLOCK (cfg, true_bb);
3228 NEW_BBLOCK (cfg, false_bb);
3229 NEW_BBLOCK (cfg, false2_bb);
3230 NEW_BBLOCK (cfg, end_bb);
3231 NEW_BBLOCK (cfg, no_proxy_bb);
3233 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3234 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3236 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3237 NEW_BBLOCK (cfg, interface_fail_bb);
3239 tmp_reg = alloc_preg (cfg);
3240 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3241 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3242 MONO_START_BB (cfg, interface_fail_bb);
3243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3245 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3247 tmp_reg = alloc_preg (cfg);
3248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3250 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3251 } else {
3252 tmp_reg = alloc_preg (cfg);
3253 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3254 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3256 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3257 tmp_reg = alloc_preg (cfg);
3258 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3259 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3261 tmp_reg = alloc_preg (cfg);
3262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3264 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3266 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3267 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3269 MONO_START_BB (cfg, no_proxy_bb);
3271 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3274 MONO_START_BB (cfg, false_bb);
3276 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3279 MONO_START_BB (cfg, false2_bb);
3281 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3284 MONO_START_BB (cfg, true_bb);
3286 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3288 MONO_START_BB (cfg, end_bb);
3290 /* FIXME: */
3291 MONO_INST_NEW (cfg, ins, OP_ICONST);
3292 ins->dreg = dreg;
3293 ins->type = STACK_I4;
3295 return ins;
3298 static MonoInst*
3299 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3301 /* This opcode takes as input an object reference and a class, and returns:
3302 0) if the object is an instance of the class,
3303 1) if the object is a proxy whose type cannot be determined
3304 otherwise an InvalidCastException is thrown */
3306 MonoInst *ins;
3307 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3308 int obj_reg = src->dreg;
3309 int dreg = alloc_ireg (cfg);
3310 int tmp_reg = alloc_preg (cfg);
3311 int klass_reg = alloc_preg (cfg);
3313 NEW_BBLOCK (cfg, end_bb);
3314 NEW_BBLOCK (cfg, ok_result_bb);
3316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3317 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3319 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3320 NEW_BBLOCK (cfg, interface_fail_bb);
3322 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3323 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3324 MONO_START_BB (cfg, interface_fail_bb);
3325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3327 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3329 tmp_reg = alloc_preg (cfg);
3330 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3331 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3332 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3334 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3335 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3337 } else {
3338 NEW_BBLOCK (cfg, no_proxy_bb);
3340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3342 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3344 tmp_reg = alloc_preg (cfg);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3348 tmp_reg = alloc_preg (cfg);
3349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3353 NEW_BBLOCK (cfg, fail_1_bb);
3355 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3357 MONO_START_BB (cfg, fail_1_bb);
3359 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3360 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3362 MONO_START_BB (cfg, no_proxy_bb);
3364 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3367 MONO_START_BB (cfg, ok_result_bb);
3369 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3371 MONO_START_BB (cfg, end_bb);
3373 /* FIXME: */
3374 MONO_INST_NEW (cfg, ins, OP_ICONST);
3375 ins->dreg = dreg;
3376 ins->type = STACK_I4;
3378 return ins;
3382 * Returns NULL and sets the cfg exception on error.
3384 static G_GNUC_UNUSED MonoInst*
3385 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3387 gpointer *trampoline;
3388 MonoInst *obj, *method_ins, *tramp_ins;
3389 MonoDomain *domain;
3390 guint8 **code_slot;
3392 obj = handle_alloc (cfg, klass, FALSE);
3393 if (!obj)
3394 return NULL;
3396 /* Inline the contents of mono_delegate_ctor */
3398 /* Set target field */
3399 /* Optimize away setting of NULL target */
3400 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3403 /* Set method field */
3404 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3405 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3408 * To avoid looking up the compiled code belonging to the target method
3409 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3410 * store it, and we fill it after the method has been compiled.
3412 if (!cfg->compile_aot && !method->dynamic) {
3413 MonoInst *code_slot_ins;
3415 domain = mono_domain_get ();
3416 mono_domain_lock (domain);
3417 if (!domain_jit_info (domain)->method_code_hash)
3418 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3419 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3420 if (!code_slot) {
3421 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3422 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3424 mono_domain_unlock (domain);
3426 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3427 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3430 /* Set invoke_impl field */
3431 if (cfg->compile_aot) {
3432 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3433 } else {
3434 trampoline = mono_create_delegate_trampoline (klass);
3435 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3437 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3439 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3441 return obj;
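/*
 * handle_array_new:
 *
 *   Emit a call to the rank specific mono_array_new_va icall wrapper for a
 * NEWOBJ on an array type; this needs a vararg calling convention, so LLVM is
 * disabled for the method.
 */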
3444 static MonoInst*
3445 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3447 MonoJitICallInfo *info;
3449 /* Need to register the icall so it gets an icall wrapper */
3450 info = mono_get_array_new_va_icall (rank);
3452 cfg->flags |= MONO_CFG_HAS_VARARGS;
3454 /* mono_array_new_va () needs a vararg calling convention */
3455 cfg->disable_llvm = TRUE;
3457 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3458 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
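/*
 * mono_emit_load_got_addr:
 *
 *   Emit OP_LOAD_GOTADDR into the got_var at the start of the entry bblock and
 * add a dummy use in the exit bblock so the variable stays alive for the whole
 * method.
 */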
3461 static void
3462 mono_emit_load_got_addr (MonoCompile *cfg)
3464 MonoInst *getaddr, *dummy_use;
3466 if (!cfg->got_var || cfg->got_var_allocated)
3467 return;
3469 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3470 getaddr->dreg = cfg->got_var->dreg;
3472 /* Add it to the start of the first bblock */
3473 if (cfg->bb_entry->code) {
3474 getaddr->next = cfg->bb_entry->code;
3475 cfg->bb_entry->code = getaddr;
3477 else
3478 MONO_ADD_INS (cfg->bb_entry, getaddr);
3480 cfg->got_var_allocated = TRUE;
3483 * Add a dummy use to keep the got_var alive, since real uses might
3484 * only be generated by the back ends.
3485 * Add it to end_bblock, so the variable's lifetime covers the whole
3486 * method.
3487 * It would be better to make the usage of the got var explicit in all
3488 * cases when the backend needs it (i.e. calls, throw etc.), so this
3489 * wouldn't be needed.
3491 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3492 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3495 static int inline_limit;
3496 static gboolean inline_limit_inited;
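/*
 * mono_method_check_inlining:
 *
 *   Return TRUE if METHOD is a candidate for inlining: reject shared generic
 * code, deep inline depth, runtime/internal-call/noinline/synchronized/pinvoke
 * and marshalbyref methods, methods with exception clauses or declarative
 * security, bodies larger than the inline limit, and methods whose class
 * cctor has not been run yet.
 */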
3498 static gboolean
3499 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3501 MonoMethodHeader *header;
3502 MonoVTable *vtable;
3503 #ifdef MONO_ARCH_SOFT_FLOAT
3504 MonoMethodSignature *sig = mono_method_signature (method);
3505 int i;
3506 #endif
3508 if (cfg->generic_sharing_context)
3509 return FALSE;
3511 if (cfg->inline_depth > 10)
3512 return FALSE;
3514 #ifdef MONO_ARCH_HAVE_LMF_OPS
3515 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3516 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3517 !MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret) && !mini_class_is_system_array (method->klass))
3518 return TRUE;
3519 #endif
3521 if (method->is_inflated)
3522 /* Avoid inflating the header */
3523 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3524 else
3525 header = mono_method_get_header (method);
3527 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3528 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3529 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3530 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3531 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3532 (method->klass->marshalbyref) ||
3533 !header || header->num_clauses)
3534 return FALSE;
3536 /* also consider num_locals? */
3537 /* Do the size check early to avoid creating vtables */
3538 if (!inline_limit_inited) {
3539 if (getenv ("MONO_INLINELIMIT"))
3540 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3541 else
3542 inline_limit = INLINE_LENGTH_LIMIT;
3543 inline_limit_inited = TRUE;
3545 if (header->code_size >= inline_limit)
3546 return FALSE;
3549 * if we can initialize the class of the method right away, we do,
3550 * otherwise we don't allow inlining if the class needs initialization,
3551 * since it would mean inserting a call to mono_runtime_class_init()
3552 * inside the inlined code
3554 if (!(cfg->opt & MONO_OPT_SHARED)) {
3555 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3556 if (cfg->run_cctors && method->klass->has_cctor) {
3557 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3558 if (!method->klass->runtime_info)
3559 /* No vtable created yet */
3560 return FALSE;
3561 vtable = mono_class_vtable (cfg->domain, method->klass);
3562 if (!vtable)
3563 return FALSE;
3564 /* This makes it so that inlining cannot trigger */
3565 /* .cctors: too many apps depend on them */
3566 /* running in a specific order... */
3567 if (! vtable->initialized)
3568 return FALSE;
3569 mono_runtime_class_init (vtable);
3571 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3572 if (!method->klass->runtime_info)
3573 /* No vtable created yet */
3574 return FALSE;
3575 vtable = mono_class_vtable (cfg->domain, method->klass);
3576 if (!vtable)
3577 return FALSE;
3578 if (!vtable->initialized)
3579 return FALSE;
3581 } else {
3583 * If we're compiling for shared code,
3584 * the cctor will need to be run at aot method load time, for example,
3585 * or at the end of the compilation of the inlining method.
3587 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3588 return FALSE;
3592 * CAS - do not inline methods with declarative security
3593 * Note: this has to be before any possible return TRUE;
3595 if (mono_method_has_declsec (method))
3596 return FALSE;
3598 #ifdef MONO_ARCH_SOFT_FLOAT
3599 /* FIXME: */
3600 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3601 return FALSE;
3602 for (i = 0; i < sig->param_count; ++i)
3603 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3604 return FALSE;
3605 #endif
3607 return TRUE;
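/*
 * Illustrative sketch, not part of the original source: how the inline size
 * limit checked above can be overridden at run time.  Only standard C is
 * assumed; read_inline_limit_example is hypothetical and the 20-byte default
 * mirrors INLINE_LENGTH_LIMIT.
 */
#if 0
#include <stdlib.h>

static int
read_inline_limit_example (void)
{
	const char *s = getenv ("MONO_INLINELIMIT");

	/* e.g. run with: MONO_INLINELIMIT=50 mono app.exe */
	return s ? atoi (s) : 20 /* INLINE_LENGTH_LIMIT */;
}
#endif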
3610 static gboolean
3611 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3613 if (vtable->initialized && !cfg->compile_aot)
3614 return FALSE;
3616 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3617 return FALSE;
3619 if (!mono_class_needs_cctor_run (vtable->klass, method))
3620 return FALSE;
3622 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3623 /* The initialization is already done before the method is called */
3624 return FALSE;
3626 return TRUE;
3629 static MonoInst*
3630 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3632 MonoInst *ins;
3633 guint32 size;
3634 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3636 mono_class_init (klass);
3637 size = mono_class_array_element_size (klass);
3639 mult_reg = alloc_preg (cfg);
3640 array_reg = arr->dreg;
3641 index_reg = index->dreg;
3643 #if SIZEOF_REGISTER == 8
3644 /* The array reg is 64 bits but the index reg is only 32 */
3645 index2_reg = alloc_preg (cfg);
3646 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3647 #else
3648 if (index->type == STACK_I8) {
3649 index2_reg = alloc_preg (cfg);
3650 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3651 } else {
3652 index2_reg = index_reg;
3654 #endif
3656 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3658 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3659 if (size == 1 || size == 2 || size == 4 || size == 8) {
3660 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3662 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3663 ins->type = STACK_PTR;
3665 return ins;
3667 #endif
3669 add_reg = alloc_preg (cfg);
3671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3672 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3673 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3674 ins->type = STACK_PTR;
3675 MONO_ADD_INS (cfg->cbb, ins);
3677 return ins;
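/*
 * Illustrative sketch, not part of the original source: the address arithmetic
 * emitted by mini_emit_ldelema_1_ins () above, written as plain C.  The layout
 * below (a length field followed by an inline vector) is an assumption for the
 * sketch and is simpler than the real MonoArray.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t max_length;
	uint8_t vector[1];	/* elements follow the header */
} ExampleArray;

static void*
ldelema_1_example (ExampleArray *arr, uintptr_t index, size_t element_size)
{
	if (index >= arr->max_length)
		return NULL;	/* the JIT instead branches to an IndexOutOfRangeException throw */
	return arr->vector + index * element_size;
}
#endif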
3680 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3681 static MonoInst*
3682 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3684 int bounds_reg = alloc_preg (cfg);
3685 int add_reg = alloc_preg (cfg);
3686 int mult_reg = alloc_preg (cfg);
3687 int mult2_reg = alloc_preg (cfg);
3688 int low1_reg = alloc_preg (cfg);
3689 int low2_reg = alloc_preg (cfg);
3690 int high1_reg = alloc_preg (cfg);
3691 int high2_reg = alloc_preg (cfg);
3692 int realidx1_reg = alloc_preg (cfg);
3693 int realidx2_reg = alloc_preg (cfg);
3694 int sum_reg = alloc_preg (cfg);
3695 int index1, index2;
3696 MonoInst *ins;
3697 guint32 size;
3699 mono_class_init (klass);
3700 size = mono_class_array_element_size (klass);
3702 index1 = index_ins1->dreg;
3703 index2 = index_ins2->dreg;
3705 /* range checking */
3706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3707 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3709 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3710 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3711 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3712 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3713 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3714 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3715 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3718 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3719 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3721 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3722 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3723 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3725 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3726 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3728 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3729 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3731 ins->type = STACK_MP;
3732 ins->klass = klass;
3733 MONO_ADD_INS (cfg->cbb, ins);
3735 return ins;
3737 #endif
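/*
 * Illustrative sketch, not part of the original source: the row-major address
 * computation for a rank-2 array with lower bounds, as emitted by
 * mini_emit_ldelema_2_ins () above.  ExampleBounds and ldelema_2_example are
 * assumptions for the sketch only.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef struct {
	intptr_t length;
	intptr_t lower_bound;
} ExampleBounds;

static void*
ldelema_2_example (uint8_t *vector, ExampleBounds bounds[2],
		   intptr_t i, intptr_t j, size_t element_size)
{
	uintptr_t ri = (uintptr_t)(i - bounds[0].lower_bound);
	uintptr_t rj = (uintptr_t)(j - bounds[1].lower_bound);

	/* unsigned compares catch negative adjusted indexes as well */
	if (ri >= (uintptr_t)bounds[0].length || rj >= (uintptr_t)bounds[1].length)
		return NULL;	/* IndexOutOfRangeException in the emitted code */
	return vector + (ri * (uintptr_t)bounds[1].length + rj) * element_size;
}
#endif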
3739 static MonoInst*
3740 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3742 int rank;
3743 MonoInst *addr;
3744 MonoMethod *addr_method;
3745 int element_size;
3747 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3749 if (rank == 1)
3750 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3752 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3753 /* emit_ldelema_2 depends on OP_LMUL */
3754 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3755 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3757 #endif
3759 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3760 addr_method = mono_marshal_get_array_address (rank, element_size);
3761 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3763 return addr;
3766 static MonoInst*
3767 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3769 MonoInst *ins = NULL;
3771 static MonoClass *runtime_helpers_class = NULL;
3772 if (! runtime_helpers_class)
3773 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3774 "System.Runtime.CompilerServices", "RuntimeHelpers");
3776 if (cmethod->klass == mono_defaults.string_class) {
3777 if (strcmp (cmethod->name, "get_Chars") == 0) {
3778 int dreg = alloc_ireg (cfg);
3779 int index_reg = alloc_preg (cfg);
3780 int mult_reg = alloc_preg (cfg);
3781 int add_reg = alloc_preg (cfg);
3783 #if SIZEOF_REGISTER == 8
3784 /* The array reg is 64 bits but the index reg is only 32 */
3785 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3786 #else
3787 index_reg = args [1]->dreg;
3788 #endif
3789 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3791 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3792 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3793 add_reg = ins->dreg;
3794 /* Avoid a warning */
3795 mult_reg = 0;
3796 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3797 add_reg, 0);
3798 #else
3799 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3800 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3801 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3802 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3803 #endif
3804 type_from_op (ins, NULL, NULL);
3805 return ins;
3806 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3807 int dreg = alloc_ireg (cfg);
3808 /* Decompose later to allow more optimizations */
3809 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3810 ins->type = STACK_I4;
3811 cfg->cbb->has_array_access = TRUE;
3812 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3814 return ins;
3815 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3816 int mult_reg = alloc_preg (cfg);
3817 int add_reg = alloc_preg (cfg);
3819 /* The corlib functions check for oob already. */
3820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3821 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3823 } else
3824 return NULL;
3825 } else if (cmethod->klass == mono_defaults.object_class) {
3827 if (strcmp (cmethod->name, "GetType") == 0) {
3828 int dreg = alloc_preg (cfg);
3829 int vt_reg = alloc_preg (cfg);
3830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3831 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3832 type_from_op (ins, NULL, NULL);
3834 return ins;
3835 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3836 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3837 int dreg = alloc_ireg (cfg);
3838 int t1 = alloc_ireg (cfg);
3840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3841 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3842 ins->type = STACK_I4;
3844 return ins;
3845 #endif
3846 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3847 MONO_INST_NEW (cfg, ins, OP_NOP);
3848 MONO_ADD_INS (cfg->cbb, ins);
3849 return ins;
3850 } else
3851 return NULL;
3852 } else if (cmethod->klass == mono_defaults.array_class) {
3853 if (cmethod->name [0] != 'g')
3854 return NULL;
3856 if (strcmp (cmethod->name, "get_Rank") == 0) {
3857 int dreg = alloc_ireg (cfg);
3858 int vtable_reg = alloc_preg (cfg);
3859 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3860 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3861 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3862 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3863 type_from_op (ins, NULL, NULL);
3865 return ins;
3866 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3867 int dreg = alloc_ireg (cfg);
3869 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3870 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3871 type_from_op (ins, NULL, NULL);
3873 return ins;
3874 } else
3875 return NULL;
3876 } else if (cmethod->klass == runtime_helpers_class) {
3878 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3879 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3880 return ins;
3881 } else
3882 return NULL;
3883 } else if (cmethod->klass == mono_defaults.thread_class) {
3884 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3885 ins->dreg = alloc_preg (cfg);
3886 ins->type = STACK_OBJ;
3887 MONO_ADD_INS (cfg->cbb, ins);
3888 return ins;
3889 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3890 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3891 MONO_ADD_INS (cfg->cbb, ins);
3892 return ins;
3893 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3894 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3895 MONO_ADD_INS (cfg->cbb, ins);
3896 return ins;
3898 } else if (cmethod->klass == mono_defaults.monitor_class) {
3899 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3900 if (strcmp (cmethod->name, "Enter") == 0) {
3901 MonoCallInst *call;
3903 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3904 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3905 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3906 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3908 return (MonoInst*)call;
3909 } else if (strcmp (cmethod->name, "Exit") == 0) {
3910 MonoCallInst *call;
3912 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3913 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3914 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3915 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3917 return (MonoInst*)call;
3919 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3920 MonoMethod *fast_method = NULL;
3922 /* Avoid infinite recursion */
3923 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3924 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3925 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3926 return NULL;
3928 if (strcmp (cmethod->name, "Enter") == 0 ||
3929 strcmp (cmethod->name, "Exit") == 0)
3930 fast_method = mono_monitor_get_fast_path (cmethod);
3931 if (!fast_method)
3932 return NULL;
3934 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3935 #endif
3936 } else if (mini_class_is_system_array (cmethod->klass) &&
3937 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3938 MonoInst *addr, *store, *load;
3939 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3941 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3944 return store;
3945 } else if (cmethod->klass->image == mono_defaults.corlib &&
3946 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3947 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3948 ins = NULL;
3950 #if SIZEOF_REGISTER == 8
3951 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3952 /* 64 bit reads are already atomic */
3953 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3954 ins->dreg = mono_alloc_preg (cfg);
3955 ins->inst_basereg = args [0]->dreg;
3956 ins->inst_offset = 0;
3957 MONO_ADD_INS (cfg->cbb, ins);
3959 #endif
3961 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3962 if (strcmp (cmethod->name, "Increment") == 0) {
3963 MonoInst *ins_iconst;
3964 guint32 opcode = 0;
3966 if (fsig->params [0]->type == MONO_TYPE_I4)
3967 opcode = OP_ATOMIC_ADD_NEW_I4;
3968 #if SIZEOF_REGISTER == 8
3969 else if (fsig->params [0]->type == MONO_TYPE_I8)
3970 opcode = OP_ATOMIC_ADD_NEW_I8;
3971 #endif
3972 if (opcode) {
3973 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3974 ins_iconst->inst_c0 = 1;
3975 ins_iconst->dreg = mono_alloc_ireg (cfg);
3976 MONO_ADD_INS (cfg->cbb, ins_iconst);
3978 MONO_INST_NEW (cfg, ins, opcode);
3979 ins->dreg = mono_alloc_ireg (cfg);
3980 ins->inst_basereg = args [0]->dreg;
3981 ins->inst_offset = 0;
3982 ins->sreg2 = ins_iconst->dreg;
3983 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3984 MONO_ADD_INS (cfg->cbb, ins);
3986 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3987 MonoInst *ins_iconst;
3988 guint32 opcode = 0;
3990 if (fsig->params [0]->type == MONO_TYPE_I4)
3991 opcode = OP_ATOMIC_ADD_NEW_I4;
3992 #if SIZEOF_REGISTER == 8
3993 else if (fsig->params [0]->type == MONO_TYPE_I8)
3994 opcode = OP_ATOMIC_ADD_NEW_I8;
3995 #endif
3996 if (opcode) {
3997 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3998 ins_iconst->inst_c0 = -1;
3999 ins_iconst->dreg = mono_alloc_ireg (cfg);
4000 MONO_ADD_INS (cfg->cbb, ins_iconst);
4002 MONO_INST_NEW (cfg, ins, opcode);
4003 ins->dreg = mono_alloc_ireg (cfg);
4004 ins->inst_basereg = args [0]->dreg;
4005 ins->inst_offset = 0;
4006 ins->sreg2 = ins_iconst->dreg;
4007 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4008 MONO_ADD_INS (cfg->cbb, ins);
4010 } else if (strcmp (cmethod->name, "Add") == 0) {
4011 guint32 opcode = 0;
4013 if (fsig->params [0]->type == MONO_TYPE_I4)
4014 opcode = OP_ATOMIC_ADD_NEW_I4;
4015 #if SIZEOF_REGISTER == 8
4016 else if (fsig->params [0]->type == MONO_TYPE_I8)
4017 opcode = OP_ATOMIC_ADD_NEW_I8;
4018 #endif
4020 if (opcode) {
4021 MONO_INST_NEW (cfg, ins, opcode);
4022 ins->dreg = mono_alloc_ireg (cfg);
4023 ins->inst_basereg = args [0]->dreg;
4024 ins->inst_offset = 0;
4025 ins->sreg2 = args [1]->dreg;
4026 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4027 MONO_ADD_INS (cfg->cbb, ins);
4030 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4032 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4033 if (strcmp (cmethod->name, "Exchange") == 0) {
4034 guint32 opcode;
4035 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4037 if (fsig->params [0]->type == MONO_TYPE_I4)
4038 opcode = OP_ATOMIC_EXCHANGE_I4;
4039 #if SIZEOF_REGISTER == 8
4040 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4041 (fsig->params [0]->type == MONO_TYPE_I))
4042 opcode = OP_ATOMIC_EXCHANGE_I8;
4043 #else
4044 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4045 opcode = OP_ATOMIC_EXCHANGE_I4;
4046 #endif
4047 else
4048 return NULL;
4050 MONO_INST_NEW (cfg, ins, opcode);
4051 ins->dreg = mono_alloc_ireg (cfg);
4052 ins->inst_basereg = args [0]->dreg;
4053 ins->inst_offset = 0;
4054 ins->sreg2 = args [1]->dreg;
4055 MONO_ADD_INS (cfg->cbb, ins);
4057 switch (fsig->params [0]->type) {
4058 case MONO_TYPE_I4:
4059 ins->type = STACK_I4;
4060 break;
4061 case MONO_TYPE_I8:
4062 case MONO_TYPE_I:
4063 ins->type = STACK_I8;
4064 break;
4065 case MONO_TYPE_OBJECT:
4066 ins->type = STACK_OBJ;
4067 break;
4068 default:
4069 g_assert_not_reached ();
4072 #if HAVE_WRITE_BARRIERS
4073 if (is_ref) {
4074 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4075 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4077 #endif
4079 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4081 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4082 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4083 int size = 0;
4084 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4085 if (fsig->params [1]->type == MONO_TYPE_I4)
4086 size = 4;
4087 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4088 size = sizeof (gpointer);
4090 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4090 size = 8;
4091 if (size == 4) {
4092 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4093 ins->dreg = alloc_ireg (cfg);
4094 ins->sreg1 = args [0]->dreg;
4095 ins->sreg2 = args [1]->dreg;
4096 ins->sreg3 = args [2]->dreg;
4097 ins->type = STACK_I4;
4098 MONO_ADD_INS (cfg->cbb, ins);
4099 } else if (size == 8) {
4100 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4101 ins->dreg = alloc_ireg (cfg);
4102 ins->sreg1 = args [0]->dreg;
4103 ins->sreg2 = args [1]->dreg;
4104 ins->sreg3 = args [2]->dreg;
4105 ins->type = STACK_I8;
4106 MONO_ADD_INS (cfg->cbb, ins);
4107 } else {
4108 /* g_assert_not_reached (); */
4110 #if HAVE_WRITE_BARRIERS
4111 if (is_ref) {
4112 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4113 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4115 #endif
4117 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4119 if (ins)
4120 return ins;
4121 } else if (cmethod->klass->image == mono_defaults.corlib) {
4122 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4123 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4124 MONO_INST_NEW (cfg, ins, OP_BREAK);
4125 MONO_ADD_INS (cfg->cbb, ins);
4126 return ins;
4128 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4129 && strcmp (cmethod->klass->name, "Environment") == 0) {
4130 #ifdef PLATFORM_WIN32
4131 EMIT_NEW_ICONST (cfg, ins, 1);
4132 #else
4133 EMIT_NEW_ICONST (cfg, ins, 0);
4134 #endif
4135 return ins;
4137 } else if (cmethod->klass == mono_defaults.math_class) {
4139 * There is a general branchless code sequence for Min/Max, but it does not work for
4140 * all inputs:
4141 * http://everything2.com/?node_id=1051618
4145 #ifdef MONO_ARCH_SIMD_INTRINSICS
4146 if (cfg->opt & MONO_OPT_SIMD) {
4147 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4148 if (ins)
4149 return ins;
4151 #endif
4153 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
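/*
 * Illustrative sketch, not part of the original source: what the String.get_Chars
 * intrinsic above boils down to, written as plain C.  ExampleString is an
 * assumption for the sketch; the real code bounds-checks against
 * MonoString->length and loads a 16-bit value from MonoString->chars.
 */
#if 0
#include <stdint.h>

typedef struct {
	int32_t length;
	uint16_t chars[1];	/* UTF-16 code units follow the header */
} ExampleString;

static uint16_t
string_get_chars_example (ExampleString *s, int32_t index, int *oob)
{
	/* bounds check, then a 16-bit load at chars + index * 2 */
	if ((uint32_t)index >= (uint32_t)s->length) {
		*oob = 1;	/* the JIT branches to an IndexOutOfRangeException throw */
		return 0;
	}
	*oob = 0;
	return s->chars[index];
}
#endif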
4157 * This entry point could be used later for arbitrary method
4158 * redirection.
4160 inline static MonoInst*
4161 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4162 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4164 if (method->klass == mono_defaults.string_class) {
4165 /* managed string allocation support */
4166 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4167 MonoInst *iargs [2];
4168 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4169 MonoMethod *managed_alloc = NULL;
4171 g_assert (vtable); /* Should not fail since it is System.String */
4172 #ifndef MONO_CROSS_COMPILE
4173 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4174 #endif
4175 if (!managed_alloc)
4176 return NULL;
4177 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4178 iargs [1] = args [0];
4179 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4182 return NULL;
4185 static void
4186 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4188 MonoInst *store, *temp;
4189 int i;
4191 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4192 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4195 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4196 * would be different than the MonoInst's used to represent arguments, and
4197 * the ldelema implementation can't deal with that.
4198 * Solution: When ldelema is used on an inline argument, create a var for
4199 * it, emit ldelema on that var, and emit the saving code below in
4200 * inline_method () if needed.
4202 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4203 cfg->args [i] = temp;
4204 /* This uses cfg->args [i] which is set by the preceding line */
4205 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4206 store->cil_code = sp [0]->cil_code;
4207 sp++;
4211 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4212 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4214 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4215 static gboolean
4216 check_inline_called_method_name_limit (MonoMethod *called_method)
4218 int strncmp_result;
4219 static char *limit = NULL;
4221 if (limit == NULL) {
4222 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4224 if (limit_string != NULL)
4225 limit = limit_string;
4226 else
4227 limit = (char *) "";
4230 if (limit [0] != '\0') {
4231 char *called_method_name = mono_method_full_name (called_method, TRUE);
4233 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4234 g_free (called_method_name);
4236 //return (strncmp_result <= 0);
4237 return (strncmp_result == 0);
4238 } else {
4239 return TRUE;
4242 #endif
4244 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4245 static gboolean
4246 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4248 int strncmp_result;
4249 static char *limit = NULL;
4251 if (limit == NULL) {
4252 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4253 if (limit_string != NULL) {
4254 limit = limit_string;
4255 } else {
4256 limit = (char *) "";
4260 if (limit [0] != '\0') {
4261 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4263 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4264 g_free (caller_method_name);
4266 //return (strncmp_result <= 0);
4267 return (strncmp_result == 0);
4268 } else {
4269 return TRUE;
4272 #endif
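/*
 * Illustrative sketch, not part of the original source: the name-prefix filter
 * shared by the two checks above, reduced to standard C.  An unset or empty
 * variable means "no restriction"; name_matches_limit_example is hypothetical.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static int
name_matches_limit_example (const char *full_name, const char *env_var)
{
	const char *limit = getenv (env_var);

	if (!limit || limit[0] == '\0')
		return 1;
	/* e.g. MONO_INLINE_CALLED_METHOD_NAME_LIMIT="System.String" */
	return strncmp (full_name, limit, strlen (limit)) == 0;
}
#endif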
4274 static int
4275 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4276 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4278 MonoInst *ins, *rvar = NULL;
4279 MonoMethodHeader *cheader;
4280 MonoBasicBlock *ebblock, *sbblock;
4281 int i, costs;
4282 MonoMethod *prev_inlined_method;
4283 MonoInst **prev_locals, **prev_args;
4284 MonoType **prev_arg_types;
4285 guint prev_real_offset;
4286 GHashTable *prev_cbb_hash;
4287 MonoBasicBlock **prev_cil_offset_to_bb;
4288 MonoBasicBlock *prev_cbb;
4289 unsigned char* prev_cil_start;
4290 guint32 prev_cil_offset_to_bb_len;
4291 MonoMethod *prev_current_method;
4292 MonoGenericContext *prev_generic_context;
4293 gboolean ret_var_set, prev_ret_var_set;
4295 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4297 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4298 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4299 return 0;
4300 #endif
4301 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4302 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4303 return 0;
4304 #endif
4306 if (cfg->verbose_level > 2)
4307 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4309 if (!cmethod->inline_info) {
4310 mono_jit_stats.inlineable_methods++;
4311 cmethod->inline_info = 1;
4313 /* allocate space to store the return value */
4314 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4315 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4318 /* allocate local variables */
4319 cheader = mono_method_get_header (cmethod);
4320 prev_locals = cfg->locals;
4321 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4322 for (i = 0; i < cheader->num_locals; ++i)
4323 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4325 /* allocate start and end blocks */
4326 /* This is needed so that if the inline is aborted, we can clean up */
4327 NEW_BBLOCK (cfg, sbblock);
4328 sbblock->real_offset = real_offset;
4330 NEW_BBLOCK (cfg, ebblock);
4331 ebblock->block_num = cfg->num_bblocks++;
4332 ebblock->real_offset = real_offset;
4334 prev_args = cfg->args;
4335 prev_arg_types = cfg->arg_types;
4336 prev_inlined_method = cfg->inlined_method;
4337 cfg->inlined_method = cmethod;
4338 cfg->ret_var_set = FALSE;
4339 cfg->inline_depth ++;
4340 prev_real_offset = cfg->real_offset;
4341 prev_cbb_hash = cfg->cbb_hash;
4342 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4343 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4344 prev_cil_start = cfg->cil_start;
4345 prev_cbb = cfg->cbb;
4346 prev_current_method = cfg->current_method;
4347 prev_generic_context = cfg->generic_context;
4348 prev_ret_var_set = cfg->ret_var_set;
4350 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4352 ret_var_set = cfg->ret_var_set;
4354 cfg->inlined_method = prev_inlined_method;
4355 cfg->real_offset = prev_real_offset;
4356 cfg->cbb_hash = prev_cbb_hash;
4357 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4358 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4359 cfg->cil_start = prev_cil_start;
4360 cfg->locals = prev_locals;
4361 cfg->args = prev_args;
4362 cfg->arg_types = prev_arg_types;
4363 cfg->current_method = prev_current_method;
4364 cfg->generic_context = prev_generic_context;
4365 cfg->ret_var_set = prev_ret_var_set;
4366 cfg->inline_depth --;
4368 if ((costs >= 0 && costs < 60) || inline_allways) {
4369 if (cfg->verbose_level > 2)
4370 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4372 mono_jit_stats.inlined_methods++;
4374 /* always add some code to avoid block split failures */
4375 MONO_INST_NEW (cfg, ins, OP_NOP);
4376 MONO_ADD_INS (prev_cbb, ins);
4378 prev_cbb->next_bb = sbblock;
4379 link_bblock (cfg, prev_cbb, sbblock);
4382 * Get rid of the begin and end bblocks if possible to aid local
4383 * optimizations.
4385 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4387 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4388 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4390 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4391 MonoBasicBlock *prev = ebblock->in_bb [0];
4392 mono_merge_basic_blocks (cfg, prev, ebblock);
4393 cfg->cbb = prev;
4394 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4395 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4396 cfg->cbb = prev_cbb;
4398 } else {
4399 cfg->cbb = ebblock;
4402 if (rvar) {
4404 * If the inlined method contains only a throw, then the ret var is not
4405 * set, so set it to a dummy value.
4407 if (!ret_var_set) {
4408 static double r8_0 = 0.0;
4410 switch (rvar->type) {
4411 case STACK_I4:
4412 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4413 break;
4414 case STACK_I8:
4415 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4416 break;
4417 case STACK_PTR:
4418 case STACK_MP:
4419 case STACK_OBJ:
4420 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4421 break;
4422 case STACK_R8:
4423 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4424 ins->type = STACK_R8;
4425 ins->inst_p0 = (void*)&r8_0;
4426 ins->dreg = rvar->dreg;
4427 MONO_ADD_INS (cfg->cbb, ins);
4428 break;
4429 case STACK_VTYPE:
4430 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4431 break;
4432 default:
4433 g_assert_not_reached ();
4437 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4438 *sp++ = ins;
4440 return costs + 1;
4441 } else {
4442 if (cfg->verbose_level > 2)
4443 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4444 cfg->exception_type = MONO_EXCEPTION_NONE;
4445 mono_loader_clear_error ();
4447 /* This gets rid of the newly added bblocks */
4448 cfg->cbb = prev_cbb;
4450 return 0;
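/*
 * Illustrative sketch, not part of the original source: the save/recurse/restore
 * pattern inline_method () uses for the per-method fields of MonoCompile,
 * reduced to a toy context.  ExampleCtx, inline_into_example and emit_example
 * are hypothetical names.
 */
#if 0
typedef struct {
	void *current_method;
	void *locals;
	int depth;
} ExampleCtx;

static int
inline_into_example (ExampleCtx *ctx, void *callee, int (*emit_example) (ExampleCtx*, void*))
{
	/* stash the caller's state ... */
	ExampleCtx saved = *ctx;
	int cost;

	ctx->current_method = callee;
	ctx->locals = NULL;		/* fresh locals for the callee */
	ctx->depth++;

	cost = emit_example (ctx, callee);	/* recurse into IR generation */

	/* ... and restore it whether or not the inline succeeded */
	ctx->locals = saved.locals;
	ctx->current_method = saved.current_method;
	ctx->depth = saved.depth;
	return cost;
}
#endif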
4454 * Some of these comments may well be out-of-date.
4455 * Design decisions: we do a single pass over the IL code (and we do bblock
4456 * splitting/merging in the few cases when it's required: a back jump to an IL
4457 * address that was not already seen as bblock starting point).
4458 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4459 * Complex operations are decomposed into simpler ones right away. We need to let the
4460 * arch-specific code peek and poke inside this process somehow (except when the
4461 * optimizations can take advantage of the full semantic info of coarse opcodes).
4462 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4463 * MonoInst->opcode initially is the IL opcode or some simplification of that
4464 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4465 * opcode with value bigger than OP_LAST.
4466 * At this point the IR can be handed over to an interpreter, a dumb code generator
4467 * or to the optimizing code generator that will translate it to SSA form.
4469 * Profiling directed optimizations.
4470 * We may compile by default with few or no optimizations and instrument the code,
4471 * or the user may indicate which methods to optimize the most, either in a config file
4472 * or through repeated runs where the compiler applies the optimizations offline to
4473 * each method and then decides whether it was worth it.
4476 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4477 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4478 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4479 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4480 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4481 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4482 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4483 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4485 /* offset from br.s -> br like opcodes */
4486 #define BIG_BRANCH_OFFSET 13
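/*
 * Illustrative sketch, not part of the original source: why 13 works.  In the
 * ECMA-335 opcode map the short branches (br.s = 0x2b ... blt.un.s = 0x37) are
 * laid out in the same order as the long forms (br = 0x38 ... blt.un = 0x44),
 * so adding BIG_BRANCH_OFFSET turns one into the other.
 */
#if 0
static int
widen_branch_opcode_example (int il_opcode)
{
	if (il_opcode >= 0x2b /* br.s */ && il_opcode <= 0x37 /* blt.un.s */)
		return il_opcode + 13;	/* e.g. 0x2b + 13 == 0x38 (br) */
	return il_opcode;
}
#endif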
4488 static gboolean
4489 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4491 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4493 return b == NULL || b == bb;
4496 static int
4497 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4499 unsigned char *ip = start;
4500 unsigned char *target;
4501 int i;
4502 guint cli_addr;
4503 MonoBasicBlock *bblock;
4504 const MonoOpcode *opcode;
4506 while (ip < end) {
4507 cli_addr = ip - start;
4508 i = mono_opcode_value ((const guint8 **)&ip, end);
4509 if (i < 0)
4510 UNVERIFIED;
4511 opcode = &mono_opcodes [i];
4512 switch (opcode->argument) {
4513 case MonoInlineNone:
4514 ip++;
4515 break;
4516 case MonoInlineString:
4517 case MonoInlineType:
4518 case MonoInlineField:
4519 case MonoInlineMethod:
4520 case MonoInlineTok:
4521 case MonoInlineSig:
4522 case MonoShortInlineR:
4523 case MonoInlineI:
4524 ip += 5;
4525 break;
4526 case MonoInlineVar:
4527 ip += 3;
4528 break;
4529 case MonoShortInlineVar:
4530 case MonoShortInlineI:
4531 ip += 2;
4532 break;
4533 case MonoShortInlineBrTarget:
4534 target = start + cli_addr + 2 + (signed char)ip [1];
4535 GET_BBLOCK (cfg, bblock, target);
4536 ip += 2;
4537 if (ip < end)
4538 GET_BBLOCK (cfg, bblock, ip);
4539 break;
4540 case MonoInlineBrTarget:
4541 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4542 GET_BBLOCK (cfg, bblock, target);
4543 ip += 5;
4544 if (ip < end)
4545 GET_BBLOCK (cfg, bblock, ip);
4546 break;
4547 case MonoInlineSwitch: {
4548 guint32 n = read32 (ip + 1);
4549 guint32 j;
4550 ip += 5;
4551 cli_addr += 5 + 4 * n;
4552 target = start + cli_addr;
4553 GET_BBLOCK (cfg, bblock, target);
4555 for (j = 0; j < n; ++j) {
4556 target = start + cli_addr + (gint32)read32 (ip);
4557 GET_BBLOCK (cfg, bblock, target);
4558 ip += 4;
4560 break;
4562 case MonoInlineR:
4563 case MonoInlineI8:
4564 ip += 9;
4565 break;
4566 default:
4567 g_assert_not_reached ();
4570 if (i == CEE_THROW) {
4571 unsigned char *bb_start = ip - 1;
4573 /* Find the start of the bblock containing the throw */
4574 bblock = NULL;
4575 while ((bb_start >= start) && !bblock) {
4576 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4577 bb_start --;
4579 if (bblock)
4580 bblock->out_of_line = 1;
4583 return 0;
4584 unverified:
4585 *pos = ip;
4586 return 1;
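/*
 * Illustrative sketch, not part of the original source: how branch targets are
 * resolved in get_basic_blocks () above.  The relative displacement is measured
 * from the end of the branch instruction, so the instruction size is added
 * before the offset.
 */
#if 0
#include <stdint.h>

static const unsigned char*
short_branch_target_example (const unsigned char *start, uint32_t cli_addr,
			     const unsigned char *ip)
{
	/* ip[0] is the br.s-style opcode, ip[1] the signed 8-bit displacement */
	return start + cli_addr + 2 + (int8_t)ip[1];
}

static const unsigned char*
long_branch_target_example (const unsigned char *start, uint32_t cli_addr, int32_t disp32)
{
	/* 1 opcode byte + 4 displacement bytes */
	return start + cli_addr + 5 + disp32;
}
#endif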
4589 static inline MonoMethod *
4590 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4592 MonoMethod *method;
4594 if (m->wrapper_type != MONO_WRAPPER_NONE)
4595 return mono_method_get_wrapper_data (m, token);
4597 method = mono_get_method_full (m->klass->image, token, klass, context);
4599 return method;
4602 static inline MonoMethod *
4603 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4605 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4607 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4608 return NULL;
4610 return method;
4613 static inline MonoClass*
4614 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4616 MonoClass *klass;
4618 if (method->wrapper_type != MONO_WRAPPER_NONE)
4619 klass = mono_method_get_wrapper_data (method, token);
4620 else
4621 klass = mono_class_get_full (method->klass->image, token, context);
4622 if (klass)
4623 mono_class_init (klass);
4624 return klass;
4628 * Returns TRUE if the JIT should abort inlining because "callee"
4629 * is influenced by security attributes.
4631 static
4632 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4634 guint32 result;
4636 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4637 return TRUE;
4640 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4641 if (result == MONO_JIT_SECURITY_OK)
4642 return FALSE;
4644 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4645 /* Generate code to throw a SecurityException before the actual call/link */
4646 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4647 MonoInst *args [2];
4649 NEW_ICONST (cfg, args [0], 4);
4650 NEW_METHODCONST (cfg, args [1], caller);
4651 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4652 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4653 /* don't hide previous results */
4654 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4655 cfg->exception_data = result;
4656 return TRUE;
4659 return FALSE;
4662 static MonoMethod*
4663 throw_exception (void)
4665 static MonoMethod *method = NULL;
4667 if (!method) {
4668 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4669 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4671 g_assert (method);
4672 return method;
4675 static void
4676 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4678 MonoMethod *thrower = throw_exception ();
4679 MonoInst *args [1];
4681 EMIT_NEW_PCONST (cfg, args [0], ex);
4682 mono_emit_method_call (cfg, thrower, args, NULL);
4686 * Return the original method if a wrapper is specified. We can only access
4687 * the custom attributes from the original method.
4689 static MonoMethod*
4690 get_original_method (MonoMethod *method)
4692 if (method->wrapper_type == MONO_WRAPPER_NONE)
4693 return method;
4695 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4696 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4697 return NULL;
4699 /* in other cases we need to find the original method */
4700 return mono_marshal_method_from_wrapper (method);
4703 static void
4704 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4705 MonoBasicBlock *bblock, unsigned char *ip)
4707 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4708 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4709 return;
4711 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4712 caller = get_original_method (caller);
4713 if (!caller)
4714 return;
4716 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4717 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4718 emit_throw_exception (cfg, mono_get_exception_field_access ());
4721 static void
4722 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4723 MonoBasicBlock *bblock, unsigned char *ip)
4725 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4726 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4727 return;
4729 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4730 caller = get_original_method (caller);
4731 if (!caller)
4732 return;
4734 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4735 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4736 emit_throw_exception (cfg, mono_get_exception_method_access ());
4740 * Check that the IL instructions at ip are the array initialization
4741 * sequence and return the pointer to the data and the size.
4743 static const char*
4744 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4747 * newarr[System.Int32]
4748 * dup
4749 * ldtoken field valuetype ...
4750 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4752 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4753 guint32 token = read32 (ip + 7);
4754 guint32 field_token = read32 (ip + 2);
4755 guint32 field_index = field_token & 0xffffff;
4756 guint32 rva;
4757 const char *data_ptr;
4758 int size = 0;
4759 MonoMethod *cmethod;
4760 MonoClass *dummy_class;
4761 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4762 int dummy_align;
4764 if (!field)
4765 return NULL;
4767 *out_field_token = field_token;
4769 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4770 if (!cmethod)
4771 return NULL;
4772 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4773 return NULL;
4774 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4775 case MONO_TYPE_BOOLEAN:
4776 case MONO_TYPE_I1:
4777 case MONO_TYPE_U1:
4778 size = 1; break;
4779 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4780 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4781 case MONO_TYPE_CHAR:
4782 case MONO_TYPE_I2:
4783 case MONO_TYPE_U2:
4784 size = 2; break;
4785 case MONO_TYPE_I4:
4786 case MONO_TYPE_U4:
4787 case MONO_TYPE_R4:
4788 size = 4; break;
4789 case MONO_TYPE_R8:
4790 #ifdef ARM_FPU_FPA
4791 return NULL; /* stupid ARM FP swapped format */
4792 #endif
4793 case MONO_TYPE_I8:
4794 case MONO_TYPE_U8:
4795 size = 8; break;
4796 #endif
4797 default:
4798 return NULL;
4800 size *= len;
4801 if (size > mono_type_size (field->type, &dummy_align))
4802 return NULL;
4803 *out_size = size;
4804 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4805 if (!method->klass->image->dynamic) {
4806 field_index = read32 (ip + 2) & 0xffffff;
4807 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4808 data_ptr = mono_image_rva_map (method->klass->image, rva);
4809 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4810 /* for aot code we do the lookup on load */
4811 if (aot && data_ptr)
4812 return GUINT_TO_POINTER (rva);
4813 } else {
4814 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4815 g_assert (!aot);
4816 data_ptr = mono_field_get_data (field);
4818 return data_ptr;
4820 return NULL;
4823 static void
4824 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4826 char *method_fname = mono_method_full_name (method, TRUE);
4827 char *method_code;
4829 if (mono_method_get_header (method)->code_size == 0)
4830 method_code = g_strdup ("method body is empty.");
4831 else
4832 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4833 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4834 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4835 g_free (method_fname);
4836 g_free (method_code);
4839 static void
4840 set_exception_object (MonoCompile *cfg, MonoException *exception)
4842 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4843 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4844 cfg->exception_ptr = exception;
4847 static gboolean
4848 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4850 MonoType *type;
4852 if (cfg->generic_sharing_context)
4853 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4854 else
4855 type = &klass->byval_arg;
4856 return MONO_TYPE_IS_REFERENCE (type);
4860 * mono_decompose_array_access_opts:
4862 * Decompose array access opcodes.
4863 * This should be in decompose.c, but it emits calls so it has to stay here until
4864 * the old JIT is gone.
4866 void
4867 mono_decompose_array_access_opts (MonoCompile *cfg)
4869 MonoBasicBlock *bb, *first_bb;
4872 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4873 * can be executed anytime. It should be run before decompose_long
4877 * Create a dummy bblock and emit code into it so we can use the normal
4878 * code generation macros.
4880 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4881 first_bb = cfg->cbb;
4883 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4884 MonoInst *ins;
4885 MonoInst *prev = NULL;
4886 MonoInst *dest;
4887 MonoInst *iargs [3];
4888 gboolean restart;
4890 if (!bb->has_array_access)
4891 continue;
4893 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4895 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4896 restart = TRUE;
4898 while (restart) {
4899 restart = FALSE;
4901 for (ins = bb->code; ins; ins = ins->next) {
4902 switch (ins->opcode) {
4903 case OP_LDLEN:
4904 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4905 G_STRUCT_OFFSET (MonoArray, max_length));
4906 MONO_ADD_INS (cfg->cbb, dest);
4907 break;
4908 case OP_BOUNDS_CHECK:
4909 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
4910 break;
4911 case OP_NEWARR:
4912 if (cfg->opt & MONO_OPT_SHARED) {
4913 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4914 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4915 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4916 iargs [2]->dreg = ins->sreg1;
4918 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4919 dest->dreg = ins->dreg;
4920 } else {
4921 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4923 g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
4924 NEW_VTABLECONST (cfg, iargs [0], vtable);
4925 MONO_ADD_INS (cfg->cbb, iargs [0]);
4926 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4927 iargs [1]->dreg = ins->sreg1;
4929 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4930 dest->dreg = ins->dreg;
4932 break;
4933 case OP_STRLEN:
4934 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4935 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4936 MONO_ADD_INS (cfg->cbb, dest);
4937 break;
4938 default:
4939 break;
4942 g_assert (cfg->cbb == first_bb);
4944 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4945 /* Replace the original instruction with the new code sequence */
4947 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4948 first_bb->code = first_bb->last_ins = NULL;
4949 first_bb->in_count = first_bb->out_count = 0;
4950 cfg->cbb = first_bb;
4952 else
4953 prev = ins;
4957 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4961 typedef union {
4962 guint32 vali [2];
4963 gint64 vall;
4964 double vald;
4965 } DVal;
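/*
 * Illustrative sketch, not part of the original source: how a DVal-style union
 * lets the soft-float pass move a double around as one 64-bit (or two 32-bit)
 * integer value(s) without using any FP registers.
 */
#if 0
static gint64
double_to_bits_example (double d)
{
	DVal v;

	v.vald = d;
	/* v.vali [0] and v.vali [1] hold the two halves on 32-bit targets */
	return v.vall;
}
#endif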
4967 #ifdef MONO_ARCH_SOFT_FLOAT
4970 * mono_decompose_soft_float:
4972 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4973 * similar to long support on 32 bit platforms. 32 bit float values require special
4974 * handling when used as locals, arguments, and in calls.
4975 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4977 void
4978 mono_decompose_soft_float (MonoCompile *cfg)
4980 MonoBasicBlock *bb, *first_bb;
4983 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4987 * Create a dummy bblock and emit code into it so we can use the normal
4988 * code generation macros.
4990 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4991 first_bb = cfg->cbb;
4993 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4994 MonoInst *ins;
4995 MonoInst *prev = NULL;
4996 gboolean restart;
4998 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
5000 cfg->cbb->code = cfg->cbb->last_ins = NULL;
5001 restart = TRUE;
5003 while (restart) {
5004 restart = FALSE;
5006 for (ins = bb->code; ins; ins = ins->next) {
5007 const char *spec = INS_INFO (ins->opcode);
5009 /* Most fp operations are handled automatically by opcode emulation */
5011 switch (ins->opcode) {
5012 case OP_R8CONST: {
5013 DVal d;
5014 d.vald = *(double*)ins->inst_p0;
5015 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
5016 break;
5018 case OP_R4CONST: {
5019 DVal d;
5020 /* We load the r8 value */
5021 d.vald = *(float*)ins->inst_p0;
5022 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
5023 break;
5025 case OP_FMOVE:
5026 ins->opcode = OP_LMOVE;
5027 break;
5028 case OP_FGETLOW32:
5029 ins->opcode = OP_MOVE;
5030 ins->sreg1 = ins->sreg1 + 1;
5031 break;
5032 case OP_FGETHIGH32:
5033 ins->opcode = OP_MOVE;
5034 ins->sreg1 = ins->sreg1 + 2;
5035 break;
5036 case OP_SETFRET: {
5037 int reg = ins->sreg1;
5039 ins->opcode = OP_SETLRET;
5040 ins->dreg = -1;
5041 ins->sreg1 = reg + 1;
5042 ins->sreg2 = reg + 2;
5043 break;
5045 case OP_LOADR8_MEMBASE:
5046 ins->opcode = OP_LOADI8_MEMBASE;
5047 break;
5048 case OP_STORER8_MEMBASE_REG:
5049 ins->opcode = OP_STOREI8_MEMBASE_REG;
5050 break;
5051 case OP_STORER4_MEMBASE_REG: {
5052 MonoInst *iargs [2];
5053 int addr_reg;
5055 /* Arg 1 is the double value */
5056 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5057 iargs [0]->dreg = ins->sreg1;
5059 /* Arg 2 is the address to store to */
5060 addr_reg = mono_alloc_preg (cfg);
5061 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5062 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
5063 restart = TRUE;
5064 break;
5066 case OP_LOADR4_MEMBASE: {
5067 MonoInst *iargs [1];
5068 MonoInst *conv;
5069 int addr_reg;
5071 addr_reg = mono_alloc_preg (cfg);
5072 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5073 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5074 conv->dreg = ins->dreg;
5075 break;
5077 case OP_FCALL:
5078 case OP_FCALL_REG:
5079 case OP_FCALL_MEMBASE: {
5080 MonoCallInst *call = (MonoCallInst*)ins;
5081 if (call->signature->ret->type == MONO_TYPE_R4) {
5082 MonoCallInst *call2;
5083 MonoInst *iargs [1];
5084 MonoInst *conv;
5086 /* Convert the call into a call returning an int */
5087 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5088 memcpy (call2, call, sizeof (MonoCallInst));
5089 switch (ins->opcode) {
5090 case OP_FCALL:
5091 call2->inst.opcode = OP_CALL;
5092 break;
5093 case OP_FCALL_REG:
5094 call2->inst.opcode = OP_CALL_REG;
5095 break;
5096 case OP_FCALL_MEMBASE:
5097 call2->inst.opcode = OP_CALL_MEMBASE;
5098 break;
5099 default:
5100 g_assert_not_reached ();
5102 call2->inst.dreg = mono_alloc_ireg (cfg);
5103 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5105 /* FIXME: Optimize this */
5107 /* Emit an r4->r8 conversion */
5108 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5109 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5110 conv->dreg = ins->dreg;
5111 } else {
5112 switch (ins->opcode) {
5113 case OP_FCALL:
5114 ins->opcode = OP_LCALL;
5115 break;
5116 case OP_FCALL_REG:
5117 ins->opcode = OP_LCALL_REG;
5118 break;
5119 case OP_FCALL_MEMBASE:
5120 ins->opcode = OP_LCALL_MEMBASE;
5121 break;
5122 default:
5123 g_assert_not_reached ();
5126 break;
5128 case OP_FCOMPARE: {
5129 MonoJitICallInfo *info;
5130 MonoInst *iargs [2];
5131 MonoInst *call, *cmp, *br;
5133 /* Convert fcompare+fbcc to icall+icompare+beq */
5135 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5136 g_assert (info);
5138 /* Create dummy MonoInst's for the arguments */
5139 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5140 iargs [0]->dreg = ins->sreg1;
5141 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5142 iargs [1]->dreg = ins->sreg2;
5144 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5146 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5147 cmp->sreg1 = call->dreg;
5148 cmp->inst_imm = 0;
5149 MONO_ADD_INS (cfg->cbb, cmp);
5151 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5152 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5153 br->inst_true_bb = ins->next->inst_true_bb;
5154 br->inst_false_bb = ins->next->inst_false_bb;
5155 MONO_ADD_INS (cfg->cbb, br);
5157 /* The call sequence might include fp ins */
5158 restart = TRUE;
5160 /* Skip fbcc or fccc */
5161 NULLIFY_INS (ins->next);
5162 break;
5164 case OP_FCEQ:
5165 case OP_FCGT:
5166 case OP_FCGT_UN:
5167 case OP_FCLT:
5168 case OP_FCLT_UN: {
5169 MonoJitICallInfo *info;
5170 MonoInst *iargs [2];
5171 MonoInst *call;
5173 /* Convert fccc to icall+icompare+iceq */
5175 info = mono_find_jit_opcode_emulation (ins->opcode);
5176 g_assert (info);
5178 /* Create dummy MonoInst's for the arguments */
5179 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5180 iargs [0]->dreg = ins->sreg1;
5181 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5182 iargs [1]->dreg = ins->sreg2;
5184 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5186 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5187 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5189 /* The call sequence might include fp ins */
5190 restart = TRUE;
5191 break;
5193 case OP_CKFINITE: {
5194 MonoInst *iargs [2];
5195 MonoInst *call, *cmp;
5197 /* Convert to icall+icompare+cond_exc+move */
5199 /* Create dummy MonoInst's for the arguments */
5200 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5201 iargs [0]->dreg = ins->sreg1;
5203 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5205 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5206 cmp->sreg1 = call->dreg;
5207 cmp->inst_imm = 1;
5208 MONO_ADD_INS (cfg->cbb, cmp);
5210 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5212 /* Do the assignment if the value is finite */
5213 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
5215 restart = TRUE;
5216 break;
5218 default:
5219 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5220 mono_print_ins (ins);
5221 g_assert_not_reached ();
5223 break;
5226 g_assert (cfg->cbb == first_bb);
5228 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5229 /* Replace the original instruction with the new code sequence */
5231 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5232 first_bb->code = first_bb->last_ins = NULL;
5233 first_bb->in_count = first_bb->out_count = 0;
5234 cfg->cbb = first_bb;
5236 else
5237 prev = ins;
5241 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
5244 mono_decompose_long_opts (cfg);
5247 #endif
5249 static void
5250 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5252 MonoInst *ins;
5253 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5254 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5255 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5256 /* Optimize reg-reg moves away */
5258 * Can't optimize other opcodes, since sp[0] might point to
5259 * the last ins of a decomposed opcode.
5261 sp [0]->dreg = (cfg)->locals [n]->dreg;
5262 } else {
5263 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5268 * ldloca inhibits many optimizations so try to get rid of it in common
5269 * cases.
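* Illustrative example (SomeStruct is a hypothetical type): the IL pair
*     ldloca.s 1
*     initobj  SomeStruct
* is matched below and rewritten into a single OP_VZERO on local 1 (or a
* null OP_PCONST store when the type turns out to be a reference type), so
* the address of the local is never actually taken.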
5271 static inline unsigned char *
5272 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5274 int local, token;
5275 MonoClass *klass;
5277 if (size == 1) {
5278 local = ip [1];
5279 ip += 2;
5280 } else {
5281 local = read16 (ip + 2);
5282 ip += 4;
5285 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5286 gboolean skip = FALSE;
5288 /* From the INITOBJ case */
5289 token = read32 (ip + 2);
5290 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5291 CHECK_TYPELOAD (klass);
5292 if (generic_class_is_reference_type (cfg, klass)) {
5293 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5294 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5295 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5296 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5297 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5298 } else {
5299 skip = TRUE;
5302 if (!skip)
5303 return ip + 6;
5305 load_error:
5306 return NULL;
5309 static gboolean
5310 is_exception_class (MonoClass *class)
5312 while (class) {
5313 if (class == mono_defaults.exception_class)
5314 return TRUE;
5315 class = class->parent;
5317 return FALSE;
5321 * mono_method_to_ir:
5323 * Translate the .net IL into linear IR.
5326 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5327 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5328 guint inline_offset, gboolean is_virtual_call)
5330 MonoInst *ins, **sp, **stack_start;
5331 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5332 MonoMethod *cmethod, *method_definition;
5333 MonoInst **arg_array;
5334 MonoMethodHeader *header;
5335 MonoImage *image;
5336 guint32 token, ins_flag;
5337 MonoClass *klass;
5338 MonoClass *constrained_call = NULL;
5339 unsigned char *ip, *end, *target, *err_pos;
5340 static double r8_0 = 0.0;
5341 MonoMethodSignature *sig;
5342 MonoGenericContext *generic_context = NULL;
5343 MonoGenericContainer *generic_container = NULL;
5344 MonoType **param_types;
5345 int i, n, start_new_bblock, dreg;
5346 int num_calls = 0, inline_costs = 0;
5347 int breakpoint_id = 0;
5348 guint num_args;
5349 MonoBoolean security, pinvoke;
5350 MonoSecurityManager* secman = NULL;
5351 MonoDeclSecurityActions actions;
5352 GSList *class_inits = NULL;
5353 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5354 int context_used;
5355 gboolean init_locals, seq_points;
5357 /* serialization and xdomain stuff may need access to private fields and methods */
5358 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5359 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5360 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5361 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5362 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5363 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5365 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5367 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5368 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5369 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5370 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5372 image = method->klass->image;
5373 header = mono_method_get_header (method);
5374 generic_container = mono_method_get_generic_container (method);
5375 sig = mono_method_signature (method);
5376 num_args = sig->hasthis + sig->param_count;
5377 ip = (unsigned char*)header->code;
5378 cfg->cil_start = ip;
5379 end = ip + header->code_size;
5380 mono_jit_stats.cil_code_size += header->code_size;
5381 init_locals = header->init_locals;
5383 seq_points = cfg->gen_seq_points && cfg->method == method;
5386 * Methods without init_locals set could cause asserts in various passes
5387 * (#497220).
5389 init_locals = TRUE;
5391 method_definition = method;
5392 while (method_definition->is_inflated) {
5393 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5394 method_definition = imethod->declaring;
5397 /* SkipVerification is not allowed if core-clr is enabled */
5398 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5399 dont_verify = TRUE;
5400 dont_verify_stloc = TRUE;
5403 if (!dont_verify && mini_method_verify (cfg, method_definition))
5404 goto exception_exit;
5406 if (mono_debug_using_mono_debugger ())
5407 cfg->keep_cil_nops = TRUE;
5409 if (sig->is_inflated)
5410 generic_context = mono_method_get_context (method);
5411 else if (generic_container)
5412 generic_context = &generic_container->context;
5413 cfg->generic_context = generic_context;
5415 if (!cfg->generic_sharing_context)
5416 g_assert (!sig->has_type_parameters);
5418 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5419 g_assert (method->is_inflated);
5420 g_assert (mono_method_get_context (method)->method_inst);
5422 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5423 g_assert (sig->generic_param_count);
5425 if (cfg->method == method) {
5426 cfg->real_offset = 0;
5427 } else {
5428 cfg->real_offset = inline_offset;
5431 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5432 cfg->cil_offset_to_bb_len = header->code_size;
5434 cfg->current_method = method;
5436 if (cfg->verbose_level > 2)
5437 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5439 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5440 if (sig->hasthis)
5441 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5442 for (n = 0; n < sig->param_count; ++n)
5443 param_types [n + sig->hasthis] = sig->params [n];
5444 cfg->arg_types = param_types;
5446 dont_inline = g_list_prepend (dont_inline, method);
5447 if (cfg->method == method) {
5449 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5450 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5452 /* ENTRY BLOCK */
5453 NEW_BBLOCK (cfg, start_bblock);
5454 cfg->bb_entry = start_bblock;
5455 start_bblock->cil_code = NULL;
5456 start_bblock->cil_length = 0;
5458 /* EXIT BLOCK */
5459 NEW_BBLOCK (cfg, end_bblock);
5460 cfg->bb_exit = end_bblock;
5461 end_bblock->cil_code = NULL;
5462 end_bblock->cil_length = 0;
5463 g_assert (cfg->num_bblocks == 2);
5465 arg_array = cfg->args;
5467 if (header->num_clauses) {
5468 cfg->spvars = g_hash_table_new (NULL, NULL);
5469 cfg->exvars = g_hash_table_new (NULL, NULL);
5471 /* handle exception clauses */
5472 for (i = 0; i < header->num_clauses; ++i) {
5473 MonoBasicBlock *try_bb;
5474 MonoExceptionClause *clause = &header->clauses [i];
5475 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5476 try_bb->real_offset = clause->try_offset;
5477 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5478 tblock->real_offset = clause->handler_offset;
5479 tblock->flags |= BB_EXCEPTION_HANDLER;
5481 link_bblock (cfg, try_bb, tblock);
5483 if (*(ip + clause->handler_offset) == CEE_POP)
5484 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5486 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5487 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5488 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5489 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5490 MONO_ADD_INS (tblock, ins);
5492 /* todo: is a fault block unsafe to optimize? */
5493 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5494 tblock->flags |= BB_EXCEPTION_UNSAFE;
5498 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5499 while (p < end) {
5500 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
}*/
5502 /* catch and filter blocks get the exception object on the stack */
5503 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5504 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5505 MonoInst *dummy_use;
5507 /* mostly like handle_stack_args (), but just sets the input args */
5508 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5509 tblock->in_scount = 1;
5510 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5511 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5514 * Add a dummy use for the exvar so its liveness info will be
5515 * correct.
5517 cfg->cbb = tblock;
5518 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5520 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5521 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5522 tblock->flags |= BB_EXCEPTION_HANDLER;
5523 tblock->real_offset = clause->data.filter_offset;
5524 tblock->in_scount = 1;
5525 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5526 /* The filter block shares the exvar with the handler block */
5527 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5528 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5529 MONO_ADD_INS (tblock, ins);
5533 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5534 clause->data.catch_class &&
5535 cfg->generic_sharing_context &&
5536 mono_class_check_context_used (clause->data.catch_class)) {
5538 * In shared generic code with catch
5539 * clauses containing type variables
5540 * the exception handling code has to
5541 * be able to get to the rgctx.
5542 * Therefore we have to make sure that
5543 * the vtable/mrgctx argument (for
5544 * static or generic methods) or the
5545 * "this" argument (for non-static
5546 * methods) is live.
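* Concrete case (illustrative): in shared code for
*     void M<TEx> () where TEx : Exception { try { ... } catch (TEx) { ... } }
* the handler has to resolve TEx at run time through the rgctx, which is why
* the vtable/mrgctx argument or the this argument is kept alive here.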
5548 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5549 mini_method_get_context (method)->method_inst ||
5550 method->klass->valuetype) {
5551 mono_get_vtable_var (cfg);
5552 } else {
5553 MonoInst *dummy_use;
5555 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5559 } else {
5560 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5561 cfg->cbb = start_bblock;
5562 cfg->args = arg_array;
5563 mono_save_args (cfg, sig, inline_args);
5566 /* FIRST CODE BLOCK */
5567 NEW_BBLOCK (cfg, bblock);
5568 bblock->cil_code = ip;
5569 cfg->cbb = bblock;
5570 cfg->ip = ip;
5572 ADD_BBLOCK (cfg, bblock);
5574 if (cfg->method == method) {
5575 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5576 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5577 MONO_INST_NEW (cfg, ins, OP_BREAK);
5578 MONO_ADD_INS (bblock, ins);
5582 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5583 secman = mono_security_manager_get_methods ();
5585 security = (secman && mono_method_has_declsec (method));
5586 /* at this point having security doesn't mean we have any code to generate */
5587 if (security && (cfg->method == method)) {
5588 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5589 * And we do not want to enter the next section (with allocation) if we
5590 * have nothing to generate */
5591 security = mono_declsec_get_demands (method, &actions);
5594 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5595 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5596 if (pinvoke) {
5597 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5598 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5599 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5601 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5602 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5603 pinvoke = FALSE;
5605 if (custom)
5606 mono_custom_attrs_free (custom);
5608 if (pinvoke) {
5609 custom = mono_custom_attrs_from_class (wrapped->klass);
5610 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5611 pinvoke = FALSE;
5613 if (custom)
5614 mono_custom_attrs_free (custom);
5616 } else {
5617 /* not a P/Invoke after all */
5618 pinvoke = FALSE;
5622 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5623 /* we use a separate basic block for the initialization code */
5624 NEW_BBLOCK (cfg, init_localsbb);
5625 cfg->bb_init = init_localsbb;
5626 init_localsbb->real_offset = cfg->real_offset;
5627 start_bblock->next_bb = init_localsbb;
5628 init_localsbb->next_bb = bblock;
5629 link_bblock (cfg, start_bblock, init_localsbb);
5630 link_bblock (cfg, init_localsbb, bblock);
5632 cfg->cbb = init_localsbb;
5633 } else {
5634 start_bblock->next_bb = bblock;
5635 link_bblock (cfg, start_bblock, bblock);
5638 /* at this point we know, if security is TRUE, that some code needs to be generated */
5639 if (security && (cfg->method == method)) {
5640 MonoInst *args [2];
5642 mono_jit_stats.cas_demand_generation++;
5644 if (actions.demand.blob) {
5645 /* Add code for SecurityAction.Demand */
5646 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5647 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5648 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5649 mono_emit_method_call (cfg, secman->demand, args, NULL);
5651 if (actions.noncasdemand.blob) {
5652 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5653 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5654 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5655 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5656 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5657 mono_emit_method_call (cfg, secman->demand, args, NULL);
5659 if (actions.demandchoice.blob) {
5660 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5661 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5662 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5663 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5664 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5668 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5669 if (pinvoke) {
5670 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5673 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5674 /* check if this is native code, e.g. an icall or a p/invoke */
5675 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5676 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5677 if (wrapped) {
5678 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5679 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5681 /* if this is a native call then it can only be JITted from platform code */
5682 if ((icall || pinvk) && method->klass && method->klass->image) {
5683 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5684 MonoException *ex = icall ? mono_get_exception_security () :
5685 mono_get_exception_method_access ();
5686 emit_throw_exception (cfg, ex);
5693 if (header->code_size == 0)
5694 UNVERIFIED;
5696 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5697 ip = err_pos;
5698 UNVERIFIED;
5701 if (cfg->method == method)
5702 mono_debug_init_method (cfg, bblock, breakpoint_id);
5704 for (n = 0; n < header->num_locals; ++n) {
5705 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5706 UNVERIFIED;
5708 class_inits = NULL;
5710 /* We force the vtable variable here for all shared methods
5711 for the possibility that they might show up in a stack
5712 trace where their exact instantiation is needed. */
5713 if (cfg->generic_sharing_context && method == cfg->method) {
5714 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5715 mini_method_get_context (method)->method_inst ||
5716 method->klass->valuetype) {
5717 mono_get_vtable_var (cfg);
5718 } else {
5719 /* FIXME: Is there a better way to do this?
5720 We need the variable live for the duration
5721 of the whole method. */
5722 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5726 /* add a check for this != NULL to inlined methods */
5727 if (is_virtual_call) {
5728 MonoInst *arg_ins;
5730 NEW_ARGLOAD (cfg, arg_ins, 0);
5731 MONO_ADD_INS (cfg->cbb, arg_ins);
5732 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5733 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5734 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5737 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5738 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5740 ins_flag = 0;
5741 start_new_bblock = 0;
5742 cfg->cbb = bblock;
5743 while (ip < end) {
5745 if (cfg->method == method)
5746 cfg->real_offset = ip - header->code;
5747 else
5748 cfg->real_offset = inline_offset;
5749 cfg->ip = ip;
5751 context_used = 0;
5753 if (start_new_bblock) {
5754 bblock->cil_length = ip - bblock->cil_code;
5755 if (start_new_bblock == 2) {
5756 g_assert (ip == tblock->cil_code);
5757 } else {
5758 GET_BBLOCK (cfg, tblock, ip);
5760 bblock->next_bb = tblock;
5761 bblock = tblock;
5762 cfg->cbb = bblock;
5763 start_new_bblock = 0;
5764 for (i = 0; i < bblock->in_scount; ++i) {
5765 if (cfg->verbose_level > 3)
5766 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5767 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5768 *sp++ = ins;
5770 if (class_inits)
5771 g_slist_free (class_inits);
5772 class_inits = NULL;
5773 } else {
5774 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5775 link_bblock (cfg, bblock, tblock);
5776 if (sp != stack_start) {
5777 handle_stack_args (cfg, stack_start, sp - stack_start);
5778 sp = stack_start;
5779 CHECK_UNVERIFIABLE (cfg);
5781 bblock->next_bb = tblock;
5782 bblock = tblock;
5783 cfg->cbb = bblock;
5784 for (i = 0; i < bblock->in_scount; ++i) {
5785 if (cfg->verbose_level > 3)
5786 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5787 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5788 *sp++ = ins;
5790 g_slist_free (class_inits);
5791 class_inits = NULL;
5796 * Sequence points are points where the debugger can place a breakpoint.
5797 * Currently, we generate these automatically at points where the IL
5798 * stack is empty.
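* For example (illustrative): in the IL produced for
*     x = a + b;
*     y = x * 2;
* the evaluation stack is empty between the two statements, so a sequence
* point is emitted before the first instruction of each statement.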
5800 if (seq_points && sp == stack_start) {
5801 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5802 MONO_ADD_INS (cfg->cbb, ins);
5805 bblock->real_offset = cfg->real_offset;
5807 if ((cfg->method == method) && cfg->coverage_info) {
5808 guint32 cil_offset = ip - header->code;
5809 cfg->coverage_info->data [cil_offset].cil_code = ip;
5811 /* TODO: Use an increment here */
5812 #if defined(TARGET_X86)
5813 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5814 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5815 ins->inst_imm = 1;
5816 MONO_ADD_INS (cfg->cbb, ins);
5817 #else
5818 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5819 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5820 #endif
5823 if (cfg->verbose_level > 3)
5824 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5826 switch (*ip) {
5827 case CEE_NOP:
5828 if (cfg->keep_cil_nops)
5829 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5830 else
5831 MONO_INST_NEW (cfg, ins, OP_NOP);
5832 ip++;
5833 MONO_ADD_INS (bblock, ins);
5834 break;
5835 case CEE_BREAK:
5836 MONO_INST_NEW (cfg, ins, OP_BREAK);
5837 ip++;
5838 MONO_ADD_INS (bblock, ins);
5839 break;
5840 case CEE_LDARG_0:
5841 case CEE_LDARG_1:
5842 case CEE_LDARG_2:
5843 case CEE_LDARG_3:
5844 CHECK_STACK_OVF (1);
5845 n = (*ip)-CEE_LDARG_0;
5846 CHECK_ARG (n);
5847 EMIT_NEW_ARGLOAD (cfg, ins, n);
5848 ip++;
5849 *sp++ = ins;
5850 break;
5851 case CEE_LDLOC_0:
5852 case CEE_LDLOC_1:
5853 case CEE_LDLOC_2:
5854 case CEE_LDLOC_3:
5855 CHECK_STACK_OVF (1);
5856 n = (*ip)-CEE_LDLOC_0;
5857 CHECK_LOCAL (n);
5858 EMIT_NEW_LOCLOAD (cfg, ins, n);
5859 ip++;
5860 *sp++ = ins;
5861 break;
5862 case CEE_STLOC_0:
5863 case CEE_STLOC_1:
5864 case CEE_STLOC_2:
5865 case CEE_STLOC_3: {
5866 CHECK_STACK (1);
5867 n = (*ip)-CEE_STLOC_0;
5868 CHECK_LOCAL (n);
5869 --sp;
5870 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5871 UNVERIFIED;
5872 emit_stloc_ir (cfg, sp, header, n);
5873 ++ip;
5874 inline_costs += 1;
5875 break;
5877 case CEE_LDARG_S:
5878 CHECK_OPSIZE (2);
5879 CHECK_STACK_OVF (1);
5880 n = ip [1];
5881 CHECK_ARG (n);
5882 EMIT_NEW_ARGLOAD (cfg, ins, n);
5883 *sp++ = ins;
5884 ip += 2;
5885 break;
5886 case CEE_LDARGA_S:
5887 CHECK_OPSIZE (2);
5888 CHECK_STACK_OVF (1);
5889 n = ip [1];
5890 CHECK_ARG (n);
5891 NEW_ARGLOADA (cfg, ins, n);
5892 MONO_ADD_INS (cfg->cbb, ins);
5893 *sp++ = ins;
5894 ip += 2;
5895 break;
5896 case CEE_STARG_S:
5897 CHECK_OPSIZE (2);
5898 CHECK_STACK (1);
5899 --sp;
5900 n = ip [1];
5901 CHECK_ARG (n);
5902 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5903 UNVERIFIED;
5904 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5905 ip += 2;
5906 break;
5907 case CEE_LDLOC_S:
5908 CHECK_OPSIZE (2);
5909 CHECK_STACK_OVF (1);
5910 n = ip [1];
5911 CHECK_LOCAL (n);
5912 EMIT_NEW_LOCLOAD (cfg, ins, n);
5913 *sp++ = ins;
5914 ip += 2;
5915 break;
5916 case CEE_LDLOCA_S: {
5917 unsigned char *tmp_ip;
5918 CHECK_OPSIZE (2);
5919 CHECK_STACK_OVF (1);
5920 CHECK_LOCAL (ip [1]);
5922 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5923 ip = tmp_ip;
5924 inline_costs += 1;
5925 break;
5928 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5929 *sp++ = ins;
5930 ip += 2;
5931 break;
5933 case CEE_STLOC_S:
5934 CHECK_OPSIZE (2);
5935 CHECK_STACK (1);
5936 --sp;
5937 CHECK_LOCAL (ip [1]);
5938 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5939 UNVERIFIED;
5940 emit_stloc_ir (cfg, sp, header, ip [1]);
5941 ip += 2;
5942 inline_costs += 1;
5943 break;
5944 case CEE_LDNULL:
5945 CHECK_STACK_OVF (1);
5946 EMIT_NEW_PCONST (cfg, ins, NULL);
5947 ins->type = STACK_OBJ;
5948 ++ip;
5949 *sp++ = ins;
5950 break;
5951 case CEE_LDC_I4_M1:
5952 CHECK_STACK_OVF (1);
5953 EMIT_NEW_ICONST (cfg, ins, -1);
5954 ++ip;
5955 *sp++ = ins;
5956 break;
5957 case CEE_LDC_I4_0:
5958 case CEE_LDC_I4_1:
5959 case CEE_LDC_I4_2:
5960 case CEE_LDC_I4_3:
5961 case CEE_LDC_I4_4:
5962 case CEE_LDC_I4_5:
5963 case CEE_LDC_I4_6:
5964 case CEE_LDC_I4_7:
5965 case CEE_LDC_I4_8:
5966 CHECK_STACK_OVF (1);
5967 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5968 ++ip;
5969 *sp++ = ins;
5970 break;
5971 case CEE_LDC_I4_S:
5972 CHECK_OPSIZE (2);
5973 CHECK_STACK_OVF (1);
5974 ++ip;
5975 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5976 ++ip;
5977 *sp++ = ins;
5978 break;
5979 case CEE_LDC_I4:
5980 CHECK_OPSIZE (5);
5981 CHECK_STACK_OVF (1);
5982 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5983 ip += 5;
5984 *sp++ = ins;
5985 break;
5986 case CEE_LDC_I8:
5987 CHECK_OPSIZE (9);
5988 CHECK_STACK_OVF (1);
5989 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5990 ins->type = STACK_I8;
5991 ins->dreg = alloc_dreg (cfg, STACK_I8);
5992 ++ip;
5993 ins->inst_l = (gint64)read64 (ip);
5994 MONO_ADD_INS (bblock, ins);
5995 ip += 8;
5996 *sp++ = ins;
5997 break;
5998 case CEE_LDC_R4: {
5999 float *f;
6000 gboolean use_aotconst = FALSE;
6002 #ifdef TARGET_POWERPC
6003 /* FIXME: Clean this up */
6004 if (cfg->compile_aot)
6005 use_aotconst = TRUE;
6006 #endif
6008 /* FIXME: we should really allocate this only late in the compilation process */
6009 f = mono_domain_alloc (cfg->domain, sizeof (float));
6010 CHECK_OPSIZE (5);
6011 CHECK_STACK_OVF (1);
6013 if (use_aotconst) {
6014 MonoInst *cons;
6015 int dreg;
6017 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6019 dreg = alloc_freg (cfg);
6020 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6021 ins->type = STACK_R8;
6022 } else {
6023 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6024 ins->type = STACK_R8;
6025 ins->dreg = alloc_dreg (cfg, STACK_R8);
6026 ins->inst_p0 = f;
6027 MONO_ADD_INS (bblock, ins);
6029 ++ip;
6030 readr4 (ip, f);
6031 ip += 4;
6032 *sp++ = ins;
6033 break;
6035 case CEE_LDC_R8: {
6036 double *d;
6037 gboolean use_aotconst = FALSE;
6039 #ifdef TARGET_POWERPC
6040 /* FIXME: Clean this up */
6041 if (cfg->compile_aot)
6042 use_aotconst = TRUE;
6043 #endif
6045 /* FIXME: we should really allocate this only late in the compilation process */
6046 d = mono_domain_alloc (cfg->domain, sizeof (double));
6047 CHECK_OPSIZE (9);
6048 CHECK_STACK_OVF (1);
6050 if (use_aotconst) {
6051 MonoInst *cons;
6052 int dreg;
6054 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6056 dreg = alloc_freg (cfg);
6057 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6058 ins->type = STACK_R8;
6059 } else {
6060 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6061 ins->type = STACK_R8;
6062 ins->dreg = alloc_dreg (cfg, STACK_R8);
6063 ins->inst_p0 = d;
6064 MONO_ADD_INS (bblock, ins);
6066 ++ip;
6067 readr8 (ip, d);
6068 ip += 8;
6069 *sp++ = ins;
6070 break;
6072 case CEE_DUP: {
6073 MonoInst *temp, *store;
6074 CHECK_STACK (1);
6075 CHECK_STACK_OVF (1);
6076 sp--;
6077 ins = *sp;
6079 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6080 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6082 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6083 *sp++ = ins;
6085 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6086 *sp++ = ins;
6088 ++ip;
6089 inline_costs += 2;
6090 break;
6092 case CEE_POP:
6093 CHECK_STACK (1);
6094 ip++;
6095 --sp;
6097 #ifdef TARGET_X86
6098 if (sp [0]->type == STACK_R8)
6099 /* we need to pop the value from the x86 FP stack */
6100 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6101 #endif
6102 break;
6103 case CEE_JMP: {
6104 MonoCallInst *call;
6106 INLINE_FAILURE;
6108 CHECK_OPSIZE (5);
6109 if (stack_start != sp)
6110 UNVERIFIED;
6111 token = read32 (ip + 1);
6112 /* FIXME: check the signature matches */
6113 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6115 if (!cmethod)
6116 goto load_error;
6118 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6119 GENERIC_SHARING_FAILURE (CEE_JMP);
6121 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6122 CHECK_CFG_EXCEPTION;
6124 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6126 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6127 int i, n;
6129 /* Handle tail calls similarly to calls */
6130 n = fsig->param_count + fsig->hasthis;
6132 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6133 call->method = cmethod;
6134 call->tail_call = TRUE;
6135 call->signature = mono_method_signature (cmethod);
6136 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6137 call->inst.inst_p0 = cmethod;
6138 for (i = 0; i < n; ++i)
6139 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6141 mono_arch_emit_call (cfg, call);
6142 MONO_ADD_INS (bblock, (MonoInst*)call);
6144 #else
6145 for (i = 0; i < num_args; ++i)
6146 /* Prevent arguments from being optimized away */
6147 arg_array [i]->flags |= MONO_INST_VOLATILE;
6149 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6150 ins = (MonoInst*)call;
6151 ins->inst_p0 = cmethod;
6152 MONO_ADD_INS (bblock, ins);
6153 #endif
6155 ip += 5;
6156 start_new_bblock = 1;
6157 break;
6159 case CEE_CALLI:
6160 case CEE_CALL:
6161 case CEE_CALLVIRT: {
6162 MonoInst *addr = NULL;
6163 MonoMethodSignature *fsig = NULL;
6164 int array_rank = 0;
6165 int virtual = *ip == CEE_CALLVIRT;
6166 int calli = *ip == CEE_CALLI;
6167 gboolean pass_imt_from_rgctx = FALSE;
6168 MonoInst *imt_arg = NULL;
6169 gboolean pass_vtable = FALSE;
6170 gboolean pass_mrgctx = FALSE;
6171 MonoInst *vtable_arg = NULL;
6172 gboolean check_this = FALSE;
6173 gboolean supported_tail_call = FALSE;
6175 CHECK_OPSIZE (5);
6176 token = read32 (ip + 1);
6178 if (calli) {
6179 cmethod = NULL;
6180 CHECK_STACK (1);
6181 --sp;
6182 addr = *sp;
6183 if (method->wrapper_type != MONO_WRAPPER_NONE)
6184 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6185 else
6186 fsig = mono_metadata_parse_signature (image, token);
6188 n = fsig->param_count + fsig->hasthis;
6189 } else {
6190 MonoMethod *cil_method;
6192 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6193 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6194 cil_method = cmethod;
6195 } else if (constrained_call) {
6196 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6198 * This is needed since get_method_constrained can't find
6199 * the method in klass representing a type var.
6200 * The type var is guaranteed to be a reference type in this
6201 * case.
6203 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6204 cil_method = cmethod;
6205 g_assert (!cmethod->klass->valuetype);
6206 } else {
6207 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6209 } else {
6210 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6211 cil_method = cmethod;
6214 if (!cmethod)
6215 goto load_error;
6216 if (!dont_verify && !cfg->skip_visibility) {
6217 MonoMethod *target_method = cil_method;
6218 if (method->is_inflated) {
6219 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6221 if (!mono_method_can_access_method (method_definition, target_method) &&
6222 !mono_method_can_access_method (method, cil_method))
6223 METHOD_ACCESS_FAILURE;
6226 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6227 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6229 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6230 /* MS.NET seems to silently convert this to a callvirt */
6231 virtual = 1;
6233 if (!cmethod->klass->inited)
6234 if (!mono_class_init (cmethod->klass))
6235 goto load_error;
6237 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6238 mini_class_is_system_array (cmethod->klass)) {
6239 array_rank = cmethod->klass->rank;
6240 fsig = mono_method_signature (cmethod);
6241 } else {
6242 if (mono_method_signature (cmethod)->pinvoke) {
6243 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6244 check_for_pending_exc, FALSE);
6245 fsig = mono_method_signature (wrapper);
6246 } else if (constrained_call) {
6247 fsig = mono_method_signature (cmethod);
6248 } else {
6249 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6253 mono_save_token_info (cfg, image, token, cil_method);
6255 n = fsig->param_count + fsig->hasthis;
6257 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6258 if (check_linkdemand (cfg, method, cmethod))
6259 INLINE_FAILURE;
6260 CHECK_CFG_EXCEPTION;
6263 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6264 g_assert_not_reached ();
6267 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6268 UNVERIFIED;
6270 if (!cfg->generic_sharing_context && cmethod)
6271 g_assert (!mono_method_check_context_used (cmethod));
6273 CHECK_STACK (n);
6275 //g_assert (!virtual || fsig->hasthis);
6277 sp -= n;
6279 if (constrained_call) {
6281 * We have the `constrained.' prefix opcode.
6283 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6285 * The type parameter is instantiated as a valuetype,
6286 * but that type doesn't override the method we're
6287 * calling, so we need to box `this'.
6289 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6290 ins->klass = constrained_call;
6291 sp [0] = handle_box (cfg, ins, constrained_call);
6292 CHECK_CFG_EXCEPTION;
6293 } else if (!constrained_call->valuetype) {
6294 int dreg = alloc_preg (cfg);
6297 * The type parameter is instantiated as a reference
6298 * type. We have a managed pointer on the stack, so
6299 * we need to dereference it here.
6301 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6302 ins->type = STACK_OBJ;
6303 sp [0] = ins;
6304 } else if (cmethod->klass->valuetype)
6305 virtual = 0;
6306 constrained_call = NULL;
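/*
 * Example of the prefix (illustrative): for
 *     constrained. !!T
 *     callvirt instance string [mscorlib]System.Object::ToString ()
 * with T instantiated as a struct that does not override ToString the
 * receiver was boxed above; for a reference-type T the managed pointer on
 * the stack was simply dereferenced; and when the struct itself provides
 * the method it is called directly, without boxing.
 */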
6309 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6310 UNVERIFIED;
6313 * If the callee is a shared method, then its static cctor
6314 * might not get called after the call was patched.
6316 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6317 emit_generic_class_init (cfg, cmethod->klass);
6318 CHECK_TYPELOAD (cmethod->klass);
6321 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6322 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6323 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6324 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6325 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6328 * Pass vtable iff target method might
6329 * be shared, which means that sharing
6330 * is enabled for its class and its
6331 * context is sharable (and it's not a
6332 * generic method).
6334 if (sharing_enabled && context_sharable &&
6335 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6336 pass_vtable = TRUE;
6339 if (cmethod && mini_method_get_context (cmethod) &&
6340 mini_method_get_context (cmethod)->method_inst) {
6341 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6342 MonoGenericContext *context = mini_method_get_context (cmethod);
6343 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6345 g_assert (!pass_vtable);
6347 if (sharing_enabled && context_sharable)
6348 pass_mrgctx = TRUE;
6351 if (cfg->generic_sharing_context && cmethod) {
6352 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6354 context_used = mono_method_check_context_used (cmethod);
6356 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6357 /* Generic method interface
6358 calls are resolved via a
6359 helper function and don't
6360 need an imt. */
6361 if (!cmethod_context || !cmethod_context->method_inst)
6362 pass_imt_from_rgctx = TRUE;
6366 * If a shared method calls another
6367 * shared method then the caller must
6368 * have a generic sharing context
6369 * because the magic trampoline
6370 * requires it. FIXME: We shouldn't
6371 * have to force the vtable/mrgctx
6372 * variable here. Instead there
6373 * should be a flag in the cfg to
6374 * request a generic sharing context.
6376 if (context_used &&
6377 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6378 mono_get_vtable_var (cfg);
6381 if (pass_vtable) {
6382 if (context_used) {
6383 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6384 } else {
6385 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6387 CHECK_TYPELOAD (cmethod->klass);
6388 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6392 if (pass_mrgctx) {
6393 g_assert (!vtable_arg);
6395 if (context_used) {
6396 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6397 } else {
6398 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6401 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6402 MONO_METHOD_IS_FINAL (cmethod)) {
6403 if (virtual)
6404 check_this = TRUE;
6405 virtual = 0;
6409 if (pass_imt_from_rgctx) {
6410 g_assert (!pass_vtable);
6411 g_assert (cmethod);
6413 imt_arg = emit_get_rgctx_method (cfg, context_used,
6414 cmethod, MONO_RGCTX_INFO_METHOD);
6417 if (check_this) {
6418 MonoInst *check;
6420 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6421 check->sreg1 = sp [0]->dreg;
6422 MONO_ADD_INS (cfg->cbb, check);
6425 /* Calling virtual generic methods */
6426 if (cmethod && virtual &&
6427 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6428 !(MONO_METHOD_IS_FINAL (cmethod) &&
6429 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6430 mono_method_signature (cmethod)->generic_param_count) {
6431 MonoInst *this_temp, *this_arg_temp, *store;
6432 MonoInst *iargs [4];
6434 g_assert (mono_method_signature (cmethod)->is_inflated);
6436 /* Prevent inlining of methods that contain indirect calls */
6437 INLINE_FAILURE;
6439 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6440 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6441 g_assert (!imt_arg);
6442 if (context_used) {
6443 imt_arg = emit_get_rgctx_method (cfg, context_used,
6444 cmethod, MONO_RGCTX_INFO_METHOD);
6446 } else {
6447 g_assert (cmethod->is_inflated);
6448 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6450 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6451 } else
6452 #endif
6454 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6455 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6456 MONO_ADD_INS (bblock, store);
6458 /* FIXME: This should be a managed pointer */
6459 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6461 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6462 if (context_used) {
6463 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6464 cmethod, MONO_RGCTX_INFO_METHOD);
6465 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6466 addr = mono_emit_jit_icall (cfg,
6467 mono_helper_compile_generic_method, iargs);
6468 } else {
6469 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6470 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6471 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6474 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6476 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6479 if (!MONO_TYPE_IS_VOID (fsig->ret))
6480 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6482 ip += 5;
6483 ins_flag = 0;
6484 break;
6487 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6488 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6489 #else
6490 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6491 #endif
6493 /* Tail prefix */
6494 /* FIXME: runtime generic context pointer for jumps? */
6495 /* FIXME: handle this for generic sharing eventually */
6496 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6497 MonoCallInst *call;
6499 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6500 INLINE_FAILURE;
6502 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6503 /* Handle tail calls similarly to calls */
6504 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6505 #else
6506 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6507 call->tail_call = TRUE;
6508 call->method = cmethod;
6509 call->signature = mono_method_signature (cmethod);
6512 * We implement tail calls by storing the actual arguments into the
6513 * argument variables, then emitting a CEE_JMP.
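* (Illustrative example: for 'return Foo (x + 1, y)' in tail position the
* values x + 1 and y are stored into this frame's argument slots and control
* jumps straight to Foo, so the frame is reused and the stack does not grow.)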
6515 for (i = 0; i < n; ++i) {
6516 /* Prevent argument from being register allocated */
6517 arg_array [i]->flags |= MONO_INST_VOLATILE;
6518 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6520 #endif
6522 ins = (MonoInst*)call;
6523 ins->inst_p0 = cmethod;
6524 ins->inst_p1 = arg_array [0];
6525 MONO_ADD_INS (bblock, ins);
6526 link_bblock (cfg, bblock, end_bblock);
6527 start_new_bblock = 1;
6528 /* skip CEE_RET as well */
6529 ip += 6;
6530 ins_flag = 0;
6531 break;
6534 /* Conversion to a JIT intrinsic */
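/* For instance (illustrative), some System.Math and Interlocked methods can
 * be replaced by a single IR opcode here instead of emitting a real call. */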
6535 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6536 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6537 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6538 *sp = ins;
6539 sp++;
6542 ip += 5;
6543 ins_flag = 0;
6544 break;
6547 /* Inlining */
6548 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6549 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6550 mono_method_check_inlining (cfg, cmethod) &&
6551 !g_list_find (dont_inline, cmethod)) {
6552 int costs;
6553 gboolean allways = FALSE;
6555 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6556 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6557 /* Prevent inlining of methods that call wrappers */
6558 INLINE_FAILURE;
6559 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6560 allways = TRUE;
6563 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6564 ip += 5;
6565 cfg->real_offset += 5;
6566 bblock = cfg->cbb;
6568 if (!MONO_TYPE_IS_VOID (fsig->ret))
6569 /* *sp is already set by inline_method */
6570 sp++;
6572 inline_costs += costs;
6573 ins_flag = 0;
6574 break;
6578 inline_costs += 10 * num_calls++;
6580 /* Tail recursion elimination */
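/*
 * Illustrative example (hypothetical method): a self-call in tail position,
 *     static int Sum (int n, int acc) { ... return Sum (n - 1, acc + n); }
 * is rewritten below into argument stores followed by an OP_BR back to the
 * method's first basic block, i.e. into a plain loop, provided no parameter
 * is a value type (struct).
 */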
6581 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6582 gboolean has_vtargs = FALSE;
6583 int i;
6585 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6586 INLINE_FAILURE;
6588 /* keep it simple */
6589 for (i = fsig->param_count - 1; i >= 0; i--) {
6590 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6591 has_vtargs = TRUE;
6594 if (!has_vtargs) {
6595 for (i = 0; i < n; ++i)
6596 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6597 MONO_INST_NEW (cfg, ins, OP_BR);
6598 MONO_ADD_INS (bblock, ins);
6599 tblock = start_bblock->out_bb [0];
6600 link_bblock (cfg, bblock, tblock);
6601 ins->inst_target_bb = tblock;
6602 start_new_bblock = 1;
6604 /* skip the CEE_RET, too */
6605 if (ip_in_bb (cfg, bblock, ip + 5))
6606 ip += 6;
6607 else
6608 ip += 5;
6610 ins_flag = 0;
6611 break;
6615 /* Generic sharing */
6616 /* FIXME: only do this for generic methods if
6617 they are not shared! */
6618 if (context_used && !imt_arg && !array_rank &&
6619 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6620 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6621 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6622 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6623 INLINE_FAILURE;
6625 g_assert (cfg->generic_sharing_context && cmethod);
6626 g_assert (!addr);
6629 * We are compiling a call to a
6630 * generic method from shared code,
6631 * which means that we have to look up
6632 * the method in the rgctx and do an
6633 * indirect call.
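* For instance (illustrative), when shared code of Bar<T> calls Foo<T> (),
* the MONO_RGCTX_INFO_GENERIC_METHOD_CODE slot yields the code address of
* Foo instantiated with the caller's T, and that address feeds the
* indirect-call path below.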
6635 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6638 /* Indirect calls */
6639 if (addr) {
6640 g_assert (!imt_arg);
6642 if (*ip == CEE_CALL)
6643 g_assert (context_used);
6644 else if (*ip == CEE_CALLI)
6645 g_assert (!vtable_arg);
6646 else
6647 /* FIXME: what the hell is this??? */
6648 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6649 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6651 /* Prevent inlining of methods with indirect calls */
6652 INLINE_FAILURE;
6654 if (vtable_arg) {
6655 #ifdef MONO_ARCH_RGCTX_REG
6656 MonoCallInst *call;
6657 int rgctx_reg = mono_alloc_preg (cfg);
6659 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6660 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6661 call = (MonoCallInst*)ins;
6662 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6663 cfg->uses_rgctx_reg = TRUE;
6664 call->rgctx_reg = TRUE;
6665 #else
6666 NOT_IMPLEMENTED;
6667 #endif
6668 } else {
6669 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6671 * Instead of emitting an indirect call, emit a direct call
6672 * with the contents of the aotconst as the patch info.
6674 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6675 NULLIFY_INS (addr);
6676 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6677 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6678 NULLIFY_INS (addr);
6679 } else {
6680 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6683 if (!MONO_TYPE_IS_VOID (fsig->ret))
6684 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6686 ip += 5;
6687 ins_flag = 0;
6688 break;
6691 /* Array methods */
6692 if (array_rank) {
6693 MonoInst *addr;
6695 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6696 if (sp [fsig->param_count]->type == STACK_OBJ) {
6697 MonoInst *iargs [2];
6699 iargs [0] = sp [0];
6700 iargs [1] = sp [fsig->param_count];
6702 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6705 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6706 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6707 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6708 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6710 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6712 *sp++ = ins;
6713 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6714 if (!cmethod->klass->element_class->valuetype && !readonly)
6715 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6716 CHECK_TYPELOAD (cmethod->klass);
6718 readonly = FALSE;
6719 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6720 *sp++ = addr;
6721 } else {
6722 g_assert_not_reached ();
6725 ip += 5;
6726 ins_flag = 0;
6727 break;
6730 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6731 if (ins) {
6732 if (!MONO_TYPE_IS_VOID (fsig->ret))
6733 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6735 ip += 5;
6736 ins_flag = 0;
6737 break;
6740 /* Common call */
6741 INLINE_FAILURE;
6742 if (vtable_arg) {
6743 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6744 NULL, vtable_arg);
6745 } else if (imt_arg) {
6746 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6747 } else {
6748 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6751 if (!MONO_TYPE_IS_VOID (fsig->ret))
6752 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6754 ip += 5;
6755 ins_flag = 0;
6756 break;
6758 case CEE_RET:
6759 if (cfg->method != method) {
6760 /* return from inlined method */
6762 * If in_count == 0, that means the ret is unreachable due to
6763 * being preceded by a throw. In that case, inline_method () will
6764 * handle setting the return value
6765 * (test case: test_0_inline_throw ()).
6767 if (return_var && cfg->cbb->in_count) {
6768 MonoInst *store;
6769 CHECK_STACK (1);
6770 --sp;
6771 //g_assert (returnvar != -1);
6772 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6773 cfg->ret_var_set = TRUE;
6775 } else {
6776 if (cfg->ret) {
6777 MonoType *ret_type = mono_method_signature (method)->ret;
6779 g_assert (!return_var);
6780 CHECK_STACK (1);
6781 --sp;
6782 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6783 MonoInst *ret_addr;
6785 if (!cfg->vret_addr) {
6786 MonoInst *ins;
6788 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6789 } else {
6790 EMIT_NEW_RETLOADA (cfg, ret_addr);
6792 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6793 ins->klass = mono_class_from_mono_type (ret_type);
6795 } else {
6796 #ifdef MONO_ARCH_SOFT_FLOAT
6797 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6798 MonoInst *iargs [1];
6799 MonoInst *conv;
6801 iargs [0] = *sp;
6802 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6803 mono_arch_emit_setret (cfg, method, conv);
6804 } else {
6805 mono_arch_emit_setret (cfg, method, *sp);
6807 #else
6808 mono_arch_emit_setret (cfg, method, *sp);
6809 #endif
6813 if (sp != stack_start)
6814 UNVERIFIED;
6815 MONO_INST_NEW (cfg, ins, OP_BR);
6816 ip++;
6817 ins->inst_target_bb = end_bblock;
6818 MONO_ADD_INS (bblock, ins);
6819 link_bblock (cfg, bblock, end_bblock);
6820 start_new_bblock = 1;
6821 break;
6822 case CEE_BR_S:
6823 CHECK_OPSIZE (2);
6824 MONO_INST_NEW (cfg, ins, OP_BR);
6825 ip++;
6826 target = ip + 1 + (signed char)(*ip);
6827 ++ip;
6828 GET_BBLOCK (cfg, tblock, target);
6829 link_bblock (cfg, bblock, tblock);
6830 ins->inst_target_bb = tblock;
6831 if (sp != stack_start) {
6832 handle_stack_args (cfg, stack_start, sp - stack_start);
6833 sp = stack_start;
6834 CHECK_UNVERIFIABLE (cfg);
6836 MONO_ADD_INS (bblock, ins);
6837 start_new_bblock = 1;
6838 inline_costs += BRANCH_COST;
6839 break;
6840 case CEE_BEQ_S:
6841 case CEE_BGE_S:
6842 case CEE_BGT_S:
6843 case CEE_BLE_S:
6844 case CEE_BLT_S:
6845 case CEE_BNE_UN_S:
6846 case CEE_BGE_UN_S:
6847 case CEE_BGT_UN_S:
6848 case CEE_BLE_UN_S:
6849 case CEE_BLT_UN_S:
6850 CHECK_OPSIZE (2);
6851 CHECK_STACK (2);
6852 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6853 ip++;
6854 target = ip + 1 + *(signed char*)ip;
6855 ip++;
6857 ADD_BINCOND (NULL);
6859 sp = stack_start;
6860 inline_costs += BRANCH_COST;
6861 break;
6862 case CEE_BR:
6863 CHECK_OPSIZE (5);
6864 MONO_INST_NEW (cfg, ins, OP_BR);
6865 ip++;
6867 target = ip + 4 + (gint32)read32(ip);
6868 ip += 4;
6869 GET_BBLOCK (cfg, tblock, target);
6870 link_bblock (cfg, bblock, tblock);
6871 ins->inst_target_bb = tblock;
6872 if (sp != stack_start) {
6873 handle_stack_args (cfg, stack_start, sp - stack_start);
6874 sp = stack_start;
6875 CHECK_UNVERIFIABLE (cfg);
6878 MONO_ADD_INS (bblock, ins);
6880 start_new_bblock = 1;
6881 inline_costs += BRANCH_COST;
6882 break;
6883 case CEE_BRFALSE_S:
6884 case CEE_BRTRUE_S:
6885 case CEE_BRFALSE:
6886 case CEE_BRTRUE: {
6887 MonoInst *cmp;
6888 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6889 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6890 guint32 opsize = is_short ? 1 : 4;
6892 CHECK_OPSIZE (opsize);
6893 CHECK_STACK (1);
6894 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6895 UNVERIFIED;
6896 ip ++;
6897 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6898 ip += opsize;
6900 sp--;
6902 GET_BBLOCK (cfg, tblock, target);
6903 link_bblock (cfg, bblock, tblock);
6904 GET_BBLOCK (cfg, tblock, ip);
6905 link_bblock (cfg, bblock, tblock);
6907 if (sp != stack_start) {
6908 handle_stack_args (cfg, stack_start, sp - stack_start);
6909 CHECK_UNVERIFIABLE (cfg);
6912 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6913 cmp->sreg1 = sp [0]->dreg;
6914 type_from_op (cmp, sp [0], NULL);
6915 CHECK_TYPE (cmp);
6917 #if SIZEOF_REGISTER == 4
6918 if (cmp->opcode == OP_LCOMPARE_IMM) {
6919 /* Convert it to OP_LCOMPARE */
6920 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6921 ins->type = STACK_I8;
6922 ins->dreg = alloc_dreg (cfg, STACK_I8);
6923 ins->inst_l = 0;
6924 MONO_ADD_INS (bblock, ins);
6925 cmp->opcode = OP_LCOMPARE;
6926 cmp->sreg2 = ins->dreg;
6928 #endif
6929 MONO_ADD_INS (bblock, cmp);
6931 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6932 type_from_op (ins, sp [0], NULL);
6933 MONO_ADD_INS (bblock, ins);
6934 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6935 GET_BBLOCK (cfg, tblock, target);
6936 ins->inst_true_bb = tblock;
6937 GET_BBLOCK (cfg, tblock, ip);
6938 ins->inst_false_bb = tblock;
6939 start_new_bblock = 2;
6941 sp = stack_start;
6942 inline_costs += BRANCH_COST;
6943 break;
6945 case CEE_BEQ:
6946 case CEE_BGE:
6947 case CEE_BGT:
6948 case CEE_BLE:
6949 case CEE_BLT:
6950 case CEE_BNE_UN:
6951 case CEE_BGE_UN:
6952 case CEE_BGT_UN:
6953 case CEE_BLE_UN:
6954 case CEE_BLT_UN:
6955 CHECK_OPSIZE (5);
6956 CHECK_STACK (2);
6957 MONO_INST_NEW (cfg, ins, *ip);
6958 ip++;
6959 target = ip + 4 + (gint32)read32(ip);
6960 ip += 4;
6962 ADD_BINCOND (NULL);
6964 sp = stack_start;
6965 inline_costs += BRANCH_COST;
6966 break;
6967 case CEE_SWITCH: {
6968 MonoInst *src1;
6969 MonoBasicBlock **targets;
6970 MonoBasicBlock *default_bblock;
6971 MonoJumpInfoBBTable *table;
6972 int offset_reg = alloc_preg (cfg);
6973 int target_reg = alloc_preg (cfg);
6974 int table_reg = alloc_preg (cfg);
6975 int sum_reg = alloc_preg (cfg);
6976 gboolean use_op_switch;
6978 CHECK_OPSIZE (5);
6979 CHECK_STACK (1);
6980 n = read32 (ip + 1);
6981 --sp;
6982 src1 = sp [0];
6983 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6984 UNVERIFIED;
6986 ip += 5;
6987 CHECK_OPSIZE (n * sizeof (guint32));
6988 target = ip + n * sizeof (guint32);
6990 GET_BBLOCK (cfg, default_bblock, target);
6992 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6993 for (i = 0; i < n; ++i) {
6994 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6995 targets [i] = tblock;
6996 ip += 4;
6999 if (sp != stack_start) {
7001 * Link the current bb with the targets as well, so handle_stack_args
7002 * will set their in_stack correctly.
7004 link_bblock (cfg, bblock, default_bblock);
7005 for (i = 0; i < n; ++i)
7006 link_bblock (cfg, bblock, targets [i]);
7008 handle_stack_args (cfg, stack_start, sp - stack_start);
7009 sp = stack_start;
7010 CHECK_UNVERIFIABLE (cfg);
7013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7015 bblock = cfg->cbb;
7017 for (i = 0; i < n; ++i)
7018 link_bblock (cfg, bblock, targets [i]);
7020 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7021 table->table = targets;
7022 table->table_size = n;
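/*
 * What follows (described for clarity): either a single OP_SWITCH is
 * emitted, or the selector is scaled by the pointer size, added to the
 * address of the jump table built above, and the target loaded from that
 * slot is reached through OP_BR_REG. The range check emitted above already
 * branches to the default block when the value is out of range.
 */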
7024 use_op_switch = FALSE;
7025 #ifdef TARGET_ARM
7026 /* ARM implements SWITCH statements differently */
7027 /* FIXME: Make it use the generic implementation */
7028 if (!cfg->compile_aot)
7029 use_op_switch = TRUE;
7030 #endif
7032 if (COMPILE_LLVM (cfg))
7033 use_op_switch = TRUE;
7035 cfg->cbb->has_jump_table = 1;
7037 if (use_op_switch) {
7038 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7039 ins->sreg1 = src1->dreg;
7040 ins->inst_p0 = table;
7041 ins->inst_many_bb = targets;
7042 ins->klass = GUINT_TO_POINTER (n);
7043 MONO_ADD_INS (cfg->cbb, ins);
7044 } else {
7045 if (sizeof (gpointer) == 8)
7046 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7047 else
7048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7050 #if SIZEOF_REGISTER == 8
7051 /* The upper word might not be zero, and we add it to a 64 bit address later */
7052 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7053 #endif
7055 if (cfg->compile_aot) {
7056 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7057 } else {
7058 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7059 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7060 ins->inst_p0 = table;
7061 ins->dreg = table_reg;
7062 MONO_ADD_INS (cfg->cbb, ins);
7065 /* FIXME: Use load_memindex */
7066 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7068 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7070 start_new_bblock = 1;
7071 inline_costs += (BRANCH_COST * 2);
7072 break;
7074 case CEE_LDIND_I1:
7075 case CEE_LDIND_U1:
7076 case CEE_LDIND_I2:
7077 case CEE_LDIND_U2:
7078 case CEE_LDIND_I4:
7079 case CEE_LDIND_U4:
7080 case CEE_LDIND_I8:
7081 case CEE_LDIND_I:
7082 case CEE_LDIND_R4:
7083 case CEE_LDIND_R8:
7084 case CEE_LDIND_REF:
7085 CHECK_STACK (1);
7086 --sp;
7088 switch (*ip) {
7089 case CEE_LDIND_R4:
7090 case CEE_LDIND_R8:
7091 dreg = alloc_freg (cfg);
7092 break;
7093 case CEE_LDIND_I8:
7094 dreg = alloc_lreg (cfg);
7095 break;
7096 default:
7097 dreg = alloc_preg (cfg);
7100 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7101 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7102 ins->flags |= ins_flag;
7103 ins_flag = 0;
7104 MONO_ADD_INS (bblock, ins);
7105 *sp++ = ins;
7106 ++ip;
7107 break;
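/* CEE_STIND_*: pop address and value and emit a store_membase; stind.ref also
 * gets a GC write-barrier call when HAVE_WRITE_BARRIERS is set, we are not
 * inside the write-barrier wrapper itself, and the value is not a null constant. */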
7108 case CEE_STIND_REF:
7109 case CEE_STIND_I1:
7110 case CEE_STIND_I2:
7111 case CEE_STIND_I4:
7112 case CEE_STIND_I8:
7113 case CEE_STIND_R4:
7114 case CEE_STIND_R8:
7115 case CEE_STIND_I:
7116 CHECK_STACK (2);
7117 sp -= 2;
7119 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7120 ins->flags |= ins_flag;
7121 ins_flag = 0;
7122 MONO_ADD_INS (bblock, ins);
7124 #if HAVE_WRITE_BARRIERS
7125 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7126 /* insert call to write barrier */
7127 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7128 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7130 #endif
7132 inline_costs += 1;
7133 ++ip;
7134 break;
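/* Binary arithmetic: type_from_op () infers the result stack type; when the
 * right operand is a constant the architecture accepts as an immediate, the
 * _IMM form of the opcode is used and the constant definition becomes a NOP. */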
7136 case CEE_MUL:
7137 CHECK_STACK (2);
7139 MONO_INST_NEW (cfg, ins, (*ip));
7140 sp -= 2;
7141 ins->sreg1 = sp [0]->dreg;
7142 ins->sreg2 = sp [1]->dreg;
7143 type_from_op (ins, sp [0], sp [1]);
7144 CHECK_TYPE (ins);
7145 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7147 /* Use the immediate opcodes if possible */
7148 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7149 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7150 if (imm_opcode != -1) {
7151 ins->opcode = imm_opcode;
7152 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7153 ins->sreg2 = -1;
7155 sp [1]->opcode = OP_NOP;
7159 MONO_ADD_INS ((cfg)->cbb, (ins));
7161 *sp++ = mono_decompose_opcode (cfg, ins);
7162 ip++;
7163 break;
7164 case CEE_ADD:
7165 case CEE_SUB:
7166 case CEE_DIV:
7167 case CEE_DIV_UN:
7168 case CEE_REM:
7169 case CEE_REM_UN:
7170 case CEE_AND:
7171 case CEE_OR:
7172 case CEE_XOR:
7173 case CEE_SHL:
7174 case CEE_SHR:
7175 case CEE_SHR_UN:
7176 CHECK_STACK (2);
7178 MONO_INST_NEW (cfg, ins, (*ip));
7179 sp -= 2;
7180 ins->sreg1 = sp [0]->dreg;
7181 ins->sreg2 = sp [1]->dreg;
7182 type_from_op (ins, sp [0], sp [1]);
7183 CHECK_TYPE (ins);
7184 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7185 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7187 /* FIXME: Pass opcode to is_inst_imm */
7189 /* Use the immediate opcodes if possible */
7190 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7191 int imm_opcode;
7193 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7194 if (imm_opcode != -1) {
7195 ins->opcode = imm_opcode;
7196 if (sp [1]->opcode == OP_I8CONST) {
7197 #if SIZEOF_REGISTER == 8
7198 ins->inst_imm = sp [1]->inst_l;
7199 #else
7200 ins->inst_ls_word = sp [1]->inst_ls_word;
7201 ins->inst_ms_word = sp [1]->inst_ms_word;
7202 #endif
7204 else
7205 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7206 ins->sreg2 = -1;
7208 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7209 if (sp [1]->next == NULL)
7210 sp [1]->opcode = OP_NOP;
7213 MONO_ADD_INS ((cfg)->cbb, (ins));
7215 *sp++ = mono_decompose_opcode (cfg, ins);
7216 ip++;
7217 break;
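/* Unary ops and conversions: conv.i8/conv.u8 of an int32 constant is folded
 * directly into an OP_I8CONST so later passes see a long constant; everything
 * else goes through ADD_UNOP. */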
7218 case CEE_NEG:
7219 case CEE_NOT:
7220 case CEE_CONV_I1:
7221 case CEE_CONV_I2:
7222 case CEE_CONV_I4:
7223 case CEE_CONV_R4:
7224 case CEE_CONV_R8:
7225 case CEE_CONV_U4:
7226 case CEE_CONV_I8:
7227 case CEE_CONV_U8:
7228 case CEE_CONV_OVF_I8:
7229 case CEE_CONV_OVF_U8:
7230 case CEE_CONV_R_UN:
7231 CHECK_STACK (1);
7233 /* Special case this earlier so we have long constants in the IR */
7234 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7235 int data = sp [-1]->inst_c0;
7236 sp [-1]->opcode = OP_I8CONST;
7237 sp [-1]->type = STACK_I8;
7238 #if SIZEOF_REGISTER == 8
7239 if ((*ip) == CEE_CONV_U8)
7240 sp [-1]->inst_c0 = (guint32)data;
7241 else
7242 sp [-1]->inst_c0 = data;
7243 #else
7244 sp [-1]->inst_ls_word = data;
7245 if ((*ip) == CEE_CONV_U8)
7246 sp [-1]->inst_ms_word = 0;
7247 else
7248 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7249 #endif
7250 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7252 else {
7253 ADD_UNOP (*ip);
7255 ip++;
7256 break;
7257 case CEE_CONV_OVF_I4:
7258 case CEE_CONV_OVF_I1:
7259 case CEE_CONV_OVF_I2:
7260 case CEE_CONV_OVF_I:
7261 case CEE_CONV_OVF_U:
7262 CHECK_STACK (1);
7264 if (sp [-1]->type == STACK_R8) {
7265 ADD_UNOP (CEE_CONV_OVF_I8);
7266 ADD_UNOP (*ip);
7267 } else {
7268 ADD_UNOP (*ip);
7270 ip++;
7271 break;
7272 case CEE_CONV_OVF_U1:
7273 case CEE_CONV_OVF_U2:
7274 case CEE_CONV_OVF_U4:
7275 CHECK_STACK (1);
7277 if (sp [-1]->type == STACK_R8) {
7278 ADD_UNOP (CEE_CONV_OVF_U8);
7279 ADD_UNOP (*ip);
7280 } else {
7281 ADD_UNOP (*ip);
7283 ip++;
7284 break;
7285 case CEE_CONV_OVF_I1_UN:
7286 case CEE_CONV_OVF_I2_UN:
7287 case CEE_CONV_OVF_I4_UN:
7288 case CEE_CONV_OVF_I8_UN:
7289 case CEE_CONV_OVF_U1_UN:
7290 case CEE_CONV_OVF_U2_UN:
7291 case CEE_CONV_OVF_U4_UN:
7292 case CEE_CONV_OVF_U8_UN:
7293 case CEE_CONV_OVF_I_UN:
7294 case CEE_CONV_OVF_U_UN:
7295 case CEE_CONV_U2:
7296 case CEE_CONV_U1:
7297 case CEE_CONV_I:
7298 case CEE_CONV_U:
7299 CHECK_STACK (1);
7300 ADD_UNOP (*ip);
7301 ip++;
7302 break;
7303 case CEE_ADD_OVF:
7304 case CEE_ADD_OVF_UN:
7305 case CEE_MUL_OVF:
7306 case CEE_MUL_OVF_UN:
7307 case CEE_SUB_OVF:
7308 case CEE_SUB_OVF_UN:
7309 CHECK_STACK (2);
7310 ADD_BINOP (*ip);
7311 ip++;
7312 break;
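/* CEE_CPOBJ: copy a value through two pointers; for reference types this is
 * just a pointer-sized load + store, value types go through mini_emit_stobj. */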
7313 case CEE_CPOBJ:
7314 CHECK_OPSIZE (5);
7315 CHECK_STACK (2);
7316 token = read32 (ip + 1);
7317 klass = mini_get_class (method, token, generic_context);
7318 CHECK_TYPELOAD (klass);
7319 sp -= 2;
7320 if (generic_class_is_reference_type (cfg, klass)) {
7321 MonoInst *store, *load;
7322 int dreg = alloc_preg (cfg);
7324 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7325 load->flags |= ins_flag;
7326 MONO_ADD_INS (cfg->cbb, load);
7328 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7329 store->flags |= ins_flag;
7330 MONO_ADD_INS (cfg->cbb, store);
7331 } else {
7332 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7334 ins_flag = 0;
7335 ip += 5;
7336 break;
7337 case CEE_LDOBJ: {
7338 int loc_index = -1;
7339 int stloc_len = 0;
7341 CHECK_OPSIZE (5);
7342 CHECK_STACK (1);
7343 --sp;
7344 token = read32 (ip + 1);
7345 klass = mini_get_class (method, token, generic_context);
7346 CHECK_TYPELOAD (klass);
7348 /* Optimize the common ldobj+stloc combination */
7349 switch (ip [5]) {
7350 case CEE_STLOC_S:
7351 loc_index = ip [6];
7352 stloc_len = 2;
7353 break;
7354 case CEE_STLOC_0:
7355 case CEE_STLOC_1:
7356 case CEE_STLOC_2:
7357 case CEE_STLOC_3:
7358 loc_index = ip [5] - CEE_STLOC_0;
7359 stloc_len = 1;
7360 break;
7361 default:
7362 break;
7365 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7366 CHECK_LOCAL (loc_index);
7368 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7369 ins->dreg = cfg->locals [loc_index]->dreg;
7370 ip += 5;
7371 ip += stloc_len;
7372 break;
7375 /* Optimize the ldobj+stobj combination */
7376 /* The reference case ends up being a load+store anyway */
7377 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7378 CHECK_STACK (1);
7380 sp --;
7382 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7384 ip += 5 + 5;
7385 ins_flag = 0;
7386 break;
7389 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7390 *sp++ = ins;
7392 ip += 5;
7393 ins_flag = 0;
7394 inline_costs += 1;
7395 break;
7397 case CEE_LDSTR:
7398 CHECK_STACK_OVF (1);
7399 CHECK_OPSIZE (5);
7400 n = read32 (ip + 1);
7402 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7403 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7404 ins->type = STACK_OBJ;
7405 *sp = ins;
7407 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7408 MonoInst *iargs [1];
7410 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7411 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7412 } else {
7413 if (cfg->opt & MONO_OPT_SHARED) {
7414 MonoInst *iargs [3];
7416 if (cfg->compile_aot) {
7417 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7419 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7420 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7421 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7422 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7423 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7424 } else {
7425 if (bblock->out_of_line) {
7426 MonoInst *iargs [2];
7428 if (image == mono_defaults.corlib) {
7430 /* Avoid relocations in AOT and save some space by using a
7431 * version of helper_ldstr specialized to mscorlib. */
7433 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7434 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7435 } else {
7436 /* Avoid creating the string object */
7437 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7438 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7439 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7442 else
7443 if (cfg->compile_aot) {
7444 NEW_LDSTRCONST (cfg, ins, image, n);
7445 *sp = ins;
7446 MONO_ADD_INS (bblock, ins);
7448 else {
7449 NEW_PCONST (cfg, ins, NULL);
7450 ins->type = STACK_OBJ;
7451 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7452 *sp = ins;
7453 MONO_ADD_INS (bblock, ins);
7458 sp++;
7459 ip += 5;
7460 break;
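/* CEE_NEWOBJ: resolve the ctor and, when needed, an rgctx vtable/method
 * argument for shared generic code. Corlib exception ctors in out-of-line
 * blocks, SZARRAY ctors, string ctors and value types are special-cased;
 * otherwise the object is allocated and the ctor is inlined or called. */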
7461 case CEE_NEWOBJ: {
7462 MonoInst *iargs [2];
7463 MonoMethodSignature *fsig;
7464 MonoInst this_ins;
7465 MonoInst *alloc;
7466 MonoInst *vtable_arg = NULL;
7468 CHECK_OPSIZE (5);
7469 token = read32 (ip + 1);
7470 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7471 if (!cmethod)
7472 goto load_error;
7473 fsig = mono_method_get_signature (cmethod, image, token);
7475 mono_save_token_info (cfg, image, token, cmethod);
7477 if (!mono_class_init (cmethod->klass))
7478 goto load_error;
7480 if (cfg->generic_sharing_context)
7481 context_used = mono_method_check_context_used (cmethod);
7483 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7484 if (check_linkdemand (cfg, method, cmethod))
7485 INLINE_FAILURE;
7486 CHECK_CFG_EXCEPTION;
7487 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7488 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7491 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7492 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7493 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7494 if (context_used) {
7495 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7496 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7497 } else {
7498 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7500 } else {
7501 if (context_used) {
7502 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7503 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7504 } else {
7505 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7507 CHECK_TYPELOAD (cmethod->klass);
7508 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7513 n = fsig->param_count;
7514 CHECK_STACK (n);
7517 /* Generate smaller code for the common newobj <exception> instruction in
7518 * argument checking code. */
7520 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7521 is_exception_class (cmethod->klass) && n <= 2 &&
7522 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7523 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7524 MonoInst *iargs [3];
7526 g_assert (!vtable_arg);
7528 sp -= n;
7530 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7531 switch (n) {
7532 case 0:
7533 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7534 break;
7535 case 1:
7536 iargs [1] = sp [0];
7537 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7538 break;
7539 case 2:
7540 iargs [1] = sp [0];
7541 iargs [2] = sp [1];
7542 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7543 break;
7544 default:
7545 g_assert_not_reached ();
7548 ip += 5;
7549 inline_costs += 5;
7550 break;
7553 /* move the args to allow room for 'this' in the first position */
7554 while (n--) {
7555 --sp;
7556 sp [1] = sp [0];
7559 /* check_call_signature () requires sp[0] to be set */
7560 this_ins.type = STACK_OBJ;
7561 sp [0] = &this_ins;
7562 if (check_call_signature (cfg, fsig, sp))
7563 UNVERIFIED;
7565 iargs [0] = NULL;
7567 if (mini_class_is_system_array (cmethod->klass)) {
7568 g_assert (!vtable_arg);
7570 if (context_used) {
7571 *sp = emit_get_rgctx_method (cfg, context_used,
7572 cmethod, MONO_RGCTX_INFO_METHOD);
7573 } else {
7574 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7577 /* Avoid varargs in the common case */
7578 if (fsig->param_count == 1)
7579 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7580 else if (fsig->param_count == 2)
7581 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7582 else
7583 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7584 } else if (cmethod->string_ctor) {
7585 g_assert (!context_used);
7586 g_assert (!vtable_arg);
7587 /* we simply pass a null pointer */
7588 EMIT_NEW_PCONST (cfg, *sp, NULL);
7589 /* now call the string ctor */
7590 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7591 } else {
7592 MonoInst* callvirt_this_arg = NULL;
7594 if (cmethod->klass->valuetype) {
7595 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7596 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7597 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7599 alloc = NULL;
7602 /* The code generated by mini_emit_virtual_call () expects
7603 * iargs [0] to be a boxed instance, but luckily the vcall
7604 * will be transformed into a normal call there. */
7606 } else if (context_used) {
7607 MonoInst *data;
7608 int rgctx_info;
7610 if (cfg->opt & MONO_OPT_SHARED)
7611 rgctx_info = MONO_RGCTX_INFO_KLASS;
7612 else
7613 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7614 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7616 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7617 *sp = alloc;
7618 } else {
7619 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7621 CHECK_TYPELOAD (cmethod->klass);
7624 /* TypeInitializationExceptions thrown from the mono_runtime_class_init
7625 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7626 * As a workaround, we call class cctors before allocating objects. */
7628 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7629 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7630 if (cfg->verbose_level > 2)
7631 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7632 class_inits = g_slist_prepend (class_inits, vtable);
7635 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7636 *sp = alloc;
7638 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7640 if (alloc)
7641 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7643 /* Now call the actual ctor */
7644 /* Avoid virtual calls to ctors if possible */
7645 if (cmethod->klass->marshalbyref)
7646 callvirt_this_arg = sp [0];
7648 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7649 mono_method_check_inlining (cfg, cmethod) &&
7650 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7651 !g_list_find (dont_inline, cmethod)) {
7652 int costs;
7654 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7655 cfg->real_offset += 5;
7656 bblock = cfg->cbb;
7658 inline_costs += costs - 5;
7659 } else {
7660 INLINE_FAILURE;
7661 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7663 } else if (context_used &&
7664 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7665 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7666 MonoInst *cmethod_addr;
7668 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7669 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7671 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7672 } else {
7673 INLINE_FAILURE;
7674 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7675 callvirt_this_arg, NULL, vtable_arg);
7679 if (alloc == NULL) {
7680 /* Valuetype */
7681 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7682 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7683 *sp++= ins;
7685 else
7686 *sp++ = alloc;
7688 ip += 5;
7689 inline_costs += 5;
7690 break;
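/* CEE_CASTCLASS / CEE_ISINST: under generic sharing the check calls the
 * mono_object_castclass/mono_object_isinst icalls with an rgctx klass;
 * MarshalByRef and interface types inline a marshalling wrapper; the common
 * case expands to inline checks via handle_castclass/handle_isinst. */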
7692 case CEE_CASTCLASS:
7693 CHECK_STACK (1);
7694 --sp;
7695 CHECK_OPSIZE (5);
7696 token = read32 (ip + 1);
7697 klass = mini_get_class (method, token, generic_context);
7698 CHECK_TYPELOAD (klass);
7699 if (sp [0]->type != STACK_OBJ)
7700 UNVERIFIED;
7702 if (cfg->generic_sharing_context)
7703 context_used = mono_class_check_context_used (klass);
7705 if (context_used) {
7706 MonoInst *args [2];
7708 /* obj */
7709 args [0] = *sp;
7711 /* klass */
7712 args [1] = emit_get_rgctx_klass (cfg, context_used,
7713 klass, MONO_RGCTX_INFO_KLASS);
7715 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7716 *sp ++ = ins;
7717 ip += 5;
7718 inline_costs += 2;
7719 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7720 MonoMethod *mono_castclass;
7721 MonoInst *iargs [1];
7722 int costs;
7724 mono_castclass = mono_marshal_get_castclass (klass);
7725 iargs [0] = sp [0];
7727 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7728 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7729 g_assert (costs > 0);
7731 ip += 5;
7732 cfg->real_offset += 5;
7733 bblock = cfg->cbb;
7735 *sp++ = iargs [0];
7737 inline_costs += costs;
7739 else {
7740 ins = handle_castclass (cfg, klass, *sp);
7741 CHECK_CFG_EXCEPTION;
7742 bblock = cfg->cbb;
7743 *sp ++ = ins;
7744 ip += 5;
7746 break;
7747 case CEE_ISINST: {
7748 CHECK_STACK (1);
7749 --sp;
7750 CHECK_OPSIZE (5);
7751 token = read32 (ip + 1);
7752 klass = mini_get_class (method, token, generic_context);
7753 CHECK_TYPELOAD (klass);
7754 if (sp [0]->type != STACK_OBJ)
7755 UNVERIFIED;
7757 if (cfg->generic_sharing_context)
7758 context_used = mono_class_check_context_used (klass);
7760 if (context_used) {
7761 MonoInst *args [2];
7763 /* obj */
7764 args [0] = *sp;
7766 /* klass */
7767 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7769 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7770 sp++;
7771 ip += 5;
7772 inline_costs += 2;
7773 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7774 MonoMethod *mono_isinst;
7775 MonoInst *iargs [1];
7776 int costs;
7778 mono_isinst = mono_marshal_get_isinst (klass);
7779 iargs [0] = sp [0];
7781 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7782 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7783 g_assert (costs > 0);
7785 ip += 5;
7786 cfg->real_offset += 5;
7787 bblock = cfg->cbb;
7789 *sp++= iargs [0];
7791 inline_costs += costs;
7793 else {
7794 ins = handle_isinst (cfg, klass, *sp);
7795 CHECK_CFG_EXCEPTION;
7796 bblock = cfg->cbb;
7797 *sp ++ = ins;
7798 ip += 5;
7800 break;
7802 case CEE_UNBOX_ANY: {
7803 CHECK_STACK (1);
7804 --sp;
7805 CHECK_OPSIZE (5);
7806 token = read32 (ip + 1);
7807 klass = mini_get_class (method, token, generic_context);
7808 CHECK_TYPELOAD (klass);
7810 mono_save_token_info (cfg, image, token, klass);
7812 if (cfg->generic_sharing_context)
7813 context_used = mono_class_check_context_used (klass);
7815 if (generic_class_is_reference_type (cfg, klass)) {
7816 /* CASTCLASS FIXME kill this huge slice of duplicated code */
7817 if (context_used) {
7818 MonoInst *iargs [2];
7820 /* obj */
7821 iargs [0] = *sp;
7822 /* klass */
7823 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7824 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7825 *sp ++ = ins;
7826 ip += 5;
7827 inline_costs += 2;
7828 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7829 MonoMethod *mono_castclass;
7830 MonoInst *iargs [1];
7831 int costs;
7833 mono_castclass = mono_marshal_get_castclass (klass);
7834 iargs [0] = sp [0];
7836 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7837 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7839 g_assert (costs > 0);
7841 ip += 5;
7842 cfg->real_offset += 5;
7843 bblock = cfg->cbb;
7845 *sp++ = iargs [0];
7846 inline_costs += costs;
7847 } else {
7848 ins = handle_castclass (cfg, klass, *sp);
7849 CHECK_CFG_EXCEPTION;
7850 bblock = cfg->cbb;
7851 *sp ++ = ins;
7852 ip += 5;
7854 break;
7857 if (mono_class_is_nullable (klass)) {
7858 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7859 *sp++= ins;
7860 ip += 5;
7861 break;
7864 /* UNBOX */
7865 ins = handle_unbox (cfg, klass, sp, context_used);
7866 *sp = ins;
7868 ip += 5;
7870 /* LDOBJ */
7871 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7872 *sp++ = ins;
7874 inline_costs += 2;
7875 break;
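/* CEE_BOX: boxing a reference type is a no-op. A box immediately followed by
 * brtrue in the same bblock is turned into an unconditional branch, since a
 * boxed value type is never null; otherwise handle_box () (or the rgctx
 * variant) allocates the object and copies the value into it. */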
7877 case CEE_BOX: {
7878 MonoInst *val;
7880 CHECK_STACK (1);
7881 --sp;
7882 val = *sp;
7883 CHECK_OPSIZE (5);
7884 token = read32 (ip + 1);
7885 klass = mini_get_class (method, token, generic_context);
7886 CHECK_TYPELOAD (klass);
7888 mono_save_token_info (cfg, image, token, klass);
7890 if (cfg->generic_sharing_context)
7891 context_used = mono_class_check_context_used (klass);
7893 if (generic_class_is_reference_type (cfg, klass)) {
7894 *sp++ = val;
7895 ip += 5;
7896 break;
7899 if (klass == mono_defaults.void_class)
7900 UNVERIFIED;
7901 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7902 UNVERIFIED;
7903 /* frequent check in generic code: box (struct), brtrue */
7904 if (!mono_class_is_nullable (klass) &&
7905 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7906 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7907 ip += 5;
7908 MONO_INST_NEW (cfg, ins, OP_BR);
7909 if (*ip == CEE_BRTRUE_S) {
7910 CHECK_OPSIZE (2);
7911 ip++;
7912 target = ip + 1 + (signed char)(*ip);
7913 ip++;
7914 } else {
7915 CHECK_OPSIZE (5);
7916 ip++;
7917 target = ip + 4 + (gint)(read32 (ip));
7918 ip += 4;
7920 GET_BBLOCK (cfg, tblock, target);
7921 link_bblock (cfg, bblock, tblock);
7922 ins->inst_target_bb = tblock;
7923 GET_BBLOCK (cfg, tblock, ip);
7925 /* This leads to some inconsistency, since the two bblocks are
7926 * not really connected, but it is needed for handling stack
7927 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7928 * FIXME: This should only be needed if sp != stack_start, but that
7929 * doesn't work for some reason (test failure in mcs/tests on x86). */
7931 link_bblock (cfg, bblock, tblock);
7932 if (sp != stack_start) {
7933 handle_stack_args (cfg, stack_start, sp - stack_start);
7934 sp = stack_start;
7935 CHECK_UNVERIFIABLE (cfg);
7937 MONO_ADD_INS (bblock, ins);
7938 start_new_bblock = 1;
7939 break;
7942 if (context_used) {
7943 MonoInst *data;
7944 int rgctx_info;
7946 if (cfg->opt & MONO_OPT_SHARED)
7947 rgctx_info = MONO_RGCTX_INFO_KLASS;
7948 else
7949 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7950 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7951 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7952 } else {
7953 *sp++ = handle_box (cfg, val, klass);
7956 CHECK_CFG_EXCEPTION;
7957 ip += 5;
7958 inline_costs += 1;
7959 break;
7961 case CEE_UNBOX: {
7962 CHECK_STACK (1);
7963 --sp;
7964 CHECK_OPSIZE (5);
7965 token = read32 (ip + 1);
7966 klass = mini_get_class (method, token, generic_context);
7967 CHECK_TYPELOAD (klass);
7969 mono_save_token_info (cfg, image, token, klass);
7971 if (cfg->generic_sharing_context)
7972 context_used = mono_class_check_context_used (klass);
7974 if (mono_class_is_nullable (klass)) {
7975 MonoInst *val;
7977 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7978 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7980 *sp++= ins;
7981 } else {
7982 ins = handle_unbox (cfg, klass, sp, context_used);
7983 *sp++ = ins;
7985 ip += 5;
7986 inline_costs += 2;
7987 break;
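/* CEE_LDFLD/LDFLDA/STFLD: instance field access. Possibly-remote objects
 * (MarshalByRef/ContextBound classes) go through the ldfld/ldflda/stfld
 * marshalling wrappers; the normal path is a membase load/store at the field
 * offset (minus the MonoObject header for value types), with a write barrier
 * when a reference is stored. */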
7989 case CEE_LDFLD:
7990 case CEE_LDFLDA:
7991 case CEE_STFLD: {
7992 MonoClassField *field;
7993 int costs;
7994 guint foffset;
7996 if (*ip == CEE_STFLD) {
7997 CHECK_STACK (2);
7998 sp -= 2;
7999 } else {
8000 CHECK_STACK (1);
8001 --sp;
8003 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8004 UNVERIFIED;
8005 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8006 UNVERIFIED;
8007 CHECK_OPSIZE (5);
8008 token = read32 (ip + 1);
8009 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8010 field = mono_method_get_wrapper_data (method, token);
8011 klass = field->parent;
8013 else {
8014 field = mono_field_from_token (image, token, &klass, generic_context);
8016 if (!field)
8017 goto load_error;
8018 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8019 FIELD_ACCESS_FAILURE;
8020 mono_class_init (klass);
8022 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8023 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8024 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8025 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip); */
8028 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8029 if (*ip == CEE_STFLD) {
8030 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8031 UNVERIFIED;
8032 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8033 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8034 MonoInst *iargs [5];
8036 iargs [0] = sp [0];
8037 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8038 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8039 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8040 field->offset);
8041 iargs [4] = sp [1];
8043 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8044 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8045 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8046 g_assert (costs > 0);
8048 cfg->real_offset += 5;
8049 bblock = cfg->cbb;
8051 inline_costs += costs;
8052 } else {
8053 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8055 } else {
8056 MonoInst *store;
8058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8060 #if HAVE_WRITE_BARRIERS
8061 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8062 /* insert call to write barrier */
8063 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8064 MonoInst *iargs [2];
8065 int dreg;
8067 dreg = alloc_preg (cfg);
8068 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8069 iargs [1] = sp [1];
8070 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8072 #endif
8074 store->flags |= ins_flag;
8076 ins_flag = 0;
8077 ip += 5;
8078 break;
8081 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8082 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8083 MonoInst *iargs [4];
8085 iargs [0] = sp [0];
8086 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8087 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8088 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8089 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8090 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8091 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8092 bblock = cfg->cbb;
8093 g_assert (costs > 0);
8095 cfg->real_offset += 5;
8097 *sp++ = iargs [0];
8099 inline_costs += costs;
8100 } else {
8101 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8102 *sp++ = ins;
8104 } else {
8105 if (sp [0]->type == STACK_VTYPE) {
8106 MonoInst *var;
8108 /* Have to compute the address of the variable */
8110 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8111 if (!var)
8112 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8113 else
8114 g_assert (var->klass == klass);
8116 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8117 sp [0] = ins;
8120 if (*ip == CEE_LDFLDA) {
8121 dreg = alloc_preg (cfg);
8123 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8124 ins->klass = mono_class_from_mono_type (field->type);
8125 ins->type = STACK_MP;
8126 *sp++ = ins;
8127 } else {
8128 MonoInst *load;
8130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8131 load->flags |= ins_flag;
8132 *sp++ = load;
8135 ins_flag = 0;
8136 ip += 5;
8137 break;
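/* CEE_LDSFLD/LDSFLDA/STSFLD: first compute the static field address (a domain
 * icall under -O=shared, an rgctx static-data pointer under generic sharing,
 * the mono_get_special_static_data icall for special static fields, or a
 * direct pointer into vtable->data), then emit the load/store. Loads from
 * initonly fields of already-initialized classes are folded to constants. */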
8139 case CEE_LDSFLD:
8140 case CEE_LDSFLDA:
8141 case CEE_STSFLD: {
8142 MonoClassField *field;
8143 gpointer addr = NULL;
8144 gboolean is_special_static;
8146 CHECK_OPSIZE (5);
8147 token = read32 (ip + 1);
8149 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8150 field = mono_method_get_wrapper_data (method, token);
8151 klass = field->parent;
8153 else
8154 field = mono_field_from_token (image, token, &klass, generic_context);
8155 if (!field)
8156 goto load_error;
8157 mono_class_init (klass);
8158 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8159 FIELD_ACCESS_FAILURE;
8161 /* if the class is Critical then transparent code cannot access its fields */
8162 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8163 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8166 /* We can only support shared generic static
8167 * field access on architectures where the
8168 * trampoline code has been extended to handle
8169 * the generic class init. */
8171 #ifndef MONO_ARCH_VTABLE_REG
8172 GENERIC_SHARING_FAILURE (*ip);
8173 #endif
8175 if (cfg->generic_sharing_context)
8176 context_used = mono_class_check_context_used (klass);
8178 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8180 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8181 * to be called here. */
8183 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8184 mono_class_vtable (cfg->domain, klass);
8185 CHECK_TYPELOAD (klass);
8187 mono_domain_lock (cfg->domain);
8188 if (cfg->domain->special_static_fields)
8189 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8190 mono_domain_unlock (cfg->domain);
8192 is_special_static = mono_class_field_is_special_static (field);
8194 /* Generate IR to compute the field address */
8196 if ((cfg->opt & MONO_OPT_SHARED) ||
8197 (cfg->compile_aot && is_special_static) ||
8198 (context_used && is_special_static)) {
8199 MonoInst *iargs [2];
8201 g_assert (field->parent);
8202 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8203 if (context_used) {
8204 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8205 field, MONO_RGCTX_INFO_CLASS_FIELD);
8206 } else {
8207 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8209 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8210 } else if (context_used) {
8211 MonoInst *static_data;
8214 /* g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8215 method->klass->name_space, method->klass->name, method->name,
8216 depth, field->offset); */
8219 if (mono_class_needs_cctor_run (klass, method)) {
8220 MonoCallInst *call;
8221 MonoInst *vtable;
8223 vtable = emit_get_rgctx_klass (cfg, context_used,
8224 klass, MONO_RGCTX_INFO_VTABLE);
8226 // FIXME: This doesn't work since it tries to pass the argument
8227 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8229 /* The vtable pointer is always passed in a register regardless of
8230 * the calling convention, so assign it manually, and make a call
8231 * using a signature without parameters. */
8233 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8234 #ifdef MONO_ARCH_VTABLE_REG
8235 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8236 cfg->uses_vtable_reg = TRUE;
8237 #else
8238 NOT_IMPLEMENTED;
8239 #endif
8243 /* The pointer we're computing here is
8245 * super_info.static_data + field->offset */
8247 static_data = emit_get_rgctx_klass (cfg, context_used,
8248 klass, MONO_RGCTX_INFO_STATIC_DATA);
8250 if (field->offset == 0) {
8251 ins = static_data;
8252 } else {
8253 int addr_reg = mono_alloc_preg (cfg);
8254 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8256 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8257 MonoInst *iargs [2];
8259 g_assert (field->parent);
8260 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8261 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8262 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8263 } else {
8264 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8266 CHECK_TYPELOAD (klass);
8267 if (!addr) {
8268 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8269 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8270 if (cfg->verbose_level > 2)
8271 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8272 class_inits = g_slist_prepend (class_inits, vtable);
8273 } else {
8274 if (cfg->run_cctors) {
8275 MonoException *ex;
8276 /* This ensures that inlining cannot trigger: */
8277 /* too many apps depend on .cctors */
8278 /* running in a specific order... */
8279 if (! vtable->initialized)
8280 INLINE_FAILURE;
8281 ex = mono_runtime_class_init_full (vtable, FALSE);
8282 if (ex) {
8283 set_exception_object (cfg, ex);
8284 goto exception_exit;
8288 addr = (char*)vtable->data + field->offset;
8290 if (cfg->compile_aot)
8291 EMIT_NEW_SFLDACONST (cfg, ins, field);
8292 else
8293 EMIT_NEW_PCONST (cfg, ins, addr);
8294 } else {
8296 /* insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr)).
8297 * This could be later optimized to do just a couple of
8298 * memory dereferences with constant offsets. */
8300 MonoInst *iargs [1];
8301 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8302 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8306 /* Generate IR to do the actual load/store operation */
8308 if (*ip == CEE_LDSFLDA) {
8309 ins->klass = mono_class_from_mono_type (field->type);
8310 ins->type = STACK_PTR;
8311 *sp++ = ins;
8312 } else if (*ip == CEE_STSFLD) {
8313 MonoInst *store;
8314 CHECK_STACK (1);
8315 sp--;
8317 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8318 store->flags |= ins_flag;
8319 } else {
8320 gboolean is_const = FALSE;
8321 MonoVTable *vtable = NULL;
8323 if (!context_used) {
8324 vtable = mono_class_vtable (cfg->domain, klass);
8325 CHECK_TYPELOAD (klass);
8327 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8328 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8329 gpointer addr = (char*)vtable->data + field->offset;
8330 int ro_type = field->type->type;
8331 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8332 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8334 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8335 is_const = TRUE;
8336 switch (ro_type) {
8337 case MONO_TYPE_BOOLEAN:
8338 case MONO_TYPE_U1:
8339 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8340 sp++;
8341 break;
8342 case MONO_TYPE_I1:
8343 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8344 sp++;
8345 break;
8346 case MONO_TYPE_CHAR:
8347 case MONO_TYPE_U2:
8348 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8349 sp++;
8350 break;
8351 case MONO_TYPE_I2:
8352 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8353 sp++;
8354 break;
8356 case MONO_TYPE_I4:
8357 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8358 sp++;
8359 break;
8360 case MONO_TYPE_U4:
8361 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8362 sp++;
8363 break;
8364 #ifndef HAVE_MOVING_COLLECTOR
8365 case MONO_TYPE_I:
8366 case MONO_TYPE_U:
8367 case MONO_TYPE_STRING:
8368 case MONO_TYPE_OBJECT:
8369 case MONO_TYPE_CLASS:
8370 case MONO_TYPE_SZARRAY:
8371 case MONO_TYPE_PTR:
8372 case MONO_TYPE_FNPTR:
8373 case MONO_TYPE_ARRAY:
8374 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8375 type_to_eval_stack_type ((cfg), field->type, *sp);
8376 sp++;
8377 break;
8378 #endif
8379 case MONO_TYPE_I8:
8380 case MONO_TYPE_U8:
8381 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8382 sp++;
8383 break;
8384 case MONO_TYPE_R4:
8385 case MONO_TYPE_R8:
8386 case MONO_TYPE_VALUETYPE:
8387 default:
8388 is_const = FALSE;
8389 break;
8393 if (!is_const) {
8394 MonoInst *load;
8396 CHECK_STACK_OVF (1);
8398 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8399 load->flags |= ins_flag;
8400 ins_flag = 0;
8401 *sp++ = load;
8404 ins_flag = 0;
8405 ip += 5;
8406 break;
8408 case CEE_STOBJ:
8409 CHECK_STACK (2);
8410 sp -= 2;
8411 CHECK_OPSIZE (5);
8412 token = read32 (ip + 1);
8413 klass = mini_get_class (method, token, generic_context);
8414 CHECK_TYPELOAD (klass);
8415 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8416 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8417 ins_flag = 0;
8418 ip += 5;
8419 inline_costs += 1;
8420 break;
8423 /* Array opcodes */
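/* CEE_NEWARR: a 64-bit length is narrowed to int32 first; allocation goes
 * through mono_array_new_specific (shared generics), mono_array_new
 * (-O=shared) or a decomposed OP_NEWARR. When the following IL matches the
 * array-initialization pattern recognized by initialize_array_data (), the
 * static data is copied with an open-coded memcpy instead. */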
8425 case CEE_NEWARR: {
8426 MonoInst *len_ins;
8427 const char *data_ptr;
8428 int data_size = 0;
8429 guint32 field_token;
8431 CHECK_STACK (1);
8432 --sp;
8434 CHECK_OPSIZE (5);
8435 token = read32 (ip + 1);
8437 klass = mini_get_class (method, token, generic_context);
8438 CHECK_TYPELOAD (klass);
8440 if (cfg->generic_sharing_context)
8441 context_used = mono_class_check_context_used (klass);
8443 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8444 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8445 ins->sreg1 = sp [0]->dreg;
8446 ins->type = STACK_I4;
8447 ins->dreg = alloc_ireg (cfg);
8448 MONO_ADD_INS (cfg->cbb, ins);
8449 *sp = mono_decompose_opcode (cfg, ins);
8452 if (context_used) {
8453 MonoInst *args [2];
8455 /* FIXME: Decompose later to help abcrem */
8457 /* vtable */
8458 args [0] = emit_get_rgctx_klass (cfg, context_used,
8459 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8461 /* array len */
8462 args [1] = sp [0];
8464 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8465 } else {
8466 if (cfg->opt & MONO_OPT_SHARED) {
8467 /* Decompose now to avoid problems with references to the domainvar */
8468 MonoInst *iargs [3];
8470 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8471 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8472 iargs [2] = sp [0];
8474 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8475 } else {
8476 /* Decompose later since it is needed by abcrem */
8477 MonoClass *array_type = mono_array_class_get (klass, 1);
8478 mono_class_vtable (cfg->domain, array_type);
8479 CHECK_TYPELOAD (array_type);
8481 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8482 ins->dreg = alloc_preg (cfg);
8483 ins->sreg1 = sp [0]->dreg;
8484 ins->inst_newa_class = klass;
8485 ins->type = STACK_OBJ;
8486 ins->klass = klass;
8487 MONO_ADD_INS (cfg->cbb, ins);
8488 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8489 cfg->cbb->has_array_access = TRUE;
8491 /* Needed so mono_emit_load_get_addr () gets called */
8492 mono_get_got_var (cfg);
8496 len_ins = sp [0];
8497 ip += 5;
8498 *sp++ = ins;
8499 inline_costs += 1;
8502 /* we inline/optimize the initialization sequence if possible.
8503 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8504 * for small sizes open code the memcpy
8505 * ensure the rva field is big enough */
8507 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8508 MonoMethod *memcpy_method = get_memcpy_method ();
8509 MonoInst *iargs [3];
8510 int add_reg = alloc_preg (cfg);
8512 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8513 if (cfg->compile_aot) {
8514 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8515 } else {
8516 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8518 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8519 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8520 ip += 11;
8523 break;
8525 case CEE_LDLEN:
8526 CHECK_STACK (1);
8527 --sp;
8528 if (sp [0]->type != STACK_OBJ)
8529 UNVERIFIED;
8531 dreg = alloc_preg (cfg);
8532 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8533 ins->dreg = alloc_preg (cfg);
8534 ins->sreg1 = sp [0]->dreg;
8535 ins->type = STACK_I4;
8536 MONO_ADD_INS (cfg->cbb, ins);
8537 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8538 cfg->cbb->has_array_access = TRUE;
8539 ip ++;
8540 *sp++ = ins;
8541 break;
8542 case CEE_LDELEMA:
8543 CHECK_STACK (2);
8544 sp -= 2;
8545 CHECK_OPSIZE (5);
8546 if (sp [0]->type != STACK_OBJ)
8547 UNVERIFIED;
8549 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8551 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8552 CHECK_TYPELOAD (klass);
8553 /* we need to make sure that this array is exactly the type it needs
8554 * to be for correctness. the wrappers are lax with their usage
8555 * so we need to ignore them here */
8557 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8558 MonoClass *array_class = mono_array_class_get (klass, 1);
8559 mini_emit_check_array_type (cfg, sp [0], array_class);
8560 CHECK_TYPELOAD (array_class);
8563 readonly = FALSE;
8564 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8565 *sp++ = ins;
8566 ip += 5;
8567 break;
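/* CEE_LDELEM*: with a constant index, a bounds check plus a direct membase
 * load at the computed offset is emitted; otherwise the element address comes
 * from mini_emit_ldelema_1_ins (). */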
8568 case CEE_LDELEM:
8569 case CEE_LDELEM_I1:
8570 case CEE_LDELEM_U1:
8571 case CEE_LDELEM_I2:
8572 case CEE_LDELEM_U2:
8573 case CEE_LDELEM_I4:
8574 case CEE_LDELEM_U4:
8575 case CEE_LDELEM_I8:
8576 case CEE_LDELEM_I:
8577 case CEE_LDELEM_R4:
8578 case CEE_LDELEM_R8:
8579 case CEE_LDELEM_REF: {
8580 MonoInst *addr;
8582 CHECK_STACK (2);
8583 sp -= 2;
8585 if (*ip == CEE_LDELEM) {
8586 CHECK_OPSIZE (5);
8587 token = read32 (ip + 1);
8588 klass = mini_get_class (method, token, generic_context);
8589 CHECK_TYPELOAD (klass);
8590 mono_class_init (klass);
8592 else
8593 klass = array_access_to_klass (*ip);
8595 if (sp [0]->type != STACK_OBJ)
8596 UNVERIFIED;
8598 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8600 if (sp [1]->opcode == OP_ICONST) {
8601 int array_reg = sp [0]->dreg;
8602 int index_reg = sp [1]->dreg;
8603 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8605 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8606 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8607 } else {
8608 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8611 *sp++ = ins;
8612 if (*ip == CEE_LDELEM)
8613 ip += 5;
8614 else
8615 ++ip;
8616 break;
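/* CEE_STELEM*: storing a non-null reference goes through the stelemref
 * wrapper so the element-type check is performed; other stores use a bounds
 * check plus a membase store, mirroring ldelem. */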
8618 case CEE_STELEM_I:
8619 case CEE_STELEM_I1:
8620 case CEE_STELEM_I2:
8621 case CEE_STELEM_I4:
8622 case CEE_STELEM_I8:
8623 case CEE_STELEM_R4:
8624 case CEE_STELEM_R8:
8625 case CEE_STELEM_REF:
8626 case CEE_STELEM: {
8627 MonoInst *addr;
8629 CHECK_STACK (3);
8630 sp -= 3;
8632 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8634 if (*ip == CEE_STELEM) {
8635 CHECK_OPSIZE (5);
8636 token = read32 (ip + 1);
8637 klass = mini_get_class (method, token, generic_context);
8638 CHECK_TYPELOAD (klass);
8639 mono_class_init (klass);
8641 else
8642 klass = array_access_to_klass (*ip);
8644 if (sp [0]->type != STACK_OBJ)
8645 UNVERIFIED;
8647 /* storing a NULL doesn't need any of the complex checks in stelemref */
8648 if (generic_class_is_reference_type (cfg, klass) &&
8649 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8650 MonoMethod* helper = mono_marshal_get_stelemref ();
8651 MonoInst *iargs [3];
8653 if (sp [0]->type != STACK_OBJ)
8654 UNVERIFIED;
8655 if (sp [2]->type != STACK_OBJ)
8656 UNVERIFIED;
8658 iargs [2] = sp [2];
8659 iargs [1] = sp [1];
8660 iargs [0] = sp [0];
8662 mono_emit_method_call (cfg, helper, iargs, NULL);
8663 } else {
8664 if (sp [1]->opcode == OP_ICONST) {
8665 int array_reg = sp [0]->dreg;
8666 int index_reg = sp [1]->dreg;
8667 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8669 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8670 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8671 } else {
8672 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8673 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8677 if (*ip == CEE_STELEM)
8678 ip += 5;
8679 else
8680 ++ip;
8681 inline_costs += 1;
8682 break;
8684 case CEE_CKFINITE: {
8685 CHECK_STACK (1);
8686 --sp;
8688 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8689 ins->sreg1 = sp [0]->dreg;
8690 ins->dreg = alloc_freg (cfg);
8691 ins->type = STACK_R8;
8692 MONO_ADD_INS (bblock, ins);
8694 *sp++ = mono_decompose_opcode (cfg, ins);
8696 ++ip;
8697 break;
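/* CEE_REFANYVAL / CEE_MKREFANY operate on MonoTypedRef: refanyval checks the
 * stored klass (against an rgctx klass under generic sharing) and loads the
 * value pointer; mkrefany fills a TypedRef local with the type, klass and
 * value fields. */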
8699 case CEE_REFANYVAL: {
8700 MonoInst *src_var, *src;
8702 int klass_reg = alloc_preg (cfg);
8703 int dreg = alloc_preg (cfg);
8705 CHECK_STACK (1);
8706 MONO_INST_NEW (cfg, ins, *ip);
8707 --sp;
8708 CHECK_OPSIZE (5);
8709 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8710 CHECK_TYPELOAD (klass);
8711 mono_class_init (klass);
8713 if (cfg->generic_sharing_context)
8714 context_used = mono_class_check_context_used (klass);
8716 // FIXME:
8717 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8718 if (!src_var)
8719 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8720 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8723 if (context_used) {
8724 MonoInst *klass_ins;
8726 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8727 klass, MONO_RGCTX_INFO_KLASS);
8729 // FIXME:
8730 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8731 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8732 } else {
8733 mini_emit_class_check (cfg, klass_reg, klass);
8735 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8736 ins->type = STACK_MP;
8737 *sp++ = ins;
8738 ip += 5;
8739 break;
8741 case CEE_MKREFANY: {
8742 MonoInst *loc, *addr;
8744 CHECK_STACK (1);
8745 MONO_INST_NEW (cfg, ins, *ip);
8746 --sp;
8747 CHECK_OPSIZE (5);
8748 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8749 CHECK_TYPELOAD (klass);
8750 mono_class_init (klass);
8752 if (cfg->generic_sharing_context)
8753 context_used = mono_class_check_context_used (klass);
8755 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8756 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8758 if (context_used) {
8759 MonoInst *const_ins;
8760 int type_reg = alloc_preg (cfg);
8762 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8766 } else if (cfg->compile_aot) {
8767 int const_reg = alloc_preg (cfg);
8768 int type_reg = alloc_preg (cfg);
8770 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8774 } else {
8775 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8776 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8780 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8781 ins->type = STACK_VTYPE;
8782 ins->klass = mono_defaults.typed_reference_class;
8783 *sp++ = ins;
8784 ip += 5;
8785 break;
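/* CEE_LDTOKEN: resolve the token to a handle. The frequent
 * ldtoken + Type::GetTypeFromHandle pair is folded into a direct System.Type
 * constant (or an rgctx lookup under generic sharing); otherwise the raw
 * handle is stored into a valuetype temporary. */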
8787 case CEE_LDTOKEN: {
8788 gpointer handle;
8789 MonoClass *handle_class;
8791 CHECK_STACK_OVF (1);
8793 CHECK_OPSIZE (5);
8794 n = read32 (ip + 1);
8796 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8797 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8798 handle = mono_method_get_wrapper_data (method, n);
8799 handle_class = mono_method_get_wrapper_data (method, n + 1);
8800 if (handle_class == mono_defaults.typehandle_class)
8801 handle = &((MonoClass*)handle)->byval_arg;
8803 else {
8804 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8806 if (!handle)
8807 goto load_error;
8808 mono_class_init (handle_class);
8809 if (cfg->generic_sharing_context) {
8810 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8811 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8812 /* This case handles ldtoken
8813 of an open type, like for
8814 typeof(Gen<>). */
8815 context_used = 0;
8816 } else if (handle_class == mono_defaults.typehandle_class) {
8817 /* If we get a MONO_TYPE_CLASS
8818 then we need to provide the
8819 open type, not an
8820 instantiation of it. */
8821 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8822 context_used = 0;
8823 else
8824 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8825 } else if (handle_class == mono_defaults.fieldhandle_class)
8826 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8827 else if (handle_class == mono_defaults.methodhandle_class)
8828 context_used = mono_method_check_context_used (handle);
8829 else
8830 g_assert_not_reached ();
8833 if ((cfg->opt & MONO_OPT_SHARED) &&
8834 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8835 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8836 MonoInst *addr, *vtvar, *iargs [3];
8837 int method_context_used;
8839 if (cfg->generic_sharing_context)
8840 method_context_used = mono_method_check_context_used (method);
8841 else
8842 method_context_used = 0;
8844 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8846 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8847 EMIT_NEW_ICONST (cfg, iargs [1], n);
8848 if (method_context_used) {
8849 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8850 method, MONO_RGCTX_INFO_METHOD);
8851 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8852 } else {
8853 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8854 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8856 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8860 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8861 } else {
8862 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8863 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8864 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8865 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8866 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8867 MonoClass *tclass = mono_class_from_mono_type (handle);
8869 mono_class_init (tclass);
8870 if (context_used) {
8871 ins = emit_get_rgctx_klass (cfg, context_used,
8872 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8873 } else if (cfg->compile_aot) {
8874 if (method->wrapper_type) {
8875 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8876 /* Special case for static synchronized wrappers */
8877 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8878 } else {
8879 /* FIXME: n is not a normal token */
8880 cfg->disable_aot = TRUE;
8881 EMIT_NEW_PCONST (cfg, ins, NULL);
8883 } else {
8884 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8886 } else {
8887 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8889 ins->type = STACK_OBJ;
8890 ins->klass = cmethod->klass;
8891 ip += 5;
8892 } else {
8893 MonoInst *addr, *vtvar;
8895 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8897 if (context_used) {
8898 if (handle_class == mono_defaults.typehandle_class) {
8899 ins = emit_get_rgctx_klass (cfg, context_used,
8900 mono_class_from_mono_type (handle),
8901 MONO_RGCTX_INFO_TYPE);
8902 } else if (handle_class == mono_defaults.methodhandle_class) {
8903 ins = emit_get_rgctx_method (cfg, context_used,
8904 handle, MONO_RGCTX_INFO_METHOD);
8905 } else if (handle_class == mono_defaults.fieldhandle_class) {
8906 ins = emit_get_rgctx_field (cfg, context_used,
8907 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8908 } else {
8909 g_assert_not_reached ();
8911 } else if (cfg->compile_aot) {
8912 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8913 } else {
8914 EMIT_NEW_PCONST (cfg, ins, handle);
8916 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8917 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8918 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8922 *sp++ = ins;
8923 ip += 5;
8924 break;
8926 case CEE_THROW:
8927 CHECK_STACK (1);
8928 MONO_INST_NEW (cfg, ins, OP_THROW);
8929 --sp;
8930 ins->sreg1 = sp [0]->dreg;
8931 ip++;
8932 bblock->out_of_line = TRUE;
8933 MONO_ADD_INS (bblock, ins);
8934 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8935 MONO_ADD_INS (bblock, ins);
8936 sp = stack_start;
8938 link_bblock (cfg, bblock, end_bblock);
8939 start_new_bblock = 1;
8940 break;
8941 case CEE_ENDFINALLY:
8942 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8943 MONO_ADD_INS (bblock, ins);
8944 ip++;
8945 start_new_bblock = 1;
8948 /* Control will leave the method so empty the stack, otherwise
8949 * the next basic block will start with a nonempty stack. */
8951 while (sp != stack_start) {
8952 sp--;
8954 break;
8955 case CEE_LEAVE:
8956 case CEE_LEAVE_S: {
8957 GList *handlers;
8959 if (*ip == CEE_LEAVE) {
8960 CHECK_OPSIZE (5);
8961 target = ip + 5 + (gint32)read32(ip + 1);
8962 } else {
8963 CHECK_OPSIZE (2);
8964 target = ip + 2 + (signed char)(ip [1]);
8967 /* empty the stack */
8968 while (sp != stack_start) {
8969 sp--;
8973 /* If this leave statement is in a catch block, check for a
8974 * pending exception, and rethrow it if necessary. */
8976 for (i = 0; i < header->num_clauses; ++i) {
8977 MonoExceptionClause *clause = &header->clauses [i];
8980 /* Use <= in the final comparison to handle clauses with multiple
8981 * leave statements, like in bug #78024.
8982 * The ordering of the exception clauses guarantees that we find the
8983 * innermost clause. */
8985 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8986 MonoInst *exc_ins;
8987 MonoBasicBlock *dont_throw;
8990 MonoInst *load;
8992 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8995 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8997 NEW_BBLOCK (cfg, dont_throw);
8999 /*
9000 * Currently, we always rethrow the abort exception, despite the
9001 * fact that this is not correct. See thread6.cs for an example.
9002 * But propagating the abort exception is more important than
9003 * getting the semantics right.
9004 */
9005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9007 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9009 MONO_START_BB (cfg, dont_throw);
9010 bblock = cfg->cbb;
9014 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9015 GList *tmp;
9016 for (tmp = handlers; tmp; tmp = tmp->next) {
9017 tblock = tmp->data;
9018 link_bblock (cfg, bblock, tblock);
9019 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9020 ins->inst_target_bb = tblock;
9021 MONO_ADD_INS (bblock, ins);
9022 bblock->has_call_handler = 1;
9024 g_list_free (handlers);
9027 MONO_INST_NEW (cfg, ins, OP_BR);
9028 MONO_ADD_INS (bblock, ins);
9029 GET_BBLOCK (cfg, tblock, target);
9030 link_bblock (cfg, bblock, tblock);
9031 ins->inst_target_bb = tblock;
9032 start_new_bblock = 1;
9034 if (*ip == CEE_LEAVE)
9035 ip += 5;
9036 else
9037 ip += 2;
9039 break;
9042 /*
9043 * Mono specific opcodes
9044 */
9045 case MONO_CUSTOM_PREFIX: {
9047 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9049 CHECK_OPSIZE (2);
9050 switch (ip [1]) {
9051 case CEE_MONO_ICALL: {
9052 gpointer func;
9053 MonoJitICallInfo *info;
9055 token = read32 (ip + 2);
9056 func = mono_method_get_wrapper_data (method, token);
9057 info = mono_find_jit_icall_by_addr (func);
9058 g_assert (info);
9060 CHECK_STACK (info->sig->param_count);
9061 sp -= info->sig->param_count;
9063 ins = mono_emit_jit_icall (cfg, info->func, sp);
9064 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9065 *sp++ = ins;
9067 ip += 6;
9068 inline_costs += 10 * num_calls++;
9070 break;
9072 case CEE_MONO_LDPTR: {
9073 gpointer ptr;
9075 CHECK_STACK_OVF (1);
9076 CHECK_OPSIZE (6);
9077 token = read32 (ip + 2);
9079 ptr = mono_method_get_wrapper_data (method, token);
9080 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9081 MonoJitICallInfo *callinfo;
9082 const char *icall_name;
9084 icall_name = method->name + strlen ("__icall_wrapper_");
9085 g_assert (icall_name);
9086 callinfo = mono_find_jit_icall_by_name (icall_name);
9087 g_assert (callinfo);
9089 if (ptr == callinfo->func) {
9090 /* Will be transformed into an AOTCONST later */
9091 EMIT_NEW_PCONST (cfg, ins, ptr);
9092 *sp++ = ins;
9093 ip += 6;
9094 break;
9097 /* FIXME: Generalize this */
9098 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9099 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9100 *sp++ = ins;
9101 ip += 6;
9102 break;
9104 EMIT_NEW_PCONST (cfg, ins, ptr);
9105 *sp++ = ins;
9106 ip += 6;
9107 inline_costs += 10 * num_calls++;
9108 /* Can't embed random pointers into AOT code */
9109 cfg->disable_aot = 1;
9110 break;
9112 case CEE_MONO_ICALL_ADDR: {
9113 MonoMethod *cmethod;
9114 gpointer ptr;
9116 CHECK_STACK_OVF (1);
9117 CHECK_OPSIZE (6);
9118 token = read32 (ip + 2);
9120 cmethod = mono_method_get_wrapper_data (method, token);
9122 if (cfg->compile_aot) {
9123 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9124 } else {
9125 ptr = mono_lookup_internal_call (cmethod);
9126 g_assert (ptr);
9127 EMIT_NEW_PCONST (cfg, ins, ptr);
9129 *sp++ = ins;
9130 ip += 6;
9131 break;
9133 case CEE_MONO_VTADDR: {
9134 MonoInst *src_var, *src;
9136 CHECK_STACK (1);
9137 --sp;
9139 // FIXME:
9140 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9141 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9142 *sp++ = src;
9143 ip += 2;
9144 break;
9146 case CEE_MONO_NEWOBJ: {
9147 MonoInst *iargs [2];
9149 CHECK_STACK_OVF (1);
9150 CHECK_OPSIZE (6);
9151 token = read32 (ip + 2);
9152 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9153 mono_class_init (klass);
9154 NEW_DOMAINCONST (cfg, iargs [0]);
9155 MONO_ADD_INS (cfg->cbb, iargs [0]);
9156 NEW_CLASSCONST (cfg, iargs [1], klass);
9157 MONO_ADD_INS (cfg->cbb, iargs [1]);
9158 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9159 ip += 6;
9160 inline_costs += 10 * num_calls++;
9161 break;
9163 case CEE_MONO_OBJADDR:
9164 CHECK_STACK (1);
9165 --sp;
9166 MONO_INST_NEW (cfg, ins, OP_MOVE);
9167 ins->dreg = alloc_preg (cfg);
9168 ins->sreg1 = sp [0]->dreg;
9169 ins->type = STACK_MP;
9170 MONO_ADD_INS (cfg->cbb, ins);
9171 *sp++ = ins;
9172 ip += 2;
9173 break;
9174 case CEE_MONO_LDNATIVEOBJ:
9175 /*
9176 * Similar to LDOBJ, but instead load the unmanaged
9177 * representation of the vtype to the stack.
9178 */
9179 CHECK_STACK (1);
9180 CHECK_OPSIZE (6);
9181 --sp;
9182 token = read32 (ip + 2);
9183 klass = mono_method_get_wrapper_data (method, token);
9184 g_assert (klass->valuetype);
9185 mono_class_init (klass);
9188 MonoInst *src, *dest, *temp;
9190 src = sp [0];
9191 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9192 temp->backend.is_pinvoke = 1;
9193 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9194 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9196 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9197 dest->type = STACK_VTYPE;
9198 dest->klass = klass;
9200 *sp ++ = dest;
9201 ip += 6;
9203 break;
9204 case CEE_MONO_RETOBJ: {
9205 /*
9206 * Same as RET, but return the native representation of a vtype
9207 * to the caller.
9208 */
9209 g_assert (cfg->ret);
9210 g_assert (mono_method_signature (method)->pinvoke);
9211 CHECK_STACK (1);
9212 --sp;
9214 CHECK_OPSIZE (6);
9215 token = read32 (ip + 2);
9216 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9218 if (!cfg->vret_addr) {
9219 g_assert (cfg->ret_var_is_local);
9221 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9222 } else {
9223 EMIT_NEW_RETLOADA (cfg, ins);
9225 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9227 if (sp != stack_start)
9228 UNVERIFIED;
9230 MONO_INST_NEW (cfg, ins, OP_BR);
9231 ins->inst_target_bb = end_bblock;
9232 MONO_ADD_INS (bblock, ins);
9233 link_bblock (cfg, bblock, end_bblock);
9234 start_new_bblock = 1;
9235 ip += 6;
9236 break;
9238 case CEE_MONO_CISINST:
9239 case CEE_MONO_CCASTCLASS: {
9240 int token;
9241 CHECK_STACK (1);
9242 --sp;
9243 CHECK_OPSIZE (6);
9244 token = read32 (ip + 2);
9245 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9246 if (ip [1] == CEE_MONO_CISINST)
9247 ins = handle_cisinst (cfg, klass, sp [0]);
9248 else
9249 ins = handle_ccastclass (cfg, klass, sp [0]);
9250 bblock = cfg->cbb;
9251 *sp++ = ins;
9252 ip += 6;
9253 break;
9255 case CEE_MONO_SAVE_LMF:
9256 case CEE_MONO_RESTORE_LMF:
9257 #ifdef MONO_ARCH_HAVE_LMF_OPS
9258 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9259 MONO_ADD_INS (bblock, ins);
9260 cfg->need_lmf_area = TRUE;
9261 #endif
9262 ip += 2;
9263 break;
9264 case CEE_MONO_CLASSCONST:
9265 CHECK_STACK_OVF (1);
9266 CHECK_OPSIZE (6);
9267 token = read32 (ip + 2);
9268 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9269 *sp++ = ins;
9270 ip += 6;
9271 inline_costs += 10 * num_calls++;
9272 break;
9273 case CEE_MONO_NOT_TAKEN:
9274 bblock->out_of_line = TRUE;
9275 ip += 2;
9276 break;
9277 case CEE_MONO_TLS:
9278 CHECK_STACK_OVF (1);
9279 CHECK_OPSIZE (6);
9280 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9281 ins->dreg = alloc_preg (cfg);
9282 ins->inst_offset = (gint32)read32 (ip + 2);
9283 ins->type = STACK_PTR;
9284 MONO_ADD_INS (bblock, ins);
9285 *sp++ = ins;
9286 ip += 6;
9287 break;
9288 case CEE_MONO_DYN_CALL: {
9289 MonoCallInst *call;
9291 /* It would be easier to call a trampoline, but that would put an
9292 * extra frame on the stack, confusing exception handling. So
9293 * implement it inline using an opcode for now.
9294 */
9296 if (!cfg->dyn_call_var) {
9297 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9298 /* prevent it from being register allocated */
9299 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9302 /* Has to use a call inst since the local regalloc expects it */
9303 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9304 ins = (MonoInst*)call;
9305 sp -= 2;
9306 ins->sreg1 = sp [0]->dreg;
9307 ins->sreg2 = sp [1]->dreg;
9308 MONO_ADD_INS (bblock, ins);
9310 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9311 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9312 #endif
9314 ip += 2;
9315 inline_costs += 10 * num_calls++;
9317 break;
9319 default:
9320 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9321 break;
9323 break;
9326 case CEE_PREFIX1: {
9327 CHECK_OPSIZE (2);
9328 switch (ip [1]) {
9329 case CEE_ARGLIST: {
9330 /* somewhat similar to LDTOKEN */
9331 MonoInst *addr, *vtvar;
9332 CHECK_STACK_OVF (1);
9333 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9335 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9336 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9338 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9339 ins->type = STACK_VTYPE;
9340 ins->klass = mono_defaults.argumenthandle_class;
9341 *sp++ = ins;
9342 ip += 2;
9343 break;
9345 case CEE_CEQ:
9346 case CEE_CGT:
9347 case CEE_CGT_UN:
9348 case CEE_CLT:
9349 case CEE_CLT_UN: {
9350 MonoInst *cmp;
9351 CHECK_STACK (2);
9352 /*
9353 * The following transforms:
9354 * CEE_CEQ into OP_CEQ
9355 * CEE_CGT into OP_CGT
9356 * CEE_CGT_UN into OP_CGT_UN
9357 * CEE_CLT into OP_CLT
9358 * CEE_CLT_UN into OP_CLT_UN
9359 */
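/*
 * (Worked example, assuming the OP_CEQ..OP_CLT_UN opcodes are declared in the
 * same relative order as the CIL CEE_CEQ..CEE_CLT_UN opcodes, which is what the
 * arithmetic below relies on: for ip [1] == CEE_CGT_UN,
 * (OP_CEQ - CEE_CEQ) + CEE_CGT_UN == OP_CEQ + (CEE_CGT_UN - CEE_CEQ) == OP_CGT_UN.)
 */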
9360 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9362 MONO_INST_NEW (cfg, ins, cmp->opcode);
9363 sp -= 2;
9364 cmp->sreg1 = sp [0]->dreg;
9365 cmp->sreg2 = sp [1]->dreg;
9366 type_from_op (cmp, sp [0], sp [1]);
9367 CHECK_TYPE (cmp);
9368 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9369 cmp->opcode = OP_LCOMPARE;
9370 else if (sp [0]->type == STACK_R8)
9371 cmp->opcode = OP_FCOMPARE;
9372 else
9373 cmp->opcode = OP_ICOMPARE;
9374 MONO_ADD_INS (bblock, cmp);
9375 ins->type = STACK_I4;
9376 ins->dreg = alloc_dreg (cfg, ins->type);
9377 type_from_op (ins, sp [0], sp [1]);
9379 if (cmp->opcode == OP_FCOMPARE) {
9380 /*
9381 * The backends expect the fceq opcodes to do the
9382 * comparison too.
9383 */
9384 cmp->opcode = OP_NOP;
9385 ins->sreg1 = cmp->sreg1;
9386 ins->sreg2 = cmp->sreg2;
9388 MONO_ADD_INS (bblock, ins);
9389 *sp++ = ins;
9390 ip += 2;
9391 break;
9393 case CEE_LDFTN: {
9394 MonoInst *argconst;
9395 MonoMethod *cil_method;
9396 gboolean needs_static_rgctx_invoke;
9398 CHECK_STACK_OVF (1);
9399 CHECK_OPSIZE (6);
9400 n = read32 (ip + 2);
9401 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9402 if (!cmethod)
9403 goto load_error;
9404 mono_class_init (cmethod->klass);
9406 mono_save_token_info (cfg, image, n, cmethod);
9408 if (cfg->generic_sharing_context)
9409 context_used = mono_method_check_context_used (cmethod);
9411 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9413 cil_method = cmethod;
9414 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9415 METHOD_ACCESS_FAILURE;
9417 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9418 if (check_linkdemand (cfg, method, cmethod))
9419 INLINE_FAILURE;
9420 CHECK_CFG_EXCEPTION;
9421 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9422 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9425 /*
9426 * Optimize the common case of ldftn+delegate creation
9427 */
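/*
 * (Illustration, not taken from the original source: for C# code like
 * `new SomeDelegate (obj.SomeMethod)' the compiler emits the IL pair
 *
 *     ldftn instance void SomeClass::SomeMethod ()
 *     newobj instance void SomeDelegate::.ctor (object, native int)
 *
 * and the ip [6] == CEE_NEWOBJ check below collapses that pair into a single
 * handle_delegate_ctor () call instead of going through mono_ldftn ().)
 */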
9428 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9429 /* FIXME: SGEN support */
9430 /* FIXME: handle shared static generic methods */
9431 /* FIXME: handle this in shared code */
9432 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9433 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9434 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9435 MonoInst *target_ins;
9436 MonoMethod *invoke;
9438 invoke = mono_get_delegate_invoke (ctor_method->klass);
9439 if (!invoke || !mono_method_signature (invoke))
9440 goto load_error;
9442 ip += 6;
9443 if (cfg->verbose_level > 3)
9444 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9445 target_ins = sp [-1];
9446 sp --;
9447 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9448 CHECK_CFG_EXCEPTION;
9449 ip += 5;
9450 sp ++;
9451 break;
9454 #endif
9456 if (context_used) {
9457 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9458 } else {
9459 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9461 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9462 *sp++ = ins;
9464 ip += 6;
9465 inline_costs += 10 * num_calls++;
9466 break;
9468 case CEE_LDVIRTFTN: {
9469 MonoInst *args [2];
9471 CHECK_STACK (1);
9472 CHECK_OPSIZE (6);
9473 n = read32 (ip + 2);
9474 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9475 if (!cmethod)
9476 goto load_error;
9477 mono_class_init (cmethod->klass);
9479 if (cfg->generic_sharing_context)
9480 context_used = mono_method_check_context_used (cmethod);
9482 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9483 if (check_linkdemand (cfg, method, cmethod))
9484 INLINE_FAILURE;
9485 CHECK_CFG_EXCEPTION;
9486 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9487 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9490 --sp;
9491 args [0] = *sp;
9493 if (context_used) {
9494 args [1] = emit_get_rgctx_method (cfg, context_used,
9495 cmethod, MONO_RGCTX_INFO_METHOD);
9496 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9497 } else {
9498 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9499 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9502 ip += 6;
9503 inline_costs += 10 * num_calls++;
9504 break;
9506 case CEE_LDARG:
9507 CHECK_STACK_OVF (1);
9508 CHECK_OPSIZE (4);
9509 n = read16 (ip + 2);
9510 CHECK_ARG (n);
9511 EMIT_NEW_ARGLOAD (cfg, ins, n);
9512 *sp++ = ins;
9513 ip += 4;
9514 break;
9515 case CEE_LDARGA:
9516 CHECK_STACK_OVF (1);
9517 CHECK_OPSIZE (4);
9518 n = read16 (ip + 2);
9519 CHECK_ARG (n);
9520 NEW_ARGLOADA (cfg, ins, n);
9521 MONO_ADD_INS (cfg->cbb, ins);
9522 *sp++ = ins;
9523 ip += 4;
9524 break;
9525 case CEE_STARG:
9526 CHECK_STACK (1);
9527 --sp;
9528 CHECK_OPSIZE (4);
9529 n = read16 (ip + 2);
9530 CHECK_ARG (n);
9531 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9532 UNVERIFIED;
9533 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9534 ip += 4;
9535 break;
9536 case CEE_LDLOC:
9537 CHECK_STACK_OVF (1);
9538 CHECK_OPSIZE (4);
9539 n = read16 (ip + 2);
9540 CHECK_LOCAL (n);
9541 EMIT_NEW_LOCLOAD (cfg, ins, n);
9542 *sp++ = ins;
9543 ip += 4;
9544 break;
9545 case CEE_LDLOCA: {
9546 unsigned char *tmp_ip;
9547 CHECK_STACK_OVF (1);
9548 CHECK_OPSIZE (4);
9549 n = read16 (ip + 2);
9550 CHECK_LOCAL (n);
9552 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9553 ip = tmp_ip;
9554 inline_costs += 1;
9555 break;
9558 EMIT_NEW_LOCLOADA (cfg, ins, n);
9559 *sp++ = ins;
9560 ip += 4;
9561 break;
9563 case CEE_STLOC:
9564 CHECK_STACK (1);
9565 --sp;
9566 CHECK_OPSIZE (4);
9567 n = read16 (ip + 2);
9568 CHECK_LOCAL (n);
9569 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9570 UNVERIFIED;
9571 emit_stloc_ir (cfg, sp, header, n);
9572 ip += 4;
9573 inline_costs += 1;
9574 break;
9575 case CEE_LOCALLOC:
9576 CHECK_STACK (1);
9577 --sp;
9578 if (sp != stack_start)
9579 UNVERIFIED;
9580 if (cfg->method != method)
9581 /*
9582 * Inlining this into a loop in a parent could lead to
9583 * stack overflows which is different behavior than the
9584 * non-inlined case, thus disable inlining in this case.
9585 */
9586 goto inline_failure;
9588 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9589 ins->dreg = alloc_preg (cfg);
9590 ins->sreg1 = sp [0]->dreg;
9591 ins->type = STACK_PTR;
9592 MONO_ADD_INS (cfg->cbb, ins);
9594 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9595 if (init_locals)
9596 ins->flags |= MONO_INST_INIT;
9598 *sp++ = ins;
9599 ip += 2;
9600 break;
9601 case CEE_ENDFILTER: {
9602 MonoExceptionClause *clause, *nearest;
9603 int cc, nearest_num;
9605 CHECK_STACK (1);
9606 --sp;
9607 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9608 UNVERIFIED;
9609 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9610 ins->sreg1 = (*sp)->dreg;
9611 MONO_ADD_INS (bblock, ins);
9612 start_new_bblock = 1;
9613 ip += 2;
9615 nearest = NULL;
9616 nearest_num = 0;
9617 for (cc = 0; cc < header->num_clauses; ++cc) {
9618 clause = &header->clauses [cc];
9619 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9620 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9621 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9622 nearest = clause;
9623 nearest_num = cc;
9626 g_assert (nearest);
9627 if ((ip - header->code) != nearest->handler_offset)
9628 UNVERIFIED;
9630 break;
9632 case CEE_UNALIGNED_:
9633 ins_flag |= MONO_INST_UNALIGNED;
9634 /* FIXME: record alignment? we can assume 1 for now */
9635 CHECK_OPSIZE (3);
9636 ip += 3;
9637 break;
9638 case CEE_VOLATILE_:
9639 ins_flag |= MONO_INST_VOLATILE;
9640 ip += 2;
9641 break;
9642 case CEE_TAIL_:
9643 ins_flag |= MONO_INST_TAILCALL;
9644 cfg->flags |= MONO_CFG_HAS_TAIL;
9645 /* Can't inline tail calls at this time */
9646 inline_costs += 100000;
9647 ip += 2;
9648 break;
9649 case CEE_INITOBJ:
9650 CHECK_STACK (1);
9651 --sp;
9652 CHECK_OPSIZE (6);
9653 token = read32 (ip + 2);
9654 klass = mini_get_class (method, token, generic_context);
9655 CHECK_TYPELOAD (klass);
9656 if (generic_class_is_reference_type (cfg, klass))
9657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9658 else
9659 mini_emit_initobj (cfg, *sp, NULL, klass);
9660 ip += 6;
9661 inline_costs += 1;
9662 break;
9663 case CEE_CONSTRAINED_:
9664 CHECK_OPSIZE (6);
9665 token = read32 (ip + 2);
9666 if (method->wrapper_type != MONO_WRAPPER_NONE)
9667 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9668 else
9669 constrained_call = mono_class_get_full (image, token, generic_context);
9670 CHECK_TYPELOAD (constrained_call);
9671 ip += 6;
9672 break;
9673 case CEE_CPBLK:
9674 case CEE_INITBLK: {
9675 MonoInst *iargs [3];
9676 CHECK_STACK (3);
9677 sp -= 3;
9679 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9680 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9681 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9682 /* emit_memset only works when val == 0 */
9683 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9684 } else {
9685 iargs [0] = sp [0];
9686 iargs [1] = sp [1];
9687 iargs [2] = sp [2];
9688 if (ip [1] == CEE_CPBLK) {
9689 MonoMethod *memcpy_method = get_memcpy_method ();
9690 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9691 } else {
9692 MonoMethod *memset_method = get_memset_method ();
9693 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9696 ip += 2;
9697 inline_costs += 1;
9698 break;
9700 case CEE_NO_:
9701 CHECK_OPSIZE (3);
9702 if (ip [2] & 0x1)
9703 ins_flag |= MONO_INST_NOTYPECHECK;
9704 if (ip [2] & 0x2)
9705 ins_flag |= MONO_INST_NORANGECHECK;
9706 /* we ignore the no-nullcheck for now since we
9707 * really do it explicitly only when doing callvirt->call
9708 */
9709 ip += 3;
9710 break;
9711 case CEE_RETHROW: {
9712 MonoInst *load;
9713 int handler_offset = -1;
9715 for (i = 0; i < header->num_clauses; ++i) {
9716 MonoExceptionClause *clause = &header->clauses [i];
9717 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9718 handler_offset = clause->handler_offset;
9719 break;
9723 bblock->flags |= BB_EXCEPTION_UNSAFE;
9725 g_assert (handler_offset != -1);
9727 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9728 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9729 ins->sreg1 = load->dreg;
9730 MONO_ADD_INS (bblock, ins);
9731 sp = stack_start;
9732 link_bblock (cfg, bblock, end_bblock);
9733 start_new_bblock = 1;
9734 ip += 2;
9735 break;
9737 case CEE_SIZEOF: {
9738 guint32 align;
9739 int ialign;
9741 CHECK_STACK_OVF (1);
9742 CHECK_OPSIZE (6);
9743 token = read32 (ip + 2);
9744 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9745 MonoType *type = mono_type_create_from_typespec (image, token);
9746 token = mono_type_size (type, &ialign);
9747 } else {
9748 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9749 CHECK_TYPELOAD (klass);
9750 mono_class_init (klass);
9751 token = mono_class_value_size (klass, &align);
9753 EMIT_NEW_ICONST (cfg, ins, token);
9754 *sp++= ins;
9755 ip += 6;
9756 break;
9758 case CEE_REFANYTYPE: {
9759 MonoInst *src_var, *src;
9761 CHECK_STACK (1);
9762 --sp;
9764 // FIXME:
9765 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9766 if (!src_var)
9767 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9768 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9770 *sp++ = ins;
9771 ip += 2;
9772 break;
9774 case CEE_READONLY_:
9775 readonly = TRUE;
9776 ip += 2;
9777 break;
9779 case CEE_UNUSED56:
9780 case CEE_UNUSED57:
9781 case CEE_UNUSED70:
9782 case CEE_UNUSED:
9783 case CEE_UNUSED99:
9784 UNVERIFIED;
9786 default:
9787 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9788 UNVERIFIED;
9790 break;
9792 case CEE_UNUSED58:
9793 case CEE_UNUSED1:
9794 UNVERIFIED;
9796 default:
9797 g_warning ("opcode 0x%02x not handled", *ip);
9798 UNVERIFIED;
9801 if (start_new_bblock != 1)
9802 UNVERIFIED;
9804 bblock->cil_length = ip - bblock->cil_code;
9805 bblock->next_bb = end_bblock;
9807 if (cfg->method == method && cfg->domainvar) {
9808 MonoInst *store;
9809 MonoInst *get_domain;
9811 cfg->cbb = init_localsbb;
9813 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9814 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9816 else {
9817 get_domain->dreg = alloc_preg (cfg);
9818 MONO_ADD_INS (cfg->cbb, get_domain);
9820 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9821 MONO_ADD_INS (cfg->cbb, store);
9824 #ifdef TARGET_POWERPC
9825 if (cfg->compile_aot)
9826 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9827 mono_get_got_var (cfg);
9828 #endif
9830 if (cfg->method == method && cfg->got_var)
9831 mono_emit_load_got_addr (cfg);
9833 if (init_locals) {
9834 MonoInst *store;
9836 cfg->cbb = init_localsbb;
9837 cfg->ip = NULL;
9838 for (i = 0; i < header->num_locals; ++i) {
9839 MonoType *ptype = header->locals [i];
9840 int t = ptype->type;
9841 dreg = cfg->locals [i]->dreg;
9843 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9844 t = mono_class_enum_basetype (ptype->data.klass)->type;
9845 if (ptype->byref) {
9846 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9847 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9848 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9849 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9850 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9851 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9852 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9853 ins->type = STACK_R8;
9854 ins->inst_p0 = (void*)&r8_0;
9855 ins->dreg = alloc_dreg (cfg, STACK_R8);
9856 MONO_ADD_INS (init_localsbb, ins);
9857 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9858 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9859 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9860 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9861 } else {
9862 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9867 /* Add a sequence point for method entry/exit events */
9868 if (seq_points) {
9869 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9870 MONO_ADD_INS (init_localsbb, ins);
9871 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9872 MONO_ADD_INS (cfg->bb_exit, ins);
9875 cfg->ip = NULL;
9877 if (cfg->method == method) {
9878 MonoBasicBlock *bb;
9879 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9880 bb->region = mono_find_block_region (cfg, bb->real_offset);
9881 if (cfg->spvars)
9882 mono_create_spvar_for_region (cfg, bb->region);
9883 if (cfg->verbose_level > 2)
9884 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9888 g_slist_free (class_inits);
9889 dont_inline = g_list_remove (dont_inline, method);
9891 if (inline_costs < 0) {
9892 char *mname;
9894 /* Method is too large */
9895 mname = mono_method_full_name (method, TRUE);
9896 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9897 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9898 g_free (mname);
9899 return -1;
9902 if ((cfg->verbose_level > 2) && (cfg->method == method))
9903 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9905 return inline_costs;
9907 exception_exit:
9908 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9909 g_slist_free (class_inits);
9910 dont_inline = g_list_remove (dont_inline, method);
9911 return -1;
9913 inline_failure:
9914 g_slist_free (class_inits);
9915 dont_inline = g_list_remove (dont_inline, method);
9916 return -1;
9918 load_error:
9919 g_slist_free (class_inits);
9920 dont_inline = g_list_remove (dont_inline, method);
9921 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9922 return -1;
9924 unverified:
9925 g_slist_free (class_inits);
9926 dont_inline = g_list_remove (dont_inline, method);
9927 set_exception_type_from_invalid_il (cfg, method, ip);
9928 return -1;
9931 static int
9932 store_membase_reg_to_store_membase_imm (int opcode)
9934 switch (opcode) {
9935 case OP_STORE_MEMBASE_REG:
9936 return OP_STORE_MEMBASE_IMM;
9937 case OP_STOREI1_MEMBASE_REG:
9938 return OP_STOREI1_MEMBASE_IMM;
9939 case OP_STOREI2_MEMBASE_REG:
9940 return OP_STOREI2_MEMBASE_IMM;
9941 case OP_STOREI4_MEMBASE_REG:
9942 return OP_STOREI4_MEMBASE_IMM;
9943 case OP_STOREI8_MEMBASE_REG:
9944 return OP_STOREI8_MEMBASE_IMM;
9945 default:
9946 g_assert_not_reached ();
9949 return -1;
9952 #endif /* DISABLE_JIT */
9955 mono_op_to_op_imm (int opcode)
9957 switch (opcode) {
9958 case OP_IADD:
9959 return OP_IADD_IMM;
9960 case OP_ISUB:
9961 return OP_ISUB_IMM;
9962 case OP_IDIV:
9963 return OP_IDIV_IMM;
9964 case OP_IDIV_UN:
9965 return OP_IDIV_UN_IMM;
9966 case OP_IREM:
9967 return OP_IREM_IMM;
9968 case OP_IREM_UN:
9969 return OP_IREM_UN_IMM;
9970 case OP_IMUL:
9971 return OP_IMUL_IMM;
9972 case OP_IAND:
9973 return OP_IAND_IMM;
9974 case OP_IOR:
9975 return OP_IOR_IMM;
9976 case OP_IXOR:
9977 return OP_IXOR_IMM;
9978 case OP_ISHL:
9979 return OP_ISHL_IMM;
9980 case OP_ISHR:
9981 return OP_ISHR_IMM;
9982 case OP_ISHR_UN:
9983 return OP_ISHR_UN_IMM;
9985 case OP_LADD:
9986 return OP_LADD_IMM;
9987 case OP_LSUB:
9988 return OP_LSUB_IMM;
9989 case OP_LAND:
9990 return OP_LAND_IMM;
9991 case OP_LOR:
9992 return OP_LOR_IMM;
9993 case OP_LXOR:
9994 return OP_LXOR_IMM;
9995 case OP_LSHL:
9996 return OP_LSHL_IMM;
9997 case OP_LSHR:
9998 return OP_LSHR_IMM;
9999 case OP_LSHR_UN:
10000 return OP_LSHR_UN_IMM;
10002 case OP_COMPARE:
10003 return OP_COMPARE_IMM;
10004 case OP_ICOMPARE:
10005 return OP_ICOMPARE_IMM;
10006 case OP_LCOMPARE:
10007 return OP_LCOMPARE_IMM;
10009 case OP_STORE_MEMBASE_REG:
10010 return OP_STORE_MEMBASE_IMM;
10011 case OP_STOREI1_MEMBASE_REG:
10012 return OP_STOREI1_MEMBASE_IMM;
10013 case OP_STOREI2_MEMBASE_REG:
10014 return OP_STOREI2_MEMBASE_IMM;
10015 case OP_STOREI4_MEMBASE_REG:
10016 return OP_STOREI4_MEMBASE_IMM;
10018 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10019 case OP_X86_PUSH:
10020 return OP_X86_PUSH_IMM;
10021 case OP_X86_COMPARE_MEMBASE_REG:
10022 return OP_X86_COMPARE_MEMBASE_IMM;
10023 #endif
10024 #if defined(TARGET_AMD64)
10025 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10026 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10027 #endif
10028 case OP_VOIDCALL_REG:
10029 return OP_VOIDCALL;
10030 case OP_CALL_REG:
10031 return OP_CALL;
10032 case OP_LCALL_REG:
10033 return OP_LCALL;
10034 case OP_FCALL_REG:
10035 return OP_FCALL;
10036 case OP_LOCALLOC:
10037 return OP_LOCALLOC_IMM;
10040 return -1;
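/*
 * Usage sketch (hypothetical caller, for illustration only): a local pass that
 * knows the second source of an instruction is a constant can use the helper
 * above to switch to the immediate form, roughly:
 *
 *     int imm_opcode = mono_op_to_op_imm (ins->opcode);
 *     if (imm_opcode != -1) {
 *             ins->opcode = imm_opcode;
 *             ins->inst_imm = value;    // 'value' is the known constant (hypothetical local)
 *             ins->sreg2 = -1;
 *     }
 */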
10043 static int
10044 ldind_to_load_membase (int opcode)
10046 switch (opcode) {
10047 case CEE_LDIND_I1:
10048 return OP_LOADI1_MEMBASE;
10049 case CEE_LDIND_U1:
10050 return OP_LOADU1_MEMBASE;
10051 case CEE_LDIND_I2:
10052 return OP_LOADI2_MEMBASE;
10053 case CEE_LDIND_U2:
10054 return OP_LOADU2_MEMBASE;
10055 case CEE_LDIND_I4:
10056 return OP_LOADI4_MEMBASE;
10057 case CEE_LDIND_U4:
10058 return OP_LOADU4_MEMBASE;
10059 case CEE_LDIND_I:
10060 return OP_LOAD_MEMBASE;
10061 case CEE_LDIND_REF:
10062 return OP_LOAD_MEMBASE;
10063 case CEE_LDIND_I8:
10064 return OP_LOADI8_MEMBASE;
10065 case CEE_LDIND_R4:
10066 return OP_LOADR4_MEMBASE;
10067 case CEE_LDIND_R8:
10068 return OP_LOADR8_MEMBASE;
10069 default:
10070 g_assert_not_reached ();
10073 return -1;
10076 static int
10077 stind_to_store_membase (int opcode)
10079 switch (opcode) {
10080 case CEE_STIND_I1:
10081 return OP_STOREI1_MEMBASE_REG;
10082 case CEE_STIND_I2:
10083 return OP_STOREI2_MEMBASE_REG;
10084 case CEE_STIND_I4:
10085 return OP_STOREI4_MEMBASE_REG;
10086 case CEE_STIND_I:
10087 case CEE_STIND_REF:
10088 return OP_STORE_MEMBASE_REG;
10089 case CEE_STIND_I8:
10090 return OP_STOREI8_MEMBASE_REG;
10091 case CEE_STIND_R4:
10092 return OP_STORER4_MEMBASE_REG;
10093 case CEE_STIND_R8:
10094 return OP_STORER8_MEMBASE_REG;
10095 default:
10096 g_assert_not_reached ();
10099 return -1;
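/*
 * Sketch of how the two tables above are typically consumed when lowering an
 * indirect access (illustrative only; the real call sites are in the opcode
 * loop earlier in this file):
 *
 *     ins->opcode = ldind_to_load_membase (*ip);   // e.g. CEE_LDIND_I4 -> OP_LOADI4_MEMBASE
 *     ins->inst_basereg = addr->dreg;              // 'addr' is the pointer popped off the eval stack
 *     ins->inst_offset = 0;
 *
 * and, symmetrically, stind_to_store_membase () picks the OP_STORE*_MEMBASE_REG
 * form whose destination base register is the popped address.
 */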
10103 mono_load_membase_to_load_mem (int opcode)
10105 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10106 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10107 switch (opcode) {
10108 case OP_LOAD_MEMBASE:
10109 return OP_LOAD_MEM;
10110 case OP_LOADU1_MEMBASE:
10111 return OP_LOADU1_MEM;
10112 case OP_LOADU2_MEMBASE:
10113 return OP_LOADU2_MEM;
10114 case OP_LOADI4_MEMBASE:
10115 return OP_LOADI4_MEM;
10116 case OP_LOADU4_MEMBASE:
10117 return OP_LOADU4_MEM;
10118 #if SIZEOF_REGISTER == 8
10119 case OP_LOADI8_MEMBASE:
10120 return OP_LOADI8_MEM;
10121 #endif
10123 #endif
10125 return -1;
10128 static inline int
10129 op_to_op_dest_membase (int store_opcode, int opcode)
10131 #if defined(TARGET_X86)
10132 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10133 return -1;
10135 switch (opcode) {
10136 case OP_IADD:
10137 return OP_X86_ADD_MEMBASE_REG;
10138 case OP_ISUB:
10139 return OP_X86_SUB_MEMBASE_REG;
10140 case OP_IAND:
10141 return OP_X86_AND_MEMBASE_REG;
10142 case OP_IOR:
10143 return OP_X86_OR_MEMBASE_REG;
10144 case OP_IXOR:
10145 return OP_X86_XOR_MEMBASE_REG;
10146 case OP_ADD_IMM:
10147 case OP_IADD_IMM:
10148 return OP_X86_ADD_MEMBASE_IMM;
10149 case OP_SUB_IMM:
10150 case OP_ISUB_IMM:
10151 return OP_X86_SUB_MEMBASE_IMM;
10152 case OP_AND_IMM:
10153 case OP_IAND_IMM:
10154 return OP_X86_AND_MEMBASE_IMM;
10155 case OP_OR_IMM:
10156 case OP_IOR_IMM:
10157 return OP_X86_OR_MEMBASE_IMM;
10158 case OP_XOR_IMM:
10159 case OP_IXOR_IMM:
10160 return OP_X86_XOR_MEMBASE_IMM;
10161 case OP_MOVE:
10162 return OP_NOP;
10164 #endif
10166 #if defined(TARGET_AMD64)
10167 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10168 return -1;
10170 switch (opcode) {
10171 case OP_IADD:
10172 return OP_X86_ADD_MEMBASE_REG;
10173 case OP_ISUB:
10174 return OP_X86_SUB_MEMBASE_REG;
10175 case OP_IAND:
10176 return OP_X86_AND_MEMBASE_REG;
10177 case OP_IOR:
10178 return OP_X86_OR_MEMBASE_REG;
10179 case OP_IXOR:
10180 return OP_X86_XOR_MEMBASE_REG;
10181 case OP_IADD_IMM:
10182 return OP_X86_ADD_MEMBASE_IMM;
10183 case OP_ISUB_IMM:
10184 return OP_X86_SUB_MEMBASE_IMM;
10185 case OP_IAND_IMM:
10186 return OP_X86_AND_MEMBASE_IMM;
10187 case OP_IOR_IMM:
10188 return OP_X86_OR_MEMBASE_IMM;
10189 case OP_IXOR_IMM:
10190 return OP_X86_XOR_MEMBASE_IMM;
10191 case OP_LADD:
10192 return OP_AMD64_ADD_MEMBASE_REG;
10193 case OP_LSUB:
10194 return OP_AMD64_SUB_MEMBASE_REG;
10195 case OP_LAND:
10196 return OP_AMD64_AND_MEMBASE_REG;
10197 case OP_LOR:
10198 return OP_AMD64_OR_MEMBASE_REG;
10199 case OP_LXOR:
10200 return OP_AMD64_XOR_MEMBASE_REG;
10201 case OP_ADD_IMM:
10202 case OP_LADD_IMM:
10203 return OP_AMD64_ADD_MEMBASE_IMM;
10204 case OP_SUB_IMM:
10205 case OP_LSUB_IMM:
10206 return OP_AMD64_SUB_MEMBASE_IMM;
10207 case OP_AND_IMM:
10208 case OP_LAND_IMM:
10209 return OP_AMD64_AND_MEMBASE_IMM;
10210 case OP_OR_IMM:
10211 case OP_LOR_IMM:
10212 return OP_AMD64_OR_MEMBASE_IMM;
10213 case OP_XOR_IMM:
10214 case OP_LXOR_IMM:
10215 return OP_AMD64_XOR_MEMBASE_IMM;
10216 case OP_MOVE:
10217 return OP_NOP;
10219 #endif
10221 return -1;
10224 static inline int
10225 op_to_op_store_membase (int store_opcode, int opcode)
10227 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10228 switch (opcode) {
10229 case OP_ICEQ:
10230 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10231 return OP_X86_SETEQ_MEMBASE;
10232 case OP_CNE:
10233 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10234 return OP_X86_SETNE_MEMBASE;
10236 #endif
10238 return -1;
10241 static inline int
10242 op_to_op_src1_membase (int load_opcode, int opcode)
10244 #ifdef TARGET_X86
10245 /* FIXME: This has sign extension issues */
10247 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10248 return OP_X86_COMPARE_MEMBASE8_IMM;
10251 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10252 return -1;
10254 switch (opcode) {
10255 case OP_X86_PUSH:
10256 return OP_X86_PUSH_MEMBASE;
10257 case OP_COMPARE_IMM:
10258 case OP_ICOMPARE_IMM:
10259 return OP_X86_COMPARE_MEMBASE_IMM;
10260 case OP_COMPARE:
10261 case OP_ICOMPARE:
10262 return OP_X86_COMPARE_MEMBASE_REG;
10264 #endif
10266 #ifdef TARGET_AMD64
10267 /* FIXME: This has sign extension issues */
10269 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10270 return OP_X86_COMPARE_MEMBASE8_IMM;
10273 switch (opcode) {
10274 case OP_X86_PUSH:
10275 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10276 return OP_X86_PUSH_MEMBASE;
10277 break;
10278 /* FIXME: This only works for 32 bit immediates
10279 case OP_COMPARE_IMM:
10280 case OP_LCOMPARE_IMM:
10281 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10282 return OP_AMD64_COMPARE_MEMBASE_IMM;
10283 */
10284 case OP_ICOMPARE_IMM:
10285 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10286 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10287 break;
10288 case OP_COMPARE:
10289 case OP_LCOMPARE:
10290 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10291 return OP_AMD64_COMPARE_MEMBASE_REG;
10292 break;
10293 case OP_ICOMPARE:
10294 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10295 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10296 break;
10298 #endif
10300 return -1;
10303 static inline int
10304 op_to_op_src2_membase (int load_opcode, int opcode)
10306 #ifdef TARGET_X86
10307 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10308 return -1;
10310 switch (opcode) {
10311 case OP_COMPARE:
10312 case OP_ICOMPARE:
10313 return OP_X86_COMPARE_REG_MEMBASE;
10314 case OP_IADD:
10315 return OP_X86_ADD_REG_MEMBASE;
10316 case OP_ISUB:
10317 return OP_X86_SUB_REG_MEMBASE;
10318 case OP_IAND:
10319 return OP_X86_AND_REG_MEMBASE;
10320 case OP_IOR:
10321 return OP_X86_OR_REG_MEMBASE;
10322 case OP_IXOR:
10323 return OP_X86_XOR_REG_MEMBASE;
10325 #endif
10327 #ifdef TARGET_AMD64
10328 switch (opcode) {
10329 case OP_ICOMPARE:
10330 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10331 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10332 break;
10333 case OP_COMPARE:
10334 case OP_LCOMPARE:
10335 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10336 return OP_AMD64_COMPARE_REG_MEMBASE;
10337 break;
10338 case OP_IADD:
10339 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10340 return OP_X86_ADD_REG_MEMBASE;
10341 case OP_ISUB:
10342 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10343 return OP_X86_SUB_REG_MEMBASE;
10344 case OP_IAND:
10345 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10346 return OP_X86_AND_REG_MEMBASE;
10347 case OP_IOR:
10348 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10349 return OP_X86_OR_REG_MEMBASE;
10350 case OP_IXOR:
10351 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10352 return OP_X86_XOR_REG_MEMBASE;
10353 case OP_LADD:
10354 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10355 return OP_AMD64_ADD_REG_MEMBASE;
10356 case OP_LSUB:
10357 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10358 return OP_AMD64_SUB_REG_MEMBASE;
10359 case OP_LAND:
10360 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10361 return OP_AMD64_AND_REG_MEMBASE;
10362 case OP_LOR:
10363 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10364 return OP_AMD64_OR_REG_MEMBASE;
10365 case OP_LXOR:
10366 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10367 return OP_AMD64_XOR_REG_MEMBASE;
10369 #endif
10371 return -1;
10375 mono_op_to_op_imm_noemul (int opcode)
10377 switch (opcode) {
10378 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10379 case OP_LSHR:
10380 case OP_LSHL:
10381 case OP_LSHR_UN:
10382 #endif
10383 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10384 case OP_IDIV:
10385 case OP_IDIV_UN:
10386 case OP_IREM:
10387 case OP_IREM_UN:
10388 #endif
10389 return -1;
10390 default:
10391 return mono_op_to_op_imm (opcode);
10395 #ifndef DISABLE_JIT
10397 /*
10398 * mono_handle_global_vregs:
10399 *
10400 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10401 * for them.
10402 */
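/*
 * Example of the effect (descriptive only): a vreg R20 that is written in one
 * bblock and read in another gets a MonoInst created for it via
 * mono_compile_create_var_for_vreg () below, so later passes can assign it a
 * stack slot or hard register; a vreg referenced by only one bblock is left
 * alone (or even converted back into a plain local vreg by the second loop).
 */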
10403 void
10404 mono_handle_global_vregs (MonoCompile *cfg)
10406 gint32 *vreg_to_bb;
10407 MonoBasicBlock *bb;
10408 int i, pos;
10410 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10412 #ifdef MONO_ARCH_SIMD_INTRINSICS
10413 if (cfg->uses_simd_intrinsics)
10414 mono_simd_simplify_indirection (cfg);
10415 #endif
10417 /* Find local vregs used in more than one bb */
10418 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10419 MonoInst *ins = bb->code;
10420 int block_num = bb->block_num;
10422 if (cfg->verbose_level > 2)
10423 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10425 cfg->cbb = bb;
10426 for (; ins; ins = ins->next) {
10427 const char *spec = INS_INFO (ins->opcode);
10428 int regtype = 0, regindex;
10429 gint32 prev_bb;
10431 if (G_UNLIKELY (cfg->verbose_level > 2))
10432 mono_print_ins (ins);
10434 g_assert (ins->opcode >= MONO_CEE_LAST);
10436 for (regindex = 0; regindex < 4; regindex ++) {
10437 int vreg = 0;
10439 if (regindex == 0) {
10440 regtype = spec [MONO_INST_DEST];
10441 if (regtype == ' ')
10442 continue;
10443 vreg = ins->dreg;
10444 } else if (regindex == 1) {
10445 regtype = spec [MONO_INST_SRC1];
10446 if (regtype == ' ')
10447 continue;
10448 vreg = ins->sreg1;
10449 } else if (regindex == 2) {
10450 regtype = spec [MONO_INST_SRC2];
10451 if (regtype == ' ')
10452 continue;
10453 vreg = ins->sreg2;
10454 } else if (regindex == 3) {
10455 regtype = spec [MONO_INST_SRC3];
10456 if (regtype == ' ')
10457 continue;
10458 vreg = ins->sreg3;
10461 #if SIZEOF_REGISTER == 4
10462 /* In the LLVM case, the long opcodes are not decomposed */
10463 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10464 /*
10465 * Since some instructions reference the original long vreg,
10466 * and some reference the two component vregs, it is quite hard
10467 * to determine when it needs to be global. So be conservative.
10468 */
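/*
 * (For reference: on 32 bit targets a 64 bit vreg R is backed by the two
 * component vregs R + 1 (least significant word) and R + 2 (most significant
 * word); see the + 1 / + 2 accesses below and the MINI_LS_WORD_OFFSET /
 * MINI_MS_WORD_OFFSET stores in mono_spill_global_vars ().)
 */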
10469 if (!get_vreg_to_inst (cfg, vreg)) {
10470 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10472 if (cfg->verbose_level > 2)
10473 printf ("LONG VREG R%d made global.\n", vreg);
10476 /*
10477 * Make the component vregs volatile since the optimizations can
10478 * get confused otherwise.
10479 */
10480 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10481 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10483 #endif
10485 g_assert (vreg != -1);
10487 prev_bb = vreg_to_bb [vreg];
10488 if (prev_bb == 0) {
10489 /* 0 is a valid block num */
10490 vreg_to_bb [vreg] = block_num + 1;
10491 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10492 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10493 continue;
10495 if (!get_vreg_to_inst (cfg, vreg)) {
10496 if (G_UNLIKELY (cfg->verbose_level > 2))
10497 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10499 switch (regtype) {
10500 case 'i':
10501 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10502 break;
10503 case 'f':
10504 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10505 break;
10506 case 'v':
10507 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10508 break;
10509 default:
10510 g_assert_not_reached ();
10514 /* Flag as having been used in more than one bb */
10515 vreg_to_bb [vreg] = -1;
10521 /* If a variable is used in only one bblock, convert it into a local vreg */
10522 for (i = 0; i < cfg->num_varinfo; i++) {
10523 MonoInst *var = cfg->varinfo [i];
10524 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10526 switch (var->type) {
10527 case STACK_I4:
10528 case STACK_OBJ:
10529 case STACK_PTR:
10530 case STACK_MP:
10531 case STACK_VTYPE:
10532 #if SIZEOF_REGISTER == 8
10533 case STACK_I8:
10534 #endif
10535 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10536 /* Enabling this screws up the fp stack on x86 */
10537 case STACK_R8:
10538 #endif
10539 /* Arguments are implicitly global */
10540 /* Putting R4 vars into registers doesn't work currently */
10541 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10542 /*
10543 * Make sure that the variable's liveness interval doesn't contain a call, since
10544 * that would cause the lvreg to be spilled, making the whole optimization
10545 * useless.
10546 */
10547 /* This is too slow for JIT compilation */
10548 #if 0
10549 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10550 MonoInst *ins;
10551 int def_index, call_index, ins_index;
10552 gboolean spilled = FALSE;
10554 def_index = -1;
10555 call_index = -1;
10556 ins_index = 0;
10557 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10558 const char *spec = INS_INFO (ins->opcode);
10560 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10561 def_index = ins_index;
10563 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10564 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10565 if (call_index > def_index) {
10566 spilled = TRUE;
10567 break;
10571 if (MONO_IS_CALL (ins))
10572 call_index = ins_index;
10574 ins_index ++;
10577 if (spilled)
10578 break;
10580 #endif
10582 if (G_UNLIKELY (cfg->verbose_level > 2))
10583 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10584 var->flags |= MONO_INST_IS_DEAD;
10585 cfg->vreg_to_inst [var->dreg] = NULL;
10587 break;
10591 /*
10592 * Compress the varinfo and vars tables so the liveness computation is faster and
10593 * takes up less space.
10594 */
10595 pos = 0;
10596 for (i = 0; i < cfg->num_varinfo; ++i) {
10597 MonoInst *var = cfg->varinfo [i];
10598 if (pos < i && cfg->locals_start == i)
10599 cfg->locals_start = pos;
10600 if (!(var->flags & MONO_INST_IS_DEAD)) {
10601 if (pos < i) {
10602 cfg->varinfo [pos] = cfg->varinfo [i];
10603 cfg->varinfo [pos]->inst_c0 = pos;
10604 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10605 cfg->vars [pos].idx = pos;
10606 #if SIZEOF_REGISTER == 4
10607 if (cfg->varinfo [pos]->type == STACK_I8) {
10608 /* Modify the two component vars too */
10609 MonoInst *var1;
10611 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10612 var1->inst_c0 = pos;
10613 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10614 var1->inst_c0 = pos;
10616 #endif
10618 pos ++;
10621 cfg->num_varinfo = pos;
10622 if (cfg->locals_start > cfg->num_varinfo)
10623 cfg->locals_start = cfg->num_varinfo;
10626 /*
10627 * mono_spill_global_vars:
10628 *
10629 * Generate spill code for variables which are not allocated to registers,
10630 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10631 * code is generated which could be optimized by the local optimization passes.
10632 */
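/*
 * Rough before/after sketch (illustrative only, register numbers invented):
 * for a variable R30 that ended up on the stack at [fp + 0x10],
 *
 *     OP_IADD R30 <- R30 R31
 *
 * either becomes a fused read-modify-write when the target supports it
 * (op_to_op_dest_membase (), e.g. OP_X86_ADD_MEMBASE_REG [fp + 0x10] <- R31),
 * or is rewritten as an explicit load into a fresh lvreg, the add, and an
 * OP_STORE*_MEMBASE_REG back to [fp + 0x10].
 */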
10633 void
10634 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10636 MonoBasicBlock *bb;
10637 char spec2 [16];
10638 int orig_next_vreg;
10639 guint32 *vreg_to_lvreg;
10640 guint32 *lvregs;
10641 guint32 i, lvregs_len;
10642 gboolean dest_has_lvreg = FALSE;
10643 guint32 stacktypes [128];
10644 MonoInst **live_range_start, **live_range_end;
10645 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10647 *need_local_opts = FALSE;
10649 memset (spec2, 0, sizeof (spec2));
10651 /* FIXME: Move this function to mini.c */
10652 stacktypes ['i'] = STACK_PTR;
10653 stacktypes ['l'] = STACK_I8;
10654 stacktypes ['f'] = STACK_R8;
10655 #ifdef MONO_ARCH_SIMD_INTRINSICS
10656 stacktypes ['x'] = STACK_VTYPE;
10657 #endif
10659 #if SIZEOF_REGISTER == 4
10660 /* Create MonoInsts for longs */
10661 for (i = 0; i < cfg->num_varinfo; i++) {
10662 MonoInst *ins = cfg->varinfo [i];
10664 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10665 switch (ins->type) {
10666 #ifdef MONO_ARCH_SOFT_FLOAT
10667 case STACK_R8:
10668 #endif
10669 case STACK_I8: {
10670 MonoInst *tree;
10672 g_assert (ins->opcode == OP_REGOFFSET);
10674 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10675 g_assert (tree);
10676 tree->opcode = OP_REGOFFSET;
10677 tree->inst_basereg = ins->inst_basereg;
10678 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10680 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10681 g_assert (tree);
10682 tree->opcode = OP_REGOFFSET;
10683 tree->inst_basereg = ins->inst_basereg;
10684 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10685 break;
10687 default:
10688 break;
10692 #endif
10694 /* FIXME: widening and truncation */
10696 /*
10697 * As an optimization, when a variable allocated to the stack is first loaded into
10698 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10699 * the variable again.
10700 */
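/*
 * Concrete example (descriptive only): if stack variable V is first read into
 * lvreg R, a second read of V inside the same bblock reuses R via the
 * vreg_to_lvreg [] / lvregs [] bookkeeping below instead of emitting another
 * OP_LOAD*_MEMBASE; the cache is cleared at the start of every bblock.
 */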
10701 orig_next_vreg = cfg->next_vreg;
10702 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10703 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10704 lvregs_len = 0;
10706 /*
10707 * These arrays contain the first and last instructions accessing a given
10708 * variable.
10709 * Since we emit bblocks in the same order we process them here, and we
10710 * don't split live ranges, these will precisely describe the live range of
10711 * the variable, i.e. the instruction range where a valid value can be found
10712 * in the variable's location.
10713 */
10714 /* FIXME: Only do this if debugging info is requested */
10715 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10716 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10717 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10718 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10720 /* Add spill loads/stores */
10721 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10722 MonoInst *ins;
10724 if (cfg->verbose_level > 2)
10725 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10727 /* Clear vreg_to_lvreg array */
10728 for (i = 0; i < lvregs_len; i++)
10729 vreg_to_lvreg [lvregs [i]] = 0;
10730 lvregs_len = 0;
10732 cfg->cbb = bb;
10733 MONO_BB_FOR_EACH_INS (bb, ins) {
10734 const char *spec = INS_INFO (ins->opcode);
10735 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10736 gboolean store, no_lvreg;
10737 int sregs [MONO_MAX_SRC_REGS];
10739 if (G_UNLIKELY (cfg->verbose_level > 2))
10740 mono_print_ins (ins);
10742 if (ins->opcode == OP_NOP)
10743 continue;
10745 /*
10746 * We handle LDADDR here as well, since it can only be decomposed
10747 * when variable addresses are known.
10748 */
10749 if (ins->opcode == OP_LDADDR) {
10750 MonoInst *var = ins->inst_p0;
10752 if (var->opcode == OP_VTARG_ADDR) {
10753 /* Happens on SPARC/S390 where vtypes are passed by reference */
10754 MonoInst *vtaddr = var->inst_left;
10755 if (vtaddr->opcode == OP_REGVAR) {
10756 ins->opcode = OP_MOVE;
10757 ins->sreg1 = vtaddr->dreg;
10759 else if (var->inst_left->opcode == OP_REGOFFSET) {
10760 ins->opcode = OP_LOAD_MEMBASE;
10761 ins->inst_basereg = vtaddr->inst_basereg;
10762 ins->inst_offset = vtaddr->inst_offset;
10763 } else
10764 NOT_IMPLEMENTED;
10765 } else {
10766 g_assert (var->opcode == OP_REGOFFSET);
10768 ins->opcode = OP_ADD_IMM;
10769 ins->sreg1 = var->inst_basereg;
10770 ins->inst_imm = var->inst_offset;
10773 *need_local_opts = TRUE;
10774 spec = INS_INFO (ins->opcode);
10777 if (ins->opcode < MONO_CEE_LAST) {
10778 mono_print_ins (ins);
10779 g_assert_not_reached ();
10782 /*
10783 * Store opcodes have destbasereg in the dreg, but in reality it is a
10784 * source register.
10785 * FIXME:
10786 */
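/*
 * (Example of the swap performed below, register numbers invented: for
 * OP_STOREI4_MEMBASE_REG with dreg = base register R40 and sreg1 = value R41,
 * dreg and sreg2 are exchanged so the generic sreg handling also rewrites the
 * base register; the `if (store)' block near the end of the loop swaps them back.)
 */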
10787 if (MONO_IS_STORE_MEMBASE (ins)) {
10788 tmp_reg = ins->dreg;
10789 ins->dreg = ins->sreg2;
10790 ins->sreg2 = tmp_reg;
10791 store = TRUE;
10793 spec2 [MONO_INST_DEST] = ' ';
10794 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10795 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10796 spec2 [MONO_INST_SRC3] = ' ';
10797 spec = spec2;
10798 } else if (MONO_IS_STORE_MEMINDEX (ins))
10799 g_assert_not_reached ();
10800 else
10801 store = FALSE;
10802 no_lvreg = FALSE;
10804 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10805 printf ("\t %.3s %d", spec, ins->dreg);
10806 num_sregs = mono_inst_get_src_registers (ins, sregs);
10807 for (srcindex = 0; srcindex < 3; ++srcindex)
10808 printf (" %d", sregs [srcindex]);
10809 printf ("\n");
10812 /***************/
10813 /* DREG */
10814 /***************/
10815 regtype = spec [MONO_INST_DEST];
10816 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10817 prev_dreg = -1;
10819 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10820 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10821 MonoInst *store_ins;
10822 int store_opcode;
10823 MonoInst *def_ins = ins;
10824 int dreg = ins->dreg; /* The original vreg */
10826 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10828 if (var->opcode == OP_REGVAR) {
10829 ins->dreg = var->dreg;
10830 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10831 /*
10832 * Instead of emitting a load+store, use a _membase opcode.
10833 */
10834 g_assert (var->opcode == OP_REGOFFSET);
10835 if (ins->opcode == OP_MOVE) {
10836 NULLIFY_INS (ins);
10837 def_ins = NULL;
10838 } else {
10839 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10840 ins->inst_basereg = var->inst_basereg;
10841 ins->inst_offset = var->inst_offset;
10842 ins->dreg = -1;
10844 spec = INS_INFO (ins->opcode);
10845 } else {
10846 guint32 lvreg;
10848 g_assert (var->opcode == OP_REGOFFSET);
10850 prev_dreg = ins->dreg;
10852 /* Invalidate any previous lvreg for this vreg */
10853 vreg_to_lvreg [ins->dreg] = 0;
10855 lvreg = 0;
10857 #ifdef MONO_ARCH_SOFT_FLOAT
10858 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10859 regtype = 'l';
10860 store_opcode = OP_STOREI8_MEMBASE_REG;
10862 #endif
10864 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10866 if (regtype == 'l') {
10867 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10868 mono_bblock_insert_after_ins (bb, ins, store_ins);
10869 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10870 mono_bblock_insert_after_ins (bb, ins, store_ins);
10871 def_ins = store_ins;
10873 else {
10874 g_assert (store_opcode != OP_STOREV_MEMBASE);
10876 /* Try to fuse the store into the instruction itself */
10877 /* FIXME: Add more instructions */
10878 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10879 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10880 ins->inst_imm = ins->inst_c0;
10881 ins->inst_destbasereg = var->inst_basereg;
10882 ins->inst_offset = var->inst_offset;
10883 spec = INS_INFO (ins->opcode);
10884 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10885 ins->opcode = store_opcode;
10886 ins->inst_destbasereg = var->inst_basereg;
10887 ins->inst_offset = var->inst_offset;
10889 no_lvreg = TRUE;
10891 tmp_reg = ins->dreg;
10892 ins->dreg = ins->sreg2;
10893 ins->sreg2 = tmp_reg;
10894 store = TRUE;
10896 spec2 [MONO_INST_DEST] = ' ';
10897 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10898 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10899 spec2 [MONO_INST_SRC3] = ' ';
10900 spec = spec2;
10901 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10902 // FIXME: The backends expect the base reg to be in inst_basereg
10903 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10904 ins->dreg = -1;
10905 ins->inst_basereg = var->inst_basereg;
10906 ins->inst_offset = var->inst_offset;
10907 spec = INS_INFO (ins->opcode);
10908 } else {
10909 /* printf ("INS: "); mono_print_ins (ins); */
10910 /* Create a store instruction */
10911 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10913 /* Insert it after the instruction */
10914 mono_bblock_insert_after_ins (bb, ins, store_ins);
10916 def_ins = store_ins;
10918 /*
10919 * We can't assign ins->dreg to var->dreg here, since the
10920 * sregs could use it. So set a flag, and do it after
10921 * the sregs.
10922 */
10923 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10924 dest_has_lvreg = TRUE;
10929 if (def_ins && !live_range_start [dreg]) {
10930 live_range_start [dreg] = def_ins;
10931 live_range_start_bb [dreg] = bb;
10935 /************/
10936 /* SREGS */
10937 /************/
10938 num_sregs = mono_inst_get_src_registers (ins, sregs);
10939 for (srcindex = 0; srcindex < 3; ++srcindex) {
10940 regtype = spec [MONO_INST_SRC1 + srcindex];
10941 sreg = sregs [srcindex];
10943 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10944 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10945 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10946 MonoInst *use_ins = ins;
10947 MonoInst *load_ins;
10948 guint32 load_opcode;
10950 if (var->opcode == OP_REGVAR) {
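/* The variable was assigned to a hardware register by the global register allocator: use it directly. */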
10951 sregs [srcindex] = var->dreg;
10952 //mono_inst_set_src_registers (ins, sregs);
10953 live_range_end [sreg] = use_ins;
10954 live_range_end_bb [sreg] = bb;
10955 continue;
10956 }
10958 g_assert (var->opcode == OP_REGOFFSET);
10960 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10962 g_assert (load_opcode != OP_LOADV_MEMBASE);
10964 if (vreg_to_lvreg [sreg]) {
10965 g_assert (vreg_to_lvreg [sreg] != -1);
10967 /* The variable is already loaded to an lvreg */
10968 if (G_UNLIKELY (cfg->verbose_level > 2))
10969 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10970 sregs [srcindex] = vreg_to_lvreg [sreg];
10971 //mono_inst_set_src_registers (ins, sregs);
10972 continue;
10973 }
10975 /* Try to fuse the load into the instruction */
10976 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10977 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10978 sregs [0] = var->inst_basereg;
10979 //mono_inst_set_src_registers (ins, sregs);
10980 ins->inst_offset = var->inst_offset;
10981 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10982 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10983 sregs [1] = var->inst_basereg;
10984 //mono_inst_set_src_registers (ins, sregs);
10985 ins->inst_offset = var->inst_offset;
10986 } else {
10987 if (MONO_IS_REAL_MOVE (ins)) {
10988 ins->opcode = OP_NOP;
10989 sreg = ins->dreg;
10990 } else {
10991 //printf ("%d ", srcindex); mono_print_ins (ins);
10993 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10995 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10996 if (var->dreg == prev_dreg) {
10997 /*
10998 * sreg refers to the value loaded by the load
10999 * emitted below, but we need to use ins->dreg
11000 * since it refers to the store emitted earlier.
11001 */
11002 sreg = ins->dreg;
11003 }
11004 g_assert (sreg != -1);
11005 vreg_to_lvreg [var->dreg] = sreg;
11006 g_assert (lvregs_len < 1024);
11007 lvregs [lvregs_len ++] = var->dreg;
11008 }
11009 }
11011 sregs [srcindex] = sreg;
11012 //mono_inst_set_src_registers (ins, sregs);
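/* A 64 bit value on a 32 bit target is reloaded as two 32 bit word loads. */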
11014 if (regtype == 'l') {
11015 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11016 mono_bblock_insert_before_ins (bb, ins, load_ins);
11017 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11018 mono_bblock_insert_before_ins (bb, ins, load_ins);
11019 use_ins = load_ins;
11020 }
11021 else {
11022 #if SIZEOF_REGISTER == 4
11023 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11024 #endif
11025 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11026 mono_bblock_insert_before_ins (bb, ins, load_ins);
11027 use_ins = load_ins;
11028 }
11029 }
11031 if (var->dreg < orig_next_vreg) {
11032 live_range_end [var->dreg] = use_ins;
11033 live_range_end_bb [var->dreg] = bb;
11034 }
11035 }
11036 }
11037 mono_inst_set_src_registers (ins, sregs);
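/* Deferred from the dreg pass above: record the lvreg now holding the variable's value, so later instructions in this bblock can reuse it instead of reloading. */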
11039 if (dest_has_lvreg) {
11040 g_assert (ins->dreg != -1);
11041 vreg_to_lvreg [prev_dreg] = ins->dreg;
11042 g_assert (lvregs_len < 1024);
11043 lvregs [lvregs_len ++] = prev_dreg;
11044 dest_has_lvreg = FALSE;
11045 }
11047 if (store) {
11048 tmp_reg = ins->dreg;
11049 ins->dreg = ins->sreg2;
11050 ins->sreg2 = tmp_reg;
11051 }
11053 if (MONO_IS_CALL (ins)) {
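/* Calls end the caching window: lvregs created before the call are not reused after it, so they do not stay live across the call site. */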
11054 /* Clear vreg_to_lvreg array */
11055 for (i = 0; i < lvregs_len; i++)
11056 vreg_to_lvreg [lvregs [i]] = 0;
11057 lvregs_len = 0;
11058 } else if (ins->opcode == OP_NOP) {
11059 ins->dreg = -1;
11060 MONO_INST_NULLIFY_SREGS (ins);
11061 }
11063 if (cfg->verbose_level > 2)
11064 mono_print_ins_index (1, ins);
11065 }
11066 }
11068 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11069 /*
11070 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11071 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11072 */
11073 for (i = 0; i < cfg->num_varinfo; ++i) {
11074 int vreg = MONO_VARINFO (cfg, i)->vreg;
11075 MonoInst *ins;
11077 if (live_range_start [vreg]) {
11078 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11079 ins->inst_c0 = i;
11080 ins->inst_c1 = vreg;
11081 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11082 }
11083 if (live_range_end [vreg]) {
11084 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11085 ins->inst_c0 = i;
11086 ins->inst_c1 = vreg;
11087 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11088 }
11089 }
11090 #endif
11092 g_free (live_range_start);
11093 g_free (live_range_end);
11094 g_free (live_range_start_bb);
11095 g_free (live_range_end_bb);
11096 }
11098 /*
11099 * FIXME:
11100 * - use 'iadd' instead of 'int_add'
11101 * - handling ovf opcodes: decompose in method_to_ir.
11102 * - unify iregs/fregs
11103 * -> partly done, the missing parts are:
11104 * - a more complete unification would involve unifying the hregs as well, so
11105 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11106 * would no longer map to the machine hregs, so the code generators would need to
11107 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11108 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11109 * fp/non-fp branches speeds it up by about 15%.
11110 * - use sext/zext opcodes instead of shifts
11111 * - add OP_ICALL
11112 * - get rid of TEMPLOADs if possible and use vregs instead
11113 * - clean up usage of OP_P/OP_ opcodes
11114 * - cleanup usage of DUMMY_USE
11115 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11116 * stack
11117 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11118 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11119 * - make sure handle_stack_args () is called before the branch is emitted
11120 * - when the new IR is done, get rid of all unused stuff
11121 * - COMPARE/BEQ as separate instructions or unify them ?
11122 * - keeping them separate allows specialized compare instructions like
11123 * compare_imm, compare_membase
11124 * - most back ends unify fp compare+branch, fp compare+ceq
11125 * - integrate mono_save_args into inline_method
11126 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11127 * - handle long shift opts on 32 bit platforms somehow: they require
11128 * 3 sregs (2 for arg1 and 1 for arg2)
11129 * - make byref a 'normal' type.
11130 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11131 * variable if needed.
11132 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11133 * like inline_method.
11134 * - remove inlining restrictions
11135 * - fix LNEG and enable cfold of INEG
11136 * - generalize x86 optimizations like ldelema as a peephole optimization
11137 * - add store_mem_imm for amd64
11138 * - optimize the loading of the interruption flag in the managed->native wrappers
11139 * - avoid special handling of OP_NOP in passes
11140 * - move code inserting instructions into one function/macro.
11141 * - try a coalescing phase after liveness analysis
11142 * - add float -> vreg conversion + local optimizations on !x86
11143 * - figure out how to handle decomposed branches during optimizations, ie.
11144 * compare+branch, op_jump_table+op_br etc.
11145 * - promote RuntimeXHandles to vregs
11146 * - vtype cleanups:
11147 * - add a NEW_VARLOADA_VREG macro
11148 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11149 * accessing vtype fields.
11150 * - get rid of I8CONST on 64 bit platforms
11151 * - dealing with the increase in code size due to branches created during opcode
11152 * decomposition:
11153 * - use extended basic blocks
11154 * - all parts of the JIT
11155 * - handle_global_vregs () && local regalloc
11156 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11157 * - sources of increase in code size:
11158 * - vtypes
11159 * - long compares
11160 * - isinst and castclass
11161 * - lvregs not allocated to global registers even if used multiple times
11162 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11163 * meaningful.
11164 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11165 * - add all micro optimizations from the old JIT
11166 * - put tree optimizations into the deadce pass
11167 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11168 * specific function.
11169 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11170 * fcompare + branchCC.
11171 * - create a helper function for allocating a stack slot, taking into account
11172 * MONO_CFG_HAS_SPILLUP.
11173 * - merge r68207.
11174 * - merge the ia64 switch changes.
11175 * - optimize mono_regstate2_alloc_int/float.
11176 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11177 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11178 * parts of the tree could be separated by other instructions, killing the tree
11179 * arguments, or stores killing loads etc. Also, should we fold loads into other
11180 * instructions if the result of the load is used multiple times ?
11181 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11182 * - LAST MERGE: 108395.
11183 * - when returning vtypes in registers, generate IR and append it to the end of the
11184 * last bb instead of doing it in the epilog.
11185 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11186 */
11188 /*
11190 NOTES
11191 -----
11193 - When to decompose opcodes:
11194 - earlier: this makes some optimizations hard to implement, since the low level IR
11195 no longer contains the necessary information. But it is easier to do.
11196 - later: harder to implement, enables more optimizations.
11197 - Branches inside bblocks:
11198 - created when decomposing complex opcodes.
11199 - branches to another bblock: harmless, but not tracked by the branch
11200 optimizations, so need to branch to a label at the start of the bblock.
11201 - branches to inside the same bblock: very problematic, trips up the local
11202 reg allocator. Can be fixed by splitting the current bblock, but that is a
11203 complex operation, since some local vregs can become global vregs etc.
11204 - Local/global vregs:
11205 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11206 local register allocator.
11207 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11208 structure, created by mono_create_var (). Assigned to hregs or the stack by
11209 the global register allocator.
11210 - When to do optimizations like alu->alu_imm:
11211 - earlier -> saves work later on since the IR will be smaller/simpler
11212 - later -> can work on more instructions
11213 - Handling of valuetypes:
11214 - When a vtype is pushed on the stack, a new temporary is created, an
11215 instruction computing its address (LDADDR) is emitted and pushed on
11216 the stack. Need to optimize cases when the vtype is used immediately as in
11217 argument passing, stloc etc.
11218 - Instead of the to_end stuff in the old JIT, simply call the function handling
11219 the values on the stack before emitting the last instruction of the bb.
11220 */
11222 #endif /* DISABLE_JIT */