2010-03-08 Zoltan Varga <vargaz@gmail.com>
[mono/afaerber.git] / mono / mini / method-to-ir.c
blobd762032e224810b68e2fb1ad3fe72a471606efbc
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
55 #include "mini.h"
56 #include "trace.h"
58 #include "ir-emit.h"
60 #include "jit-icalls.h"
61 #include "jit.h"
62 #include "debugger-agent.h"
/* Cost heuristics and error-exit macros used throughout method_to_ir (). */
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20

/* Abort inlining (jump to inline_failure) when we are compiling an inlined body. */
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)

/* Bail out of compilation if a previous step recorded an exception on CFG. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)

/* Record a MethodAccessException on CFG and exit compilation. */
#define METHOD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE);	\
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (cil_method_fname);	\
		goto exception_exit;	\
	} while (0)

/* Record a FieldAccessException on CFG and exit compilation. */
#define FIELD_ACCESS_FAILURE do {	\
		char *method_fname = mono_method_full_name (method, TRUE);	\
		char *field_fname = mono_field_full_name (field);	\
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS;	\
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);	\
		g_free (method_fname);	\
		g_free (field_fname);	\
		goto exception_exit;	\
	} while (0)

/* Abort generic sharing for the current method when OPCODE cannot be shared. */
#define GENERIC_SHARING_FAILURE(opcode) do {		\
		if (cfg->generic_sharing_context) {	\
			if (cfg->verbose_level > 2)	\
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;	\
			goto exception_exit;	\
		}			\
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
125 #ifdef MINI_OP
126 #undef MINI_OP
127 #endif
128 #ifdef MINI_OP3
129 #undef MINI_OP3
130 #endif
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
133 #define NONE ' '
134 #define IREG 'i'
135 #define FREG 'f'
136 #define VREG 'v'
137 #define XREG 'x'
138 #if SIZEOF_REGISTER == 8
139 #define LREG IREG
140 #else
141 #define LREG 'l'
142 #endif
143 /* keep in sync with the enum in mini.h */
144 const char
145 ins_info[] = {
146 #include "mini-ops.h"
148 #undef MINI_OP
149 #undef MINI_OP3
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
160 #undef MINI_OP
161 #undef MINI_OP3
/* Initialize a MonoMethodVar: no first use yet, no hard register, index ID. */
#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
169 void
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
177 guint32
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
183 guint32
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
189 guint32
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
195 guint32
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
201 guint
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
204 if (type->byref)
205 return OP_MOVE;
207 handle_enum:
208 switch (type->type) {
209 case MONO_TYPE_I1:
210 case MONO_TYPE_U1:
211 case MONO_TYPE_BOOLEAN:
212 return OP_MOVE;
213 case MONO_TYPE_I2:
214 case MONO_TYPE_U2:
215 case MONO_TYPE_CHAR:
216 return OP_MOVE;
217 case MONO_TYPE_I4:
218 case MONO_TYPE_U4:
219 return OP_MOVE;
220 case MONO_TYPE_I:
221 case MONO_TYPE_U:
222 case MONO_TYPE_PTR:
223 case MONO_TYPE_FNPTR:
224 return OP_MOVE;
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
230 return OP_MOVE;
231 case MONO_TYPE_I8:
232 case MONO_TYPE_U8:
233 #if SIZEOF_REGISTER == 8
234 return OP_MOVE;
235 #else
236 return OP_LMOVE;
237 #endif
238 case MONO_TYPE_R4:
239 return OP_FMOVE;
240 case MONO_TYPE_R8:
241 return OP_FMOVE;
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
245 goto handle_enum;
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
248 return OP_XMOVE;
249 return OP_VMOVE;
250 case MONO_TYPE_TYPEDBYREF:
251 return OP_VMOVE;
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
254 goto handle_enum;
255 case MONO_TYPE_VAR:
256 case MONO_TYPE_MVAR:
257 g_assert (cfg->generic_sharing_context);
258 return OP_MOVE;
259 default:
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
262 return -1;
265 void
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
268 int i;
269 MonoInst *tree;
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
274 printf (", OUT: ");
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
277 printf (" ]\n");
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
284 * file.
286 #ifndef DISABLE_JIT
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if (!(tblock)) { \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
298 } while (0)
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
309 } while (0)
310 #endif
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 MonoInst *widen; \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
321 } while (0)
322 #else
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
324 #endif
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
328 sp -= 2; \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
332 CHECK_TYPE (ins); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
338 } while (0)
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
342 sp--; \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
345 CHECK_TYPE (ins); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
349 } while (0)
351 #define ADD_BINCOND(next_block) do { \
352 MonoInst *cmp; \
353 sp -= 2; \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
358 CHECK_TYPE (cmp); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
368 } else { \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
380 } while (0)
382 /* *
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
389 static void
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
393 int i, found;
395 #if 0
396 if (from->cil_code) {
397 if (to->cil_code)
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
399 else
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
401 } else {
402 if (to->cil_code)
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
404 else
405 printf ("edge from entry to exit\n");
407 #endif
409 found = FALSE;
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
412 found = TRUE;
413 break;
416 if (!found) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
421 newa [i] = to;
422 from->out_count++;
423 from->out_bb = newa;
426 found = FALSE;
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
429 found = TRUE;
430 break;
433 if (!found) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
438 newa [i] = from;
439 to->in_count++;
440 to->in_bb = newa;
444 void
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
456 * Returns:
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
464 static int
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
470 int i;
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 else
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
491 return -1;
494 static GList*
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
501 int i;
502 GList *res = NULL;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
510 g_assert (handler);
511 res = g_list_append (res, handler);
515 return res;
518 static void
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
521 MonoInst *var;
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
524 if (var)
525 return;
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
534 MonoInst *
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
540 static MonoInst*
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
543 MonoInst *var;
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
546 if (var)
547 return var;
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
555 return var;
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
562 void
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
565 MonoClass *klass;
567 inst->klass = klass = mono_class_from_mono_type (type);
568 if (type->byref) {
569 inst->type = STACK_MP;
570 return;
573 handle_enum:
574 switch (type->type) {
575 case MONO_TYPE_VOID:
576 inst->type = STACK_INV;
577 return;
578 case MONO_TYPE_I1:
579 case MONO_TYPE_U1:
580 case MONO_TYPE_BOOLEAN:
581 case MONO_TYPE_I2:
582 case MONO_TYPE_U2:
583 case MONO_TYPE_CHAR:
584 case MONO_TYPE_I4:
585 case MONO_TYPE_U4:
586 inst->type = STACK_I4;
587 return;
588 case MONO_TYPE_I:
589 case MONO_TYPE_U:
590 case MONO_TYPE_PTR:
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
593 return;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
600 return;
601 case MONO_TYPE_I8:
602 case MONO_TYPE_U8:
603 inst->type = STACK_I8;
604 return;
605 case MONO_TYPE_R4:
606 case MONO_TYPE_R8:
607 inst->type = STACK_R8;
608 return;
609 case MONO_TYPE_VALUETYPE:
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
612 goto handle_enum;
613 } else {
614 inst->klass = klass;
615 inst->type = STACK_VTYPE;
616 return;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
621 return;
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
624 goto handle_enum;
625 case MONO_TYPE_VAR :
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
629 * really a reference
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
633 return;
634 default:
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
640 * The following tables are used to quickly validate the IL code in type_from_op ().
642 static const char
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
654 static const char
655 neg_table [] = {
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
660 static const char
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
672 static const char
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
675 {0},
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
686 static const char
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
703 static const guint16
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
709 static const guint16
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
715 static const guint16
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
721 static const guint16
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
727 static const guint16
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
733 static const guint16
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
739 static const guint16
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
752 static void
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
756 /* binops */
757 case CEE_ADD:
758 case CEE_SUB:
759 case CEE_MUL:
760 case CEE_DIV:
761 case CEE_REM:
762 /* FIXME: check unverifiable args for STACK_MP */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
765 break;
766 case CEE_DIV_UN:
767 case CEE_REM_UN:
768 case CEE_AND:
769 case CEE_OR:
770 case CEE_XOR:
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
773 break;
774 case CEE_SHL:
775 case CEE_SHR:
776 case CEE_SHR_UN:
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
779 break;
780 case OP_COMPARE:
781 case OP_LCOMPARE:
782 case OP_ICOMPARE:
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
788 else
789 ins->opcode = OP_ICOMPARE;
790 break;
791 case OP_ICOMPARE_IMM:
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
795 break;
796 case CEE_BEQ:
797 case CEE_BGE:
798 case CEE_BGT:
799 case CEE_BLE:
800 case CEE_BLT:
801 case CEE_BNE_UN:
802 case CEE_BGE_UN:
803 case CEE_BGT_UN:
804 case CEE_BLE_UN:
805 case CEE_BLT_UN:
806 ins->opcode += beqops_op_map [src1->type];
807 break;
808 case OP_CEQ:
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
811 break;
812 case OP_CGT:
813 case OP_CGT_UN:
814 case OP_CLT:
815 case OP_CLT_UN:
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
818 break;
819 /* unops */
820 case CEE_NEG:
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
823 break;
824 case CEE_NOT:
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
827 else
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
830 break;
831 case CEE_CONV_I1:
832 case CEE_CONV_I2:
833 case CEE_CONV_I4:
834 case CEE_CONV_U4:
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
837 break;
838 case CEE_CONV_R_UN:
839 ins->type = STACK_R8;
840 switch (src1->type) {
841 case STACK_I4:
842 case STACK_PTR:
843 ins->opcode = OP_ICONV_TO_R_UN;
844 break;
845 case STACK_I8:
846 ins->opcode = OP_LCONV_TO_R_UN;
847 break;
849 break;
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
858 break;
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
863 break;
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
872 break;
873 case CEE_CONV_U:
874 ins->type = STACK_PTR;
875 switch (src1->type) {
876 case STACK_I4:
877 ins->opcode = OP_ICONV_TO_U;
878 break;
879 case STACK_PTR:
880 case STACK_MP:
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
883 #else
884 ins->opcode = OP_MOVE;
885 #endif
886 break;
887 case STACK_I8:
888 ins->opcode = OP_LCONV_TO_U;
889 break;
890 case STACK_R8:
891 ins->opcode = OP_FCONV_TO_U;
892 break;
894 break;
895 case CEE_CONV_I8:
896 case CEE_CONV_U8:
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
899 break;
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
904 break;
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
909 break;
910 case CEE_CONV_R4:
911 case CEE_CONV_R8:
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
914 break;
915 case OP_CKFINITE:
916 ins->type = STACK_R8;
917 break;
918 case CEE_CONV_U2:
919 case CEE_CONV_U1:
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
922 break;
923 case CEE_CONV_I:
924 case CEE_CONV_OVF_I:
925 case CEE_CONV_OVF_U:
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
928 break;
929 case CEE_ADD_OVF:
930 case CEE_ADD_OVF_UN:
931 case CEE_MUL_OVF:
932 case CEE_MUL_OVF_UN:
933 case CEE_SUB_OVF:
934 case CEE_SUB_OVF_UN:
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
939 break;
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
942 break;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
950 break;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
953 break;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
957 break;
958 default:
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
960 break;
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
967 static const char
968 ldind_type [] = {
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
#if 0

static const char
param_table [STACK_MAX] [STACK_MAX] = {
	{0},
};

/*
 * Dead code (compiled out): rough check that the eval-stack values in ARGS
 * are compatible with signature SIG; returns 0 on mismatch, 1 otherwise.
 */
static int
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
	int i;

	if (sig->hasthis) {
		/* 'this' must be a reference or managed pointer */
		switch (args->type) {
		case STACK_I4:
		case STACK_I8:
		case STACK_R8:
		case STACK_VTYPE:
		case STACK_INV:
			return 0;
		}
		args++;
	}
	for (i = 0; i < sig->param_count; ++i) {
		switch (args [i].type) {
		case STACK_INV:
			return 0;
		case STACK_MP:
			if (!sig->params [i]->byref)
				return 0;
			continue;
		case STACK_OBJ:
			if (sig->params [i]->byref)
				return 0;
			switch (sig->params [i]->type) {
			case MONO_TYPE_CLASS:
			case MONO_TYPE_STRING:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_ARRAY:
				break;
			default:
				return 0;
			}
			continue;
		case STACK_R8:
			if (sig->params [i]->byref)
				return 0;
			if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
				return 0;
			continue;
		case STACK_PTR:
		case STACK_I4:
		case STACK_I8:
		case STACK_VTYPE:
			break;
		}
		/*if (!param_table [args [i].type] [sig->params [i]->type])
			return 0;*/
	}
	return 1;
}
#endif
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
1040 inline static MonoInst *
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
1050 * compiling.
1052 MonoInst *
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1057 return NULL;
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
1062 #else
1063 return NULL;
1064 #endif
1067 static MonoInst *
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
1081 static MonoType*
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1088 case STACK_MP:
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1092 default:
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
1095 return NULL;
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
1101 t = mono_type_get_underlying_type (t);
1102 switch (t->type) {
1103 case MONO_TYPE_I1:
1104 case MONO_TYPE_U1:
1105 case MONO_TYPE_BOOLEAN:
1106 case MONO_TYPE_I2:
1107 case MONO_TYPE_U2:
1108 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_I4:
1110 case MONO_TYPE_U4:
1111 return STACK_I4;
1112 case MONO_TYPE_I:
1113 case MONO_TYPE_U:
1114 case MONO_TYPE_PTR:
1115 case MONO_TYPE_FNPTR:
1116 return STACK_PTR;
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1122 return STACK_OBJ;
1123 case MONO_TYPE_I8:
1124 case MONO_TYPE_U8:
1125 return STACK_I8;
1126 case MONO_TYPE_R4:
1127 case MONO_TYPE_R8:
1128 return STACK_R8;
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
1131 return STACK_VTYPE;
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1134 return STACK_VTYPE;
1135 else
1136 return STACK_OBJ;
1137 break;
1138 default:
1139 g_assert_not_reached ();
1142 return -1;
1145 static MonoClass*
1146 array_access_to_klass (int opcode)
1148 switch (opcode) {
1149 case CEE_LDELEM_U1:
1150 return mono_defaults.byte_class;
1151 case CEE_LDELEM_U2:
1152 return mono_defaults.uint16_class;
1153 case CEE_LDELEM_I:
1154 case CEE_STELEM_I:
1155 return mono_defaults.int_class;
1156 case CEE_LDELEM_I1:
1157 case CEE_STELEM_I1:
1158 return mono_defaults.sbyte_class;
1159 case CEE_LDELEM_I2:
1160 case CEE_STELEM_I2:
1161 return mono_defaults.int16_class;
1162 case CEE_LDELEM_I4:
1163 case CEE_STELEM_I4:
1164 return mono_defaults.int32_class;
1165 case CEE_LDELEM_U4:
1166 return mono_defaults.uint32_class;
1167 case CEE_LDELEM_I8:
1168 case CEE_STELEM_I8:
1169 return mono_defaults.int64_class;
1170 case CEE_LDELEM_R4:
1171 case CEE_STELEM_R4:
1172 return mono_defaults.single_class;
1173 case CEE_LDELEM_R8:
1174 case CEE_STELEM_R8:
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1179 default:
1180 g_assert_not_reached ();
1182 return NULL;
1186 * We try to share variables when possible
1188 static MonoInst *
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1191 MonoInst *res;
1192 int pos, vnum;
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
1201 case STACK_I4:
1202 case STACK_I8:
1203 case STACK_R8:
1204 case STACK_PTR:
1205 case STACK_MP:
1206 case STACK_OBJ:
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
1211 break;
1212 default:
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1215 return res;
1218 static void
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First exit from this bblock: pick (or create) the set of variables
		 * used to carry the stack values, preferring an in_stack already
		 * assigned to one of the successors. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpouse, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors which do not have an in_stack
	 * yet; a successor with a mismatching stack depth makes the method
	 * unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Store each stack value into its carrier variable, and replace the
	 * stack entries by the variables themselves. */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */

	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
1363 static void
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1374 else {
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Load the interface bitmap of the class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* The interface id is only known at load time, so compute
		 * bitmap [iid >> 3] & (1 << (iid & 7)) with emitted IR. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* Byte index and bit mask are compile time constants here. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	/* Same as mini_emit_load_intf_bit_reg_class (), but the bitmap is read
	 * from the vtable instead of the class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* The interface id is only known at load time, so compute
		 * bitmap [iid >> 3] & (1 << (iid & 7)) with emitted IR. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* Byte index and bit mask are compile time constants here. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
1446 * Emit code which checks whenever the interface id of @klass is smaller than
1447 * than the value given by max_iid_reg.
1449 static void
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1458 else
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1460 if (false_target)
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1462 else
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
1467 static void
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
1478 static void
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an "is instance" check of the MonoClass in KLASS_REG against KLASS
 * using the supertypes array. Branches to TRUE_TARGET when the supertype at
 * klass's depth matches, and to FALSE_TARGET when the inheritance depth
 * already rules the match out; otherwise falls through. When KLASS_INS is
 * non-NULL, the class to compare against is taken from its dreg at run time
 * instead of KLASS.
 */
static void
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* Deep hierarchies: verify the candidate's supertypes array is
		 * long enough before indexing into it. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	/* Load supertypes [klass->idepth - 1] of the candidate class. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (klass_ins) {
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		/* The class pointer is only known at load time. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
1514 static void
1515 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1517 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1520 static void
1521 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1523 int intf_reg = alloc_preg (cfg);
1525 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1526 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1528 if (true_target)
1529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1530 else
1531 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1535 * Variant of the above that takes a register to the class, not the vtable.
1537 static void
1538 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int intf_bit_reg = alloc_preg (cfg);
1542 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1543 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1545 if (true_target)
1546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1547 else
1548 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1551 static inline void
1552 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1554 if (klass_inst) {
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1556 } else if (cfg->compile_aot) {
1557 int const_reg = alloc_preg (cfg);
1558 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1560 } else {
1561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1563 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1566 static inline void
1567 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1569 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1572 static inline void
1573 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1575 if (cfg->compile_aot) {
1576 int const_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1578 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1579 } else {
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: defined below, mutually recursive with mini_emit_castclass_inst (). */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the MonoClass in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Array classes check the rank
 * and recursively check the element (cast) class; non-array classes check
 * the supertypes array. When KLASS_INS is non-NULL, the class to compare
 * against is taken from its dreg at run time (only valid for non-array
 * classes). OBJ_REG == -1 skips the vector (szarray bounds) check.
 */
static void
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* The candidate must be an array of the same rank. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Then check the element (cast) class. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* Deep hierarchies: verify the candidate's supertypes array
			 * is long enough before indexing into it. */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		/* supertypes [klass->idepth - 1] must be klass itself. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
	}
}
1641 static void
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1644 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR which sets SIZE bytes at DESTREG+OFFSET to VAL, using the
 * widest stores ALIGN (and the architecture) permits. Only VAL == 0 is
 * supported (asserted below). align == 0 is treated as align == 4.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	/* Small, sufficiently aligned sets become a single immediate store. */
	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			/* NOTE(review): unreachable — the guard above limits size <= 4;
			 * the guard was presumably meant to be size <= SIZEOF_REGISTER. */
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	/* General case: materialize the value and emit a store sequence. */
	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* Align to 8 bytes with one 4-byte store, then use 8-byte stores. */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* Finish the tail with progressively narrower stores. */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
1725 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit IR which copies SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * using the widest load/store pairs ALIGN (and the architecture) permits.
 * align == 0 is treated as align == 4.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	if (align < 4) {
		/* This could be optimized further if neccesary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* Copy 8 bytes at a time where the register width allows it. */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* Finish the tail with progressively narrower copies. */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1789 #ifndef DISABLE_JIT
1791 static int
1792 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1794 if (type->byref)
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1797 handle_enum:
1798 type = mini_get_basic_type_from_generic (gsctx, type);
1799 switch (type->type) {
1800 case MONO_TYPE_VOID:
1801 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1802 case MONO_TYPE_I1:
1803 case MONO_TYPE_U1:
1804 case MONO_TYPE_BOOLEAN:
1805 case MONO_TYPE_I2:
1806 case MONO_TYPE_U2:
1807 case MONO_TYPE_CHAR:
1808 case MONO_TYPE_I4:
1809 case MONO_TYPE_U4:
1810 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1811 case MONO_TYPE_I:
1812 case MONO_TYPE_U:
1813 case MONO_TYPE_PTR:
1814 case MONO_TYPE_FNPTR:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1816 case MONO_TYPE_CLASS:
1817 case MONO_TYPE_STRING:
1818 case MONO_TYPE_OBJECT:
1819 case MONO_TYPE_SZARRAY:
1820 case MONO_TYPE_ARRAY:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1822 case MONO_TYPE_I8:
1823 case MONO_TYPE_U8:
1824 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1825 case MONO_TYPE_R4:
1826 case MONO_TYPE_R8:
1827 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1828 case MONO_TYPE_VALUETYPE:
1829 if (type->data.klass->enumtype) {
1830 type = mono_class_enum_basetype (type->data.klass);
1831 goto handle_enum;
1832 } else
1833 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1834 case MONO_TYPE_TYPEDBYREF:
1835 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1836 case MONO_TYPE_GENERICINST:
1837 type = &type->data.generic_class->container_class->byval_arg;
1838 goto handle_enum;
1839 default:
1840 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1842 return -1;
1846 * target_type_is_incompatible:
1847 * @cfg: MonoCompile context
1849 * Check that the item @arg on the evaluation stack can be stored
1850 * in the target type (can be a local, or field, etc).
1851 * The cfg arg can be used to check if we need verification or just
1852 * validity checks.
1854 * Returns: non-0 value if arg can't be stored on a target.
1856 static int
1857 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1859 MonoType *simple_type;
1860 MonoClass *klass;
1862 if (target->byref) {
1863 /* FIXME: check that the pointed to types match */
1864 if (arg->type == STACK_MP)
1865 return arg->klass != mono_class_from_mono_type (target);
1866 if (arg->type == STACK_PTR)
1867 return 0;
1868 return 1;
1871 simple_type = mono_type_get_underlying_type (target);
1872 switch (simple_type->type) {
1873 case MONO_TYPE_VOID:
1874 return 1;
1875 case MONO_TYPE_I1:
1876 case MONO_TYPE_U1:
1877 case MONO_TYPE_BOOLEAN:
1878 case MONO_TYPE_I2:
1879 case MONO_TYPE_U2:
1880 case MONO_TYPE_CHAR:
1881 case MONO_TYPE_I4:
1882 case MONO_TYPE_U4:
1883 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1884 return 1;
1885 return 0;
1886 case MONO_TYPE_PTR:
1887 /* STACK_MP is needed when setting pinned locals */
1888 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1889 return 1;
1890 return 0;
1891 case MONO_TYPE_I:
1892 case MONO_TYPE_U:
1893 case MONO_TYPE_FNPTR:
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1895 return 1;
1896 return 0;
1897 case MONO_TYPE_CLASS:
1898 case MONO_TYPE_STRING:
1899 case MONO_TYPE_OBJECT:
1900 case MONO_TYPE_SZARRAY:
1901 case MONO_TYPE_ARRAY:
1902 if (arg->type != STACK_OBJ)
1903 return 1;
1904 /* FIXME: check type compatibility */
1905 return 0;
1906 case MONO_TYPE_I8:
1907 case MONO_TYPE_U8:
1908 if (arg->type != STACK_I8)
1909 return 1;
1910 return 0;
1911 case MONO_TYPE_R4:
1912 case MONO_TYPE_R8:
1913 if (arg->type != STACK_R8)
1914 return 1;
1915 return 0;
1916 case MONO_TYPE_VALUETYPE:
1917 if (arg->type != STACK_VTYPE)
1918 return 1;
1919 klass = mono_class_from_mono_type (simple_type);
1920 if (klass != arg->klass)
1921 return 1;
1922 return 0;
1923 case MONO_TYPE_TYPEDBYREF:
1924 if (arg->type != STACK_VTYPE)
1925 return 1;
1926 klass = mono_class_from_mono_type (simple_type);
1927 if (klass != arg->klass)
1928 return 1;
1929 return 0;
1930 case MONO_TYPE_GENERICINST:
1931 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1932 if (arg->type != STACK_VTYPE)
1933 return 1;
1934 klass = mono_class_from_mono_type (simple_type);
1935 if (klass != arg->klass)
1936 return 1;
1937 return 0;
1938 } else {
1939 if (arg->type != STACK_OBJ)
1940 return 1;
1941 /* FIXME: check type compatibility */
1942 return 0;
1944 case MONO_TYPE_VAR:
1945 case MONO_TYPE_MVAR:
1946 /* FIXME: all the arguments must be references for now,
1947 * later look inside cfg and see if the arg num is
1948 * really a reference
1950 g_assert (cfg->generic_sharing_context);
1951 if (arg->type != STACK_OBJ)
1952 return 1;
1953 return 0;
1954 default:
1955 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1957 return 1;
1961 * Prepare arguments for passing to a function call.
1962 * Return a non-zero value if the arguments can't be passed to the given
1963 * signature.
1964 * The type checks are not yet complete and some conversions may need
1965 * casts on 32 or 64 bit architectures.
1967 * FIXME: implement this using target_type_is_incompatible ()
1969 static int
1970 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1972 MonoType *simple_type;
1973 int i;
1975 if (sig->hasthis) {
1976 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1977 return 1;
1978 args++;
1980 for (i = 0; i < sig->param_count; ++i) {
1981 if (sig->params [i]->byref) {
1982 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1983 return 1;
1984 continue;
1986 simple_type = sig->params [i];
1987 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1988 handle_enum:
1989 switch (simple_type->type) {
1990 case MONO_TYPE_VOID:
1991 return 1;
1992 continue;
1993 case MONO_TYPE_I1:
1994 case MONO_TYPE_U1:
1995 case MONO_TYPE_BOOLEAN:
1996 case MONO_TYPE_I2:
1997 case MONO_TYPE_U2:
1998 case MONO_TYPE_CHAR:
1999 case MONO_TYPE_I4:
2000 case MONO_TYPE_U4:
2001 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2002 return 1;
2003 continue;
2004 case MONO_TYPE_I:
2005 case MONO_TYPE_U:
2006 case MONO_TYPE_PTR:
2007 case MONO_TYPE_FNPTR:
2008 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2009 return 1;
2010 continue;
2011 case MONO_TYPE_CLASS:
2012 case MONO_TYPE_STRING:
2013 case MONO_TYPE_OBJECT:
2014 case MONO_TYPE_SZARRAY:
2015 case MONO_TYPE_ARRAY:
2016 if (args [i]->type != STACK_OBJ)
2017 return 1;
2018 continue;
2019 case MONO_TYPE_I8:
2020 case MONO_TYPE_U8:
2021 if (args [i]->type != STACK_I8)
2022 return 1;
2023 continue;
2024 case MONO_TYPE_R4:
2025 case MONO_TYPE_R8:
2026 if (args [i]->type != STACK_R8)
2027 return 1;
2028 continue;
2029 case MONO_TYPE_VALUETYPE:
2030 if (simple_type->data.klass->enumtype) {
2031 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2032 goto handle_enum;
2034 if (args [i]->type != STACK_VTYPE)
2035 return 1;
2036 continue;
2037 case MONO_TYPE_TYPEDBYREF:
2038 if (args [i]->type != STACK_VTYPE)
2039 return 1;
2040 continue;
2041 case MONO_TYPE_GENERICINST:
2042 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2043 goto handle_enum;
2045 default:
2046 g_error ("unknown type 0x%02x in check_call_signature",
2047 simple_type->type);
2050 return 0;
2053 static int
2054 callvirt_to_call (int opcode)
2056 switch (opcode) {
2057 case OP_CALLVIRT:
2058 return OP_CALL;
2059 case OP_VOIDCALLVIRT:
2060 return OP_VOIDCALL;
2061 case OP_FCALLVIRT:
2062 return OP_FCALL;
2063 case OP_VCALLVIRT:
2064 return OP_VCALL;
2065 case OP_LCALLVIRT:
2066 return OP_LCALL;
2067 default:
2068 g_assert_not_reached ();
2071 return -1;
2074 static int
2075 callvirt_to_call_membase (int opcode)
2077 switch (opcode) {
2078 case OP_CALLVIRT:
2079 return OP_CALL_MEMBASE;
2080 case OP_VOIDCALLVIRT:
2081 return OP_VOIDCALL_MEMBASE;
2082 case OP_FCALLVIRT:
2083 return OP_FCALL_MEMBASE;
2084 case OP_LCALLVIRT:
2085 return OP_LCALL_MEMBASE;
2086 case OP_VCALLVIRT:
2087 return OP_VCALL_MEMBASE;
2088 default:
2089 g_assert_not_reached ();
2092 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit code which loads the IMT identifier into the architecture's IMT
 * register before CALL. If IMT_ARG is non-NULL it already holds the value;
 * otherwise the method itself is used (an AOT constant under AOT compilation,
 * a plain pointer constant otherwise). On architectures without a dedicated
 * IMT register, delegate to the arch backend.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		/* Caller already computed the IMT value */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	/* Bind method_reg to the fixed IMT register for the duration of the call */
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2121 static MonoJumpInfo *
2122 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2124 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2126 ji->ip.i = ip;
2127 ji->type = type;
2128 ji->data.target = target;
2130 return ji;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS and run the arch-specific argument
 * marshalling over it. CALLI/VIRTUAL/TAIL select the call flavour. The
 * instruction is NOT added to the current bblock; callers do that after
 * setting the target. Handles vtype returns via OP_OUTARG_VTRETADDR and,
 * on soft-float targets, converts r4 arguments before the call sequence.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			/* Tail calls returning a vtype reuse the caller's return buffer */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoType *t;
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
			else
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				MonoInst *conv;

				iargs [0] = in;
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;
			}
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-argument area and remember we make calls */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2230 inline static MonoInst*
2231 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2233 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2235 call->inst.sreg1 = addr->dreg;
2237 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2239 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but additionally pass RGCTX_ARG in the
 * architecture's RGCTX register. The move into the rgctx vreg is emitted
 * BEFORE the call so it is not clobbered by the call sequence. Only
 * available on architectures defining MONO_ARCH_RGCTX_REG.
 */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	MonoCallInst *call;
	int rgctx_reg = -1;

	if (rgctx_arg) {
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	if (rgctx_arg) {
		/* Bind the rgctx vreg to the fixed hardware register for the call */
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
	}
	return (MonoInst*)call;
#else
	g_assert_not_reached ();
	return NULL;
#endif
}
2266 static MonoInst*
2267 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2268 static MonoInst*
2269 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG. THIS non-NULL selects a
 * virtual call; IMT_ARG, if set, is passed as the IMT identifier for
 * interface dispatch. Devirtualizes where possible (non-virtual targets,
 * final methods, delegate Invoke) and routes remoting-capable receivers
 * through the remoting-invoke-with-check wrapper.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	int context_used;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);

	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		MonoInst *addr;

		g_assert (cfg->generic_sharing_context);

		/* Shared code must fetch the wrapper's address through the rgctx */
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);

		return mono_emit_calli (cfg, sig, args, addr);
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	else
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);

			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		/* True virtual dispatch: load the slot from the vtable (or IMT) */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			if (mono_use_imt) {
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				/* IMT slots live at negative offsets from the vtable pointer */
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* No IMT: load the interface's slot table from the vtable */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				/* Only generic virtual methods pass an IMT arg here */
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but additionally pass VTABLE_ARG
 * in the RGCTX register. The move into the rgctx vreg must be emitted
 * before the call instructions, hence the two vtable_arg checks around
 * the call emission. Requires MONO_ARCH_RGCTX_REG when VTABLE_ARG is set.
 */
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
		MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	int rgctx_reg = 0;
#endif
	MonoInst *ins;
	MonoCallInst *call;

	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
#else
		NOT_IMPLEMENTED;
#endif
	}
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;
	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Bind the saved vtable/rgctx value to the fixed register for the call */
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
#else
		NOT_IMPLEMENTED;
#endif
	}

	return ins;
}
2448 MonoInst*
2449 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2451 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2454 MonoInst*
2455 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2456 MonoInst **args)
2458 MonoCallInst *call;
2460 g_assert (sig);
2462 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2463 call->fptr = func;
2465 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2467 return (MonoInst*)call;
2470 MonoInst*
2471 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2473 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2475 g_assert (info);
2477 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2481 * mono_emit_abs_call:
2483 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2485 inline static MonoInst*
2486 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2487 MonoMethodSignature *sig, MonoInst **args)
2489 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2490 MonoInst *ins;
2493 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2494 * handle it.
2496 if (cfg->abs_patches == NULL)
2497 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2498 g_hash_table_insert (cfg->abs_patches, ji, ji);
2499 ins = mono_emit_native_call (cfg, ji, sig, args);
2500 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2501 return ins;
2504 static MonoInst*
2505 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2507 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2508 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2509 int widen_op = -1;
2512 * Native code might return non register sized integers
2513 * without initializing the upper bits.
2515 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2516 case OP_LOADI1_MEMBASE:
2517 widen_op = OP_ICONV_TO_I1;
2518 break;
2519 case OP_LOADU1_MEMBASE:
2520 widen_op = OP_ICONV_TO_U1;
2521 break;
2522 case OP_LOADI2_MEMBASE:
2523 widen_op = OP_ICONV_TO_I2;
2524 break;
2525 case OP_LOADU2_MEMBASE:
2526 widen_op = OP_ICONV_TO_U2;
2527 break;
2528 default:
2529 break;
2532 if (widen_op != -1) {
2533 int dreg = alloc_preg (cfg);
2534 MonoInst *widen;
2536 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2537 widen->type = ins->type;
2538 ins = widen;
2543 return ins;
2546 static MonoMethod*
2547 get_memcpy_method (void)
2549 static MonoMethod *memcpy_method = NULL;
2550 if (!memcpy_method) {
2551 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2552 if (!memcpy_method)
2553 g_error ("Old corlib found. Install a new one");
2555 return memcpy_method;
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 */
void
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
{
	MonoInst *iargs [3];
	int n;
	guint32 align = 0;
	MonoMethod *memcpy_method;

	g_assert (klass);
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (native)
		n = mono_class_native_size (klass, &align);
	else
		n = mono_class_value_size (klass, &align);

#if HAVE_WRITE_BARRIERS
	/* if native is true there should be no references in the struct */
	if (klass->has_references && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used = 0;

			iargs [0] = dest;
			iargs [1] = src;

			if (cfg->generic_sharing_context)
				context_used = mono_class_check_context_used (klass);
			if (context_used) {
				/* Shared code: resolve the class through the rgctx */
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
			} else {
				if (cfg->compile_aot) {
					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
				} else {
					EMIT_NEW_PCONST (cfg, iargs [2], klass);
					mono_class_compute_gc_descriptor (klass);
				}
			}

			/* FIXME: this does the memcpy as well (or
			   should), so we don't need the memcpy
			   afterwards */
			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
		}
	}
#endif

	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
		/* Small copies are inlined as loads/stores */
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
	} else {
		iargs [0] = dest;
		iargs [1] = src;
		EMIT_NEW_ICONST (cfg, iargs [2], n);

		memcpy_method = get_memcpy_method ();
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
	}
}
2626 static MonoMethod*
2627 get_memset_method (void)
2629 static MonoMethod *memset_method = NULL;
2630 if (!memset_method) {
2631 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2632 if (!memset_method)
2633 g_error ("Old corlib found. Install a new one");
2635 return memset_method;
2638 void
2639 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2641 MonoInst *iargs [3];
2642 int n;
2643 guint32 align;
2644 MonoMethod *memset_method;
2646 /* FIXME: Optimize this for the case when dest is an LDADDR */
2648 mono_class_init (klass);
2649 n = mono_class_value_size (klass, &align);
2651 if (n <= sizeof (gpointer) * 5) {
2652 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2654 else {
2655 memset_method = get_memset_method ();
2656 iargs [0] = dest;
2657 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2658 EMIT_NEW_ICONST (cfg, iargs [2], n);
2659 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2663 static MonoInst*
2664 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2666 MonoInst *this = NULL;
2668 g_assert (cfg->generic_sharing_context);
2670 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2671 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2672 !method->klass->valuetype)
2673 EMIT_NEW_ARGLOAD (cfg, this, 0);
2675 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2676 MonoInst *mrgctx_loc, *mrgctx_var;
2678 g_assert (!this);
2679 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2681 mrgctx_loc = mono_get_vtable_var (cfg);
2682 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2684 return mrgctx_var;
2685 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2686 MonoInst *vtable_loc, *vtable_var;
2688 g_assert (!this);
2690 vtable_loc = mono_get_vtable_var (cfg);
2691 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2693 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2694 MonoInst *mrgctx_var = vtable_var;
2695 int vtable_reg;
2697 vtable_reg = alloc_preg (cfg);
2698 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2699 vtable_var->type = STACK_PTR;
2702 return vtable_var;
2703 } else {
2704 MonoInst *ins;
2705 int vtable_reg, res_reg;
2707 vtable_reg = alloc_preg (cfg);
2708 res_reg = alloc_preg (cfg);
2709 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2710 return ins;
2714 static MonoJumpInfoRgctxEntry *
2715 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2717 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2718 res->method = method;
2719 res->in_mrgctx = in_mrgctx;
2720 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2721 res->data->type = patch_type;
2722 res->data->data.target = patch_data;
2723 res->info_type = info_type;
2725 return res;
2728 static inline MonoInst*
2729 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2731 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2734 static MonoInst*
2735 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2736 MonoClass *klass, int rgctx_type)
2738 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2739 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2741 return emit_rgctx_fetch (cfg, rgctx, entry);
2745 * emit_get_rgctx_method:
2747 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2748 * normal constants, else emit a load from the rgctx.
2750 static MonoInst*
2751 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2752 MonoMethod *cmethod, int rgctx_type)
2754 if (!context_used) {
2755 MonoInst *ins;
2757 switch (rgctx_type) {
2758 case MONO_RGCTX_INFO_METHOD:
2759 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2760 return ins;
2761 case MONO_RGCTX_INFO_METHOD_RGCTX:
2762 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2763 return ins;
2764 default:
2765 g_assert_not_reached ();
2767 } else {
2768 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2769 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2771 return emit_rgctx_fetch (cfg, rgctx, entry);
2775 static MonoInst*
2776 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2777 MonoClassField *field, int rgctx_type)
2779 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2780 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2782 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * On return the caller must check @klass for load errors.
 */
static void
emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *vtable_arg;
	MonoCallInst *call;
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (klass);

	if (context_used) {
		/* Shared code: resolve the vtable through the rgctx */
		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
										   klass, MONO_RGCTX_INFO_VTABLE);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);

		if (!vtable)
			return;
		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	}

	/* Call the generic class init trampoline with the vtable argument */
	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
#ifdef MONO_ARCH_VTABLE_REG
	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
	cfg->uses_vtable_reg = TRUE;
#else
	NOT_IMPLEMENTED;
#endif
}
/*
 * On return the caller must check @array_class for load errors
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared-domain code: compare the klass, not the vtable */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* Generic-shared code: fetch the expected vtable from the rgctx */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg;
			MonoVTable *vtable;

			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MonoVTable *vtable;
			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
				return;
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
}
2868 static void
2869 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2871 if (mini_get_debug_options ()->better_cast_details) {
2872 int to_klass_reg = alloc_preg (cfg);
2873 int vtable_reg = alloc_preg (cfg);
2874 int klass_reg = alloc_preg (cfg);
2875 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2877 if (!tls_get) {
2878 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2879 exit (1);
2882 MONO_ADD_INS (cfg->cbb, tls_get);
2883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2884 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2886 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2887 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2892 static void
2893 reset_cast_details (MonoCompile *cfg)
2895 /* Reset the variables holding the cast details */
2896 if (mini_get_debug_options ()->better_cast_details) {
2897 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2899 MONO_ADD_INS (cfg->cbb, tls_get);
2900 /* It is enough to reset the from field */
2901 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2906 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2907 * generic code is generated.
2909 static MonoInst*
2910 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2912 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2914 if (context_used) {
2915 MonoInst *rgctx, *addr;
2917 /* FIXME: What if the class is shared? We might not
2918 have to get the address of the method from the
2919 RGCTX. */
2920 addr = emit_get_rgctx_method (cfg, context_used, method,
2921 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2923 rgctx = emit_get_rgctx (cfg, method, context_used);
2925 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2926 } else {
2927 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for SP [0]: verify the object is a boxed
 * instance whose element class matches KLASS (throwing
 * InvalidCastException otherwise) and return an instruction holding the
 * address of the value, i.e. the object pointer plus the MonoObject
 * header size.
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	/* Faulting load: also serves as the null check */
	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The value follows the MonoObject header */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Shared-domain code passes the domain explicitly */
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
		MonoMethod *managed_alloc = NULL;
		gboolean pass_lw;

		if (!vtable) {
			/* Class failed to load: record the exception for the caller */
			cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
			cfg->exception_ptr = klass;
			return NULL;
		}

#ifndef MONO_CROSS_COMPILE
		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif

		if (managed_alloc) {
			/* GC provides a managed (inlinable) allocator for this vtable */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* The allocator wants the instance size in gpointer-sized words */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the vtable is supplied at runtime by
 * DATA_INST (used by generic-shared code where the class is open at
 * compile time).
 */
static MonoInst*
handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
						gboolean for_box)
{
	MonoInst *iargs [2];
	MonoMethod *managed_alloc = NULL;
	void *alloc_ftn;

	/*
	  FIXME: we cannot get managed_alloc here because we can't get
	  the class's vtable (because it's not a closed class)

	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
	*/

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		iargs [1] = data_inst;
		alloc_ftn = mono_object_new;
	} else {
		/* managed_alloc is always NULL here; see the FIXME above */
		if (managed_alloc) {
			iargs [0] = data_inst;
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		iargs [0] = data_inst;
		alloc_ftn = mono_object_new_specific;
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
3067 * Returns NULL and set the cfg exception on error.
3069 static MonoInst*
3070 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3072 MonoInst *alloc, *ins;
3074 if (mono_class_is_nullable (klass)) {
3075 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3076 return mono_emit_method_call (cfg, method, &val, NULL);
3079 alloc = handle_alloc (cfg, klass, TRUE);
3080 if (!alloc)
3081 return NULL;
3083 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3085 return alloc;
3088 static MonoInst *
3089 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3091 MonoInst *alloc, *ins;
3093 if (mono_class_is_nullable (klass)) {
3094 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3095 /* FIXME: What if the class is shared? We might not
3096 have to get the method address from the RGCTX. */
3097 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3098 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3099 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3101 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3102 } else {
3103 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3105 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3107 return alloc;
// FIXME: This doesn't work yet (class libs tests fail?)
/*
 * Whether an isinst/castclass against KLASS requires the generic icall path
 * instead of the inline vtable comparison. The leading TRUE forces the icall
 * path for every class until the FIXME above is resolved; the remaining
 * tests are the intended fast-path filter (interfaces, arrays, nullables,
 * MBR/sealed classes, variant generics and open type variables).
 */
#define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 * Emit IR implementing 'castclass': verify that SRC is an instance of KLASS
 * (NULL references pass) and return SRC unchanged; the emitted code raises
 * InvalidCastException on failure.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		MonoInst *args [2];

		/* KLASS is open: fetch the concrete class from the RGCTX at run time */
		klass_inst = emit_get_rgctx_klass (cfg, context_used,
						   klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
		} else {
			/* Simple case, handled by the code below */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);

	/* NULL references always pass the cast: skip straight to the end */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed non-array class: a single pointer comparison suffices */
			/* the remoting code is broken, access the class for now */
			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				if (!vt) {
					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
					cfg->exception_ptr = klass;
					return NULL;
				}
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 * Emit IR implementing 'isinst': the result register holds SRC when it is an
 * instance of KLASS (or NULL input) and 0 otherwise.
 * Returns NULL and set the cfg exception on error.
 */
static MonoInst*
handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
{
	MonoInst *ins;
	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);
	int res_reg = alloc_preg (cfg);
	MonoInst *klass_inst = NULL;

	if (context_used) {
		/* KLASS is open: fetch the concrete class from the RGCTX at run time */
		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);

		if (is_complex_isinst (klass)) {
			MonoInst *args [2];

			/* Complex case, handle by an icall */

			/* obj */
			args [0] = src;

			/* klass */
			args [1] = klass_inst;

			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
		} else {
			/* Simple case, the code below can handle it */
		}
	}

	NEW_BBLOCK (cfg, is_null_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, end_bb);

	/* Do the assignment at the beginning, so the other assignment can be if converted */
	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
	ins->type = STACK_OBJ;
	ins->klass = klass;

	/* NULL input: result already holds the (NULL) object, skip all checks */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		g_assert (!context_used);
		/* the is_null_bb target simply copies the input register to the output */
		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
	} else {
		int klass_reg = alloc_preg (cfg);

		if (klass->rank) {
			/* Array case: compare rank, then the element (cast) class */
			int rank_reg = alloc_preg (cfg);
			int eclass_reg = alloc_preg (cfg);

			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
			if (klass->cast_class == mono_defaults.object_class) {
				int parent_reg = alloc_preg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class == mono_defaults.enum_class) {
				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			} else {
				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
					/* Check that the object is a vector too */
					int bounds_reg = alloc_preg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				}

				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
			}
		} else if (mono_class_is_nullable (klass)) {
			g_assert (!context_used);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			/* the is_null_bb target simply copies the input register to the output */
			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
		} else {
			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
				g_assert (!context_used);
				/* Sealed class: a single pointer comparison suffices */
				/* the remoting code is broken, access the class for now */
				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					if (!vt) {
						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
						cfg->exception_ptr = klass;
						return NULL;
					}
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
				} else {
					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
				}
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				/* the is_null_bb target simply copies the input register to the output */
				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
			}
		}
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, is_null_bb);

	MONO_START_BB (cfg, end_bb);

	return ins;
}
/*
 * handle_cisinst:
 *
 * Emit IR for the proxy-aware isinst variant; DREG receives the 0/1/2 result
 * described below.
 */
static MonoInst*
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoInst *ins;
	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg;
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* A NULL reference is "not an instance" (result 1) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Non-proxy object that failed the interface check: result 1 */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* Proxy: result 2 when it carries custom type info, 1 otherwise */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	} else {
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
		/* Proxy: test against the remote class behind the proxy */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	}

	MONO_START_BB (cfg, false_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	/* NOTE(review): this emits a placeholder OP_ICONST whose dreg was already
	 * assigned by the branches above — presumably just to hand a typed
	 * MonoInst back to the caller; confirm before relying on its value. */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_ccastclass:
 *
 * Emit IR for the proxy-aware castclass variant; DREG receives the 0/1
 * result described below.
 */
static MonoInst*
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoInst *ins;
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* NULL references always pass the cast (result 0) */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface check failed: only a transparent proxy may still succeed */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		/* Proxy without custom type info cannot be verified: throw */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	} else {
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* Proxy: test against the remote class behind the proxy */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Plain object: ordinary castclass semantics (throws on failure) */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	}

	MONO_START_BB (cfg, ok_result_bb);

	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* FIXME: */
	/* NOTE(review): placeholder OP_ICONST — dreg was already set by the
	 * branches above; presumably only a typed result carrier. */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->dreg = dreg;
	ins->type = STACK_I4;

	return ins;
}
/*
 * handle_delegate_ctor:
 *
 * Emit inline IR equivalent to mono_delegate_ctor (): allocate a delegate of
 * type KLASS and initialize its target, method, method_code and invoke_impl
 * fields. TARGET is the receiver instruction (may be a NULL constant),
 * METHOD the delegate's target method, CONTEXT_USED non-zero under generic
 * sharing.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
{
	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;
	MonoDomain *domain;
	guint8 **code_slot;

	obj = handle_alloc (cfg, klass, FALSE);
	if (!obj)
		return NULL;

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		if (context_used) {
			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
		} else {
			domain = mono_domain_get ();
			/* Lock protects the lazily-created per-domain method->code-slot hash */
			mono_domain_lock (domain);
			if (!domain_jit_info (domain)->method_code_hash)
				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			if (!code_slot) {
				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
			}
			mono_domain_unlock (domain);

			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
		}

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
	}

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
	} else {
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	}
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */

	return obj;
}
3559 static MonoInst*
3560 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3562 MonoJitICallInfo *info;
3564 /* Need to register the icall so it gets an icall wrapper */
3565 info = mono_get_array_new_va_icall (rank);
3567 cfg->flags |= MONO_CFG_HAS_VARARGS;
3569 /* mono_array_new_va () needs a vararg calling convention */
3570 cfg->disable_llvm = TRUE;
3572 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3573 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3576 static void
3577 mono_emit_load_got_addr (MonoCompile *cfg)
3579 MonoInst *getaddr, *dummy_use;
3581 if (!cfg->got_var || cfg->got_var_allocated)
3582 return;
3584 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3585 getaddr->dreg = cfg->got_var->dreg;
3587 /* Add it to the start of the first bblock */
3588 if (cfg->bb_entry->code) {
3589 getaddr->next = cfg->bb_entry->code;
3590 cfg->bb_entry->code = getaddr;
3592 else
3593 MONO_ADD_INS (cfg->bb_entry, getaddr);
3595 cfg->got_var_allocated = TRUE;
3598 * Add a dummy use to keep the got_var alive, since real uses might
3599 * only be generated by the back ends.
3600 * Add it to end_bblock, so the variable's lifetime covers the whole
3601 * method.
3602 * It would be better to make the usage of the got var explicit in all
3603 * cases when the backend needs it (i.e. calls, throw etc.), so this
3604 * wouldn't be needed.
3606 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3607 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size (bytes) a callee may have to be inlined; initialized
 * lazily from the MONO_INLINELIMIT env var, falling back to
 * INLINE_LENGTH_LIMIT (see mono_method_check_inlining ()). */
static int inline_limit;
/* Guards the one-time initialization of inline_limit above. */
static gboolean inline_limit_inited;
3613 static gboolean
3614 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3616 MonoMethodHeaderSummary header;
3617 MonoVTable *vtable;
3618 #ifdef MONO_ARCH_SOFT_FLOAT
3619 MonoMethodSignature *sig = mono_method_signature (method);
3620 int i;
3621 #endif
3623 if (cfg->generic_sharing_context)
3624 return FALSE;
3626 if (cfg->inline_depth > 10)
3627 return FALSE;
3629 #ifdef MONO_ARCH_HAVE_LMF_OPS
3630 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3631 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3632 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3633 return TRUE;
3634 #endif
3637 if (!mono_method_get_header_summary (method, &header))
3638 return FALSE;
3640 /*runtime, icall and pinvoke are checked by summary call*/
3641 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3642 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3643 (method->klass->marshalbyref) ||
3644 header.has_clauses)
3645 return FALSE;
3647 /* also consider num_locals? */
3648 /* Do the size check early to avoid creating vtables */
3649 if (!inline_limit_inited) {
3650 if (getenv ("MONO_INLINELIMIT"))
3651 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3652 else
3653 inline_limit = INLINE_LENGTH_LIMIT;
3654 inline_limit_inited = TRUE;
3656 if (header.code_size >= inline_limit)
3657 return FALSE;
3660 * if we can initialize the class of the method right away, we do,
3661 * otherwise we don't allow inlining if the class needs initialization,
3662 * since it would mean inserting a call to mono_runtime_class_init()
3663 * inside the inlined code
3665 if (!(cfg->opt & MONO_OPT_SHARED)) {
3666 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3667 if (cfg->run_cctors && method->klass->has_cctor) {
3668 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3669 if (!method->klass->runtime_info)
3670 /* No vtable created yet */
3671 return FALSE;
3672 vtable = mono_class_vtable (cfg->domain, method->klass);
3673 if (!vtable)
3674 return FALSE;
3675 /* This makes so that inline cannot trigger */
3676 /* .cctors: too many apps depend on them */
3677 /* running with a specific order... */
3678 if (! vtable->initialized)
3679 return FALSE;
3680 mono_runtime_class_init (vtable);
3682 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3683 if (!method->klass->runtime_info)
3684 /* No vtable created yet */
3685 return FALSE;
3686 vtable = mono_class_vtable (cfg->domain, method->klass);
3687 if (!vtable)
3688 return FALSE;
3689 if (!vtable->initialized)
3690 return FALSE;
3692 } else {
3694 * If we're compiling for shared code
3695 * the cctor will need to be run at aot method load time, for example,
3696 * or at the end of the compilation of the inlining method.
3698 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3699 return FALSE;
3703 * CAS - do not inline methods with declarative security
3704 * Note: this has to be before any possible return TRUE;
3706 if (mono_method_has_declsec (method))
3707 return FALSE;
3709 #ifdef MONO_ARCH_SOFT_FLOAT
3710 /* FIXME: */
3711 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3712 return FALSE;
3713 for (i = 0; i < sig->param_count; ++i)
3714 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3715 return FALSE;
3716 #endif
3718 return TRUE;
3721 static gboolean
3722 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3724 if (vtable->initialized && !cfg->compile_aot)
3725 return FALSE;
3727 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3728 return FALSE;
3730 if (!mono_class_needs_cctor_run (vtable->klass, method))
3731 return FALSE;
3733 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3734 /* The initialization is already done before the method is called */
3735 return FALSE;
3737 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR computing the address of element INDEX of the rank-1 array ARR
 * whose elements have type KLASS, including the bounds check.
 */
static MonoInst*
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
{
	MonoInst *ins;
	guint32 size;
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Not needed */
		index2_reg = index_reg;
	} else {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	}
#else
	if (index->type == STACK_I8) {
		/* Narrow a 64-bit index to 32 bits on 32-bit targets */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
	} else {
		index2_reg = index_reg;
	}
#endif

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* Power-of-two element sizes: fold the multiply into a single LEA.
		 * fast_log2 is indexed by element size (entries for other sizes are
		 * unused). */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

		return ins;
	}
#endif

	/* Generic path: address = arr + index * size + offsetof (MonoArray, vector) */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * rank-2 array ARR with bounds checks on both dimensions. Only built on
 * architectures with native multiply support (it emits pointer multiplies).
 */
static MonoInst*
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
{
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2;
	MonoInst *ins;
	guint32 size;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 1: realidx = index - lower_bound; the unsigned LE compare
	 * against the length also catches realidx gone negative. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 2: same check, reading the second MonoArrayBounds entry */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* address = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;
	ins->klass = klass;
	MONO_ADD_INS (cfg->cbb, ins);

	return ins;
}
#endif
3855 static MonoInst*
3856 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3858 int rank;
3859 MonoInst *addr;
3860 MonoMethod *addr_method;
3861 int element_size;
3863 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3865 if (rank == 1)
3866 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3868 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3869 /* emit_ldelema_2 depends on OP_LMUL */
3870 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3871 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3873 #endif
3875 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3876 addr_method = mono_marshal_get_array_address (rank, element_size);
3877 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3879 return addr;
/* Default MonoBreakPolicyFunc: honor every breakpoint request. */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
{
	return MONO_BREAK_POLICY_ALWAYS;
}

/* Currently installed break policy; replaced via mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3891 * mono_set_break_policy:
3892 * policy_callback: the new callback function
3894 * Allow embedders to decide wherther to actually obey breakpoint instructions
3895 * (both break IL instructions and Debugger.Break () method calls), for example
3896 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3897 * untrusted or semi-trusted code.
3899 * @policy_callback will be called every time a break point instruction needs to
3900 * be inserted with the method argument being the method that calls Debugger.Break()
3901 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3902 * if it wants the breakpoint to not be effective in the given method.
3903 * #MONO_BREAK_POLICY_ALWAYS is the default.
3905 void
3906 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3908 if (policy_callback)
3909 break_policy_func = policy_callback;
3910 else
3911 break_policy_func = always_insert_breakpoint;
3914 static gboolean
3915 should_insert_brekpoint (MonoMethod *method) {
3916 switch (break_policy_func (method)) {
3917 case MONO_BREAK_POLICY_ALWAYS:
3918 return TRUE;
3919 case MONO_BREAK_POLICY_NEVER:
3920 return FALSE;
3921 case MONO_BREAK_POLICY_ON_DBG:
3922 return mono_debug_using_mono_debugger ();
3923 default:
3924 g_warning ("Incorrect value returned from break policy callback");
3925 return FALSE;
/*
 * mini_emit_inst_for_method:
 *
 * Intrinsics table: try to replace a call to CMETHOD (signature FSIG,
 * argument instructions ARGS) with inline IR. Recognizes well-known corlib
 * methods on String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger and Environment, then falls back to SIMD and
 * arch-specific intrinsics. Returns the instruction producing the call's
 * value, or NULL when no intrinsic applies and a regular call must be made.
 */
static MonoInst*
mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* Looked up once and cached across compilations. */
	static MonoClass *runtime_helpers_class = NULL;
	if (! runtime_helpers_class)
		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
			"System.Runtime.CompilerServices", "RuntimeHelpers");

	if (cmethod->klass == mono_defaults.string_class) {
		if (strcmp (cmethod->name, "get_Chars") == 0) {
			/* String indexer: bounds check + 16 bit load from the chars array. */
			int dreg = alloc_ireg (cfg);
			int index_reg = alloc_preg (cfg);
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);
#if SIZEOF_REGISTER == 8
			/* The array reg is 64 bits but the index reg is only 32 */
			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
#else
			index_reg = args [1]->dreg;
#endif
			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
			/* LEA folds base + index*2 + offset into a single address computation. */
			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
			add_reg = ins->dreg;
			/* Avoid a warning */
			mult_reg = 0;
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, 0);
#else
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
#endif
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);
			/* Decompose later to allow more optimizations */
			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
			ins->type = STACK_I4;
			cfg->cbb->has_array_access = TRUE;
			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
			return ins;
		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
			int mult_reg = alloc_preg (cfg);
			int add_reg = alloc_preg (cfg);

			/* The corlib functions check for oob already. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
			return cfg->cbb->last_ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.object_class) {
		if (strcmp (cmethod->name, "GetType") == 0) {
			/* obj->vtable->type, with an implicit null check (FAULT) on obj. */
			int dreg = alloc_preg (cfg);
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
			type_from_op (ins, NULL, NULL);
			return ins;
#if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
			/* Hash derived from the object address — only valid with a
			 * non-moving collector, hence the #if guard above. */
			int dreg = alloc_ireg (cfg);
			int t1 = alloc_ireg (cfg);

			/* Scramble the pointer bits: shift then multiply by a large prime-like constant. */
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
			ins->type = STACK_I4;
			return ins;
#endif
		} else if (strcmp (cmethod->name, ".ctor") == 0) {
			/* Object..ctor is empty: a NOP suffices. */
			MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.array_class) {
		/* Quick reject: only the get_* accessors are intrinsified here. */
		if (cmethod->name [0] != 'g')
			return NULL;

		if (strcmp (cmethod->name, "get_Rank") == 0) {
			/* The rank is a byte-sized field in the vtable. */
			int dreg = alloc_ireg (cfg);
			int vtable_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
			type_from_op (ins, NULL, NULL);
			return ins;
		} else if (strcmp (cmethod->name, "get_Length") == 0) {
			int dreg = alloc_ireg (cfg);

			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
			type_from_op (ins, NULL, NULL);
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == runtime_helpers_class) {
		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
			/* Compile-time constant: offset of the character data inside MonoString. */
			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
			return ins;
		} else
			return NULL;
	} else if (cmethod->klass == mono_defaults.thread_class) {
		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
			/* Architecture-specific pause/yield hint for spin loops. */
			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
	} else if (cmethod->klass == mono_defaults.monitor_class) {
#if defined(MONO_ARCH_MONITOR_OBJECT_REG)
		if (strcmp (cmethod->name, "Enter") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				/*
				 * Pass the argument normally, the LLVM backend will handle the
				 * calling convention problems.
				 */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				/* The trampoline expects the object in a fixed register, not as a normal argument. */
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);

			return (MonoInst*)call;
		} else if (strcmp (cmethod->name, "Exit") == 0) {
			MonoCallInst *call;

			if (COMPILE_LLVM (cfg)) {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
			} else {
				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);

			return (MonoInst*)call;
#elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
		MonoMethod *fast_method = NULL;

		/* Avoid infinite recursion */
		if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
				(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
				 strcmp (cfg->method->name, "FastMonitorExit") == 0))
			return NULL;

		if (strcmp (cmethod->name, "Enter") == 0 ||
				strcmp (cmethod->name, "Exit") == 0)
			fast_method = mono_monitor_get_fast_path (cmethod);
		if (!fast_method)
			return NULL;

		return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
#endif
	} else if (mini_class_is_system_array (cmethod->klass) &&
			strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
		/* Load the element at index args[1] and store it through the byref args[2]. */
		MonoInst *addr, *store, *load;
		MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);

		addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
		return store;
	} else if (cmethod->klass->image == mono_defaults.corlib &&
			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
		ins = NULL;

#if SIZEOF_REGISTER == 8
		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
			/* 64 bit reads are already atomic */
			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
			ins->dreg = mono_alloc_preg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			MONO_ADD_INS (cfg->cbb, ins);
#endif

#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
		if (strcmp (cmethod->name, "Increment") == 0) {
			/* Increment = atomic add of the constant 1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = 1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Decrement") == 0) {
			/* Decrement = atomic add of the constant -1. */
			MonoInst *ins_iconst;
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif
			if (opcode) {
				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
				ins_iconst->inst_c0 = -1;
				ins_iconst->dreg = mono_alloc_ireg (cfg);
				MONO_ADD_INS (cfg->cbb, ins_iconst);

				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = ins_iconst->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
		} else if (strcmp (cmethod->name, "Add") == 0) {
			guint32 opcode = 0;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_ADD_NEW_I4;
#if SIZEOF_REGISTER == 8
			else if (fsig->params [0]->type == MONO_TYPE_I8)
				opcode = OP_ATOMIC_ADD_NEW_I8;
#endif

			if (opcode) {
				MONO_INST_NEW (cfg, ins, opcode);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = args [0]->dreg;
				ins->inst_offset = 0;
				ins->sreg2 = args [1]->dreg;
				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */

#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
		if (strcmp (cmethod->name, "Exchange") == 0) {
			guint32 opcode;
			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;

			if (fsig->params [0]->type == MONO_TYPE_I4)
				opcode = OP_ATOMIC_EXCHANGE_I4;
#if SIZEOF_REGISTER == 8
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
					(fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I8;
#else
			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
				opcode = OP_ATOMIC_EXCHANGE_I4;
#endif
			else
				return NULL;

			MONO_INST_NEW (cfg, ins, opcode);
			ins->dreg = mono_alloc_ireg (cfg);
			ins->inst_basereg = args [0]->dreg;
			ins->inst_offset = 0;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

			switch (fsig->params [0]->type) {
			case MONO_TYPE_I4:
				ins->type = STACK_I4;
				break;
			case MONO_TYPE_I8:
			case MONO_TYPE_I:
				ins->type = STACK_I8;
				break;
			case MONO_TYPE_OBJECT:
				ins->type = STACK_OBJ;
				break;
			default:
				g_assert_not_reached ();

#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				/* Tell the GC a reference field was overwritten. */
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
#endif
#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */

#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
			int size = 0;
			gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
			if (fsig->params [1]->type == MONO_TYPE_I4)
				size = 4;
			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
				size = sizeof (gpointer);
			/*
			 * NOTE(review): this branch re-tests MONO_TYPE_I4, which the first
			 * branch already handled, so it can never fire; MONO_TYPE_I8 looks
			 * intended (as written, the I8 overload gets size == 0 and falls
			 * into the empty else below). Confirm against upstream before fixing.
			 */
			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
				size = 8;
			if (size == 4) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I4;
				MONO_ADD_INS (cfg->cbb, ins);
			} else if (size == 8) {
				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
				ins->dreg = alloc_ireg (cfg);
				ins->sreg1 = args [0]->dreg;
				ins->sreg2 = args [1]->dreg;
				ins->sreg3 = args [2]->dreg;
				ins->type = STACK_I8;
				MONO_ADD_INS (cfg->cbb, ins);
			} else {
				/* g_assert_not_reached (); */

#if HAVE_WRITE_BARRIERS
			if (is_ref) {
				MonoInst *dummy_use;
				MonoMethod *write_barrier = mono_gc_get_write_barrier ();
				mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
				EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
#endif
#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */

		if (ins)
			return ins;
	} else if (cmethod->klass->image == mono_defaults.corlib) {
		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
			/* The embedder's break policy decides whether this becomes a real trap. */
			if (should_insert_brekpoint (cfg->method))
				MONO_INST_NEW (cfg, ins, OP_BREAK);
			else
				MONO_INST_NEW (cfg, ins, OP_NOP);
			MONO_ADD_INS (cfg->cbb, ins);
			return ins;
		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
				&& strcmp (cmethod->klass->name, "Environment") == 0) {
			/* Compile-time constant, so dead branches can be removed later. */
#ifdef TARGET_WIN32
			EMIT_NEW_ICONST (cfg, ins, 1);
#else
			EMIT_NEW_ICONST (cfg, ins, 0);
#endif
			return ins;
	} else if (cmethod->klass == mono_defaults.math_class) {
		/*
		 * There is general branches code for Min/Max, but it does not work for
		 * all inputs:
		 * http://everything2.com/?node_id=1051618
		 */

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
		if (ins)
			return ins;
#endif

	/* Final chance: architecture-specific intrinsics. */
	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4335 * This entry point could be used later for arbitrary method
4336 * redirection.
4338 inline static MonoInst*
4339 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4340 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4342 if (method->klass == mono_defaults.string_class) {
4343 /* managed string allocation support */
4344 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4345 MonoInst *iargs [2];
4346 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4347 MonoMethod *managed_alloc = NULL;
4349 g_assert (vtable); /*Should not fail since it System.String*/
4350 #ifndef MONO_CROSS_COMPILE
4351 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4352 #endif
4353 if (!managed_alloc)
4354 return NULL;
4355 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4356 iargs [1] = args [0];
4357 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4360 return NULL;
4363 static void
4364 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4366 MonoInst *store, *temp;
4367 int i;
4369 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4370 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4373 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4374 * would be different than the MonoInst's used to represent arguments, and
4375 * the ldelema implementation can't deal with that.
4376 * Solution: When ldelema is used on an inline argument, create a var for
4377 * it, emit ldelema on that var, and emit the saving code below in
4378 * inline_method () if needed.
4380 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4381 cfg->args [i] = temp;
4382 /* This uses cfg->args [i] which is set by the preceeding line */
4383 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4384 store->cil_code = sp [0]->cil_code;
4385 sp++;
4389 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4390 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4392 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4393 static gboolean
4394 check_inline_called_method_name_limit (MonoMethod *called_method)
4396 int strncmp_result;
4397 static char *limit = NULL;
4399 if (limit == NULL) {
4400 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4402 if (limit_string != NULL)
4403 limit = limit_string;
4404 else
4405 limit = (char *) "";
4408 if (limit [0] != '\0') {
4409 char *called_method_name = mono_method_full_name (called_method, TRUE);
4411 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4412 g_free (called_method_name);
4414 //return (strncmp_result <= 0);
4415 return (strncmp_result == 0);
4416 } else {
4417 return TRUE;
4420 #endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 * Return TRUE if CALLER_METHOD may inline callees: its full name must start
 * with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. The variable is
 * read once and cached; unset or empty means no restriction.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;

	if (limit == NULL) {
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = (limit_string != NULL) ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	{
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		int strncmp_result = strncmp (caller_method_name, limit, strlen (limit));

		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	}
}
#endif
/*
 * inline_method:
 *
 * Try to inline CMETHOD (signature FSIG, argument values in SP) into the
 * method being compiled, at IL offset REAL_OFFSET. On success, returns the
 * cost of the inlined body + 1 and pushes the return value (if any) onto
 * *SP; on failure returns 0 and discards the newly created bblocks.
 */
static int
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
		guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	int i, costs;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	unsigned char* prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

	/* Optional env-var driven filters for debugging inlining decisions. */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
		return 0;
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
		return 0;
#endif

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		mono_jit_stats.inlineable_methods++;
		cmethod->inline_info = 1;

	/* allocate local variables */
	cheader = mono_method_get_header (cmethod);

	if (cheader == NULL || mono_loader_get_last_error ()) {
		mono_loader_clear_error ();
		return 0;

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);

	prev_locals = cfg->locals;
	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Snapshot the per-method compile state: mono_method_to_ir () below
	 * overwrites these cfg fields while translating the callee. */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;

	/* NOTE(review): the last argument is presumably a request for a 'this'
	 * null check at callvirt call sites — confirm in mono_method_to_ir (). */
	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);

	ret_var_set = cfg->ret_var_set;

	/* Restore the caller's compile state. */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->inline_depth --;

	/* 60 is the inline cost budget; negative costs signal an aborted translation. */
	if ((costs >= 0 && costs < 60) || inline_allways) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		mono_jit_stats.inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 * optimizations.
		 */
		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];
			mono_merge_basic_blocks (cfg, prev, ebblock);
			cfg->cbb = prev;
			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
				mono_merge_basic_blocks (cfg, prev_cbb, prev);
				cfg->cbb = prev_cbb;
		} else {
			cfg->cbb = ebblock;

		if (rvar) {
			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
			if (!ret_var_set) {
				static double r8_0 = 0.0;

				switch (rvar->type) {
				case STACK_I4:
					MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
					break;
				case STACK_I8:
					MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
					break;
				case STACK_PTR:
				case STACK_MP:
				case STACK_OBJ:
					MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
					break;
				case STACK_R8:
					MONO_INST_NEW (cfg, ins, OP_R8CONST);
					ins->type = STACK_R8;
					ins->inst_p0 = (void*)&r8_0;
					ins->dreg = rvar->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					break;
				case STACK_VTYPE:
					MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
					break;
				default:
					g_assert_not_reached ();

			/* Push the inlined method's return value on the evaluation stack. */
			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
			*sp++ = ins;

		return costs + 1;
	} else {
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
		cfg->exception_type = MONO_EXCEPTION_NONE;
		mono_loader_clear_error ();

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;

	return 0;
4640 * Some of these comments may well be out-of-date.
4641 * Design decisions: we do a single pass over the IL code (and we do bblock
4642 * splitting/merging in the few cases when it's required: a back jump to an IL
4643 * address that was not already seen as bblock starting point).
4644 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4645 * Complex operations are decomposed in simpler ones right away. We need to let the
4646 * arch-specific code peek and poke inside this process somehow (except when the
4647 * optimizations can take advantage of the full semantic info of coarse opcodes).
4648 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4649 * MonoInst->opcode initially is the IL opcode or some simplification of that
4650 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4651 * opcode with value bigger than OP_LAST.
4652 * At this point the IR can be handed over to an interpreter, a dumb code generator
4653 * or to the optimizing code generator that will translate it to SSA form.
4655 * Profiling directed optimizations.
4656 * We may compile by default with few or no optimizations and instrument the code
4657 * or the user may indicate what methods to optimize the most either in a config file
4658 * or through repeated runs where the compiler applies offline the optimizations to
4659 * each method and then decides if it was worth it.
/*
 * Bail-out helpers used pervasively by mono_method_to_ir (): each jumps to
 * the 'unverified' (via UNVERIFIED) or 'load_error' label of the enclosing
 * function when the IL being decoded is malformed. 'sp', 'stack_start',
 * 'header', 'num_args', 'ip' and 'end' are locals of the using function.
 * NOTE(review): these expand to bare 'if' statements (no do/while(0)), so
 * they are subject to the dangling-else hazard — only use them as full
 * statements, never as the unbraced body of an if/else.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4674 static gboolean
4675 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4677 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4679 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 * Pre-scan the IL stream [START, END) and create a MonoBasicBlock at every
 * branch target (GET_BBLOCK allocates one on first sight), so the main
 * decode loop knows where block boundaries fall. Blocks ending in CEE_THROW
 * are marked out_of_line (cold). Returns 0 on success; on malformed IL
 * stores the failing ip in *POS and returns 1.
 */
static int
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	int i;
	guint cli_addr;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

	while (ip < end) {
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		if (i < 0)
			UNVERIFIED;
		opcode = &mono_opcodes [i];
		/* Advance past the operand; only branch operands create bblocks. */
		switch (opcode->argument) {
		case MonoInlineNone:
			ip++;
			break;
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoInlineTok:
		case MonoInlineSig:
		case MonoShortInlineR:
		case MonoInlineI:
			ip += 5;
			break;
		case MonoInlineVar:
			ip += 3;
			break;
		case MonoShortInlineVar:
		case MonoShortInlineI:
			ip += 2;
			break;
		case MonoShortInlineBrTarget:
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			ip += 2;
			/* The fall-through successor also starts a block. */
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineBrTarget:
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			ip += 5;
			if (ip < end)
				GET_BBLOCK (cfg, bblock, ip);
			break;
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			guint32 j;
			ip += 5;
			/* Switch targets are relative to the end of the whole instruction. */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
				ip += 4;
			break;
		case MonoInlineR:
		case MonoInlineI8:
			ip += 9;
			break;
		default:
			g_assert_not_reached ();

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			bblock = NULL;
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
				bb_start --;
			if (bblock)
				/* Throw-only code is cold: hint the layout pass. */
				bblock->out_of_line = 1;

	return 0;
unverified:
	*pos = ip;
	return 1;
4775 static inline MonoMethod *
4776 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4778 MonoMethod *method;
4780 if (m->wrapper_type != MONO_WRAPPER_NONE)
4781 return mono_method_get_wrapper_data (m, token);
4783 method = mono_get_method_full (m->klass->image, token, klass, context);
4785 return method;
4788 static inline MonoMethod *
4789 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4791 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4793 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4794 return NULL;
4796 return method;
4799 static inline MonoClass*
4800 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4802 MonoClass *klass;
4804 if (method->wrapper_type != MONO_WRAPPER_NONE)
4805 klass = mono_method_get_wrapper_data (method, token);
4806 else
4807 klass = mono_class_get_full (method->klass->image, token, context);
4808 if (klass)
4809 mono_class_init (klass);
4810 return klass;
4814 * Returns TRUE if the JIT should abort inlining because "callee"
4815 * is influenced by security attributes.
4817 static
4818 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4820 guint32 result;
4822 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4823 return TRUE;
4826 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4827 if (result == MONO_JIT_SECURITY_OK)
4828 return FALSE;
4830 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4831 /* Generate code to throw a SecurityException before the actual call/link */
4832 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4833 MonoInst *args [2];
4835 NEW_ICONST (cfg, args [0], 4);
4836 NEW_METHODCONST (cfg, args [1], caller);
4837 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4838 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4839 /* don't hide previous results */
4840 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4841 cfg->exception_data = result;
4842 return TRUE;
4845 return FALSE;
4848 static MonoMethod*
4849 throw_exception (void)
4851 static MonoMethod *method = NULL;
4853 if (!method) {
4854 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4855 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4857 g_assert (method);
4858 return method;
4861 static void
4862 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4864 MonoMethod *thrower = throw_exception ();
4865 MonoInst *args [1];
4867 EMIT_NEW_PCONST (cfg, args [0], ex);
4868 mono_emit_method_call (cfg, thrower, args, NULL);
4872 * Return the original method is a wrapper is specified. We can only access
4873 * the custom attributes from the original method.
4875 static MonoMethod*
4876 get_original_method (MonoMethod *method)
4878 if (method->wrapper_type == MONO_WRAPPER_NONE)
4879 return method;
4881 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4882 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4883 return NULL;
4885 /* in other cases we need to find the original method */
4886 return mono_marshal_method_from_wrapper (method);
4889 static void
4890 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4891 MonoBasicBlock *bblock, unsigned char *ip)
4893 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4894 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4895 return;
4897 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4898 caller = get_original_method (caller);
4899 if (!caller)
4900 return;
4902 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4903 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4904 emit_throw_exception (cfg, mono_get_exception_field_access ());
4907 static void
4908 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4909 MonoBasicBlock *bblock, unsigned char *ip)
4911 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4912 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4913 return;
4915 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4916 caller = get_original_method (caller);
4917 if (!caller)
4918 return;
4920 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4921 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4922 emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at IP are the compiler-generated array
 * initialization sequence (dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray) and, if so, return a pointer to the raw initializer data
 * and store its byte size in OUT_SIZE and the field token in OUT_FIELD_TOKEN.
 * Returns NULL when the pattern does not match or cannot be optimized.
 * For AOT compilation on a non-dynamic image the field RVA is returned
 * (as a pointer-sized integer) instead of a direct data pointer, so the
 * actual lookup can happen at load time.
 */
static const char*
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
{
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	/* ip [5] == 0x4 checks that the ldtoken operand is a MONO_TOKEN_FIELD_DEF (table 0x04) */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);	/* call target token */
		guint32 field_token = read32 (ip + 2);	/* ldtoken operand */
		guint32 field_index = field_token & 0xffffff;
		guint32 rva;
		const char *data_ptr;
		int size = 0;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		int dummy_align;

		if (!field)
			return NULL;

		*out_field_token = field_token;

		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		if (!cmethod)
			return NULL;
		/* the call must be exactly corlib's RuntimeHelpers.InitializeArray */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
			return NULL;
		/* determine the element size from the array element type */
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_I1:
		case MONO_TYPE_U1:
			size = 1; break;
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
		case MONO_TYPE_I2:
		case MONO_TYPE_U2:
			size = 2; break;
		case MONO_TYPE_I4:
		case MONO_TYPE_U4:
		case MONO_TYPE_R4:
			size = 4; break;
		case MONO_TYPE_R8:
#ifdef ARM_FPU_FPA
			return NULL; /* stupid ARM FP swapped format */
#endif
		/* fallthrough: R8 shares the 8-byte path with I8/U8 on non-FPA targets */
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			size = 8; break;
#endif
		default:
			return NULL;
		}
		size *= len;
		/* the initializer field must be large enough for the whole array */
		if (size > mono_type_size (field->type, &dummy_align))
			return NULL;
		*out_size = size;
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			/* field_index is 1-based in the metadata, hence the - 1 below */
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
		} else {
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			g_assert (!aot);
			/* dynamic (Reflection.Emit) images keep the data with the field itself */
			data_ptr = mono_field_get_data (field);
		}
		return data_ptr;
	}
	return NULL;
}
5009 static void
5010 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5012 char *method_fname = mono_method_full_name (method, TRUE);
5013 char *method_code;
5015 if (mono_method_get_header (method)->code_size == 0)
5016 method_code = g_strdup ("method body is empty.");
5017 else
5018 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5019 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5020 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5021 g_free (method_fname);
5022 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a caller-supplied exception object on CFG so compilation can be
 * aborted with exactly this exception.
 */
static void
set_exception_object (MonoCompile *cfg, MonoException *exception)
{
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	/* the managed exception must be GC-rooted while it is stored in the (unmanaged) MonoCompile */
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
}
5033 static gboolean
5034 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5036 MonoType *type;
5038 if (cfg->generic_sharing_context)
5039 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5040 else
5041 type = &klass->byval_arg;
5042 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value SP [0] into local variable N. When the
 * store would be a plain register move of a constant and SP [0] is the very
 * last instruction emitted in the current bblock, the move is optimized away
 * by retargeting that instruction's destination register directly.
 */
static void
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
{
	MonoInst *ins;
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	/* the last_ins check guarantees sp [0] is a whole instruction we are allowed to retarget */
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
	} else {
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
	}
}
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 *
 *   Recognize the pattern "ldloca <n>; initobj <type>" and replace it with a
 * direct zeroing of local <n> (NULL for reference types, VZERO for structs),
 * so no address of the local is ever taken. SIZE selects the short (1-byte
 * index) or long (2-byte index) ldloca encoding. Returns the IP following
 * the consumed initobj on success, NULL when the pattern does not apply
 * (the caller then emits a normal ldloca).
 * NOTE: CHECK_TYPELOAD can branch to the load_error label below on a
 * type-load failure.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
{
	int local, token;
	MonoClass *klass;

	/* decode the local index; ip is advanced past the ldloca itself */
	if (size == 1) {
		local = ip [1];
		ip += 2;
	} else {
		local = read16 (ip + 2);
		ip += 4;
	}

	/* the following opcode must be initobj (0xFE 0x15) inside the same bblock */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;

		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			/* reference type (possibly a shared type variable): initobj means store NULL */
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			/* value type: initobj means zero the whole struct */
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
		} else {
			/* other cases (e.g. primitives behind a byref) are not optimized */
			skip = TRUE;
		}
		if (!skip)
			return ip + 6;
	}
load_error:
	return NULL;
}
5105 static gboolean
5106 is_exception_class (MonoClass *class)
5108 while (class) {
5109 if (class == mono_defaults.exception_class)
5110 return TRUE;
5111 class = class->parent;
5113 return FALSE;
5117 * mono_method_to_ir:
5119 * Translate the .net IL into linear IR.
5122 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5123 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5124 guint inline_offset, gboolean is_virtual_call)
5126 MonoError error;
5127 MonoInst *ins, **sp, **stack_start;
5128 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5129 MonoSimpleBasicBlock *bb = NULL;
5130 MonoMethod *cmethod, *method_definition;
5131 MonoInst **arg_array;
5132 MonoMethodHeader *header;
5133 MonoImage *image;
5134 guint32 token, ins_flag;
5135 MonoClass *klass;
5136 MonoClass *constrained_call = NULL;
5137 unsigned char *ip, *end, *target, *err_pos;
5138 static double r8_0 = 0.0;
5139 MonoMethodSignature *sig;
5140 MonoGenericContext *generic_context = NULL;
5141 MonoGenericContainer *generic_container = NULL;
5142 MonoType **param_types;
5143 int i, n, start_new_bblock, dreg;
5144 int num_calls = 0, inline_costs = 0;
5145 int breakpoint_id = 0;
5146 guint num_args;
5147 MonoBoolean security, pinvoke;
5148 MonoSecurityManager* secman = NULL;
5149 MonoDeclSecurityActions actions;
5150 GSList *class_inits = NULL;
5151 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5152 int context_used;
5153 gboolean init_locals, seq_points, skip_dead_blocks;
5155 /* serialization and xdomain stuff may need access to private fields and methods */
5156 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5157 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5158 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5159 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5160 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5161 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5163 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5165 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5166 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5167 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5168 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5170 image = method->klass->image;
5171 header = mono_method_get_header (method);
5172 generic_container = mono_method_get_generic_container (method);
5173 sig = mono_method_signature (method);
5174 num_args = sig->hasthis + sig->param_count;
5175 ip = (unsigned char*)header->code;
5176 cfg->cil_start = ip;
5177 end = ip + header->code_size;
5178 mono_jit_stats.cil_code_size += header->code_size;
5179 init_locals = header->init_locals;
5181 seq_points = cfg->gen_seq_points && cfg->method == method;
5184 * Methods without init_locals set could cause asserts in various passes
5185 * (#497220).
5187 init_locals = TRUE;
5189 method_definition = method;
5190 while (method_definition->is_inflated) {
5191 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5192 method_definition = imethod->declaring;
5195 /* SkipVerification is not allowed if core-clr is enabled */
5196 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5197 dont_verify = TRUE;
5198 dont_verify_stloc = TRUE;
5201 if (!dont_verify && mini_method_verify (cfg, method_definition))
5202 goto exception_exit;
5204 if (mono_debug_using_mono_debugger ())
5205 cfg->keep_cil_nops = TRUE;
5207 if (sig->is_inflated)
5208 generic_context = mono_method_get_context (method);
5209 else if (generic_container)
5210 generic_context = &generic_container->context;
5211 cfg->generic_context = generic_context;
5213 if (!cfg->generic_sharing_context)
5214 g_assert (!sig->has_type_parameters);
5216 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5217 g_assert (method->is_inflated);
5218 g_assert (mono_method_get_context (method)->method_inst);
5220 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5221 g_assert (sig->generic_param_count);
5223 if (cfg->method == method) {
5224 cfg->real_offset = 0;
5225 } else {
5226 cfg->real_offset = inline_offset;
5229 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5230 cfg->cil_offset_to_bb_len = header->code_size;
5232 cfg->current_method = method;
5234 if (cfg->verbose_level > 2)
5235 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5237 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5238 if (sig->hasthis)
5239 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5240 for (n = 0; n < sig->param_count; ++n)
5241 param_types [n + sig->hasthis] = sig->params [n];
5242 cfg->arg_types = param_types;
5244 dont_inline = g_list_prepend (dont_inline, method);
5245 if (cfg->method == method) {
5247 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5248 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5250 /* ENTRY BLOCK */
5251 NEW_BBLOCK (cfg, start_bblock);
5252 cfg->bb_entry = start_bblock;
5253 start_bblock->cil_code = NULL;
5254 start_bblock->cil_length = 0;
5256 /* EXIT BLOCK */
5257 NEW_BBLOCK (cfg, end_bblock);
5258 cfg->bb_exit = end_bblock;
5259 end_bblock->cil_code = NULL;
5260 end_bblock->cil_length = 0;
5261 g_assert (cfg->num_bblocks == 2);
5263 arg_array = cfg->args;
5265 if (header->num_clauses) {
5266 cfg->spvars = g_hash_table_new (NULL, NULL);
5267 cfg->exvars = g_hash_table_new (NULL, NULL);
5269 /* handle exception clauses */
5270 for (i = 0; i < header->num_clauses; ++i) {
5271 MonoBasicBlock *try_bb;
5272 MonoExceptionClause *clause = &header->clauses [i];
5273 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5274 try_bb->real_offset = clause->try_offset;
5275 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5276 tblock->real_offset = clause->handler_offset;
5277 tblock->flags |= BB_EXCEPTION_HANDLER;
5279 link_bblock (cfg, try_bb, tblock);
5281 if (*(ip + clause->handler_offset) == CEE_POP)
5282 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5284 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5285 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5286 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5287 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5288 MONO_ADD_INS (tblock, ins);
5290 /* todo: is a fault block unsafe to optimize? */
5291 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5292 tblock->flags |= BB_EXCEPTION_UNSAFE;
5296 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5297 while (p < end) {
5298 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5300 /* catch and filter blocks get the exception object on the stack */
5301 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5302 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5303 MonoInst *dummy_use;
5305 /* mostly like handle_stack_args (), but just sets the input args */
5306 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5307 tblock->in_scount = 1;
5308 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5309 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5312 * Add a dummy use for the exvar so its liveness info will be
5313 * correct.
5315 cfg->cbb = tblock;
5316 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5318 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5319 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5320 tblock->flags |= BB_EXCEPTION_HANDLER;
5321 tblock->real_offset = clause->data.filter_offset;
5322 tblock->in_scount = 1;
5323 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5324 /* The filter block shares the exvar with the handler block */
5325 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5326 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5327 MONO_ADD_INS (tblock, ins);
5331 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5332 clause->data.catch_class &&
5333 cfg->generic_sharing_context &&
5334 mono_class_check_context_used (clause->data.catch_class)) {
5336 * In shared generic code with catch
5337 * clauses containing type variables
5338 * the exception handling code has to
5339 * be able to get to the rgctx.
5340 * Therefore we have to make sure that
5341 * the vtable/mrgctx argument (for
5342 * static or generic methods) or the
5343 * "this" argument (for non-static
5344 * methods) are live.
5346 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5347 mini_method_get_context (method)->method_inst ||
5348 method->klass->valuetype) {
5349 mono_get_vtable_var (cfg);
5350 } else {
5351 MonoInst *dummy_use;
5353 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5357 } else {
5358 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5359 cfg->cbb = start_bblock;
5360 cfg->args = arg_array;
5361 mono_save_args (cfg, sig, inline_args);
5364 /* FIRST CODE BLOCK */
5365 NEW_BBLOCK (cfg, bblock);
5366 bblock->cil_code = ip;
5367 cfg->cbb = bblock;
5368 cfg->ip = ip;
5370 ADD_BBLOCK (cfg, bblock);
5372 if (cfg->method == method) {
5373 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5374 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5375 MONO_INST_NEW (cfg, ins, OP_BREAK);
5376 MONO_ADD_INS (bblock, ins);
5380 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5381 secman = mono_security_manager_get_methods ();
5383 security = (secman && mono_method_has_declsec (method));
5384 /* at this point having security doesn't mean we have any code to generate */
5385 if (security && (cfg->method == method)) {
5386 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5387 * And we do not want to enter the next section (with allocation) if we
5388 * have nothing to generate */
5389 security = mono_declsec_get_demands (method, &actions);
5392 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5393 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5394 if (pinvoke) {
5395 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5396 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5397 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5399 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5400 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5401 pinvoke = FALSE;
5403 if (custom)
5404 mono_custom_attrs_free (custom);
5406 if (pinvoke) {
5407 custom = mono_custom_attrs_from_class (wrapped->klass);
5408 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5409 pinvoke = FALSE;
5411 if (custom)
5412 mono_custom_attrs_free (custom);
5414 } else {
5415 /* not a P/Invoke after all */
5416 pinvoke = FALSE;
5420 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5421 /* we use a separate basic block for the initialization code */
5422 NEW_BBLOCK (cfg, init_localsbb);
5423 cfg->bb_init = init_localsbb;
5424 init_localsbb->real_offset = cfg->real_offset;
5425 start_bblock->next_bb = init_localsbb;
5426 init_localsbb->next_bb = bblock;
5427 link_bblock (cfg, start_bblock, init_localsbb);
5428 link_bblock (cfg, init_localsbb, bblock);
5430 cfg->cbb = init_localsbb;
5431 } else {
5432 start_bblock->next_bb = bblock;
5433 link_bblock (cfg, start_bblock, bblock);
5436 /* at this point we know, if security is TRUE, that some code needs to be generated */
5437 if (security && (cfg->method == method)) {
5438 MonoInst *args [2];
5440 mono_jit_stats.cas_demand_generation++;
5442 if (actions.demand.blob) {
5443 /* Add code for SecurityAction.Demand */
5444 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5445 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5446 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5447 mono_emit_method_call (cfg, secman->demand, args, NULL);
5449 if (actions.noncasdemand.blob) {
5450 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5451 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5452 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5453 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5454 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5455 mono_emit_method_call (cfg, secman->demand, args, NULL);
5457 if (actions.demandchoice.blob) {
5458 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5459 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5460 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5461 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5462 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5466 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5467 if (pinvoke) {
5468 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5471 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5472 /* check if this is native code, e.g. an icall or a p/invoke */
5473 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5474 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5475 if (wrapped) {
5476 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5477 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5479 /* if this ia a native call then it can only be JITted from platform code */
5480 if ((icall || pinvk) && method->klass && method->klass->image) {
5481 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5482 MonoException *ex = icall ? mono_get_exception_security () :
5483 mono_get_exception_method_access ();
5484 emit_throw_exception (cfg, ex);
5491 if (header->code_size == 0)
5492 UNVERIFIED;
5494 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5495 ip = err_pos;
5496 UNVERIFIED;
5499 if (cfg->method == method)
5500 mono_debug_init_method (cfg, bblock, breakpoint_id);
5502 for (n = 0; n < header->num_locals; ++n) {
5503 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5504 UNVERIFIED;
5506 class_inits = NULL;
5508 /* We force the vtable variable here for all shared methods
5509 for the possibility that they might show up in a stack
5510 trace where their exact instantiation is needed. */
5511 if (cfg->generic_sharing_context && method == cfg->method) {
5512 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5513 mini_method_get_context (method)->method_inst ||
5514 method->klass->valuetype) {
5515 mono_get_vtable_var (cfg);
5516 } else {
5517 /* FIXME: Is there a better way to do this?
5518 We need the variable live for the duration
5519 of the whole method. */
5520 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5524 /* add a check for this != NULL to inlined methods */
5525 if (is_virtual_call) {
5526 MonoInst *arg_ins;
5528 NEW_ARGLOAD (cfg, arg_ins, 0);
5529 MONO_ADD_INS (cfg->cbb, arg_ins);
5530 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5533 skip_dead_blocks = !dont_verify;
5534 if (skip_dead_blocks) {
5535 bb = mono_basic_block_split (method, &error);
5536 if (!mono_error_ok (&error)) {
5537 mono_error_cleanup (&error);
5538 UNVERIFIED;
5540 g_assert (bb);
5543 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5544 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5546 ins_flag = 0;
5547 start_new_bblock = 0;
5548 cfg->cbb = bblock;
5549 while (ip < end) {
5550 if (cfg->method == method)
5551 cfg->real_offset = ip - header->code;
5552 else
5553 cfg->real_offset = inline_offset;
5554 cfg->ip = ip;
5556 context_used = 0;
5558 if (start_new_bblock) {
5559 bblock->cil_length = ip - bblock->cil_code;
5560 if (start_new_bblock == 2) {
5561 g_assert (ip == tblock->cil_code);
5562 } else {
5563 GET_BBLOCK (cfg, tblock, ip);
5565 bblock->next_bb = tblock;
5566 bblock = tblock;
5567 cfg->cbb = bblock;
5568 start_new_bblock = 0;
5569 for (i = 0; i < bblock->in_scount; ++i) {
5570 if (cfg->verbose_level > 3)
5571 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5572 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5573 *sp++ = ins;
5575 if (class_inits)
5576 g_slist_free (class_inits);
5577 class_inits = NULL;
5578 } else {
5579 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5580 link_bblock (cfg, bblock, tblock);
5581 if (sp != stack_start) {
5582 handle_stack_args (cfg, stack_start, sp - stack_start);
5583 sp = stack_start;
5584 CHECK_UNVERIFIABLE (cfg);
5586 bblock->next_bb = tblock;
5587 bblock = tblock;
5588 cfg->cbb = bblock;
5589 for (i = 0; i < bblock->in_scount; ++i) {
5590 if (cfg->verbose_level > 3)
5591 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5592 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5593 *sp++ = ins;
5595 g_slist_free (class_inits);
5596 class_inits = NULL;
5600 if (skip_dead_blocks) {
5601 int ip_offset = ip - header->code;
5603 if (ip_offset == bb->end)
5604 bb = bb->next;
5606 if (bb->dead) {
5607 int op_size = mono_opcode_size (ip, end);
5608 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5610 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5612 if (ip_offset + op_size == bb->end) {
5613 MONO_INST_NEW (cfg, ins, OP_NOP);
5614 MONO_ADD_INS (bblock, ins);
5615 start_new_bblock = 1;
5618 ip += op_size;
5619 continue;
5623 * Sequence points are points where the debugger can place a breakpoint.
5624 * Currently, we generate these automatically at points where the IL
5625 * stack is empty.
5627 if (seq_points && sp == stack_start) {
5628 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5629 MONO_ADD_INS (cfg->cbb, ins);
5632 bblock->real_offset = cfg->real_offset;
5634 if ((cfg->method == method) && cfg->coverage_info) {
5635 guint32 cil_offset = ip - header->code;
5636 cfg->coverage_info->data [cil_offset].cil_code = ip;
5638 /* TODO: Use an increment here */
5639 #if defined(TARGET_X86)
5640 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5641 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5642 ins->inst_imm = 1;
5643 MONO_ADD_INS (cfg->cbb, ins);
5644 #else
5645 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5646 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5647 #endif
5650 if (cfg->verbose_level > 3)
5651 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5653 switch (*ip) {
5654 case CEE_NOP:
5655 if (cfg->keep_cil_nops)
5656 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5657 else
5658 MONO_INST_NEW (cfg, ins, OP_NOP);
5659 ip++;
5660 MONO_ADD_INS (bblock, ins);
5661 break;
5662 case CEE_BREAK:
5663 if (should_insert_brekpoint (cfg->method))
5664 MONO_INST_NEW (cfg, ins, OP_BREAK);
5665 else
5666 MONO_INST_NEW (cfg, ins, OP_NOP);
5667 ip++;
5668 MONO_ADD_INS (bblock, ins);
5669 break;
5670 case CEE_LDARG_0:
5671 case CEE_LDARG_1:
5672 case CEE_LDARG_2:
5673 case CEE_LDARG_3:
5674 CHECK_STACK_OVF (1);
5675 n = (*ip)-CEE_LDARG_0;
5676 CHECK_ARG (n);
5677 EMIT_NEW_ARGLOAD (cfg, ins, n);
5678 ip++;
5679 *sp++ = ins;
5680 break;
5681 case CEE_LDLOC_0:
5682 case CEE_LDLOC_1:
5683 case CEE_LDLOC_2:
5684 case CEE_LDLOC_3:
5685 CHECK_STACK_OVF (1);
5686 n = (*ip)-CEE_LDLOC_0;
5687 CHECK_LOCAL (n);
5688 EMIT_NEW_LOCLOAD (cfg, ins, n);
5689 ip++;
5690 *sp++ = ins;
5691 break;
5692 case CEE_STLOC_0:
5693 case CEE_STLOC_1:
5694 case CEE_STLOC_2:
5695 case CEE_STLOC_3: {
5696 CHECK_STACK (1);
5697 n = (*ip)-CEE_STLOC_0;
5698 CHECK_LOCAL (n);
5699 --sp;
5700 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5701 UNVERIFIED;
5702 emit_stloc_ir (cfg, sp, header, n);
5703 ++ip;
5704 inline_costs += 1;
5705 break;
5707 case CEE_LDARG_S:
5708 CHECK_OPSIZE (2);
5709 CHECK_STACK_OVF (1);
5710 n = ip [1];
5711 CHECK_ARG (n);
5712 EMIT_NEW_ARGLOAD (cfg, ins, n);
5713 *sp++ = ins;
5714 ip += 2;
5715 break;
5716 case CEE_LDARGA_S:
5717 CHECK_OPSIZE (2);
5718 CHECK_STACK_OVF (1);
5719 n = ip [1];
5720 CHECK_ARG (n);
5721 NEW_ARGLOADA (cfg, ins, n);
5722 MONO_ADD_INS (cfg->cbb, ins);
5723 *sp++ = ins;
5724 ip += 2;
5725 break;
5726 case CEE_STARG_S:
5727 CHECK_OPSIZE (2);
5728 CHECK_STACK (1);
5729 --sp;
5730 n = ip [1];
5731 CHECK_ARG (n);
5732 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5733 UNVERIFIED;
5734 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5735 ip += 2;
5736 break;
5737 case CEE_LDLOC_S:
5738 CHECK_OPSIZE (2);
5739 CHECK_STACK_OVF (1);
5740 n = ip [1];
5741 CHECK_LOCAL (n);
5742 EMIT_NEW_LOCLOAD (cfg, ins, n);
5743 *sp++ = ins;
5744 ip += 2;
5745 break;
5746 case CEE_LDLOCA_S: {
5747 unsigned char *tmp_ip;
5748 CHECK_OPSIZE (2);
5749 CHECK_STACK_OVF (1);
5750 CHECK_LOCAL (ip [1]);
5752 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5753 ip = tmp_ip;
5754 inline_costs += 1;
5755 break;
5758 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5759 *sp++ = ins;
5760 ip += 2;
5761 break;
5763 case CEE_STLOC_S:
5764 CHECK_OPSIZE (2);
5765 CHECK_STACK (1);
5766 --sp;
5767 CHECK_LOCAL (ip [1]);
5768 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5769 UNVERIFIED;
5770 emit_stloc_ir (cfg, sp, header, ip [1]);
5771 ip += 2;
5772 inline_costs += 1;
5773 break;
5774 case CEE_LDNULL:
5775 CHECK_STACK_OVF (1);
5776 EMIT_NEW_PCONST (cfg, ins, NULL);
5777 ins->type = STACK_OBJ;
5778 ++ip;
5779 *sp++ = ins;
5780 break;
5781 case CEE_LDC_I4_M1:
5782 CHECK_STACK_OVF (1);
5783 EMIT_NEW_ICONST (cfg, ins, -1);
5784 ++ip;
5785 *sp++ = ins;
5786 break;
5787 case CEE_LDC_I4_0:
5788 case CEE_LDC_I4_1:
5789 case CEE_LDC_I4_2:
5790 case CEE_LDC_I4_3:
5791 case CEE_LDC_I4_4:
5792 case CEE_LDC_I4_5:
5793 case CEE_LDC_I4_6:
5794 case CEE_LDC_I4_7:
5795 case CEE_LDC_I4_8:
5796 CHECK_STACK_OVF (1);
5797 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5798 ++ip;
5799 *sp++ = ins;
5800 break;
5801 case CEE_LDC_I4_S:
5802 CHECK_OPSIZE (2);
5803 CHECK_STACK_OVF (1);
5804 ++ip;
5805 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5806 ++ip;
5807 *sp++ = ins;
5808 break;
5809 case CEE_LDC_I4:
5810 CHECK_OPSIZE (5);
5811 CHECK_STACK_OVF (1);
5812 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5813 ip += 5;
5814 *sp++ = ins;
5815 break;
5816 case CEE_LDC_I8:
5817 CHECK_OPSIZE (9);
5818 CHECK_STACK_OVF (1);
5819 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5820 ins->type = STACK_I8;
5821 ins->dreg = alloc_dreg (cfg, STACK_I8);
5822 ++ip;
5823 ins->inst_l = (gint64)read64 (ip);
5824 MONO_ADD_INS (bblock, ins);
5825 ip += 8;
5826 *sp++ = ins;
5827 break;
5828 case CEE_LDC_R4: {
5829 float *f;
5830 gboolean use_aotconst = FALSE;
5832 #ifdef TARGET_POWERPC
5833 /* FIXME: Clean this up */
5834 if (cfg->compile_aot)
5835 use_aotconst = TRUE;
5836 #endif
5838 /* FIXME: we should really allocate this only late in the compilation process */
5839 f = mono_domain_alloc (cfg->domain, sizeof (float));
5840 CHECK_OPSIZE (5);
5841 CHECK_STACK_OVF (1);
5843 if (use_aotconst) {
5844 MonoInst *cons;
5845 int dreg;
5847 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5849 dreg = alloc_freg (cfg);
5850 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5851 ins->type = STACK_R8;
5852 } else {
5853 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5854 ins->type = STACK_R8;
5855 ins->dreg = alloc_dreg (cfg, STACK_R8);
5856 ins->inst_p0 = f;
5857 MONO_ADD_INS (bblock, ins);
5859 ++ip;
5860 readr4 (ip, f);
5861 ip += 4;
5862 *sp++ = ins;
5863 break;
5865 case CEE_LDC_R8: {
5866 double *d;
5867 gboolean use_aotconst = FALSE;
5869 #ifdef TARGET_POWERPC
5870 /* FIXME: Clean this up */
5871 if (cfg->compile_aot)
5872 use_aotconst = TRUE;
5873 #endif
5875 /* FIXME: we should really allocate this only late in the compilation process */
5876 d = mono_domain_alloc (cfg->domain, sizeof (double));
5877 CHECK_OPSIZE (9);
5878 CHECK_STACK_OVF (1);
5880 if (use_aotconst) {
5881 MonoInst *cons;
5882 int dreg;
5884 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5886 dreg = alloc_freg (cfg);
5887 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5888 ins->type = STACK_R8;
5889 } else {
5890 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5891 ins->type = STACK_R8;
5892 ins->dreg = alloc_dreg (cfg, STACK_R8);
5893 ins->inst_p0 = d;
5894 MONO_ADD_INS (bblock, ins);
5896 ++ip;
5897 readr8 (ip, d);
5898 ip += 8;
5899 *sp++ = ins;
5900 break;
5902 case CEE_DUP: {
5903 MonoInst *temp, *store;
5904 CHECK_STACK (1);
5905 CHECK_STACK_OVF (1);
5906 sp--;
5907 ins = *sp;
5909 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5910 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5912 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5913 *sp++ = ins;
5915 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5916 *sp++ = ins;
5918 ++ip;
5919 inline_costs += 2;
5920 break;
5922 case CEE_POP:
5923 CHECK_STACK (1);
5924 ip++;
5925 --sp;
5927 #ifdef TARGET_X86
5928 if (sp [0]->type == STACK_R8)
5929 /* we need to pop the value from the x86 FP stack */
5930 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5931 #endif
5932 break;
5933 case CEE_JMP: {
5934 MonoCallInst *call;
5936 INLINE_FAILURE;
5938 CHECK_OPSIZE (5);
5939 if (stack_start != sp)
5940 UNVERIFIED;
5941 token = read32 (ip + 1);
5942 /* FIXME: check the signature matches */
5943 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5945 if (!cmethod)
5946 goto load_error;
5948 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5949 GENERIC_SHARING_FAILURE (CEE_JMP);
5951 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5952 CHECK_CFG_EXCEPTION;
5954 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5956 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5957 int i, n;
5959 /* Handle tail calls similarly to calls */
5960 n = fsig->param_count + fsig->hasthis;
5962 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5963 call->method = cmethod;
5964 call->tail_call = TRUE;
5965 call->signature = mono_method_signature (cmethod);
5966 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5967 call->inst.inst_p0 = cmethod;
5968 for (i = 0; i < n; ++i)
5969 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5971 mono_arch_emit_call (cfg, call);
5972 MONO_ADD_INS (bblock, (MonoInst*)call);
5974 #else
5975 for (i = 0; i < num_args; ++i)
5976 /* Prevent arguments from being optimized away */
5977 arg_array [i]->flags |= MONO_INST_VOLATILE;
5979 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5980 ins = (MonoInst*)call;
5981 ins->inst_p0 = cmethod;
5982 MONO_ADD_INS (bblock, ins);
5983 #endif
5985 ip += 5;
5986 start_new_bblock = 1;
5987 break;
5989 case CEE_CALLI:
5990 case CEE_CALL:
5991 case CEE_CALLVIRT: {
5992 MonoInst *addr = NULL;
5993 MonoMethodSignature *fsig = NULL;
5994 int array_rank = 0;
5995 int virtual = *ip == CEE_CALLVIRT;
5996 int calli = *ip == CEE_CALLI;
5997 gboolean pass_imt_from_rgctx = FALSE;
5998 MonoInst *imt_arg = NULL;
5999 gboolean pass_vtable = FALSE;
6000 gboolean pass_mrgctx = FALSE;
6001 MonoInst *vtable_arg = NULL;
6002 gboolean check_this = FALSE;
6003 gboolean supported_tail_call = FALSE;
6005 CHECK_OPSIZE (5);
6006 token = read32 (ip + 1);
6008 if (calli) {
6009 cmethod = NULL;
6010 CHECK_STACK (1);
6011 --sp;
6012 addr = *sp;
6013 if (method->wrapper_type != MONO_WRAPPER_NONE)
6014 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6015 else
6016 fsig = mono_metadata_parse_signature (image, token);
6018 n = fsig->param_count + fsig->hasthis;
6020 if (method->dynamic && fsig->pinvoke) {
6021 MonoInst *args [3];
6024 * This is a call through a function pointer using a pinvoke
6025 * signature. Have to create a wrapper and call that instead.
6026 * FIXME: This is very slow, need to create a wrapper at JIT time
6027 * instead based on the signature.
6029 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6030 EMIT_NEW_PCONST (cfg, args [1], fsig);
6031 args [2] = addr;
6032 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6034 } else {
6035 MonoMethod *cil_method;
6037 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6038 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6039 cil_method = cmethod;
6040 } else if (constrained_call) {
6041 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6043 * This is needed since get_method_constrained can't find
6044 * the method in klass representing a type var.
6045 * The type var is guaranteed to be a reference type in this
6046 * case.
6048 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6049 cil_method = cmethod;
6050 g_assert (!cmethod->klass->valuetype);
6051 } else {
6052 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6054 } else {
6055 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6056 cil_method = cmethod;
6059 if (!cmethod)
6060 goto load_error;
6061 if (!dont_verify && !cfg->skip_visibility) {
6062 MonoMethod *target_method = cil_method;
6063 if (method->is_inflated) {
6064 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6066 if (!mono_method_can_access_method (method_definition, target_method) &&
6067 !mono_method_can_access_method (method, cil_method))
6068 METHOD_ACCESS_FAILURE;
6071 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6072 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6074 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6075 /* MS.NET seems to silently convert this to a callvirt */
6076 virtual = 1;
6078 if (!cmethod->klass->inited)
6079 if (!mono_class_init (cmethod->klass))
6080 goto load_error;
6082 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6083 mini_class_is_system_array (cmethod->klass)) {
6084 array_rank = cmethod->klass->rank;
6085 fsig = mono_method_signature (cmethod);
6086 } else {
6087 if (mono_method_signature (cmethod)->pinvoke) {
6088 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6089 check_for_pending_exc, FALSE);
6090 fsig = mono_method_signature (wrapper);
6091 } else if (constrained_call) {
6092 fsig = mono_method_signature (cmethod);
6093 } else {
6094 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6098 mono_save_token_info (cfg, image, token, cil_method);
6100 n = fsig->param_count + fsig->hasthis;
6102 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6103 if (check_linkdemand (cfg, method, cmethod))
6104 INLINE_FAILURE;
6105 CHECK_CFG_EXCEPTION;
6108 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6109 g_assert_not_reached ();
6112 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6113 UNVERIFIED;
6115 if (!cfg->generic_sharing_context && cmethod)
6116 g_assert (!mono_method_check_context_used (cmethod));
6118 CHECK_STACK (n);
6120 //g_assert (!virtual || fsig->hasthis);
6122 sp -= n;
6124 if (constrained_call) {
6126 * We have the `constrained.' prefix opcode.
6128 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6130 * The type parameter is instantiated as a valuetype,
6131 * but that type doesn't override the method we're
6132 * calling, so we need to box `this'.
6134 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6135 ins->klass = constrained_call;
6136 sp [0] = handle_box (cfg, ins, constrained_call);
6137 CHECK_CFG_EXCEPTION;
6138 } else if (!constrained_call->valuetype) {
6139 int dreg = alloc_preg (cfg);
6142 * The type parameter is instantiated as a reference
6143 * type. We have a managed pointer on the stack, so
6144 * we need to dereference it here.
6146 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6147 ins->type = STACK_OBJ;
6148 sp [0] = ins;
6149 } else if (cmethod->klass->valuetype)
6150 virtual = 0;
6151 constrained_call = NULL;
6154 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6155 UNVERIFIED;
6158 * If the callee is a shared method, then its static cctor
6159 * might not get called after the call was patched.
6161 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6162 emit_generic_class_init (cfg, cmethod->klass);
6163 CHECK_TYPELOAD (cmethod->klass);
6166 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6167 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6168 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6169 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6170 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6173 * Pass vtable iff target method might
6174 * be shared, which means that sharing
6175 * is enabled for its class and its
6176 * context is sharable (and it's not a
6177 * generic method).
6179 if (sharing_enabled && context_sharable &&
6180 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6181 pass_vtable = TRUE;
6184 if (cmethod && mini_method_get_context (cmethod) &&
6185 mini_method_get_context (cmethod)->method_inst) {
6186 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6187 MonoGenericContext *context = mini_method_get_context (cmethod);
6188 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6190 g_assert (!pass_vtable);
6192 if (sharing_enabled && context_sharable)
6193 pass_mrgctx = TRUE;
6196 if (cfg->generic_sharing_context && cmethod) {
6197 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6199 context_used = mono_method_check_context_used (cmethod);
6201 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6202 /* Generic method interface
6203 calls are resolved via a
6204 helper function and don't
6205 need an imt. */
6206 if (!cmethod_context || !cmethod_context->method_inst)
6207 pass_imt_from_rgctx = TRUE;
6211 * If a shared method calls another
6212 * shared method then the caller must
6213 * have a generic sharing context
6214 * because the magic trampoline
6215 * requires it. FIXME: We shouldn't
6216 * have to force the vtable/mrgctx
6217 * variable here. Instead there
6218 * should be a flag in the cfg to
6219 * request a generic sharing context.
6221 if (context_used &&
6222 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6223 mono_get_vtable_var (cfg);
6226 if (pass_vtable) {
6227 if (context_used) {
6228 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6229 } else {
6230 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6232 CHECK_TYPELOAD (cmethod->klass);
6233 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6237 if (pass_mrgctx) {
6238 g_assert (!vtable_arg);
6240 if (!cfg->compile_aot) {
6242 * emit_get_rgctx_method () calls mono_class_vtable () so check
6243 * for type load errors before.
6245 mono_class_setup_vtable (cmethod->klass);
6246 CHECK_TYPELOAD (cmethod->klass);
6249 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6251 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6252 MONO_METHOD_IS_FINAL (cmethod)) {
6253 if (virtual)
6254 check_this = TRUE;
6255 virtual = 0;
6259 if (pass_imt_from_rgctx) {
6260 g_assert (!pass_vtable);
6261 g_assert (cmethod);
6263 imt_arg = emit_get_rgctx_method (cfg, context_used,
6264 cmethod, MONO_RGCTX_INFO_METHOD);
6267 if (check_this)
6268 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6270 /* Calling virtual generic methods */
6271 if (cmethod && virtual &&
6272 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6273 !(MONO_METHOD_IS_FINAL (cmethod) &&
6274 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6275 mono_method_signature (cmethod)->generic_param_count) {
6276 MonoInst *this_temp, *this_arg_temp, *store;
6277 MonoInst *iargs [4];
6279 g_assert (mono_method_signature (cmethod)->is_inflated);
6281 /* Prevent inlining of methods that contain indirect calls */
6282 INLINE_FAILURE;
6284 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6285 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6286 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6287 g_assert (!imt_arg);
6288 if (!context_used)
6289 g_assert (cmethod->is_inflated);
6290 imt_arg = emit_get_rgctx_method (cfg, context_used,
6291 cmethod, MONO_RGCTX_INFO_METHOD);
6292 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6293 } else
6294 #endif
6296 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6297 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6298 MONO_ADD_INS (bblock, store);
6300 /* FIXME: This should be a managed pointer */
6301 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6303 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6304 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6305 cmethod, MONO_RGCTX_INFO_METHOD);
6306 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6307 addr = mono_emit_jit_icall (cfg,
6308 mono_helper_compile_generic_method, iargs);
6310 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6312 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6315 if (!MONO_TYPE_IS_VOID (fsig->ret))
6316 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6318 ip += 5;
6319 ins_flag = 0;
6320 break;
6323 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6324 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6325 #else
6326 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6327 #endif
6329 /* Tail prefix */
6330 /* FIXME: runtime generic context pointer for jumps? */
6331 /* FIXME: handle this for generic sharing eventually */
6332 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6333 MonoCallInst *call;
6335 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6336 INLINE_FAILURE;
6338 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6339 /* Handle tail calls similarly to calls */
6340 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6341 #else
6342 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6343 call->tail_call = TRUE;
6344 call->method = cmethod;
6345 call->signature = mono_method_signature (cmethod);
6348 * We implement tail calls by storing the actual arguments into the
6349 * argument variables, then emitting a CEE_JMP.
6351 for (i = 0; i < n; ++i) {
6352 /* Prevent argument from being register allocated */
6353 arg_array [i]->flags |= MONO_INST_VOLATILE;
6354 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6356 #endif
6358 ins = (MonoInst*)call;
6359 ins->inst_p0 = cmethod;
6360 ins->inst_p1 = arg_array [0];
6361 MONO_ADD_INS (bblock, ins);
6362 link_bblock (cfg, bblock, end_bblock);
6363 start_new_bblock = 1;
6364 /* skip CEE_RET as well */
6365 ip += 6;
6366 ins_flag = 0;
6367 break;
6370 /* Conversion to a JIT intrinsic */
6371 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6372 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6373 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6374 *sp = ins;
6375 sp++;
6378 ip += 5;
6379 ins_flag = 0;
6380 break;
6383 /* Inlining */
6384 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6385 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6386 mono_method_check_inlining (cfg, cmethod) &&
6387 !g_list_find (dont_inline, cmethod)) {
6388 int costs;
6389 gboolean allways = FALSE;
6391 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6392 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6393 /* Prevent inlining of methods that call wrappers */
6394 INLINE_FAILURE;
6395 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6396 allways = TRUE;
6399 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6400 ip += 5;
6401 cfg->real_offset += 5;
6402 bblock = cfg->cbb;
6404 if (!MONO_TYPE_IS_VOID (fsig->ret))
6405 /* *sp is already set by inline_method */
6406 sp++;
6408 inline_costs += costs;
6409 ins_flag = 0;
6410 break;
6414 inline_costs += 10 * num_calls++;
6416 /* Tail recursion elimination */
6417 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6418 gboolean has_vtargs = FALSE;
6419 int i;
6421 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6422 INLINE_FAILURE;
6424 /* keep it simple */
6425 for (i = fsig->param_count - 1; i >= 0; i--) {
6426 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6427 has_vtargs = TRUE;
6430 if (!has_vtargs) {
6431 for (i = 0; i < n; ++i)
6432 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6433 MONO_INST_NEW (cfg, ins, OP_BR);
6434 MONO_ADD_INS (bblock, ins);
6435 tblock = start_bblock->out_bb [0];
6436 link_bblock (cfg, bblock, tblock);
6437 ins->inst_target_bb = tblock;
6438 start_new_bblock = 1;
6440 /* skip the CEE_RET, too */
6441 if (ip_in_bb (cfg, bblock, ip + 5))
6442 ip += 6;
6443 else
6444 ip += 5;
6446 ins_flag = 0;
6447 break;
6451 /* Generic sharing */
6452 /* FIXME: only do this for generic methods if
6453 they are not shared! */
6454 if (context_used && !imt_arg && !array_rank &&
6455 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6456 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6457 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6458 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6459 INLINE_FAILURE;
6461 g_assert (cfg->generic_sharing_context && cmethod);
6462 g_assert (!addr);
6465 * We are compiling a call to a
6466 * generic method from shared code,
6467 * which means that we have to look up
6468 * the method in the rgctx and do an
6469 * indirect call.
6471 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6474 /* Indirect calls */
6475 if (addr) {
6476 g_assert (!imt_arg);
6478 if (*ip == CEE_CALL)
6479 g_assert (context_used);
6480 else if (*ip == CEE_CALLI)
6481 g_assert (!vtable_arg);
6482 else
6483 /* FIXME: what the hell is this??? */
6484 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6485 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6487 /* Prevent inlining of methods with indirect calls */
6488 INLINE_FAILURE;
6490 if (vtable_arg) {
6491 #ifdef MONO_ARCH_RGCTX_REG
6492 MonoCallInst *call;
6493 int rgctx_reg = mono_alloc_preg (cfg);
6495 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6496 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6497 call = (MonoCallInst*)ins;
6498 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6499 cfg->uses_rgctx_reg = TRUE;
6500 call->rgctx_reg = TRUE;
6501 #else
6502 NOT_IMPLEMENTED;
6503 #endif
6504 } else {
6505 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6507 * Instead of emitting an indirect call, emit a direct call
6508 * with the contents of the aotconst as the patch info.
6510 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6511 NULLIFY_INS (addr);
6512 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6513 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6514 NULLIFY_INS (addr);
6515 } else {
6516 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6519 if (!MONO_TYPE_IS_VOID (fsig->ret))
6520 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6522 ip += 5;
6523 ins_flag = 0;
6524 break;
6527 /* Array methods */
6528 if (array_rank) {
6529 MonoInst *addr;
6531 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6532 if (sp [fsig->param_count]->type == STACK_OBJ) {
6533 MonoInst *iargs [2];
6535 iargs [0] = sp [0];
6536 iargs [1] = sp [fsig->param_count];
6538 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6541 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6542 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6543 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6544 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6546 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6548 *sp++ = ins;
6549 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6550 if (!cmethod->klass->element_class->valuetype && !readonly)
6551 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6552 CHECK_TYPELOAD (cmethod->klass);
6554 readonly = FALSE;
6555 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6556 *sp++ = addr;
6557 } else {
6558 g_assert_not_reached ();
6561 ip += 5;
6562 ins_flag = 0;
6563 break;
6566 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6567 if (ins) {
6568 if (!MONO_TYPE_IS_VOID (fsig->ret))
6569 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6571 ip += 5;
6572 ins_flag = 0;
6573 break;
6576 /* Common call */
6577 INLINE_FAILURE;
6578 if (vtable_arg) {
6579 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6580 NULL, vtable_arg);
6581 } else if (imt_arg) {
6582 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6583 } else {
6584 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6587 if (!MONO_TYPE_IS_VOID (fsig->ret))
6588 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6590 ip += 5;
6591 ins_flag = 0;
6592 break;
6594 case CEE_RET:
6595 if (cfg->method != method) {
6596 /* return from inlined method */
6598 * If in_count == 0, that means the ret is unreachable due to
6599 * being preceeded by a throw. In that case, inline_method () will
6600 * handle setting the return value
6601 * (test case: test_0_inline_throw ()).
6603 if (return_var && cfg->cbb->in_count) {
6604 MonoInst *store;
6605 CHECK_STACK (1);
6606 --sp;
6607 //g_assert (returnvar != -1);
6608 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6609 cfg->ret_var_set = TRUE;
6611 } else {
6612 if (cfg->ret) {
6613 MonoType *ret_type = mono_method_signature (method)->ret;
6615 if (seq_points) {
6617 * Place a seq point here too even through the IL stack is not
6618 * empty, so a step over on
6619 * call <FOO>
6620 * ret
6621 * will work correctly.
6623 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6624 MONO_ADD_INS (cfg->cbb, ins);
6627 g_assert (!return_var);
6628 CHECK_STACK (1);
6629 --sp;
6630 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6631 MonoInst *ret_addr;
6633 if (!cfg->vret_addr) {
6634 MonoInst *ins;
6636 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6637 } else {
6638 EMIT_NEW_RETLOADA (cfg, ret_addr);
6640 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6641 ins->klass = mono_class_from_mono_type (ret_type);
6643 } else {
6644 #ifdef MONO_ARCH_SOFT_FLOAT
6645 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6646 MonoInst *iargs [1];
6647 MonoInst *conv;
6649 iargs [0] = *sp;
6650 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6651 mono_arch_emit_setret (cfg, method, conv);
6652 } else {
6653 mono_arch_emit_setret (cfg, method, *sp);
6655 #else
6656 mono_arch_emit_setret (cfg, method, *sp);
6657 #endif
6661 if (sp != stack_start)
6662 UNVERIFIED;
6663 MONO_INST_NEW (cfg, ins, OP_BR);
6664 ip++;
6665 ins->inst_target_bb = end_bblock;
6666 MONO_ADD_INS (bblock, ins);
6667 link_bblock (cfg, bblock, end_bblock);
6668 start_new_bblock = 1;
6669 break;
6670 case CEE_BR_S:
6671 CHECK_OPSIZE (2);
6672 MONO_INST_NEW (cfg, ins, OP_BR);
6673 ip++;
6674 target = ip + 1 + (signed char)(*ip);
6675 ++ip;
6676 GET_BBLOCK (cfg, tblock, target);
6677 link_bblock (cfg, bblock, tblock);
6678 ins->inst_target_bb = tblock;
6679 if (sp != stack_start) {
6680 handle_stack_args (cfg, stack_start, sp - stack_start);
6681 sp = stack_start;
6682 CHECK_UNVERIFIABLE (cfg);
6684 MONO_ADD_INS (bblock, ins);
6685 start_new_bblock = 1;
6686 inline_costs += BRANCH_COST;
6687 break;
6688 case CEE_BEQ_S:
6689 case CEE_BGE_S:
6690 case CEE_BGT_S:
6691 case CEE_BLE_S:
6692 case CEE_BLT_S:
6693 case CEE_BNE_UN_S:
6694 case CEE_BGE_UN_S:
6695 case CEE_BGT_UN_S:
6696 case CEE_BLE_UN_S:
6697 case CEE_BLT_UN_S:
6698 CHECK_OPSIZE (2);
6699 CHECK_STACK (2);
6700 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6701 ip++;
6702 target = ip + 1 + *(signed char*)ip;
6703 ip++;
6705 ADD_BINCOND (NULL);
6707 sp = stack_start;
6708 inline_costs += BRANCH_COST;
6709 break;
6710 case CEE_BR:
6711 CHECK_OPSIZE (5);
6712 MONO_INST_NEW (cfg, ins, OP_BR);
6713 ip++;
6715 target = ip + 4 + (gint32)read32(ip);
6716 ip += 4;
6717 GET_BBLOCK (cfg, tblock, target);
6718 link_bblock (cfg, bblock, tblock);
6719 ins->inst_target_bb = tblock;
6720 if (sp != stack_start) {
6721 handle_stack_args (cfg, stack_start, sp - stack_start);
6722 sp = stack_start;
6723 CHECK_UNVERIFIABLE (cfg);
6726 MONO_ADD_INS (bblock, ins);
6728 start_new_bblock = 1;
6729 inline_costs += BRANCH_COST;
6730 break;
6731 case CEE_BRFALSE_S:
6732 case CEE_BRTRUE_S:
6733 case CEE_BRFALSE:
6734 case CEE_BRTRUE: {
6735 MonoInst *cmp;
6736 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6737 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6738 guint32 opsize = is_short ? 1 : 4;
6740 CHECK_OPSIZE (opsize);
6741 CHECK_STACK (1);
6742 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6743 UNVERIFIED;
6744 ip ++;
6745 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6746 ip += opsize;
6748 sp--;
6750 GET_BBLOCK (cfg, tblock, target);
6751 link_bblock (cfg, bblock, tblock);
6752 GET_BBLOCK (cfg, tblock, ip);
6753 link_bblock (cfg, bblock, tblock);
6755 if (sp != stack_start) {
6756 handle_stack_args (cfg, stack_start, sp - stack_start);
6757 CHECK_UNVERIFIABLE (cfg);
6760 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6761 cmp->sreg1 = sp [0]->dreg;
6762 type_from_op (cmp, sp [0], NULL);
6763 CHECK_TYPE (cmp);
6765 #if SIZEOF_REGISTER == 4
6766 if (cmp->opcode == OP_LCOMPARE_IMM) {
6767 /* Convert it to OP_LCOMPARE */
6768 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6769 ins->type = STACK_I8;
6770 ins->dreg = alloc_dreg (cfg, STACK_I8);
6771 ins->inst_l = 0;
6772 MONO_ADD_INS (bblock, ins);
6773 cmp->opcode = OP_LCOMPARE;
6774 cmp->sreg2 = ins->dreg;
6776 #endif
6777 MONO_ADD_INS (bblock, cmp);
6779 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6780 type_from_op (ins, sp [0], NULL);
6781 MONO_ADD_INS (bblock, ins);
6782 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6783 GET_BBLOCK (cfg, tblock, target);
6784 ins->inst_true_bb = tblock;
6785 GET_BBLOCK (cfg, tblock, ip);
6786 ins->inst_false_bb = tblock;
6787 start_new_bblock = 2;
6789 sp = stack_start;
6790 inline_costs += BRANCH_COST;
6791 break;
6793 case CEE_BEQ:
6794 case CEE_BGE:
6795 case CEE_BGT:
6796 case CEE_BLE:
6797 case CEE_BLT:
6798 case CEE_BNE_UN:
6799 case CEE_BGE_UN:
6800 case CEE_BGT_UN:
6801 case CEE_BLE_UN:
6802 case CEE_BLT_UN:
6803 CHECK_OPSIZE (5);
6804 CHECK_STACK (2);
6805 MONO_INST_NEW (cfg, ins, *ip);
6806 ip++;
6807 target = ip + 4 + (gint32)read32(ip);
6808 ip += 4;
6810 ADD_BINCOND (NULL);
6812 sp = stack_start;
6813 inline_costs += BRANCH_COST;
6814 break;
6815 case CEE_SWITCH: {
6816 MonoInst *src1;
6817 MonoBasicBlock **targets;
6818 MonoBasicBlock *default_bblock;
6819 MonoJumpInfoBBTable *table;
6820 int offset_reg = alloc_preg (cfg);
6821 int target_reg = alloc_preg (cfg);
6822 int table_reg = alloc_preg (cfg);
6823 int sum_reg = alloc_preg (cfg);
6824 gboolean use_op_switch;
6826 CHECK_OPSIZE (5);
6827 CHECK_STACK (1);
6828 n = read32 (ip + 1);
6829 --sp;
6830 src1 = sp [0];
6831 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6832 UNVERIFIED;
6834 ip += 5;
6835 CHECK_OPSIZE (n * sizeof (guint32));
6836 target = ip + n * sizeof (guint32);
6838 GET_BBLOCK (cfg, default_bblock, target);
6840 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6841 for (i = 0; i < n; ++i) {
6842 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6843 targets [i] = tblock;
6844 ip += 4;
6847 if (sp != stack_start) {
6849 * Link the current bb with the targets as well, so handle_stack_args
6850 * will set their in_stack correctly.
6852 link_bblock (cfg, bblock, default_bblock);
6853 for (i = 0; i < n; ++i)
6854 link_bblock (cfg, bblock, targets [i]);
6856 handle_stack_args (cfg, stack_start, sp - stack_start);
6857 sp = stack_start;
6858 CHECK_UNVERIFIABLE (cfg);
6861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6863 bblock = cfg->cbb;
6865 for (i = 0; i < n; ++i)
6866 link_bblock (cfg, bblock, targets [i]);
6868 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6869 table->table = targets;
6870 table->table_size = n;
6872 use_op_switch = FALSE;
6873 #ifdef TARGET_ARM
6874 /* ARM implements SWITCH statements differently */
6875 /* FIXME: Make it use the generic implementation */
6876 if (!cfg->compile_aot)
6877 use_op_switch = TRUE;
6878 #endif
6880 if (COMPILE_LLVM (cfg))
6881 use_op_switch = TRUE;
6883 cfg->cbb->has_jump_table = 1;
6885 if (use_op_switch) {
6886 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6887 ins->sreg1 = src1->dreg;
6888 ins->inst_p0 = table;
6889 ins->inst_many_bb = targets;
6890 ins->klass = GUINT_TO_POINTER (n);
6891 MONO_ADD_INS (cfg->cbb, ins);
6892 } else {
6893 if (sizeof (gpointer) == 8)
6894 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6895 else
6896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6898 #if SIZEOF_REGISTER == 8
6899 /* The upper word might not be zero, and we add it to a 64 bit address later */
6900 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6901 #endif
6903 if (cfg->compile_aot) {
6904 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6905 } else {
6906 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6907 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6908 ins->inst_p0 = table;
6909 ins->dreg = table_reg;
6910 MONO_ADD_INS (cfg->cbb, ins);
6913 /* FIXME: Use load_memindex */
6914 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6915 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6916 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6918 start_new_bblock = 1;
6919 inline_costs += (BRANCH_COST * 2);
6920 break;
6922 case CEE_LDIND_I1:
6923 case CEE_LDIND_U1:
6924 case CEE_LDIND_I2:
6925 case CEE_LDIND_U2:
6926 case CEE_LDIND_I4:
6927 case CEE_LDIND_U4:
6928 case CEE_LDIND_I8:
6929 case CEE_LDIND_I:
6930 case CEE_LDIND_R4:
6931 case CEE_LDIND_R8:
6932 case CEE_LDIND_REF:
6933 CHECK_STACK (1);
6934 --sp;
6936 switch (*ip) {
6937 case CEE_LDIND_R4:
6938 case CEE_LDIND_R8:
6939 dreg = alloc_freg (cfg);
6940 break;
6941 case CEE_LDIND_I8:
6942 dreg = alloc_lreg (cfg);
6943 break;
6944 default:
6945 dreg = alloc_preg (cfg);
6948 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6949 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6950 ins->flags |= ins_flag;
6951 ins_flag = 0;
6952 MONO_ADD_INS (bblock, ins);
6953 *sp++ = ins;
6954 ++ip;
6955 break;
6956 case CEE_STIND_REF:
6957 case CEE_STIND_I1:
6958 case CEE_STIND_I2:
6959 case CEE_STIND_I4:
6960 case CEE_STIND_I8:
6961 case CEE_STIND_R4:
6962 case CEE_STIND_R8:
6963 case CEE_STIND_I:
6964 CHECK_STACK (2);
6965 sp -= 2;
6967 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6968 ins->flags |= ins_flag;
6969 ins_flag = 0;
6970 MONO_ADD_INS (bblock, ins);
6972 #if HAVE_WRITE_BARRIERS
6973 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6974 MonoInst *dummy_use;
6975 /* insert call to write barrier */
6976 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6977 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6978 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
6980 #endif
6982 inline_costs += 1;
6983 ++ip;
6984 break;
6986 case CEE_MUL:
6987 CHECK_STACK (2);
6989 MONO_INST_NEW (cfg, ins, (*ip));
6990 sp -= 2;
6991 ins->sreg1 = sp [0]->dreg;
6992 ins->sreg2 = sp [1]->dreg;
6993 type_from_op (ins, sp [0], sp [1]);
6994 CHECK_TYPE (ins);
6995 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6997 /* Use the immediate opcodes if possible */
6998 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6999 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7000 if (imm_opcode != -1) {
7001 ins->opcode = imm_opcode;
7002 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7003 ins->sreg2 = -1;
7005 sp [1]->opcode = OP_NOP;
7009 MONO_ADD_INS ((cfg)->cbb, (ins));
7011 *sp++ = mono_decompose_opcode (cfg, ins);
7012 ip++;
7013 break;
7014 case CEE_ADD:
7015 case CEE_SUB:
7016 case CEE_DIV:
7017 case CEE_DIV_UN:
7018 case CEE_REM:
7019 case CEE_REM_UN:
7020 case CEE_AND:
7021 case CEE_OR:
7022 case CEE_XOR:
7023 case CEE_SHL:
7024 case CEE_SHR:
7025 case CEE_SHR_UN:
7026 CHECK_STACK (2);
7028 MONO_INST_NEW (cfg, ins, (*ip));
7029 sp -= 2;
7030 ins->sreg1 = sp [0]->dreg;
7031 ins->sreg2 = sp [1]->dreg;
7032 type_from_op (ins, sp [0], sp [1]);
7033 CHECK_TYPE (ins);
7034 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7035 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7037 /* FIXME: Pass opcode to is_inst_imm */
7039 /* Use the immediate opcodes if possible */
7040 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7041 int imm_opcode;
7043 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7044 if (imm_opcode != -1) {
7045 ins->opcode = imm_opcode;
7046 if (sp [1]->opcode == OP_I8CONST) {
7047 #if SIZEOF_REGISTER == 8
7048 ins->inst_imm = sp [1]->inst_l;
7049 #else
7050 ins->inst_ls_word = sp [1]->inst_ls_word;
7051 ins->inst_ms_word = sp [1]->inst_ms_word;
7052 #endif
7054 else
7055 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7056 ins->sreg2 = -1;
7058 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7059 if (sp [1]->next == NULL)
7060 sp [1]->opcode = OP_NOP;
7063 MONO_ADD_INS ((cfg)->cbb, (ins));
7065 *sp++ = mono_decompose_opcode (cfg, ins);
7066 ip++;
7067 break;
7068 case CEE_NEG:
7069 case CEE_NOT:
7070 case CEE_CONV_I1:
7071 case CEE_CONV_I2:
7072 case CEE_CONV_I4:
7073 case CEE_CONV_R4:
7074 case CEE_CONV_R8:
7075 case CEE_CONV_U4:
7076 case CEE_CONV_I8:
7077 case CEE_CONV_U8:
7078 case CEE_CONV_OVF_I8:
7079 case CEE_CONV_OVF_U8:
7080 case CEE_CONV_R_UN:
7081 CHECK_STACK (1);
7083 /* Special case this earlier so we have long constants in the IR */
7084 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7085 int data = sp [-1]->inst_c0;
7086 sp [-1]->opcode = OP_I8CONST;
7087 sp [-1]->type = STACK_I8;
7088 #if SIZEOF_REGISTER == 8
7089 if ((*ip) == CEE_CONV_U8)
7090 sp [-1]->inst_c0 = (guint32)data;
7091 else
7092 sp [-1]->inst_c0 = data;
7093 #else
7094 sp [-1]->inst_ls_word = data;
7095 if ((*ip) == CEE_CONV_U8)
7096 sp [-1]->inst_ms_word = 0;
7097 else
7098 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7099 #endif
7100 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7102 else {
7103 ADD_UNOP (*ip);
7105 ip++;
7106 break;
7107 case CEE_CONV_OVF_I4:
7108 case CEE_CONV_OVF_I1:
7109 case CEE_CONV_OVF_I2:
7110 case CEE_CONV_OVF_I:
7111 case CEE_CONV_OVF_U:
7112 CHECK_STACK (1);
7114 if (sp [-1]->type == STACK_R8) {
7115 ADD_UNOP (CEE_CONV_OVF_I8);
7116 ADD_UNOP (*ip);
7117 } else {
7118 ADD_UNOP (*ip);
7120 ip++;
7121 break;
7122 case CEE_CONV_OVF_U1:
7123 case CEE_CONV_OVF_U2:
7124 case CEE_CONV_OVF_U4:
7125 CHECK_STACK (1);
7127 if (sp [-1]->type == STACK_R8) {
7128 ADD_UNOP (CEE_CONV_OVF_U8);
7129 ADD_UNOP (*ip);
7130 } else {
7131 ADD_UNOP (*ip);
7133 ip++;
7134 break;
7135 case CEE_CONV_OVF_I1_UN:
7136 case CEE_CONV_OVF_I2_UN:
7137 case CEE_CONV_OVF_I4_UN:
7138 case CEE_CONV_OVF_I8_UN:
7139 case CEE_CONV_OVF_U1_UN:
7140 case CEE_CONV_OVF_U2_UN:
7141 case CEE_CONV_OVF_U4_UN:
7142 case CEE_CONV_OVF_U8_UN:
7143 case CEE_CONV_OVF_I_UN:
7144 case CEE_CONV_OVF_U_UN:
7145 case CEE_CONV_U2:
7146 case CEE_CONV_U1:
7147 case CEE_CONV_I:
7148 case CEE_CONV_U:
7149 CHECK_STACK (1);
7150 ADD_UNOP (*ip);
7151 ip++;
7152 break;
7153 case CEE_ADD_OVF:
7154 case CEE_ADD_OVF_UN:
7155 case CEE_MUL_OVF:
7156 case CEE_MUL_OVF_UN:
7157 case CEE_SUB_OVF:
7158 case CEE_SUB_OVF_UN:
7159 CHECK_STACK (2);
7160 ADD_BINOP (*ip);
7161 ip++;
7162 break;
7163 case CEE_CPOBJ:
7164 CHECK_OPSIZE (5);
7165 CHECK_STACK (2);
7166 token = read32 (ip + 1);
7167 klass = mini_get_class (method, token, generic_context);
7168 CHECK_TYPELOAD (klass);
7169 sp -= 2;
7170 if (generic_class_is_reference_type (cfg, klass)) {
7171 MonoInst *store, *load;
7172 int dreg = alloc_preg (cfg);
7174 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7175 load->flags |= ins_flag;
7176 MONO_ADD_INS (cfg->cbb, load);
7178 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7179 store->flags |= ins_flag;
7180 MONO_ADD_INS (cfg->cbb, store);
7181 } else {
7182 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7184 ins_flag = 0;
7185 ip += 5;
7186 break;
7187 case CEE_LDOBJ: {
7188 int loc_index = -1;
7189 int stloc_len = 0;
7191 CHECK_OPSIZE (5);
7192 CHECK_STACK (1);
7193 --sp;
7194 token = read32 (ip + 1);
7195 klass = mini_get_class (method, token, generic_context);
7196 CHECK_TYPELOAD (klass);
7198 /* Optimize the common ldobj+stloc combination */
7199 switch (ip [5]) {
7200 case CEE_STLOC_S:
7201 loc_index = ip [6];
7202 stloc_len = 2;
7203 break;
7204 case CEE_STLOC_0:
7205 case CEE_STLOC_1:
7206 case CEE_STLOC_2:
7207 case CEE_STLOC_3:
7208 loc_index = ip [5] - CEE_STLOC_0;
7209 stloc_len = 1;
7210 break;
7211 default:
7212 break;
7215 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7216 CHECK_LOCAL (loc_index);
7218 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7219 ins->dreg = cfg->locals [loc_index]->dreg;
7220 ip += 5;
7221 ip += stloc_len;
7222 break;
7225 /* Optimize the ldobj+stobj combination */
7226 /* The reference case ends up being a load+store anyway */
7227 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7228 CHECK_STACK (1);
7230 sp --;
7232 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7234 ip += 5 + 5;
7235 ins_flag = 0;
7236 break;
7239 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7240 *sp++ = ins;
7242 ip += 5;
7243 ins_flag = 0;
7244 inline_costs += 1;
7245 break;
7247 case CEE_LDSTR:
7248 CHECK_STACK_OVF (1);
7249 CHECK_OPSIZE (5);
7250 n = read32 (ip + 1);
7252 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7253 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7254 ins->type = STACK_OBJ;
7255 *sp = ins;
7257 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7258 MonoInst *iargs [1];
7260 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7261 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7262 } else {
7263 if (cfg->opt & MONO_OPT_SHARED) {
7264 MonoInst *iargs [3];
7266 if (cfg->compile_aot) {
7267 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7269 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7270 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7271 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7272 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7273 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7274 } else {
7275 if (bblock->out_of_line) {
7276 MonoInst *iargs [2];
7278 if (image == mono_defaults.corlib) {
7280 * Avoid relocations in AOT and save some space by using a
7281 * version of helper_ldstr specialized to mscorlib.
7283 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7284 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7285 } else {
7286 /* Avoid creating the string object */
7287 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7288 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7289 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7292 else
7293 if (cfg->compile_aot) {
7294 NEW_LDSTRCONST (cfg, ins, image, n);
7295 *sp = ins;
7296 MONO_ADD_INS (bblock, ins);
7298 else {
7299 NEW_PCONST (cfg, ins, NULL);
7300 ins->type = STACK_OBJ;
7301 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7302 *sp = ins;
7303 MONO_ADD_INS (bblock, ins);
7308 sp++;
7309 ip += 5;
7310 break;
7311 case CEE_NEWOBJ: {
7312 MonoInst *iargs [2];
7313 MonoMethodSignature *fsig;
7314 MonoInst this_ins;
7315 MonoInst *alloc;
7316 MonoInst *vtable_arg = NULL;
7318 CHECK_OPSIZE (5);
7319 token = read32 (ip + 1);
7320 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7321 if (!cmethod)
7322 goto load_error;
7323 fsig = mono_method_get_signature (cmethod, image, token);
7324 if (!fsig)
7325 goto load_error;
7327 mono_save_token_info (cfg, image, token, cmethod);
7329 if (!mono_class_init (cmethod->klass))
7330 goto load_error;
7332 if (cfg->generic_sharing_context)
7333 context_used = mono_method_check_context_used (cmethod);
7335 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7336 if (check_linkdemand (cfg, method, cmethod))
7337 INLINE_FAILURE;
7338 CHECK_CFG_EXCEPTION;
7339 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7340 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7343 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7344 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7345 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7346 mono_class_vtable (cfg->domain, cmethod->klass);
7347 CHECK_TYPELOAD (cmethod->klass);
7349 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7350 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7351 } else {
7352 if (context_used) {
7353 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7354 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7355 } else {
7356 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7358 CHECK_TYPELOAD (cmethod->klass);
7359 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7364 n = fsig->param_count;
7365 CHECK_STACK (n);
7368 * Generate smaller code for the common newobj <exception> instruction in
7369 * argument checking code.
7371 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7372 is_exception_class (cmethod->klass) && n <= 2 &&
7373 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7374 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7375 MonoInst *iargs [3];
7377 g_assert (!vtable_arg);
7379 sp -= n;
7381 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7382 switch (n) {
7383 case 0:
7384 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7385 break;
7386 case 1:
7387 iargs [1] = sp [0];
7388 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7389 break;
7390 case 2:
7391 iargs [1] = sp [0];
7392 iargs [2] = sp [1];
7393 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7394 break;
7395 default:
7396 g_assert_not_reached ();
7399 ip += 5;
7400 inline_costs += 5;
7401 break;
7404 /* move the args to allow room for 'this' in the first position */
7405 while (n--) {
7406 --sp;
7407 sp [1] = sp [0];
7410 /* check_call_signature () requires sp[0] to be set */
7411 this_ins.type = STACK_OBJ;
7412 sp [0] = &this_ins;
7413 if (check_call_signature (cfg, fsig, sp))
7414 UNVERIFIED;
7416 iargs [0] = NULL;
7418 if (mini_class_is_system_array (cmethod->klass)) {
7419 g_assert (!vtable_arg);
7421 *sp = emit_get_rgctx_method (cfg, context_used,
7422 cmethod, MONO_RGCTX_INFO_METHOD);
7424 /* Avoid varargs in the common case */
7425 if (fsig->param_count == 1)
7426 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7427 else if (fsig->param_count == 2)
7428 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7429 else if (fsig->param_count == 3)
7430 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7431 else
7432 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7433 } else if (cmethod->string_ctor) {
7434 g_assert (!context_used);
7435 g_assert (!vtable_arg);
7436 /* we simply pass a null pointer */
7437 EMIT_NEW_PCONST (cfg, *sp, NULL);
7438 /* now call the string ctor */
7439 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7440 } else {
7441 MonoInst* callvirt_this_arg = NULL;
7443 if (cmethod->klass->valuetype) {
7444 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7445 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7446 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7448 alloc = NULL;
7451 * The code generated by mini_emit_virtual_call () expects
7452 * iargs [0] to be a boxed instance, but luckily the vcall
7453 * will be transformed into a normal call there.
7455 } else if (context_used) {
7456 MonoInst *data;
7457 int rgctx_info;
7459 if (cfg->opt & MONO_OPT_SHARED)
7460 rgctx_info = MONO_RGCTX_INFO_KLASS;
7461 else
7462 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7463 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7465 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7466 *sp = alloc;
7467 } else {
7468 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7470 CHECK_TYPELOAD (cmethod->klass);
7473 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7474 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7475 * As a workaround, we call class cctors before allocating objects.
7477 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7478 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7479 if (cfg->verbose_level > 2)
7480 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7481 class_inits = g_slist_prepend (class_inits, vtable);
7484 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7485 *sp = alloc;
7487 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7489 if (alloc)
7490 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7492 /* Now call the actual ctor */
7493 /* Avoid virtual calls to ctors if possible */
7494 if (cmethod->klass->marshalbyref)
7495 callvirt_this_arg = sp [0];
7497 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7498 mono_method_check_inlining (cfg, cmethod) &&
7499 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7500 !g_list_find (dont_inline, cmethod)) {
7501 int costs;
7503 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7504 cfg->real_offset += 5;
7505 bblock = cfg->cbb;
7507 inline_costs += costs - 5;
7508 } else {
7509 INLINE_FAILURE;
7510 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7512 } else if (context_used &&
7513 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7514 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7515 MonoInst *cmethod_addr;
7517 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7518 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7520 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7521 } else {
7522 INLINE_FAILURE;
7523 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7524 callvirt_this_arg, NULL, vtable_arg);
7528 if (alloc == NULL) {
7529 /* Valuetype */
7530 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7531 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7532 *sp++= ins;
7534 else
7535 *sp++ = alloc;
7537 ip += 5;
7538 inline_costs += 5;
7539 break;
7541 case CEE_CASTCLASS:
7542 CHECK_STACK (1);
7543 --sp;
7544 CHECK_OPSIZE (5);
7545 token = read32 (ip + 1);
7546 klass = mini_get_class (method, token, generic_context);
7547 CHECK_TYPELOAD (klass);
7548 if (sp [0]->type != STACK_OBJ)
7549 UNVERIFIED;
7551 if (cfg->generic_sharing_context)
7552 context_used = mono_class_check_context_used (klass);
7554 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7555 MonoInst *args [2];
7557 /* obj */
7558 args [0] = *sp;
7560 /* klass */
7561 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7563 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7564 *sp ++ = ins;
7565 ip += 5;
7566 inline_costs += 2;
7567 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7568 MonoMethod *mono_castclass;
7569 MonoInst *iargs [1];
7570 int costs;
7572 mono_castclass = mono_marshal_get_castclass (klass);
7573 iargs [0] = sp [0];
7575 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7576 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7577 g_assert (costs > 0);
7579 ip += 5;
7580 cfg->real_offset += 5;
7581 bblock = cfg->cbb;
7583 *sp++ = iargs [0];
7585 inline_costs += costs;
7587 else {
7588 ins = handle_castclass (cfg, klass, *sp, context_used);
7589 CHECK_CFG_EXCEPTION;
7590 bblock = cfg->cbb;
7591 *sp ++ = ins;
7592 ip += 5;
7594 break;
7595 case CEE_ISINST: {
7596 CHECK_STACK (1);
7597 --sp;
7598 CHECK_OPSIZE (5);
7599 token = read32 (ip + 1);
7600 klass = mini_get_class (method, token, generic_context);
7601 CHECK_TYPELOAD (klass);
7602 if (sp [0]->type != STACK_OBJ)
7603 UNVERIFIED;
7605 if (cfg->generic_sharing_context)
7606 context_used = mono_class_check_context_used (klass);
7608 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7609 MonoInst *args [2];
7611 /* obj */
7612 args [0] = *sp;
7614 /* klass */
7615 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7617 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7618 sp++;
7619 ip += 5;
7620 inline_costs += 2;
7621 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7622 MonoMethod *mono_isinst;
7623 MonoInst *iargs [1];
7624 int costs;
7626 mono_isinst = mono_marshal_get_isinst (klass);
7627 iargs [0] = sp [0];
7629 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7630 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7631 g_assert (costs > 0);
7633 ip += 5;
7634 cfg->real_offset += 5;
7635 bblock = cfg->cbb;
7637 *sp++= iargs [0];
7639 inline_costs += costs;
7641 else {
7642 ins = handle_isinst (cfg, klass, *sp, context_used);
7643 CHECK_CFG_EXCEPTION;
7644 bblock = cfg->cbb;
7645 *sp ++ = ins;
7646 ip += 5;
7648 break;
7650 case CEE_UNBOX_ANY: {
7651 CHECK_STACK (1);
7652 --sp;
7653 CHECK_OPSIZE (5);
7654 token = read32 (ip + 1);
7655 klass = mini_get_class (method, token, generic_context);
7656 CHECK_TYPELOAD (klass);
7658 mono_save_token_info (cfg, image, token, klass);
7660 if (cfg->generic_sharing_context)
7661 context_used = mono_class_check_context_used (klass);
7663 if (generic_class_is_reference_type (cfg, klass)) {
7664 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7665 if (context_used) {
7666 MonoInst *iargs [2];
7668 /* obj */
7669 iargs [0] = *sp;
7670 /* klass */
7671 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7672 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7673 *sp ++ = ins;
7674 ip += 5;
7675 inline_costs += 2;
7676 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7677 MonoMethod *mono_castclass;
7678 MonoInst *iargs [1];
7679 int costs;
7681 mono_castclass = mono_marshal_get_castclass (klass);
7682 iargs [0] = sp [0];
7684 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7685 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7687 g_assert (costs > 0);
7689 ip += 5;
7690 cfg->real_offset += 5;
7691 bblock = cfg->cbb;
7693 *sp++ = iargs [0];
7694 inline_costs += costs;
7695 } else {
7696 ins = handle_castclass (cfg, klass, *sp, 0);
7697 CHECK_CFG_EXCEPTION;
7698 bblock = cfg->cbb;
7699 *sp ++ = ins;
7700 ip += 5;
7702 break;
7705 if (mono_class_is_nullable (klass)) {
7706 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7707 *sp++= ins;
7708 ip += 5;
7709 break;
7712 /* UNBOX */
7713 ins = handle_unbox (cfg, klass, sp, context_used);
7714 *sp = ins;
7716 ip += 5;
7718 /* LDOBJ */
7719 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7720 *sp++ = ins;
7722 inline_costs += 2;
7723 break;
7725 case CEE_BOX: {
7726 MonoInst *val;
7728 CHECK_STACK (1);
7729 --sp;
7730 val = *sp;
7731 CHECK_OPSIZE (5);
7732 token = read32 (ip + 1);
7733 klass = mini_get_class (method, token, generic_context);
7734 CHECK_TYPELOAD (klass);
7736 mono_save_token_info (cfg, image, token, klass);
7738 if (cfg->generic_sharing_context)
7739 context_used = mono_class_check_context_used (klass);
7741 if (generic_class_is_reference_type (cfg, klass)) {
7742 *sp++ = val;
7743 ip += 5;
7744 break;
7747 if (klass == mono_defaults.void_class)
7748 UNVERIFIED;
7749 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7750 UNVERIFIED;
7751 /* frequent check in generic code: box (struct), brtrue */
7752 if (!mono_class_is_nullable (klass) &&
7753 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7754 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7755 ip += 5;
7756 MONO_INST_NEW (cfg, ins, OP_BR);
7757 if (*ip == CEE_BRTRUE_S) {
7758 CHECK_OPSIZE (2);
7759 ip++;
7760 target = ip + 1 + (signed char)(*ip);
7761 ip++;
7762 } else {
7763 CHECK_OPSIZE (5);
7764 ip++;
7765 target = ip + 4 + (gint)(read32 (ip));
7766 ip += 4;
7768 GET_BBLOCK (cfg, tblock, target);
7769 link_bblock (cfg, bblock, tblock);
7770 ins->inst_target_bb = tblock;
7771 GET_BBLOCK (cfg, tblock, ip);
7773 * This leads to some inconsistency, since the two bblocks are
7774 * not really connected, but it is needed for handling stack
7775 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7776 * FIXME: This should only be needed if sp != stack_start, but that
7777 * doesn't work for some reason (test failure in mcs/tests on x86).
7779 link_bblock (cfg, bblock, tblock);
7780 if (sp != stack_start) {
7781 handle_stack_args (cfg, stack_start, sp - stack_start);
7782 sp = stack_start;
7783 CHECK_UNVERIFIABLE (cfg);
7785 MONO_ADD_INS (bblock, ins);
7786 start_new_bblock = 1;
7787 break;
7790 if (context_used) {
7791 MonoInst *data;
7792 int rgctx_info;
7794 if (cfg->opt & MONO_OPT_SHARED)
7795 rgctx_info = MONO_RGCTX_INFO_KLASS;
7796 else
7797 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7798 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7799 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7800 } else {
7801 *sp++ = handle_box (cfg, val, klass);
7804 CHECK_CFG_EXCEPTION;
7805 ip += 5;
7806 inline_costs += 1;
7807 break;
7809 case CEE_UNBOX: {
7810 CHECK_STACK (1);
7811 --sp;
7812 CHECK_OPSIZE (5);
7813 token = read32 (ip + 1);
7814 klass = mini_get_class (method, token, generic_context);
7815 CHECK_TYPELOAD (klass);
7817 mono_save_token_info (cfg, image, token, klass);
7819 if (cfg->generic_sharing_context)
7820 context_used = mono_class_check_context_used (klass);
7822 if (mono_class_is_nullable (klass)) {
7823 MonoInst *val;
7825 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7826 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7828 *sp++= ins;
7829 } else {
7830 ins = handle_unbox (cfg, klass, sp, context_used);
7831 *sp++ = ins;
7833 ip += 5;
7834 inline_costs += 2;
7835 break;
7837 case CEE_LDFLD:
7838 case CEE_LDFLDA:
7839 case CEE_STFLD: {
7840 MonoClassField *field;
7841 int costs;
7842 guint foffset;
7844 if (*ip == CEE_STFLD) {
7845 CHECK_STACK (2);
7846 sp -= 2;
7847 } else {
7848 CHECK_STACK (1);
7849 --sp;
7851 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7852 UNVERIFIED;
7853 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7854 UNVERIFIED;
7855 CHECK_OPSIZE (5);
7856 token = read32 (ip + 1);
7857 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7858 field = mono_method_get_wrapper_data (method, token);
7859 klass = field->parent;
7861 else {
7862 field = mono_field_from_token (image, token, &klass, generic_context);
7864 if (!field)
7865 goto load_error;
7866 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7867 FIELD_ACCESS_FAILURE;
7868 mono_class_init (klass);
7870 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7871 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7872 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7873 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7876 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7877 if (*ip == CEE_STFLD) {
7878 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7879 UNVERIFIED;
7880 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7881 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7882 MonoInst *iargs [5];
7884 iargs [0] = sp [0];
7885 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7886 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7887 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7888 field->offset);
7889 iargs [4] = sp [1];
7891 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7892 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7893 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7894 g_assert (costs > 0);
7896 cfg->real_offset += 5;
7897 bblock = cfg->cbb;
7899 inline_costs += costs;
7900 } else {
7901 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7903 } else {
7904 MonoInst *store;
7906 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7908 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7910 #if HAVE_WRITE_BARRIERS
7911 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7912 /* insert call to write barrier */
7913 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7914 MonoInst *iargs [2], *dummy_use;
7915 int dreg;
7917 dreg = alloc_preg (cfg);
7918 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7919 iargs [1] = sp [1];
7920 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7922 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7924 #endif
7926 store->flags |= ins_flag;
7928 ins_flag = 0;
7929 ip += 5;
7930 break;
7933 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7934 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7935 MonoInst *iargs [4];
7937 iargs [0] = sp [0];
7938 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7939 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7940 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7941 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7942 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7943 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7944 bblock = cfg->cbb;
7945 g_assert (costs > 0);
7947 cfg->real_offset += 5;
7949 *sp++ = iargs [0];
7951 inline_costs += costs;
7952 } else {
7953 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7954 *sp++ = ins;
7956 } else {
7957 if (sp [0]->type == STACK_VTYPE) {
7958 MonoInst *var;
7960 /* Have to compute the address of the variable */
7962 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7963 if (!var)
7964 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7965 else
7966 g_assert (var->klass == klass);
7968 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7969 sp [0] = ins;
7972 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7974 if (*ip == CEE_LDFLDA) {
7975 dreg = alloc_preg (cfg);
7977 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7978 ins->klass = mono_class_from_mono_type (field->type);
7979 ins->type = STACK_MP;
7980 *sp++ = ins;
7981 } else {
7982 MonoInst *load;
7984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7985 load->flags |= ins_flag;
7986 load->flags |= MONO_INST_FAULT;
7987 *sp++ = load;
7990 ins_flag = 0;
7991 ip += 5;
7992 break;
7994 case CEE_LDSFLD:
7995 case CEE_LDSFLDA:
7996 case CEE_STSFLD: {
7997 MonoClassField *field;
7998 gpointer addr = NULL;
7999 gboolean is_special_static;
8001 CHECK_OPSIZE (5);
8002 token = read32 (ip + 1);
8004 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8005 field = mono_method_get_wrapper_data (method, token);
8006 klass = field->parent;
8008 else
8009 field = mono_field_from_token (image, token, &klass, generic_context);
8010 if (!field)
8011 goto load_error;
8012 mono_class_init (klass);
8013 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8014 FIELD_ACCESS_FAILURE;
8016 /* if the class is Critical then transparent code cannot access it's fields */
8017 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8018 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8021 * We can only support shared generic static
8022 * field access on architectures where the
8023 * trampoline code has been extended to handle
8024 * the generic class init.
8026 #ifndef MONO_ARCH_VTABLE_REG
8027 GENERIC_SHARING_FAILURE (*ip);
8028 #endif
8030 if (cfg->generic_sharing_context)
8031 context_used = mono_class_check_context_used (klass);
8033 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8035 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8036 * to be called here.
8038 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8039 mono_class_vtable (cfg->domain, klass);
8040 CHECK_TYPELOAD (klass);
8042 mono_domain_lock (cfg->domain);
8043 if (cfg->domain->special_static_fields)
8044 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8045 mono_domain_unlock (cfg->domain);
8047 is_special_static = mono_class_field_is_special_static (field);
8049 /* Generate IR to compute the field address */
8050 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8052 * Fast access to TLS data
8053 * Inline version of get_thread_static_data () in
8054 * threads.c.
8056 guint32 offset;
8057 int idx, static_data_reg, array_reg, dreg;
8058 MonoInst *thread_ins;
8060 // offset &= 0x7fffffff;
8061 // idx = (offset >> 24) - 1;
8062 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8064 thread_ins = mono_get_thread_intrinsic (cfg);
8065 MONO_ADD_INS (cfg->cbb, thread_ins);
8066 static_data_reg = alloc_ireg (cfg);
8067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8069 if (cfg->compile_aot) {
8070 int offset_reg, offset2_reg, idx_reg;
8072 /* For TLS variables, this will return the TLS offset */
8073 EMIT_NEW_SFLDACONST (cfg, ins, field);
8074 offset_reg = ins->dreg;
8075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8076 idx_reg = alloc_ireg (cfg);
8077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8081 array_reg = alloc_ireg (cfg);
8082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8083 offset2_reg = alloc_ireg (cfg);
8084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8085 dreg = alloc_ireg (cfg);
8086 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8087 } else {
8088 offset = (gsize)addr & 0x7fffffff;
8089 idx = (offset >> 24) - 1;
8091 array_reg = alloc_ireg (cfg);
8092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8093 dreg = alloc_ireg (cfg);
8094 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8096 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8097 (cfg->compile_aot && is_special_static) ||
8098 (context_used && is_special_static)) {
8099 MonoInst *iargs [2];
8101 g_assert (field->parent);
8102 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8103 if (context_used) {
8104 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8105 field, MONO_RGCTX_INFO_CLASS_FIELD);
8106 } else {
8107 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8109 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8110 } else if (context_used) {
8111 MonoInst *static_data;
8114 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8115 method->klass->name_space, method->klass->name, method->name,
8116 depth, field->offset);
8119 if (mono_class_needs_cctor_run (klass, method)) {
8120 MonoCallInst *call;
8121 MonoInst *vtable;
8123 vtable = emit_get_rgctx_klass (cfg, context_used,
8124 klass, MONO_RGCTX_INFO_VTABLE);
8126 // FIXME: This doesn't work since it tries to pass the argument
8127 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8129 * The vtable pointer is always passed in a register regardless of
8130 * the calling convention, so assign it manually, and make a call
8131 * using a signature without parameters.
8133 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8134 #ifdef MONO_ARCH_VTABLE_REG
8135 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8136 cfg->uses_vtable_reg = TRUE;
8137 #else
8138 NOT_IMPLEMENTED;
8139 #endif
8143 * The pointer we're computing here is
8145 * super_info.static_data + field->offset
8147 static_data = emit_get_rgctx_klass (cfg, context_used,
8148 klass, MONO_RGCTX_INFO_STATIC_DATA);
8150 if (field->offset == 0) {
8151 ins = static_data;
8152 } else {
8153 int addr_reg = mono_alloc_preg (cfg);
8154 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8156 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8157 MonoInst *iargs [2];
8159 g_assert (field->parent);
8160 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8161 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8162 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8163 } else {
8164 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8166 CHECK_TYPELOAD (klass);
8167 if (!addr) {
8168 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8169 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8170 if (cfg->verbose_level > 2)
8171 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8172 class_inits = g_slist_prepend (class_inits, vtable);
8173 } else {
8174 if (cfg->run_cctors) {
8175 MonoException *ex;
8176 /* This makes so that inline cannot trigger */
8177 /* .cctors: too many apps depend on them */
8178 /* running with a specific order... */
8179 if (! vtable->initialized)
8180 INLINE_FAILURE;
8181 ex = mono_runtime_class_init_full (vtable, FALSE);
8182 if (ex) {
8183 set_exception_object (cfg, ex);
8184 goto exception_exit;
8188 addr = (char*)vtable->data + field->offset;
8190 if (cfg->compile_aot)
8191 EMIT_NEW_SFLDACONST (cfg, ins, field);
8192 else
8193 EMIT_NEW_PCONST (cfg, ins, addr);
8194 } else {
8195 MonoInst *iargs [1];
8196 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8197 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8201 /* Generate IR to do the actual load/store operation */
8203 if (*ip == CEE_LDSFLDA) {
8204 ins->klass = mono_class_from_mono_type (field->type);
8205 ins->type = STACK_PTR;
8206 *sp++ = ins;
8207 } else if (*ip == CEE_STSFLD) {
8208 MonoInst *store;
8209 CHECK_STACK (1);
8210 sp--;
8212 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8213 store->flags |= ins_flag;
8214 } else {
8215 gboolean is_const = FALSE;
8216 MonoVTable *vtable = NULL;
8218 if (!context_used) {
8219 vtable = mono_class_vtable (cfg->domain, klass);
8220 CHECK_TYPELOAD (klass);
8222 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8223 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8224 gpointer addr = (char*)vtable->data + field->offset;
8225 int ro_type = field->type->type;
8226 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8227 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8229 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8230 is_const = TRUE;
8231 switch (ro_type) {
8232 case MONO_TYPE_BOOLEAN:
8233 case MONO_TYPE_U1:
8234 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8235 sp++;
8236 break;
8237 case MONO_TYPE_I1:
8238 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8239 sp++;
8240 break;
8241 case MONO_TYPE_CHAR:
8242 case MONO_TYPE_U2:
8243 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8244 sp++;
8245 break;
8246 case MONO_TYPE_I2:
8247 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8248 sp++;
8249 break;
8250 break;
8251 case MONO_TYPE_I4:
8252 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8253 sp++;
8254 break;
8255 case MONO_TYPE_U4:
8256 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8257 sp++;
8258 break;
8259 #ifndef HAVE_MOVING_COLLECTOR
8260 case MONO_TYPE_I:
8261 case MONO_TYPE_U:
8262 case MONO_TYPE_STRING:
8263 case MONO_TYPE_OBJECT:
8264 case MONO_TYPE_CLASS:
8265 case MONO_TYPE_SZARRAY:
8266 case MONO_TYPE_PTR:
8267 case MONO_TYPE_FNPTR:
8268 case MONO_TYPE_ARRAY:
8269 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8270 type_to_eval_stack_type ((cfg), field->type, *sp);
8271 sp++;
8272 break;
8273 #endif
8274 case MONO_TYPE_I8:
8275 case MONO_TYPE_U8:
8276 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8277 sp++;
8278 break;
8279 case MONO_TYPE_R4:
8280 case MONO_TYPE_R8:
8281 case MONO_TYPE_VALUETYPE:
8282 default:
8283 is_const = FALSE;
8284 break;
8288 if (!is_const) {
8289 MonoInst *load;
8291 CHECK_STACK_OVF (1);
8293 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8294 load->flags |= ins_flag;
8295 ins_flag = 0;
8296 *sp++ = load;
8299 ins_flag = 0;
8300 ip += 5;
8301 break;
8303 case CEE_STOBJ:
8304 CHECK_STACK (2);
8305 sp -= 2;
8306 CHECK_OPSIZE (5);
8307 token = read32 (ip + 1);
8308 klass = mini_get_class (method, token, generic_context);
8309 CHECK_TYPELOAD (klass);
8310 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8311 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8312 ins_flag = 0;
8313 ip += 5;
8314 inline_costs += 1;
8315 break;
8318 * Array opcodes
8320 case CEE_NEWARR: {
8321 MonoInst *len_ins;
8322 const char *data_ptr;
8323 int data_size = 0;
8324 guint32 field_token;
8326 CHECK_STACK (1);
8327 --sp;
8329 CHECK_OPSIZE (5);
8330 token = read32 (ip + 1);
8332 klass = mini_get_class (method, token, generic_context);
8333 CHECK_TYPELOAD (klass);
8335 if (cfg->generic_sharing_context)
8336 context_used = mono_class_check_context_used (klass);
8338 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8339 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8340 ins->sreg1 = sp [0]->dreg;
8341 ins->type = STACK_I4;
8342 ins->dreg = alloc_ireg (cfg);
8343 MONO_ADD_INS (cfg->cbb, ins);
8344 *sp = mono_decompose_opcode (cfg, ins);
8347 if (context_used) {
8348 MonoInst *args [3];
8349 MonoClass *array_class = mono_array_class_get (klass, 1);
8350 /* FIXME: we cannot get a managed
8351 allocator because we can't get the
8352 open generic class's vtable. We
8353 have the same problem in
8354 handle_alloc_from_inst(). This
8355 needs to be solved so that we can
8356 have managed allocs of shared
8357 generic classes. */
8359 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8360 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8362 MonoMethod *managed_alloc = NULL;
8364 /* FIXME: Decompose later to help abcrem */
8366 /* vtable */
8367 args [0] = emit_get_rgctx_klass (cfg, context_used,
8368 array_class, MONO_RGCTX_INFO_VTABLE);
8369 /* array len */
8370 args [1] = sp [0];
8372 if (managed_alloc)
8373 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8374 else
8375 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8376 } else {
8377 if (cfg->opt & MONO_OPT_SHARED) {
8378 /* Decompose now to avoid problems with references to the domainvar */
8379 MonoInst *iargs [3];
8381 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8382 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8383 iargs [2] = sp [0];
8385 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8386 } else {
8387 /* Decompose later since it is needed by abcrem */
8388 MonoClass *array_type = mono_array_class_get (klass, 1);
8389 mono_class_vtable (cfg->domain, array_type);
8390 CHECK_TYPELOAD (array_type);
8392 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8393 ins->dreg = alloc_preg (cfg);
8394 ins->sreg1 = sp [0]->dreg;
8395 ins->inst_newa_class = klass;
8396 ins->type = STACK_OBJ;
8397 ins->klass = klass;
8398 MONO_ADD_INS (cfg->cbb, ins);
8399 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8400 cfg->cbb->has_array_access = TRUE;
8402 /* Needed so mono_emit_load_get_addr () gets called */
8403 mono_get_got_var (cfg);
8407 len_ins = sp [0];
8408 ip += 5;
8409 *sp++ = ins;
8410 inline_costs += 1;
8413 * we inline/optimize the initialization sequence if possible.
8414 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8415 * for small sizes open code the memcpy
8416 * ensure the rva field is big enough
8418 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8419 MonoMethod *memcpy_method = get_memcpy_method ();
8420 MonoInst *iargs [3];
8421 int add_reg = alloc_preg (cfg);
8423 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8424 if (cfg->compile_aot) {
8425 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8426 } else {
8427 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8429 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8430 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8431 ip += 11;
8434 break;
8436 case CEE_LDLEN:
8437 CHECK_STACK (1);
8438 --sp;
8439 if (sp [0]->type != STACK_OBJ)
8440 UNVERIFIED;
8442 dreg = alloc_preg (cfg);
8443 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8444 ins->dreg = alloc_preg (cfg);
8445 ins->sreg1 = sp [0]->dreg;
8446 ins->type = STACK_I4;
8447 MONO_ADD_INS (cfg->cbb, ins);
8448 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8449 cfg->cbb->has_array_access = TRUE;
8450 ip ++;
8451 *sp++ = ins;
8452 break;
8453 case CEE_LDELEMA:
8454 CHECK_STACK (2);
8455 sp -= 2;
8456 CHECK_OPSIZE (5);
8457 if (sp [0]->type != STACK_OBJ)
8458 UNVERIFIED;
8460 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8462 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8463 CHECK_TYPELOAD (klass);
8464 /* we need to make sure that this array is exactly the type it needs
8465 * to be for correctness. the wrappers are lax with their usage
8466 * so we need to ignore them here
8468 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8469 MonoClass *array_class = mono_array_class_get (klass, 1);
8470 mini_emit_check_array_type (cfg, sp [0], array_class);
8471 CHECK_TYPELOAD (array_class);
8474 readonly = FALSE;
8475 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8476 *sp++ = ins;
8477 ip += 5;
8478 break;
8479 case CEE_LDELEM:
8480 case CEE_LDELEM_I1:
8481 case CEE_LDELEM_U1:
8482 case CEE_LDELEM_I2:
8483 case CEE_LDELEM_U2:
8484 case CEE_LDELEM_I4:
8485 case CEE_LDELEM_U4:
8486 case CEE_LDELEM_I8:
8487 case CEE_LDELEM_I:
8488 case CEE_LDELEM_R4:
8489 case CEE_LDELEM_R8:
8490 case CEE_LDELEM_REF: {
8491 MonoInst *addr;
8493 CHECK_STACK (2);
8494 sp -= 2;
8496 if (*ip == CEE_LDELEM) {
8497 CHECK_OPSIZE (5);
8498 token = read32 (ip + 1);
8499 klass = mini_get_class (method, token, generic_context);
8500 CHECK_TYPELOAD (klass);
8501 mono_class_init (klass);
8503 else
8504 klass = array_access_to_klass (*ip);
8506 if (sp [0]->type != STACK_OBJ)
8507 UNVERIFIED;
8509 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8511 if (sp [1]->opcode == OP_ICONST) {
8512 int array_reg = sp [0]->dreg;
8513 int index_reg = sp [1]->dreg;
8514 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8516 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8517 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8518 } else {
8519 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8520 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8522 *sp++ = ins;
8523 if (*ip == CEE_LDELEM)
8524 ip += 5;
8525 else
8526 ++ip;
8527 break;
8529 case CEE_STELEM_I:
8530 case CEE_STELEM_I1:
8531 case CEE_STELEM_I2:
8532 case CEE_STELEM_I4:
8533 case CEE_STELEM_I8:
8534 case CEE_STELEM_R4:
8535 case CEE_STELEM_R8:
8536 case CEE_STELEM_REF:
8537 case CEE_STELEM: {
8538 MonoInst *addr;
8540 CHECK_STACK (3);
8541 sp -= 3;
8543 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8545 if (*ip == CEE_STELEM) {
8546 CHECK_OPSIZE (5);
8547 token = read32 (ip + 1);
8548 klass = mini_get_class (method, token, generic_context);
8549 CHECK_TYPELOAD (klass);
8550 mono_class_init (klass);
8552 else
8553 klass = array_access_to_klass (*ip);
8555 if (sp [0]->type != STACK_OBJ)
8556 UNVERIFIED;
8558 /* storing a NULL doesn't need any of the complex checks in stelemref */
8559 if (generic_class_is_reference_type (cfg, klass) &&
8560 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8561 MonoMethod* helper = mono_marshal_get_stelemref ();
8562 MonoInst *iargs [3];
8564 if (sp [0]->type != STACK_OBJ)
8565 UNVERIFIED;
8566 if (sp [2]->type != STACK_OBJ)
8567 UNVERIFIED;
8569 iargs [2] = sp [2];
8570 iargs [1] = sp [1];
8571 iargs [0] = sp [0];
8573 mono_emit_method_call (cfg, helper, iargs, NULL);
8574 } else {
8575 if (sp [1]->opcode == OP_ICONST) {
8576 int array_reg = sp [0]->dreg;
8577 int index_reg = sp [1]->dreg;
8578 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8580 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8581 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8582 } else {
8583 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8584 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8588 if (*ip == CEE_STELEM)
8589 ip += 5;
8590 else
8591 ++ip;
8592 inline_costs += 1;
8593 break;
8595 case CEE_CKFINITE: {
8596 CHECK_STACK (1);
8597 --sp;
8599 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8600 ins->sreg1 = sp [0]->dreg;
8601 ins->dreg = alloc_freg (cfg);
8602 ins->type = STACK_R8;
8603 MONO_ADD_INS (bblock, ins);
8605 *sp++ = mono_decompose_opcode (cfg, ins);
8607 ++ip;
8608 break;
8610 case CEE_REFANYVAL: {
8611 MonoInst *src_var, *src;
8613 int klass_reg = alloc_preg (cfg);
8614 int dreg = alloc_preg (cfg);
8616 CHECK_STACK (1);
8617 MONO_INST_NEW (cfg, ins, *ip);
8618 --sp;
8619 CHECK_OPSIZE (5);
8620 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8621 CHECK_TYPELOAD (klass);
8622 mono_class_init (klass);
8624 if (cfg->generic_sharing_context)
8625 context_used = mono_class_check_context_used (klass);
8627 // FIXME:
8628 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8629 if (!src_var)
8630 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8631 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8634 if (context_used) {
8635 MonoInst *klass_ins;
8637 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8638 klass, MONO_RGCTX_INFO_KLASS);
8640 // FIXME:
8641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8642 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8643 } else {
8644 mini_emit_class_check (cfg, klass_reg, klass);
8646 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8647 ins->type = STACK_MP;
8648 *sp++ = ins;
8649 ip += 5;
8650 break;
8652 case CEE_MKREFANY: {
8653 MonoInst *loc, *addr;
8655 CHECK_STACK (1);
8656 MONO_INST_NEW (cfg, ins, *ip);
8657 --sp;
8658 CHECK_OPSIZE (5);
8659 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8660 CHECK_TYPELOAD (klass);
8661 mono_class_init (klass);
8663 if (cfg->generic_sharing_context)
8664 context_used = mono_class_check_context_used (klass);
8666 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8667 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8669 if (context_used) {
8670 MonoInst *const_ins;
8671 int type_reg = alloc_preg (cfg);
8673 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8677 } else if (cfg->compile_aot) {
8678 int const_reg = alloc_preg (cfg);
8679 int type_reg = alloc_preg (cfg);
8681 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8685 } else {
8686 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8687 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8691 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8692 ins->type = STACK_VTYPE;
8693 ins->klass = mono_defaults.typed_reference_class;
8694 *sp++ = ins;
8695 ip += 5;
8696 break;
8698 case CEE_LDTOKEN: {
8699 gpointer handle;
8700 MonoClass *handle_class;
8702 CHECK_STACK_OVF (1);
8704 CHECK_OPSIZE (5);
8705 n = read32 (ip + 1);
8707 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8708 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8709 handle = mono_method_get_wrapper_data (method, n);
8710 handle_class = mono_method_get_wrapper_data (method, n + 1);
8711 if (handle_class == mono_defaults.typehandle_class)
8712 handle = &((MonoClass*)handle)->byval_arg;
8714 else {
8715 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8717 if (!handle)
8718 goto load_error;
8719 mono_class_init (handle_class);
8720 if (cfg->generic_sharing_context) {
8721 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8722 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8723 /* This case handles ldtoken
8724 of an open type, like for
8725 typeof(Gen<>). */
8726 context_used = 0;
8727 } else if (handle_class == mono_defaults.typehandle_class) {
8728 /* If we get a MONO_TYPE_CLASS
8729 then we need to provide the
8730 open type, not an
8731 instantiation of it. */
8732 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8733 context_used = 0;
8734 else
8735 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8736 } else if (handle_class == mono_defaults.fieldhandle_class)
8737 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8738 else if (handle_class == mono_defaults.methodhandle_class)
8739 context_used = mono_method_check_context_used (handle);
8740 else
8741 g_assert_not_reached ();
8744 if ((cfg->opt & MONO_OPT_SHARED) &&
8745 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8746 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8747 MonoInst *addr, *vtvar, *iargs [3];
8748 int method_context_used;
8750 if (cfg->generic_sharing_context)
8751 method_context_used = mono_method_check_context_used (method);
8752 else
8753 method_context_used = 0;
8755 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8757 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8758 EMIT_NEW_ICONST (cfg, iargs [1], n);
8759 if (method_context_used) {
8760 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8761 method, MONO_RGCTX_INFO_METHOD);
8762 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8763 } else {
8764 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8765 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8767 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8771 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8772 } else {
8773 gboolean use_slow_path = TRUE;
8774 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8775 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8776 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context))) {
8778 if ((cmethod->klass == mono_defaults.monotype_class->parent) && (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8779 MonoClass *tclass = mono_class_from_mono_type (handle);
8781 mono_class_init (tclass);
8782 if (context_used) {
8783 ins = emit_get_rgctx_klass (cfg, context_used,
8784 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8785 } else if (cfg->compile_aot) {
8786 if (method->wrapper_type) {
8787 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8788 /* Special case for static synchronized wrappers */
8789 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8790 } else {
8791 /* FIXME: n is not a normal token */
8792 cfg->disable_aot = TRUE;
8793 EMIT_NEW_PCONST (cfg, ins, NULL);
8795 } else {
8796 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8798 } else {
8799 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8801 ins->type = STACK_OBJ;
8802 ins->klass = cmethod->klass;
8803 ip += 5;
8804 use_slow_path = FALSE;
8805 } else if (cmethod->klass->image == mono_defaults.corlib &&
8806 !strcmp ("Mono", cmethod->klass->name_space) &&
8807 !strcmp ("Runtime", cmethod->klass->name) &&
8808 !strcmp ("NewObject", cmethod->name)) {
8810 /*FIXME relax those restrictions if it's worth the trouble*/
8811 if (!context_used && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED)) {
8812 MonoClass *klass = mono_class_from_mono_type (handle);
8813 gpointer vtable = mono_class_vtable (cfg->domain, klass);
8814 MonoInst *iargs [1];
8816 EMIT_NEW_PCONST (cfg, iargs [0], vtable);
8817 ins = mono_emit_jit_icall (cfg, mono_object_new_specific, iargs);
8819 ip += 5;
8820 use_slow_path = FALSE;
8825 if (use_slow_path) {
8826 MonoInst *addr, *vtvar;
8828 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8830 if (context_used) {
8831 if (handle_class == mono_defaults.typehandle_class) {
8832 ins = emit_get_rgctx_klass (cfg, context_used,
8833 mono_class_from_mono_type (handle),
8834 MONO_RGCTX_INFO_TYPE);
8835 } else if (handle_class == mono_defaults.methodhandle_class) {
8836 ins = emit_get_rgctx_method (cfg, context_used,
8837 handle, MONO_RGCTX_INFO_METHOD);
8838 } else if (handle_class == mono_defaults.fieldhandle_class) {
8839 ins = emit_get_rgctx_field (cfg, context_used,
8840 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8841 } else {
8842 g_assert_not_reached ();
8844 } else if (cfg->compile_aot) {
8845 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8846 } else {
8847 EMIT_NEW_PCONST (cfg, ins, handle);
8849 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8850 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8851 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8855 *sp++ = ins;
8856 ip += 5;
8857 break;
8859 case CEE_THROW:
8860 CHECK_STACK (1);
8861 MONO_INST_NEW (cfg, ins, OP_THROW);
8862 --sp;
8863 ins->sreg1 = sp [0]->dreg;
8864 ip++;
8865 bblock->out_of_line = TRUE;
8866 MONO_ADD_INS (bblock, ins);
8867 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8868 MONO_ADD_INS (bblock, ins);
8869 sp = stack_start;
8871 link_bblock (cfg, bblock, end_bblock);
8872 start_new_bblock = 1;
8873 break;
8874 case CEE_ENDFINALLY:
8875 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8876 MONO_ADD_INS (bblock, ins);
8877 ip++;
8878 start_new_bblock = 1;
8881 * Control will leave the method so empty the stack, otherwise
8882 * the next basic block will start with a nonempty stack.
8884 while (sp != stack_start) {
8885 sp--;
8887 break;
8888 case CEE_LEAVE:
8889 case CEE_LEAVE_S: {
8890 GList *handlers;
8892 if (*ip == CEE_LEAVE) {
8893 CHECK_OPSIZE (5);
8894 target = ip + 5 + (gint32)read32(ip + 1);
8895 } else {
8896 CHECK_OPSIZE (2);
8897 target = ip + 2 + (signed char)(ip [1]);
8900 /* empty the stack */
8901 while (sp != stack_start) {
8902 sp--;
8906 * If this leave statement is in a catch block, check for a
8907 * pending exception, and rethrow it if necessary.
8908 * We avoid doing this in runtime invoke wrappers, since those are called
8909 * by native code which excepts the wrapper to catch all exceptions.
8911 for (i = 0; i < header->num_clauses; ++i) {
8912 MonoExceptionClause *clause = &header->clauses [i];
8915 * Use <= in the final comparison to handle clauses with multiple
8916 * leave statements, like in bug #78024.
8917 * The ordering of the exception clauses guarantees that we find the
8918 * innermost clause.
8920 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8921 MonoInst *exc_ins;
8922 MonoBasicBlock *dont_throw;
8925 MonoInst *load;
8927 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8930 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8932 NEW_BBLOCK (cfg, dont_throw);
8935 * Currently, we allways rethrow the abort exception, despite the
8936 * fact that this is not correct. See thread6.cs for an example.
8937 * But propagating the abort exception is more important than
8938 * getting the sematics right.
8940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8942 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8944 MONO_START_BB (cfg, dont_throw);
8945 bblock = cfg->cbb;
8949 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8950 GList *tmp;
8951 for (tmp = handlers; tmp; tmp = tmp->next) {
8952 tblock = tmp->data;
8953 link_bblock (cfg, bblock, tblock);
8954 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8955 ins->inst_target_bb = tblock;
8956 MONO_ADD_INS (bblock, ins);
8957 bblock->has_call_handler = 1;
8958 if (COMPILE_LLVM (cfg)) {
8959 MonoBasicBlock *target_bb;
8962 * Link the finally bblock with the target, since it will
8963 * conceptually branch there.
8964 * FIXME: Have to link the bblock containing the endfinally.
8966 GET_BBLOCK (cfg, target_bb, target);
8967 link_bblock (cfg, tblock, target_bb);
8970 g_list_free (handlers);
8973 MONO_INST_NEW (cfg, ins, OP_BR);
8974 MONO_ADD_INS (bblock, ins);
8975 GET_BBLOCK (cfg, tblock, target);
8976 link_bblock (cfg, bblock, tblock);
8977 ins->inst_target_bb = tblock;
8978 start_new_bblock = 1;
8980 if (*ip == CEE_LEAVE)
8981 ip += 5;
8982 else
8983 ip += 2;
8985 break;
8989 * Mono specific opcodes
8991 case MONO_CUSTOM_PREFIX: {
8993 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8995 CHECK_OPSIZE (2);
8996 switch (ip [1]) {
8997 case CEE_MONO_ICALL: {
8998 gpointer func;
8999 MonoJitICallInfo *info;
9001 token = read32 (ip + 2);
9002 func = mono_method_get_wrapper_data (method, token);
9003 info = mono_find_jit_icall_by_addr (func);
9004 g_assert (info);
9006 CHECK_STACK (info->sig->param_count);
9007 sp -= info->sig->param_count;
9009 ins = mono_emit_jit_icall (cfg, info->func, sp);
9010 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9011 *sp++ = ins;
9013 ip += 6;
9014 inline_costs += 10 * num_calls++;
9016 break;
9018 case CEE_MONO_LDPTR: {
9019 gpointer ptr;
9021 CHECK_STACK_OVF (1);
9022 CHECK_OPSIZE (6);
9023 token = read32 (ip + 2);
9025 ptr = mono_method_get_wrapper_data (method, token);
9026 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9027 MonoJitICallInfo *callinfo;
9028 const char *icall_name;
9030 icall_name = method->name + strlen ("__icall_wrapper_");
9031 g_assert (icall_name);
9032 callinfo = mono_find_jit_icall_by_name (icall_name);
9033 g_assert (callinfo);
9035 if (ptr == callinfo->func) {
9036 /* Will be transformed into an AOTCONST later */
9037 EMIT_NEW_PCONST (cfg, ins, ptr);
9038 *sp++ = ins;
9039 ip += 6;
9040 break;
9043 /* FIXME: Generalize this */
9044 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9045 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9046 *sp++ = ins;
9047 ip += 6;
9048 break;
9050 EMIT_NEW_PCONST (cfg, ins, ptr);
9051 *sp++ = ins;
9052 ip += 6;
9053 inline_costs += 10 * num_calls++;
9054 /* Can't embed random pointers into AOT code */
9055 cfg->disable_aot = 1;
9056 break;
9058 case CEE_MONO_ICALL_ADDR: {
9059 MonoMethod *cmethod;
9060 gpointer ptr;
9062 CHECK_STACK_OVF (1);
9063 CHECK_OPSIZE (6);
9064 token = read32 (ip + 2);
9066 cmethod = mono_method_get_wrapper_data (method, token);
9068 if (cfg->compile_aot) {
9069 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9070 } else {
9071 ptr = mono_lookup_internal_call (cmethod);
9072 g_assert (ptr);
9073 EMIT_NEW_PCONST (cfg, ins, ptr);
9075 *sp++ = ins;
9076 ip += 6;
9077 break;
9079 case CEE_MONO_VTADDR: {
9080 MonoInst *src_var, *src;
9082 CHECK_STACK (1);
9083 --sp;
9085 // FIXME:
9086 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9087 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9088 *sp++ = src;
9089 ip += 2;
9090 break;
9092 case CEE_MONO_NEWOBJ: {
9093 MonoInst *iargs [2];
9095 CHECK_STACK_OVF (1);
9096 CHECK_OPSIZE (6);
9097 token = read32 (ip + 2);
9098 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9099 mono_class_init (klass);
9100 NEW_DOMAINCONST (cfg, iargs [0]);
9101 MONO_ADD_INS (cfg->cbb, iargs [0]);
9102 NEW_CLASSCONST (cfg, iargs [1], klass);
9103 MONO_ADD_INS (cfg->cbb, iargs [1]);
9104 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9105 ip += 6;
9106 inline_costs += 10 * num_calls++;
9107 break;
9109 case CEE_MONO_OBJADDR:
9110 CHECK_STACK (1);
9111 --sp;
9112 MONO_INST_NEW (cfg, ins, OP_MOVE);
9113 ins->dreg = alloc_preg (cfg);
9114 ins->sreg1 = sp [0]->dreg;
9115 ins->type = STACK_MP;
9116 MONO_ADD_INS (cfg->cbb, ins);
9117 *sp++ = ins;
9118 ip += 2;
9119 break;
9120 case CEE_MONO_LDNATIVEOBJ:
9122 * Similar to LDOBJ, but instead load the unmanaged
9123 * representation of the vtype to the stack.
9125 CHECK_STACK (1);
9126 CHECK_OPSIZE (6);
9127 --sp;
9128 token = read32 (ip + 2);
9129 klass = mono_method_get_wrapper_data (method, token);
9130 g_assert (klass->valuetype);
9131 mono_class_init (klass);
9134 MonoInst *src, *dest, *temp;
9136 src = sp [0];
9137 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9138 temp->backend.is_pinvoke = 1;
9139 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9140 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9142 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9143 dest->type = STACK_VTYPE;
9144 dest->klass = klass;
9146 *sp ++ = dest;
9147 ip += 6;
9149 break;
9150 case CEE_MONO_RETOBJ: {
9152 * Same as RET, but return the native representation of a vtype
9153 * to the caller.
9155 g_assert (cfg->ret);
9156 g_assert (mono_method_signature (method)->pinvoke);
9157 CHECK_STACK (1);
9158 --sp;
9160 CHECK_OPSIZE (6);
9161 token = read32 (ip + 2);
9162 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9164 if (!cfg->vret_addr) {
9165 g_assert (cfg->ret_var_is_local);
9167 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9168 } else {
9169 EMIT_NEW_RETLOADA (cfg, ins);
9171 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9173 if (sp != stack_start)
9174 UNVERIFIED;
9176 MONO_INST_NEW (cfg, ins, OP_BR);
9177 ins->inst_target_bb = end_bblock;
9178 MONO_ADD_INS (bblock, ins);
9179 link_bblock (cfg, bblock, end_bblock);
9180 start_new_bblock = 1;
9181 ip += 6;
9182 break;
9184 case CEE_MONO_CISINST:
9185 case CEE_MONO_CCASTCLASS: {
9186 int token;
9187 CHECK_STACK (1);
9188 --sp;
9189 CHECK_OPSIZE (6);
9190 token = read32 (ip + 2);
9191 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9192 if (ip [1] == CEE_MONO_CISINST)
9193 ins = handle_cisinst (cfg, klass, sp [0]);
9194 else
9195 ins = handle_ccastclass (cfg, klass, sp [0]);
9196 bblock = cfg->cbb;
9197 *sp++ = ins;
9198 ip += 6;
9199 break;
9201 case CEE_MONO_SAVE_LMF:
9202 case CEE_MONO_RESTORE_LMF:
9203 #ifdef MONO_ARCH_HAVE_LMF_OPS
9204 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9205 MONO_ADD_INS (bblock, ins);
9206 cfg->need_lmf_area = TRUE;
9207 #endif
9208 ip += 2;
9209 break;
9210 case CEE_MONO_CLASSCONST:
9211 CHECK_STACK_OVF (1);
9212 CHECK_OPSIZE (6);
9213 token = read32 (ip + 2);
9214 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9215 *sp++ = ins;
9216 ip += 6;
9217 inline_costs += 10 * num_calls++;
9218 break;
9219 case CEE_MONO_NOT_TAKEN:
9220 bblock->out_of_line = TRUE;
9221 ip += 2;
9222 break;
9223 case CEE_MONO_TLS:
9224 CHECK_STACK_OVF (1);
9225 CHECK_OPSIZE (6);
9226 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9227 ins->dreg = alloc_preg (cfg);
9228 ins->inst_offset = (gint32)read32 (ip + 2);
9229 ins->type = STACK_PTR;
9230 MONO_ADD_INS (bblock, ins);
9231 *sp++ = ins;
9232 ip += 6;
9233 break;
9234 case CEE_MONO_DYN_CALL: {
9235 MonoCallInst *call;
9237 /* It would be easier to call a trampoline, but that would put an
9238 * extra frame on the stack, confusing exception handling. So
9239 * implement it inline using an opcode for now.
9242 if (!cfg->dyn_call_var) {
9243 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9244 /* prevent it from being register allocated */
9245 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9248 /* Has to use a call inst since it local regalloc expects it */
9249 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9250 ins = (MonoInst*)call;
9251 sp -= 2;
9252 ins->sreg1 = sp [0]->dreg;
9253 ins->sreg2 = sp [1]->dreg;
9254 MONO_ADD_INS (bblock, ins);
9256 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9257 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9258 #endif
9260 ip += 2;
9261 inline_costs += 10 * num_calls++;
9263 break;
9265 default:
9266 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9267 break;
9269 break;
9272 case CEE_PREFIX1: {
9273 CHECK_OPSIZE (2);
9274 switch (ip [1]) {
9275 case CEE_ARGLIST: {
9276 /* somewhat similar to LDTOKEN */
9277 MonoInst *addr, *vtvar;
9278 CHECK_STACK_OVF (1);
9279 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9281 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9282 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9284 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9285 ins->type = STACK_VTYPE;
9286 ins->klass = mono_defaults.argumenthandle_class;
9287 *sp++ = ins;
9288 ip += 2;
9289 break;
9291 case CEE_CEQ:
9292 case CEE_CGT:
9293 case CEE_CGT_UN:
9294 case CEE_CLT:
9295 case CEE_CLT_UN: {
9296 MonoInst *cmp;
9297 CHECK_STACK (2);
9299 * The following transforms:
9300 * CEE_CEQ into OP_CEQ
9301 * CEE_CGT into OP_CGT
9302 * CEE_CGT_UN into OP_CGT_UN
9303 * CEE_CLT into OP_CLT
9304 * CEE_CLT_UN into OP_CLT_UN
9306 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9308 MONO_INST_NEW (cfg, ins, cmp->opcode);
9309 sp -= 2;
9310 cmp->sreg1 = sp [0]->dreg;
9311 cmp->sreg2 = sp [1]->dreg;
9312 type_from_op (cmp, sp [0], sp [1]);
9313 CHECK_TYPE (cmp);
9314 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9315 cmp->opcode = OP_LCOMPARE;
9316 else if (sp [0]->type == STACK_R8)
9317 cmp->opcode = OP_FCOMPARE;
9318 else
9319 cmp->opcode = OP_ICOMPARE;
9320 MONO_ADD_INS (bblock, cmp);
9321 ins->type = STACK_I4;
9322 ins->dreg = alloc_dreg (cfg, ins->type);
9323 type_from_op (ins, sp [0], sp [1]);
9325 if (cmp->opcode == OP_FCOMPARE) {
9327 * The backends expect the fceq opcodes to do the
9328 * comparison too.
9330 cmp->opcode = OP_NOP;
9331 ins->sreg1 = cmp->sreg1;
9332 ins->sreg2 = cmp->sreg2;
9334 MONO_ADD_INS (bblock, ins);
9335 *sp++ = ins;
9336 ip += 2;
9337 break;
9339 case CEE_LDFTN: {
9340 MonoInst *argconst;
9341 MonoMethod *cil_method;
9342 gboolean needs_static_rgctx_invoke;
9344 CHECK_STACK_OVF (1);
9345 CHECK_OPSIZE (6);
9346 n = read32 (ip + 2);
9347 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9348 if (!cmethod)
9349 goto load_error;
9350 mono_class_init (cmethod->klass);
9352 mono_save_token_info (cfg, image, n, cmethod);
9354 if (cfg->generic_sharing_context)
9355 context_used = mono_method_check_context_used (cmethod);
9357 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9359 cil_method = cmethod;
9360 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9361 METHOD_ACCESS_FAILURE;
9363 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9364 if (check_linkdemand (cfg, method, cmethod))
9365 INLINE_FAILURE;
9366 CHECK_CFG_EXCEPTION;
9367 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9368 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9372 * Optimize the common case of ldftn+delegate creation
9374 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9375 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9376 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9377 MonoMethod *invoke;
9378 int invoke_context_used = 0;
9380 invoke = mono_get_delegate_invoke (ctor_method->klass);
9381 if (!invoke || !mono_method_signature (invoke))
9382 goto load_error;
9384 if (cfg->generic_sharing_context)
9385 invoke_context_used = mono_method_check_context_used (invoke);
9387 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9388 /* FIXME: SGEN support */
9389 if (invoke_context_used == 0) {
9390 MonoInst *target_ins;
9392 ip += 6;
9393 if (cfg->verbose_level > 3)
9394 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9395 target_ins = sp [-1];
9396 sp --;
9397 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9398 CHECK_CFG_EXCEPTION;
9399 ip += 5;
9400 sp ++;
9401 break;
9403 #endif
9407 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9408 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9409 *sp++ = ins;
9411 ip += 6;
9412 inline_costs += 10 * num_calls++;
9413 break;
9415 case CEE_LDVIRTFTN: {
9416 MonoInst *args [2];
9418 CHECK_STACK (1);
9419 CHECK_OPSIZE (6);
9420 n = read32 (ip + 2);
9421 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9422 if (!cmethod)
9423 goto load_error;
9424 mono_class_init (cmethod->klass);
9426 if (cfg->generic_sharing_context)
9427 context_used = mono_method_check_context_used (cmethod);
9429 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9430 if (check_linkdemand (cfg, method, cmethod))
9431 INLINE_FAILURE;
9432 CHECK_CFG_EXCEPTION;
9433 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9434 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9437 --sp;
9438 args [0] = *sp;
9440 args [1] = emit_get_rgctx_method (cfg, context_used,
9441 cmethod, MONO_RGCTX_INFO_METHOD);
9443 if (context_used)
9444 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9445 else
9446 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9448 ip += 6;
9449 inline_costs += 10 * num_calls++;
9450 break;
9452 case CEE_LDARG:
9453 CHECK_STACK_OVF (1);
9454 CHECK_OPSIZE (4);
9455 n = read16 (ip + 2);
9456 CHECK_ARG (n);
9457 EMIT_NEW_ARGLOAD (cfg, ins, n);
9458 *sp++ = ins;
9459 ip += 4;
9460 break;
9461 case CEE_LDARGA:
9462 CHECK_STACK_OVF (1);
9463 CHECK_OPSIZE (4);
9464 n = read16 (ip + 2);
9465 CHECK_ARG (n);
9466 NEW_ARGLOADA (cfg, ins, n);
9467 MONO_ADD_INS (cfg->cbb, ins);
9468 *sp++ = ins;
9469 ip += 4;
9470 break;
9471 case CEE_STARG:
9472 CHECK_STACK (1);
9473 --sp;
9474 CHECK_OPSIZE (4);
9475 n = read16 (ip + 2);
9476 CHECK_ARG (n);
9477 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9478 UNVERIFIED;
9479 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9480 ip += 4;
9481 break;
9482 case CEE_LDLOC:
9483 CHECK_STACK_OVF (1);
9484 CHECK_OPSIZE (4);
9485 n = read16 (ip + 2);
9486 CHECK_LOCAL (n);
9487 EMIT_NEW_LOCLOAD (cfg, ins, n);
9488 *sp++ = ins;
9489 ip += 4;
9490 break;
9491 case CEE_LDLOCA: {
9492 unsigned char *tmp_ip;
9493 CHECK_STACK_OVF (1);
9494 CHECK_OPSIZE (4);
9495 n = read16 (ip + 2);
9496 CHECK_LOCAL (n);
9498 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9499 ip = tmp_ip;
9500 inline_costs += 1;
9501 break;
9504 EMIT_NEW_LOCLOADA (cfg, ins, n);
9505 *sp++ = ins;
9506 ip += 4;
9507 break;
9509 case CEE_STLOC:
9510 CHECK_STACK (1);
9511 --sp;
9512 CHECK_OPSIZE (4);
9513 n = read16 (ip + 2);
9514 CHECK_LOCAL (n);
9515 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9516 UNVERIFIED;
9517 emit_stloc_ir (cfg, sp, header, n);
9518 ip += 4;
9519 inline_costs += 1;
9520 break;
9521 case CEE_LOCALLOC:
9522 CHECK_STACK (1);
9523 --sp;
9524 if (sp != stack_start)
9525 UNVERIFIED;
9526 if (cfg->method != method)
9528 * Inlining this into a loop in a parent could lead to
9529 * stack overflows which is different behavior than the
9530 * non-inlined case, thus disable inlining in this case.
9532 goto inline_failure;
9534 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9535 ins->dreg = alloc_preg (cfg);
9536 ins->sreg1 = sp [0]->dreg;
9537 ins->type = STACK_PTR;
9538 MONO_ADD_INS (cfg->cbb, ins);
9540 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9541 if (init_locals)
9542 ins->flags |= MONO_INST_INIT;
9544 *sp++ = ins;
9545 ip += 2;
9546 break;
9547 case CEE_ENDFILTER: {
9548 MonoExceptionClause *clause, *nearest;
9549 int cc, nearest_num;
9551 CHECK_STACK (1);
9552 --sp;
9553 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9554 UNVERIFIED;
9555 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9556 ins->sreg1 = (*sp)->dreg;
9557 MONO_ADD_INS (bblock, ins);
9558 start_new_bblock = 1;
9559 ip += 2;
9561 nearest = NULL;
9562 nearest_num = 0;
9563 for (cc = 0; cc < header->num_clauses; ++cc) {
9564 clause = &header->clauses [cc];
9565 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9566 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9567 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9568 nearest = clause;
9569 nearest_num = cc;
9572 g_assert (nearest);
9573 if ((ip - header->code) != nearest->handler_offset)
9574 UNVERIFIED;
9576 break;
9578 case CEE_UNALIGNED_:
9579 ins_flag |= MONO_INST_UNALIGNED;
9580 /* FIXME: record alignment? we can assume 1 for now */
9581 CHECK_OPSIZE (3);
9582 ip += 3;
9583 break;
9584 case CEE_VOLATILE_:
9585 ins_flag |= MONO_INST_VOLATILE;
9586 ip += 2;
9587 break;
9588 case CEE_TAIL_:
9589 ins_flag |= MONO_INST_TAILCALL;
9590 cfg->flags |= MONO_CFG_HAS_TAIL;
9591 /* Can't inline tail calls at this time */
9592 inline_costs += 100000;
9593 ip += 2;
9594 break;
9595 case CEE_INITOBJ:
9596 CHECK_STACK (1);
9597 --sp;
9598 CHECK_OPSIZE (6);
9599 token = read32 (ip + 2);
9600 klass = mini_get_class (method, token, generic_context);
9601 CHECK_TYPELOAD (klass);
9602 if (generic_class_is_reference_type (cfg, klass))
9603 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9604 else
9605 mini_emit_initobj (cfg, *sp, NULL, klass);
9606 ip += 6;
9607 inline_costs += 1;
9608 break;
9609 case CEE_CONSTRAINED_:
9610 CHECK_OPSIZE (6);
9611 token = read32 (ip + 2);
9612 if (method->wrapper_type != MONO_WRAPPER_NONE)
9613 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9614 else
9615 constrained_call = mono_class_get_full (image, token, generic_context);
9616 CHECK_TYPELOAD (constrained_call);
9617 ip += 6;
9618 break;
9619 case CEE_CPBLK:
9620 case CEE_INITBLK: {
9621 MonoInst *iargs [3];
9622 CHECK_STACK (3);
9623 sp -= 3;
9625 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9626 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9627 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9628 /* emit_memset only works when val == 0 */
9629 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9630 } else {
9631 iargs [0] = sp [0];
9632 iargs [1] = sp [1];
9633 iargs [2] = sp [2];
9634 if (ip [1] == CEE_CPBLK) {
9635 MonoMethod *memcpy_method = get_memcpy_method ();
9636 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9637 } else {
9638 MonoMethod *memset_method = get_memset_method ();
9639 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9642 ip += 2;
9643 inline_costs += 1;
9644 break;
9646 case CEE_NO_:
9647 CHECK_OPSIZE (3);
9648 if (ip [2] & 0x1)
9649 ins_flag |= MONO_INST_NOTYPECHECK;
9650 if (ip [2] & 0x2)
9651 ins_flag |= MONO_INST_NORANGECHECK;
9652 /* we ignore the no-nullcheck for now since we
9653 * really do it explicitly only when doing callvirt->call
9655 ip += 3;
9656 break;
9657 case CEE_RETHROW: {
9658 MonoInst *load;
9659 int handler_offset = -1;
9661 for (i = 0; i < header->num_clauses; ++i) {
9662 MonoExceptionClause *clause = &header->clauses [i];
9663 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9664 handler_offset = clause->handler_offset;
9665 break;
9669 bblock->flags |= BB_EXCEPTION_UNSAFE;
9671 g_assert (handler_offset != -1);
9673 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9674 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9675 ins->sreg1 = load->dreg;
9676 MONO_ADD_INS (bblock, ins);
9677 sp = stack_start;
9678 link_bblock (cfg, bblock, end_bblock);
9679 start_new_bblock = 1;
9680 ip += 2;
9681 break;
9683 case CEE_SIZEOF: {
9684 guint32 align;
9685 int ialign;
9687 CHECK_STACK_OVF (1);
9688 CHECK_OPSIZE (6);
9689 token = read32 (ip + 2);
9690 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9691 MonoType *type = mono_type_create_from_typespec (image, token);
9692 token = mono_type_size (type, &ialign);
9693 } else {
9694 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9695 CHECK_TYPELOAD (klass);
9696 mono_class_init (klass);
9697 token = mono_class_value_size (klass, &align);
9699 EMIT_NEW_ICONST (cfg, ins, token);
9700 *sp++= ins;
9701 ip += 6;
9702 break;
9704 case CEE_REFANYTYPE: {
9705 MonoInst *src_var, *src;
9707 CHECK_STACK (1);
9708 --sp;
9710 // FIXME:
9711 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9712 if (!src_var)
9713 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9714 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9715 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9716 *sp++ = ins;
9717 ip += 2;
9718 break;
9720 case CEE_READONLY_:
9721 readonly = TRUE;
9722 ip += 2;
9723 break;
9725 case CEE_UNUSED56:
9726 case CEE_UNUSED57:
9727 case CEE_UNUSED70:
9728 case CEE_UNUSED:
9729 case CEE_UNUSED99:
9730 UNVERIFIED;
9732 default:
9733 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9734 UNVERIFIED;
9736 break;
9738 case CEE_UNUSED58:
9739 case CEE_UNUSED1:
9740 UNVERIFIED;
9742 default:
9743 g_warning ("opcode 0x%02x not handled", *ip);
9744 UNVERIFIED;
9747 if (start_new_bblock != 1)
9748 UNVERIFIED;
9750 bblock->cil_length = ip - bblock->cil_code;
9751 bblock->next_bb = end_bblock;
9753 if (cfg->method == method && cfg->domainvar) {
9754 MonoInst *store;
9755 MonoInst *get_domain;
9757 cfg->cbb = init_localsbb;
9759 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9760 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9762 else {
9763 get_domain->dreg = alloc_preg (cfg);
9764 MONO_ADD_INS (cfg->cbb, get_domain);
9766 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9767 MONO_ADD_INS (cfg->cbb, store);
9770 #ifdef TARGET_POWERPC
9771 if (cfg->compile_aot)
9772 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9773 mono_get_got_var (cfg);
9774 #endif
9776 if (cfg->method == method && cfg->got_var)
9777 mono_emit_load_got_addr (cfg);
9779 if (init_locals) {
9780 MonoInst *store;
9782 cfg->cbb = init_localsbb;
9783 cfg->ip = NULL;
9784 for (i = 0; i < header->num_locals; ++i) {
9785 MonoType *ptype = header->locals [i];
9786 int t = ptype->type;
9787 dreg = cfg->locals [i]->dreg;
9789 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9790 t = mono_class_enum_basetype (ptype->data.klass)->type;
9791 if (ptype->byref) {
9792 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9793 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9794 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9795 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9796 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9797 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9798 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9799 ins->type = STACK_R8;
9800 ins->inst_p0 = (void*)&r8_0;
9801 ins->dreg = alloc_dreg (cfg, STACK_R8);
9802 MONO_ADD_INS (init_localsbb, ins);
9803 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9804 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9805 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9806 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9807 } else {
9808 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9813 if (cfg->init_ref_vars && cfg->method == method) {
9814 /* Emit initialization for ref vars */
9815 // FIXME: Avoid duplication initialization for IL locals.
9816 for (i = 0; i < cfg->num_varinfo; ++i) {
9817 MonoInst *ins = cfg->varinfo [i];
9819 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9820 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9824 /* Add a sequence point for method entry/exit events */
9825 if (seq_points) {
9826 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9827 MONO_ADD_INS (init_localsbb, ins);
9828 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9829 MONO_ADD_INS (cfg->bb_exit, ins);
9832 cfg->ip = NULL;
9834 if (cfg->method == method) {
9835 MonoBasicBlock *bb;
9836 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9837 bb->region = mono_find_block_region (cfg, bb->real_offset);
9838 if (cfg->spvars)
9839 mono_create_spvar_for_region (cfg, bb->region);
9840 if (cfg->verbose_level > 2)
9841 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9845 g_slist_free (class_inits);
9846 dont_inline = g_list_remove (dont_inline, method);
9848 if (inline_costs < 0) {
9849 char *mname;
9851 /* Method is too large */
9852 mname = mono_method_full_name (method, TRUE);
9853 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9854 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9855 g_free (mname);
9856 return -1;
9859 if ((cfg->verbose_level > 2) && (cfg->method == method))
9860 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9862 return inline_costs;
9864 exception_exit:
9865 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9866 g_slist_free (class_inits);
9867 mono_basic_block_free (bb);
9868 dont_inline = g_list_remove (dont_inline, method);
9869 return -1;
9871 inline_failure:
9872 g_slist_free (class_inits);
9873 mono_basic_block_free (bb);
9874 dont_inline = g_list_remove (dont_inline, method);
9875 return -1;
9877 load_error:
9878 g_slist_free (class_inits);
9879 mono_basic_block_free (bb);
9880 dont_inline = g_list_remove (dont_inline, method);
9881 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9882 return -1;
9884 unverified:
9885 g_slist_free (class_inits);
9886 mono_basic_block_free (bb);
9887 dont_inline = g_list_remove (dont_inline, method);
9888 set_exception_type_from_invalid_il (cfg, method, ip);
9889 return -1;
9892 static int
9893 store_membase_reg_to_store_membase_imm (int opcode)
9895 switch (opcode) {
9896 case OP_STORE_MEMBASE_REG:
9897 return OP_STORE_MEMBASE_IMM;
9898 case OP_STOREI1_MEMBASE_REG:
9899 return OP_STOREI1_MEMBASE_IMM;
9900 case OP_STOREI2_MEMBASE_REG:
9901 return OP_STOREI2_MEMBASE_IMM;
9902 case OP_STOREI4_MEMBASE_REG:
9903 return OP_STOREI4_MEMBASE_IMM;
9904 case OP_STOREI8_MEMBASE_REG:
9905 return OP_STOREI8_MEMBASE_IMM;
9906 default:
9907 g_assert_not_reached ();
9910 return -1;
9913 #endif /* DISABLE_JIT */
9916 mono_op_to_op_imm (int opcode)
9918 switch (opcode) {
9919 case OP_IADD:
9920 return OP_IADD_IMM;
9921 case OP_ISUB:
9922 return OP_ISUB_IMM;
9923 case OP_IDIV:
9924 return OP_IDIV_IMM;
9925 case OP_IDIV_UN:
9926 return OP_IDIV_UN_IMM;
9927 case OP_IREM:
9928 return OP_IREM_IMM;
9929 case OP_IREM_UN:
9930 return OP_IREM_UN_IMM;
9931 case OP_IMUL:
9932 return OP_IMUL_IMM;
9933 case OP_IAND:
9934 return OP_IAND_IMM;
9935 case OP_IOR:
9936 return OP_IOR_IMM;
9937 case OP_IXOR:
9938 return OP_IXOR_IMM;
9939 case OP_ISHL:
9940 return OP_ISHL_IMM;
9941 case OP_ISHR:
9942 return OP_ISHR_IMM;
9943 case OP_ISHR_UN:
9944 return OP_ISHR_UN_IMM;
9946 case OP_LADD:
9947 return OP_LADD_IMM;
9948 case OP_LSUB:
9949 return OP_LSUB_IMM;
9950 case OP_LAND:
9951 return OP_LAND_IMM;
9952 case OP_LOR:
9953 return OP_LOR_IMM;
9954 case OP_LXOR:
9955 return OP_LXOR_IMM;
9956 case OP_LSHL:
9957 return OP_LSHL_IMM;
9958 case OP_LSHR:
9959 return OP_LSHR_IMM;
9960 case OP_LSHR_UN:
9961 return OP_LSHR_UN_IMM;
9963 case OP_COMPARE:
9964 return OP_COMPARE_IMM;
9965 case OP_ICOMPARE:
9966 return OP_ICOMPARE_IMM;
9967 case OP_LCOMPARE:
9968 return OP_LCOMPARE_IMM;
9970 case OP_STORE_MEMBASE_REG:
9971 return OP_STORE_MEMBASE_IMM;
9972 case OP_STOREI1_MEMBASE_REG:
9973 return OP_STOREI1_MEMBASE_IMM;
9974 case OP_STOREI2_MEMBASE_REG:
9975 return OP_STOREI2_MEMBASE_IMM;
9976 case OP_STOREI4_MEMBASE_REG:
9977 return OP_STOREI4_MEMBASE_IMM;
9979 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9980 case OP_X86_PUSH:
9981 return OP_X86_PUSH_IMM;
9982 case OP_X86_COMPARE_MEMBASE_REG:
9983 return OP_X86_COMPARE_MEMBASE_IMM;
9984 #endif
9985 #if defined(TARGET_AMD64)
9986 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9987 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9988 #endif
9989 case OP_VOIDCALL_REG:
9990 return OP_VOIDCALL;
9991 case OP_CALL_REG:
9992 return OP_CALL;
9993 case OP_LCALL_REG:
9994 return OP_LCALL;
9995 case OP_FCALL_REG:
9996 return OP_FCALL;
9997 case OP_LOCALLOC:
9998 return OP_LOCALLOC_IMM;
10001 return -1;
10004 static int
10005 ldind_to_load_membase (int opcode)
10007 switch (opcode) {
10008 case CEE_LDIND_I1:
10009 return OP_LOADI1_MEMBASE;
10010 case CEE_LDIND_U1:
10011 return OP_LOADU1_MEMBASE;
10012 case CEE_LDIND_I2:
10013 return OP_LOADI2_MEMBASE;
10014 case CEE_LDIND_U2:
10015 return OP_LOADU2_MEMBASE;
10016 case CEE_LDIND_I4:
10017 return OP_LOADI4_MEMBASE;
10018 case CEE_LDIND_U4:
10019 return OP_LOADU4_MEMBASE;
10020 case CEE_LDIND_I:
10021 return OP_LOAD_MEMBASE;
10022 case CEE_LDIND_REF:
10023 return OP_LOAD_MEMBASE;
10024 case CEE_LDIND_I8:
10025 return OP_LOADI8_MEMBASE;
10026 case CEE_LDIND_R4:
10027 return OP_LOADR4_MEMBASE;
10028 case CEE_LDIND_R8:
10029 return OP_LOADR8_MEMBASE;
10030 default:
10031 g_assert_not_reached ();
10034 return -1;
10037 static int
10038 stind_to_store_membase (int opcode)
10040 switch (opcode) {
10041 case CEE_STIND_I1:
10042 return OP_STOREI1_MEMBASE_REG;
10043 case CEE_STIND_I2:
10044 return OP_STOREI2_MEMBASE_REG;
10045 case CEE_STIND_I4:
10046 return OP_STOREI4_MEMBASE_REG;
10047 case CEE_STIND_I:
10048 case CEE_STIND_REF:
10049 return OP_STORE_MEMBASE_REG;
10050 case CEE_STIND_I8:
10051 return OP_STOREI8_MEMBASE_REG;
10052 case CEE_STIND_R4:
10053 return OP_STORER4_MEMBASE_REG;
10054 case CEE_STIND_R8:
10055 return OP_STORER8_MEMBASE_REG;
10056 default:
10057 g_assert_not_reached ();
10060 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the LOAD*_MEM variant which loads from an
 * absolute address, on the architectures which support such loads. Returns
 * -1 when no such variant exists.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	static const struct { int membase_op, mem_op; } load_map [] = {
		{ OP_LOAD_MEMBASE, OP_LOAD_MEM },
		{ OP_LOADU1_MEMBASE, OP_LOADU1_MEM },
		{ OP_LOADU2_MEMBASE, OP_LOADU2_MEM },
		{ OP_LOADI4_MEMBASE, OP_LOADI4_MEM },
		{ OP_LOADU4_MEMBASE, OP_LOADU4_MEM },
#if SIZEOF_REGISTER == 8
		{ OP_LOADI8_MEMBASE, OP_LOADI8_MEM }
#endif
	};
	int i;

	for (i = 0; i < (int) (sizeof (load_map) / sizeof (load_map [0])); ++i) {
		if (load_map [i].membase_op == opcode)
			return load_map [i].mem_op;
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which performs OPCODE directly on memory, folding a
 * following store of the result (STORE_OPCODE) into a single
 * read-modify-write instruction. Returns -1 when no such folding is
 * possible for this (store, op) pair.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	{
		static const struct { int op, membase_op; } x86_map [] = {
			{ OP_IADD, OP_X86_ADD_MEMBASE_REG },
			{ OP_ISUB, OP_X86_SUB_MEMBASE_REG },
			{ OP_IAND, OP_X86_AND_MEMBASE_REG },
			{ OP_IOR, OP_X86_OR_MEMBASE_REG },
			{ OP_IXOR, OP_X86_XOR_MEMBASE_REG },
			{ OP_ADD_IMM, OP_X86_ADD_MEMBASE_IMM },
			{ OP_IADD_IMM, OP_X86_ADD_MEMBASE_IMM },
			{ OP_SUB_IMM, OP_X86_SUB_MEMBASE_IMM },
			{ OP_ISUB_IMM, OP_X86_SUB_MEMBASE_IMM },
			{ OP_AND_IMM, OP_X86_AND_MEMBASE_IMM },
			{ OP_IAND_IMM, OP_X86_AND_MEMBASE_IMM },
			{ OP_OR_IMM, OP_X86_OR_MEMBASE_IMM },
			{ OP_IOR_IMM, OP_X86_OR_MEMBASE_IMM },
			{ OP_XOR_IMM, OP_X86_XOR_MEMBASE_IMM },
			{ OP_IXOR_IMM, OP_X86_XOR_MEMBASE_IMM },
			/* A move into the stored value becomes just the store itself */
			{ OP_MOVE, OP_NOP }
		};
		int i;

		/* Only pointer/int32 sized stores can be folded on x86 */
		if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
			return -1;

		for (i = 0; i < (int) (sizeof (x86_map) / sizeof (x86_map [0])); ++i) {
			if (x86_map [i].op == opcode)
				return x86_map [i].membase_op;
		}
	}
#endif

#if defined(TARGET_AMD64)
	{
		static const struct { int op, membase_op; } amd64_map [] = {
			{ OP_IADD, OP_X86_ADD_MEMBASE_REG },
			{ OP_ISUB, OP_X86_SUB_MEMBASE_REG },
			{ OP_IAND, OP_X86_AND_MEMBASE_REG },
			{ OP_IOR, OP_X86_OR_MEMBASE_REG },
			{ OP_IXOR, OP_X86_XOR_MEMBASE_REG },
			{ OP_IADD_IMM, OP_X86_ADD_MEMBASE_IMM },
			{ OP_ISUB_IMM, OP_X86_SUB_MEMBASE_IMM },
			{ OP_IAND_IMM, OP_X86_AND_MEMBASE_IMM },
			{ OP_IOR_IMM, OP_X86_OR_MEMBASE_IMM },
			{ OP_IXOR_IMM, OP_X86_XOR_MEMBASE_IMM },
			{ OP_LADD, OP_AMD64_ADD_MEMBASE_REG },
			{ OP_LSUB, OP_AMD64_SUB_MEMBASE_REG },
			{ OP_LAND, OP_AMD64_AND_MEMBASE_REG },
			{ OP_LOR, OP_AMD64_OR_MEMBASE_REG },
			{ OP_LXOR, OP_AMD64_XOR_MEMBASE_REG },
			{ OP_ADD_IMM, OP_AMD64_ADD_MEMBASE_IMM },
			{ OP_LADD_IMM, OP_AMD64_ADD_MEMBASE_IMM },
			{ OP_SUB_IMM, OP_AMD64_SUB_MEMBASE_IMM },
			{ OP_LSUB_IMM, OP_AMD64_SUB_MEMBASE_IMM },
			{ OP_AND_IMM, OP_AMD64_AND_MEMBASE_IMM },
			{ OP_LAND_IMM, OP_AMD64_AND_MEMBASE_IMM },
			{ OP_OR_IMM, OP_AMD64_OR_MEMBASE_IMM },
			{ OP_LOR_IMM, OP_AMD64_OR_MEMBASE_IMM },
			{ OP_XOR_IMM, OP_AMD64_XOR_MEMBASE_IMM },
			{ OP_LXOR_IMM, OP_AMD64_XOR_MEMBASE_IMM },
			/* A move into the stored value becomes just the store itself */
			{ OP_MOVE, OP_NOP }
		};
		int i;

		if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
			return -1;

		for (i = 0; i < (int) (sizeof (amd64_map) / sizeof (amd64_map [0])); ++i) {
			if (amd64_map [i].op == opcode)
				return amd64_map [i].membase_op;
		}
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which folds OPCODE with a following store of its result
 * (STORE_OPCODE) into one instruction, or -1 when no folding exists.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		/* SETcc only writes a single byte, so the store must be 1 byte wide */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/*
		 * The original code fell through into the OP_CNE case here; that
		 * was only harmless because both guards test the same condition.
		 * Break explicitly so the cases stay independent.
		 */
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the opcode obtained by fusing the load of OPCODE's first source
 * register (done by LOAD_OPCODE) into the operation itself, turning it into
 * a <op> reg, [membase] instruction. Returns -1 if no fusion is possible.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	/* Only word-sized loads can be fused below */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
#endif

	return -1;
/*
 * op_to_op_src2_membase:
 *
 *   Return the opcode obtained by fusing the load of OPCODE's second source
 * register (done by LOAD_OPCODE) into the operation itself, turning it into
 * a <op> reg, [membase] instruction. Returns -1 if no fusion is possible.
 *
 * Fix: in the AMD64 switch every case checked that the load size matches the
 * operation size, but the cases did not end with 'break'. When the size check
 * failed, control fell through into the next case: e.g. OP_IADD with an i8
 * load fell through to OP_LADD and returned a 64 bit fused add for a 32 bit
 * operation. Each case now breaks, so a size mismatch yields -1 (no fusion).
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* On x86 only word-sized loads can be fused */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	default:
		break;
	}
#endif

#ifdef TARGET_AMD64
	/*
	 * The load size must match the operation size: fusing a 64 bit load into
	 * a 32 bit op (or vice versa) would change the computed value.
	 */
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		break;
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		break;
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		break;
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		break;
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		break;
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		break;
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		break;
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		break;
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		break;
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
		break;
	default:
		break;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes whose immediate
 * form is emulated (by a helper call) on this architecture, since converting
 * those to the _IMM form would not be a win.
 */
mono_op_to_op_imm_noemul (int opcode)
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* Long shifts are emulated on 32 bit targets */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* Division/remainder is emulated on these targets */
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
#endif
		return -1;
	default:
		return mono_op_to_op_imm (opcode);
10356 #ifndef DISABLE_JIT
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them.
 */
void
mono_handle_global_vregs (MonoCompile *cfg)
	gint32 *vreg_to_bb;
	MonoBasicBlock *bb;
	int i, pos;

	/*
	 * vreg_to_bb [vreg] == 0: not seen yet; == block_num + 1: seen in exactly
	 * one bblock; == -1: seen in more than one bblock.
	 * NOTE(review): sizeof (gint32*) over-allocates on 64 bit hosts (the array
	 * holds gint32), and the trailing '+ 1' adds one byte, not one element —
	 * harmless as long as vregs stay < next_vreg, but worth confirming.
	 */
	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);

#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->uses_simd_intrinsics)
		mono_simd_simplify_indirection (cfg);
#endif

	/* Find local vregs used in more than one bb */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		int block_num = bb->block_num;

		if (cfg->verbose_level > 2)
			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);

		cfg->cbb = bb;
		for (; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype = 0, regindex;
			gint32 prev_bb;

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			/* This pass runs on the low level IR, after decomposition */
			g_assert (ins->opcode >= MONO_CEE_LAST);

			/* regindex 0 = dreg, 1..3 = sreg1..sreg3 */
			for (regindex = 0; regindex < 4; regindex ++) {
				int vreg = 0;

				if (regindex == 0) {
					regtype = spec [MONO_INST_DEST];
					if (regtype == ' ')
						continue;
					vreg = ins->dreg;
				} else if (regindex == 1) {
					regtype = spec [MONO_INST_SRC1];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg1;
				} else if (regindex == 2) {
					regtype = spec [MONO_INST_SRC2];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg2;
				} else if (regindex == 3) {
					regtype = spec [MONO_INST_SRC3];
					if (regtype == ' ')
						continue;
					vreg = ins->sreg3;

#if SIZEOF_REGISTER == 4
				/* In the LLVM case, the long opcodes are not decomposed */
				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
					/*
					 * Since some instructions reference the original long vreg,
					 * and some reference the two component vregs, it is quite hard
					 * to determine when it needs to be global. So be conservative.
					 */
					if (!get_vreg_to_inst (cfg, vreg)) {
						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);

						if (cfg->verbose_level > 2)
							printf ("LONG VREG R%d made global.\n", vreg);

					/*
					 * Make the component vregs volatile since the optimizations can
					 * get confused otherwise.
					 */
					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
#endif

				g_assert (vreg != -1);

				prev_bb = vreg_to_bb [vreg];
				if (prev_bb == 0) {
					/* 0 is a valid block num */
					vreg_to_bb [vreg] = block_num + 1;
				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
					/* Hardware registers are handled by the register allocator */
					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
						continue;

					if (!get_vreg_to_inst (cfg, vreg)) {
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);

						switch (regtype) {
						case 'i':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'l':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'f':
							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
							break;
						case 'v':
							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
							break;
						default:
							g_assert_not_reached ();

					/* Flag as having been used in more than one bb */
					vreg_to_bb [vreg] = -1;

	/* If a variable is used in only one bblock, convert it into a local vreg */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *var = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);

		switch (var->type) {
		case STACK_I4:
		case STACK_OBJ:
		case STACK_PTR:
		case STACK_MP:
		case STACK_VTYPE:
#if SIZEOF_REGISTER == 8
		case STACK_I8:
#endif
#if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
		/* Enabling this screws up the fp stack on x86 */
		case STACK_R8:
#endif
		/* Arguments are implicitly global */
		/* Putting R4 vars into registers doesn't work currently */
		if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
			/*
			 * Make sure that the variable's liveness interval doesn't contain a call, since
			 * that would cause the lvreg to be spilled, making the whole optimization
			 * useless.
			 */
			/* This is too slow for JIT compilation */
#if 0
			if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
				MonoInst *ins;
				int def_index, call_index, ins_index;
				gboolean spilled = FALSE;

				def_index = -1;
				call_index = -1;
				ins_index = 0;
				for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
					const char *spec = INS_INFO (ins->opcode);

					if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
						def_index = ins_index;

					/* NOTE(review): both clauses test SRC1/sreg1; the second was
					 * presumably meant to test SRC2/sreg2 — dead (#if 0) code. */
					if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
						((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
						if (call_index > def_index) {
							spilled = TRUE;
							break;

					if (MONO_IS_CALL (ins))
						call_index = ins_index;

					ins_index ++;

				if (spilled)
					break;
#endif

			if (G_UNLIKELY (cfg->verbose_level > 2))
				printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
			var->flags |= MONO_INST_IS_DEAD;
			cfg->vreg_to_inst [var->dreg] = NULL;

			break;

	/*
	 * Compress the varinfo and vars tables so the liveness computation is faster and
	 * takes up less space.
	 */
	pos = 0;
	for (i = 0; i < cfg->num_varinfo; ++i) {
		MonoInst *var = cfg->varinfo [i];
		if (pos < i && cfg->locals_start == i)
			cfg->locals_start = pos;
		if (!(var->flags & MONO_INST_IS_DEAD)) {
			if (pos < i) {
				cfg->varinfo [pos] = cfg->varinfo [i];
				cfg->varinfo [pos]->inst_c0 = pos;
				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
				cfg->vars [pos].idx = pos;
#if SIZEOF_REGISTER == 4
			if (cfg->varinfo [pos]->type == STACK_I8) {
				/* Modify the two component vars too */
				MonoInst *var1;

				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
				var1->inst_c0 = pos;
				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
				var1->inst_c0 = pos;
#endif

			pos ++;

	cfg->num_varinfo = pos;
	if (cfg->locals_start > cfg->num_varinfo)
		cfg->locals_start = cfg->num_varinfo;
/*
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 */
void
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	char spec2 [16];
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 *lvregs;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	/* Maps an INS_INFO regtype char to the stack type of a scratch lvreg */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;
#endif

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			case STACK_R8:
			case STACK_I8: {
				MonoInst *tree;

				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
					break;

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Point the two component vregs at the low/high words of the slot */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				g_assert (tree);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
				break;

			default:
				break;
#endif

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
	lvregs_len = 0;

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins;

		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		/* lvregs are not valid across bblock boundaries */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;
		lvregs_len = 0;

		cfg->cbb = bb;
		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)
				continue;

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					} else
						NOT_IMPLEMENTED;
				} else {
					g_assert (var->opcode == OP_REGOFFSET);

					/* Address of a stack slot: base reg + constant offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* Only low level IR opcodes are expected at this point */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * src register.
			 * FIXME:
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;
				store = TRUE;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
				spec = spec2;
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();
			else
				store = FALSE;
			no_lvreg = FALSE;

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);
				printf ("\n");

			/***************/
			/*    DREG     */
			/***************/
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
			prev_dreg = -1;

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				int store_opcode;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hardware register: just rename */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						NULLIFY_INS (ins);
						def_ins = NULL;
					} else {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						ins->dreg = -1;

					spec = INS_INFO (ins->opcode);
				} else {
					guint32 lvreg;

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					lvreg = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Soft float: doubles live in integer register pairs */
						regtype = 'l';
						store_opcode = OP_STOREI8_MEMBASE_REG;

					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* 32 bit: store the two component words separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;
					else {
						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							no_lvreg = TRUE;

							/* The instruction became a store: swap dreg/sreg2
							 * like the MONO_IS_STORE_MEMBASE case above */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;
							store = TRUE;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
							spec = spec2;
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->dreg = -1;
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else {
							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

			/************/
			/*  SREGS   */
			/************/
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hardware register: just rename */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;
						continue;

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);
						continue;

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else {
						if (MONO_IS_REAL_MOVE (ins)) {
							/* A move from a stack slot: load straight into the dest */
							ins->opcode = OP_NOP;
							sreg = ins->dreg;
						} else {
							//printf ("%d ", srcindex); mono_print_ins (ins);

							sreg = alloc_dreg (cfg, stacktypes [regtype]);

							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
								if (var->dreg == prev_dreg) {
									/*
									 * sreg refers to the value loaded by the load
									 * emitted below, but we need to use ins->dreg
									 * since it refers to the store emitted earlier.
									 */
									sreg = ins->dreg;

								g_assert (sreg != -1);
								vreg_to_lvreg [var->dreg] = sreg;
								g_assert (lvregs_len < 1024);
								lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* 32 bit: load the two component words separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;
						else {
#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
#endif
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;

			mono_inst_set_src_registers (ins, sregs);

			/* Deferred from the DREG section: sregs have been processed now */
			if (dest_has_lvreg) {
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

			if (store) {
				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				/* Calls clobber the scratch regs, so cached lvregs are invalid */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
				lvregs_len = 0;
			} else if (ins->opcode == OP_NOP) {
				ins->dreg = -1;
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */
					continue;

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;
			MonoInst *ins;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);

			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c0 = i;
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
				else
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
#endif

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
11094 * FIXME:
11095 * - use 'iadd' instead of 'int_add'
11096 * - handling ovf opcodes: decompose in method_to_ir.
11097 * - unify iregs/fregs
11098 * -> partly done, the missing parts are:
11099 * - a more complete unification would involve unifying the hregs as well, so
11100 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11101 * would no longer map to the machine hregs, so the code generators would need to
11102 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11103 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11104 * fp/non-fp branches speeds it up by about 15%.
11105 * - use sext/zext opcodes instead of shifts
11106 * - add OP_ICALL
11107 * - get rid of TEMPLOADs if possible and use vregs instead
11108 * - clean up usage of OP_P/OP_ opcodes
11109 * - cleanup usage of DUMMY_USE
11110 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11111 * stack
11112 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11113 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11114 * - make sure handle_stack_args () is called before the branch is emitted
11115 * - when the new IR is done, get rid of all unused stuff
11116 * - COMPARE/BEQ as separate instructions or unify them ?
11117 * - keeping them separate allows specialized compare instructions like
11118 * compare_imm, compare_membase
11119 * - most back ends unify fp compare+branch, fp compare+ceq
11120 * - integrate mono_save_args into inline_method
11121 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11122 * - handle long shift opts on 32 bit platforms somehow: they require
11123 * 3 sregs (2 for arg1 and 1 for arg2)
11124 * - make byref a 'normal' type.
11125 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11126 * variable if needed.
11127 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11128 * like inline_method.
11129 * - remove inlining restrictions
11130 * - fix LNEG and enable cfold of INEG
11131 * - generalize x86 optimizations like ldelema as a peephole optimization
11132 * - add store_mem_imm for amd64
11133 * - optimize the loading of the interruption flag in the managed->native wrappers
11134 * - avoid special handling of OP_NOP in passes
11135 * - move code inserting instructions into one function/macro.
11136 * - try a coalescing phase after liveness analysis
11137 * - add float -> vreg conversion + local optimizations on !x86
11138 * - figure out how to handle decomposed branches during optimizations, ie.
11139 * compare+branch, op_jump_table+op_br etc.
11140 * - promote RuntimeXHandles to vregs
11141 * - vtype cleanups:
11142 * - add a NEW_VARLOADA_VREG macro
11143 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11144 * accessing vtype fields.
11145 * - get rid of I8CONST on 64 bit platforms
11146 * - dealing with the increase in code size due to branches created during opcode
11147 * decomposition:
11148 * - use extended basic blocks
11149 * - all parts of the JIT
11150 * - handle_global_vregs () && local regalloc
11151 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11152 * - sources of increase in code size:
11153 * - vtypes
11154 * - long compares
11155 * - isinst and castclass
11156 * - lvregs not allocated to global registers even if used multiple times
11157 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11158 * meaningful.
11159 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11160 * - add all micro optimizations from the old JIT
11161 * - put tree optimizations into the deadce pass
11162 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11163 * specific function.
11164 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11165 * fcompare + branchCC.
11166 * - create a helper function for allocating a stack slot, taking into account
11167 * MONO_CFG_HAS_SPILLUP.
11168 * - merge r68207.
11169 * - merge the ia64 switch changes.
11170 * - optimize mono_regstate2_alloc_int/float.
11171 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11172 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11173 * parts of the tree could be separated by other instructions, killing the tree
11174 * arguments, or stores killing loads etc. Also, should we fold loads into other
11175 * instructions if the result of the load is used multiple times ?
11176 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11177 * - LAST MERGE: 108395.
11178 * - when returning vtypes in registers, generate IR and append it to the end of the
11179 * last bb instead of doing it in the epilog.
11180 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11185 NOTES
11186 -----
11188 - When to decompose opcodes:
11189 - earlier: this makes some optimizations hard to implement, since the low level IR
 11190 *     no longer contains the necessary information. But it is easier to do.
11191 - later: harder to implement, enables more optimizations.
11192 - Branches inside bblocks:
11193 - created when decomposing complex opcodes.
11194 - branches to another bblock: harmless, but not tracked by the branch
11195 optimizations, so need to branch to a label at the start of the bblock.
11196 - branches to inside the same bblock: very problematic, trips up the local
 11197 *     reg allocator. Can be fixed by splitting the current bblock, but that is a
11198 complex operation, since some local vregs can become global vregs etc.
11199 - Local/global vregs:
11200 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11201 local register allocator.
11202 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11203 structure, created by mono_create_var (). Assigned to hregs or the stack by
11204 the global register allocator.
11205 - When to do optimizations like alu->alu_imm:
11206 - earlier -> saves work later on since the IR will be smaller/simpler
11207 - later -> can work on more instructions
11208 - Handling of valuetypes:
11209 - When a vtype is pushed on the stack, a new temporary is created, an
11210 instruction computing its address (LDADDR) is emitted and pushed on
11211 the stack. Need to optimize cases when the vtype is used immediately as in
11212 argument passing, stloc etc.
11213 - Instead of the to_end stuff in the old JIT, simply call the function handling
11214 the values on the stack before emitting the last instruction of the bb.
11217 #endif /* DISABLE_JIT */