/*
 * Imported from a git web view of mono/mini/method-to-ir.c.
 * Scrape residue preserved as a comment:
 *   commit subject: "Fix typo in GC_compare_and_exchange"
 *   blob 4c9ad867f54dcccd91f95cc1badfd6d973408cae
 */
1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Author:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
32 #endif
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
54 #include "mini.h"
55 #include "trace.h"
57 #include "ir-emit.h"
59 #include "jit-icalls.h"
/* Tuning constants used by the inliner heuristics. */
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
/* Abort the current inlining attempt: only taken when we are actually
 * inlining (cfg->method != method) and the callee is not a wrapper. */
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
65 goto inline_failure;\
66 } while (0)
/* Bail out of IR generation if an exception has already been flagged on the cfg. */
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
69 goto exception_exit;\
70 } while (0)
/* Record a MethodAccessException (with a formatted message) on the cfg and bail out. */
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
79 } while (0)
/* Record a FieldAccessException (with a formatted message) on the cfg and bail out. */
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
88 } while (0)
/* When compiling with generic sharing, flag OPCODE as unsupported for sharing
 * and bail out of the shared compile. */
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
95 } \
96 } while (0)
98 /* Determine whether 'ins' represents a load of the 'this' argument. */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
/* Shared trampoline signatures; defined in another compilation unit. */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
/*
 * Instruction metadata
 */
121 #ifdef MINI_OP
122 #undef MINI_OP
123 #endif
124 #ifdef MINI_OP3
125 #undef MINI_OP3
126 #endif
/* First expansion: record dest/src register kinds for every opcode. */
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
129 #define NONE ' '
130 #define IREG 'i'
131 #define FREG 'f'
132 #define VREG 'v'
133 #define XREG 'x'
134 #if SIZEOF_REGISTER == 8
135 #define LREG IREG
136 #else
137 #define LREG 'l'
138 #endif
139 /* keep in sync with the enum in mini.h */
140 const char
141 ins_info[] = {
142 #include "mini-ops.h"
144 #undef MINI_OP
145 #undef MINI_OP3
/* Second expansion: count the source registers of every opcode. */
147 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
/*
 * This should contain the index of the last sreg + 1. This is not the same
 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
 */
153 const gint8 ins_sreg_counts[] = {
154 #include "mini-ops.h"
156 #undef MINI_OP
157 #undef MINI_OP3
/* NOTE(review): defined elsewhere; presumably maps icall names to metadata — confirm. */
159 extern GHashTable *jit_icall_name_hash;
/* Initialize a variable-info record: id set, no hard register assigned yet. */
161 #define MONO_INIT_VARINFO(vi,id) do { \
162 (vi)->range.first_use.pos.bid = 0xffff; \
163 (vi)->reg = -1; \
164 (vi)->idx = (id); \
165 } while (0)
167 void
168 mono_inst_set_src_registers (MonoInst *ins, int *regs)
170 ins->sreg1 = regs [0];
171 ins->sreg2 = regs [1];
172 ins->sreg3 = regs [2];
175 guint32
176 mono_alloc_ireg (MonoCompile *cfg)
178 return alloc_ireg (cfg);
181 guint32
182 mono_alloc_freg (MonoCompile *cfg)
184 return alloc_freg (cfg);
187 guint32
188 mono_alloc_preg (MonoCompile *cfg)
190 return alloc_preg (cfg);
193 guint32
194 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
196 return alloc_dreg (cfg, stack_type);
199 guint
200 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
202 if (type->byref)
203 return OP_MOVE;
205 handle_enum:
206 switch (type->type) {
207 case MONO_TYPE_I1:
208 case MONO_TYPE_U1:
209 case MONO_TYPE_BOOLEAN:
210 return OP_MOVE;
211 case MONO_TYPE_I2:
212 case MONO_TYPE_U2:
213 case MONO_TYPE_CHAR:
214 return OP_MOVE;
215 case MONO_TYPE_I4:
216 case MONO_TYPE_U4:
217 return OP_MOVE;
218 case MONO_TYPE_I:
219 case MONO_TYPE_U:
220 case MONO_TYPE_PTR:
221 case MONO_TYPE_FNPTR:
222 return OP_MOVE;
223 case MONO_TYPE_CLASS:
224 case MONO_TYPE_STRING:
225 case MONO_TYPE_OBJECT:
226 case MONO_TYPE_SZARRAY:
227 case MONO_TYPE_ARRAY:
228 return OP_MOVE;
229 case MONO_TYPE_I8:
230 case MONO_TYPE_U8:
231 #if SIZEOF_REGISTER == 8
232 return OP_MOVE;
233 #else
234 return OP_LMOVE;
235 #endif
236 case MONO_TYPE_R4:
237 return OP_FMOVE;
238 case MONO_TYPE_R8:
239 return OP_FMOVE;
240 case MONO_TYPE_VALUETYPE:
241 if (type->data.klass->enumtype) {
242 type = mono_class_enum_basetype (type->data.klass);
243 goto handle_enum;
245 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
246 return OP_XMOVE;
247 return OP_VMOVE;
248 case MONO_TYPE_TYPEDBYREF:
249 return OP_VMOVE;
250 case MONO_TYPE_GENERICINST:
251 type = &type->data.generic_class->container_class->byval_arg;
252 goto handle_enum;
253 case MONO_TYPE_VAR:
254 case MONO_TYPE_MVAR:
255 g_assert (cfg->generic_sharing_context);
256 return OP_MOVE;
257 default:
258 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
260 return -1;
263 void
264 mono_print_bb (MonoBasicBlock *bb, const char *msg)
266 int i;
267 MonoInst *tree;
269 printf ("\n%s %d: [IN: ", msg, bb->block_num);
270 for (i = 0; i < bb->in_count; ++i)
271 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
272 printf (", OUT: ");
273 for (i = 0; i < bb->out_count; ++i)
274 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
275 printf (" ]\n");
276 for (tree = bb->code; tree; tree = tree->next)
277 mono_print_ins_index (-1, tree);
/*
 * Can't put this at the beginning, since other files reference stuff from this
 * file.
 */
284 #ifndef DISABLE_JIT
/* On unverifiable IL: break into the debugger when requested, otherwise jump
 * to the 'unverified' label of the enclosing function. */
286 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up the basic block starting at IP, lazily creating and registering it;
 * an out-of-range IP is treated as unverifiable code. */
288 #define GET_BBLOCK(cfg,tblock,ip) do { \
289 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
290 if (!(tblock)) { \
291 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
292 NEW_BBLOCK (cfg, (tblock)); \
293 (tblock)->cil_code = (ip); \
294 ADD_BBLOCK (cfg, (tblock)); \
296 } while (0)
298 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm. */
299 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
300 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
301 (dest)->dreg = alloc_preg ((cfg)); \
302 (dest)->sreg1 = (sr1); \
303 (dest)->sreg2 = (sr2); \
304 (dest)->inst_imm = (imm); \
305 (dest)->backend.shift_amount = (shift); \
306 MONO_ADD_INS ((cfg)->cbb, (dest)); \
307 } while (0)
308 #endif
310 #if SIZEOF_REGISTER == 8
/* On 64 bit targets, sign-extend an I4 second operand when mixed with a
 * pointer-sized first operand. */
311 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
312 /* FIXME: Need to add many more cases */ \
313 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
314 MonoInst *widen; \
315 int dr = alloc_preg (cfg); \
316 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
317 (ins)->sreg2 = widen->dreg; \
319 } while (0)
320 #else
321 #define ADD_WIDEN_OP(ins, arg1, arg2)
322 #endif
/* Pop two operands, type-check them, emit the (possibly widened) binop and
 * push the decomposed result. */
324 #define ADD_BINOP(op) do { \
325 MONO_INST_NEW (cfg, ins, (op)); \
326 sp -= 2; \
327 ins->sreg1 = sp [0]->dreg; \
328 ins->sreg2 = sp [1]->dreg; \
329 type_from_op (ins, sp [0], sp [1]); \
330 CHECK_TYPE (ins); \
331 /* Have to insert a widening op */ \
332 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
333 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
334 MONO_ADD_INS ((cfg)->cbb, (ins)); \
335 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
336 } while (0)
/* Pop one operand, type-check it, emit the unop and push the decomposed result. */
338 #define ADD_UNOP(op) do { \
339 MONO_INST_NEW (cfg, ins, (op)); \
340 sp--; \
341 ins->sreg1 = sp [0]->dreg; \
342 type_from_op (ins, sp [0], NULL); \
343 CHECK_TYPE (ins); \
344 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
345 MONO_ADD_INS ((cfg)->cbb, (ins)); \
346 *sp++ = mono_decompose_opcode (cfg, ins); \
347 } while (0)
/* Pop two operands, emit a compare + conditional branch, and wire up the
 * true/false successor blocks (next_block, when given, is the fall-through). */
349 #define ADD_BINCOND(next_block) do { \
350 MonoInst *cmp; \
351 sp -= 2; \
352 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
353 cmp->sreg1 = sp [0]->dreg; \
354 cmp->sreg2 = sp [1]->dreg; \
355 type_from_op (cmp, sp [0], sp [1]); \
356 CHECK_TYPE (cmp); \
357 type_from_op (ins, sp [0], sp [1]); \
358 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
359 GET_BBLOCK (cfg, tblock, target); \
360 link_bblock (cfg, bblock, tblock); \
361 ins->inst_true_bb = tblock; \
362 if ((next_block)) { \
363 link_bblock (cfg, bblock, (next_block)); \
364 ins->inst_false_bb = (next_block); \
365 start_new_bblock = 1; \
366 } else { \
367 GET_BBLOCK (cfg, tblock, ip); \
368 link_bblock (cfg, bblock, tblock); \
369 ins->inst_false_bb = tblock; \
370 start_new_bblock = 2; \
372 if (sp != stack_start) { \
373 handle_stack_args (cfg, stack_start, sp - stack_start); \
374 CHECK_UNVERIFIABLE (cfg); \
376 MONO_ADD_INS (bblock, cmp); \
377 MONO_ADD_INS (bblock, ins); \
378 } while (0)
380 /* *
381 * link_bblock: Links two basic blocks
383 * links two basic blocks in the control flow graph, the 'from'
384 * argument is the starting block and the 'to' argument is the block
385 * the control flow ends to after 'from'.
387 static void
388 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
390 MonoBasicBlock **newa;
391 int i, found;
393 #if 0
394 if (from->cil_code) {
395 if (to->cil_code)
396 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
397 else
398 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
399 } else {
400 if (to->cil_code)
401 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
402 else
403 printf ("edge from entry to exit\n");
405 #endif
407 found = FALSE;
408 for (i = 0; i < from->out_count; ++i) {
409 if (to == from->out_bb [i]) {
410 found = TRUE;
411 break;
414 if (!found) {
415 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
416 for (i = 0; i < from->out_count; ++i) {
417 newa [i] = from->out_bb [i];
419 newa [i] = to;
420 from->out_count++;
421 from->out_bb = newa;
424 found = FALSE;
425 for (i = 0; i < to->in_count; ++i) {
426 if (from == to->in_bb [i]) {
427 found = TRUE;
428 break;
431 if (!found) {
432 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
433 for (i = 0; i < to->in_count; ++i) {
434 newa [i] = to->in_bb [i];
436 newa [i] = from;
437 to->in_count++;
438 to->in_bb = newa;
442 void
443 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
445 link_bblock (cfg, from, to);
449 * mono_find_block_region:
451 * We mark each basic block with a region ID. We use that to avoid BB
452 * optimizations when blocks are in different regions.
454 * Returns:
455 * A region token that encodes where this region is, and information
456 * about the clause owner for this block.
458 * The region encodes the try/catch/filter clause that owns this block
459 * as well as the type. -1 is a special value that represents a block
460 * that is in none of try/catch/filter.
462 static int
463 mono_find_block_region (MonoCompile *cfg, int offset)
465 MonoMethod *method = cfg->method;
466 MonoMethodHeader *header = mono_method_get_header (method);
467 MonoExceptionClause *clause;
468 int i;
470 for (i = 0; i < header->num_clauses; ++i) {
471 clause = &header->clauses [i];
472 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
473 (offset < (clause->handler_offset)))
474 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
476 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
477 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
478 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
479 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
480 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
481 else
482 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
485 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
486 return ((i + 1) << 8) | clause->flags;
489 return -1;
492 static GList*
493 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
495 MonoMethod *method = cfg->method;
496 MonoMethodHeader *header = mono_method_get_header (method);
497 MonoExceptionClause *clause;
498 MonoBasicBlock *handler;
499 int i;
500 GList *res = NULL;
502 for (i = 0; i < header->num_clauses; ++i) {
503 clause = &header->clauses [i];
504 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
505 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
506 if (clause->flags == type) {
507 handler = cfg->cil_offset_to_bb [clause->handler_offset];
508 g_assert (handler);
509 res = g_list_append (res, handler);
513 return res;
516 static void
517 mono_create_spvar_for_region (MonoCompile *cfg, int region)
519 MonoInst *var;
521 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
522 if (var)
523 return;
525 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
526 /* prevent it from being register allocated */
527 var->flags |= MONO_INST_INDIRECT;
529 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
532 static MonoInst *
533 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
535 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
538 static MonoInst*
539 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
541 MonoInst *var;
543 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
544 if (var)
545 return var;
547 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
548 /* prevent it from being register allocated */
549 var->flags |= MONO_INST_INDIRECT;
551 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 return var;
557 * Returns the type used in the eval stack when @type is loaded.
558 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
560 void
561 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
563 MonoClass *klass;
565 inst->klass = klass = mono_class_from_mono_type (type);
566 if (type->byref) {
567 inst->type = STACK_MP;
568 return;
571 handle_enum:
572 switch (type->type) {
573 case MONO_TYPE_VOID:
574 inst->type = STACK_INV;
575 return;
576 case MONO_TYPE_I1:
577 case MONO_TYPE_U1:
578 case MONO_TYPE_BOOLEAN:
579 case MONO_TYPE_I2:
580 case MONO_TYPE_U2:
581 case MONO_TYPE_CHAR:
582 case MONO_TYPE_I4:
583 case MONO_TYPE_U4:
584 inst->type = STACK_I4;
585 return;
586 case MONO_TYPE_I:
587 case MONO_TYPE_U:
588 case MONO_TYPE_PTR:
589 case MONO_TYPE_FNPTR:
590 inst->type = STACK_PTR;
591 return;
592 case MONO_TYPE_CLASS:
593 case MONO_TYPE_STRING:
594 case MONO_TYPE_OBJECT:
595 case MONO_TYPE_SZARRAY:
596 case MONO_TYPE_ARRAY:
597 inst->type = STACK_OBJ;
598 return;
599 case MONO_TYPE_I8:
600 case MONO_TYPE_U8:
601 inst->type = STACK_I8;
602 return;
603 case MONO_TYPE_R4:
604 case MONO_TYPE_R8:
605 inst->type = STACK_R8;
606 return;
607 case MONO_TYPE_VALUETYPE:
608 if (type->data.klass->enumtype) {
609 type = mono_class_enum_basetype (type->data.klass);
610 goto handle_enum;
611 } else {
612 inst->klass = klass;
613 inst->type = STACK_VTYPE;
614 return;
616 case MONO_TYPE_TYPEDBYREF:
617 inst->klass = mono_defaults.typed_reference_class;
618 inst->type = STACK_VTYPE;
619 return;
620 case MONO_TYPE_GENERICINST:
621 type = &type->data.generic_class->container_class->byval_arg;
622 goto handle_enum;
623 case MONO_TYPE_VAR :
624 case MONO_TYPE_MVAR :
625 /* FIXME: all the arguments must be references for now,
626 * later look inside cfg and see if the arg num is
627 * really a reference
629 g_assert (cfg->generic_sharing_context);
630 inst->type = STACK_OBJ;
631 return;
632 default:
633 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
/* Result eval-stack type of add/sub/mul/div/rem, indexed by [src1->type][src2->type]. */
640 static const char
641 bin_num_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of neg, indexed by the operand's eval-stack type. */
652 static const char
653 neg_table [] = {
654 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
657 /* reduce the size of this table */
/* Result type of the integer-only binops (and/or/xor/div.un/rem.un). */
658 static const char
659 bin_int_table [STACK_MAX] [STACK_MAX] = {
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of operand-type pairs; 0 means invalid IL. */
670 static const char
671 bin_comp_table [STACK_MAX] [STACK_MAX] = {
672 /* Inv i L p F & O vt */
673 {0},
674 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
675 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
676 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
677 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
678 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
679 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
680 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
683 /* reduce the size of this table */
/* Result type of shl/shr/shr.un; the shift count must be I4 or PTR sized. */
684 static const char
685 shift_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode. Each entry is the delta to add to the generic
 * opcode, indexed by the operand's eval-stack type.
 */
700 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
701 static const guint16
702 binops_op_map [STACK_MAX] = {
703 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
706 /* handles from CEE_NEG to CEE_CONV_U8 */
707 static const guint16
708 unops_op_map [STACK_MAX] = {
709 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
712 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
713 static const guint16
714 ovfops_op_map [STACK_MAX] = {
715 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
718 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
719 static const guint16
720 ovf2ops_op_map [STACK_MAX] = {
721 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
724 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
725 static const guint16
726 ovf3ops_op_map [STACK_MAX] = {
727 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
730 /* handles from CEE_BEQ to CEE_BLT_UN */
731 static const guint16
732 beqops_op_map [STACK_MAX] = {
733 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
736 /* handles from CEE_CEQ to CEE_CLT_UN */
737 static const guint16
738 ceqops_op_map [STACK_MAX] = {
739 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 */
750 static void
751 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
753 switch (ins->opcode) {
754 /* binops */
755 case CEE_ADD:
756 case CEE_SUB:
757 case CEE_MUL:
758 case CEE_DIV:
759 case CEE_REM:
760 /* FIXME: check unverifiable args for STACK_MP */
761 ins->type = bin_num_table [src1->type] [src2->type];
762 ins->opcode += binops_op_map [ins->type];
763 break;
764 case CEE_DIV_UN:
765 case CEE_REM_UN:
766 case CEE_AND:
767 case CEE_OR:
768 case CEE_XOR:
769 ins->type = bin_int_table [src1->type] [src2->type];
770 ins->opcode += binops_op_map [ins->type];
771 break;
772 case CEE_SHL:
773 case CEE_SHR:
774 case CEE_SHR_UN:
775 ins->type = shift_table [src1->type] [src2->type];
776 ins->opcode += binops_op_map [ins->type];
777 break;
/* The compare opcode is specialized on the width/kind of the first operand. */
778 case OP_COMPARE:
779 case OP_LCOMPARE:
780 case OP_ICOMPARE:
781 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
782 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
783 ins->opcode = OP_LCOMPARE;
784 else if (src1->type == STACK_R8)
785 ins->opcode = OP_FCOMPARE;
786 else
787 ins->opcode = OP_ICOMPARE;
788 break;
/* Immediate compare: src2 is an immediate, so src1's type is used twice. */
789 case OP_ICOMPARE_IMM:
790 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
791 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
792 ins->opcode = OP_LCOMPARE_IMM;
793 break;
794 case CEE_BEQ:
795 case CEE_BGE:
796 case CEE_BGT:
797 case CEE_BLE:
798 case CEE_BLT:
799 case CEE_BNE_UN:
800 case CEE_BGE_UN:
801 case CEE_BGT_UN:
802 case CEE_BLE_UN:
803 case CEE_BLT_UN:
804 ins->opcode += beqops_op_map [src1->type];
805 break;
806 case OP_CEQ:
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 ins->opcode += ceqops_op_map [src1->type];
809 break;
810 case OP_CGT:
811 case OP_CGT_UN:
812 case OP_CLT:
813 case OP_CLT_UN:
/* Bit 0 of bin_comp_table selects the orderable-comparison subset. */
814 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
815 ins->opcode += ceqops_op_map [src1->type];
816 break;
817 /* unops */
818 case CEE_NEG:
819 ins->type = neg_table [src1->type];
820 ins->opcode += unops_op_map [ins->type];
821 break;
822 case CEE_NOT:
823 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
824 ins->type = src1->type;
825 else
826 ins->type = STACK_INV;
827 ins->opcode += unops_op_map [ins->type];
828 break;
829 case CEE_CONV_I1:
830 case CEE_CONV_I2:
831 case CEE_CONV_I4:
832 case CEE_CONV_U4:
833 ins->type = STACK_I4;
834 ins->opcode += unops_op_map [src1->type];
835 break;
836 case CEE_CONV_R_UN:
837 ins->type = STACK_R8;
838 switch (src1->type) {
839 case STACK_I4:
840 case STACK_PTR:
841 ins->opcode = OP_ICONV_TO_R_UN;
842 break;
843 case STACK_I8:
844 ins->opcode = OP_LCONV_TO_R_UN;
845 break;
847 break;
848 case CEE_CONV_OVF_I1:
849 case CEE_CONV_OVF_U1:
850 case CEE_CONV_OVF_I2:
851 case CEE_CONV_OVF_U2:
852 case CEE_CONV_OVF_I4:
853 case CEE_CONV_OVF_U4:
854 ins->type = STACK_I4;
855 ins->opcode += ovf3ops_op_map [src1->type];
856 break;
857 case CEE_CONV_OVF_I_UN:
858 case CEE_CONV_OVF_U_UN:
859 ins->type = STACK_PTR;
860 ins->opcode += ovf2ops_op_map [src1->type];
861 break;
862 case CEE_CONV_OVF_I1_UN:
863 case CEE_CONV_OVF_I2_UN:
864 case CEE_CONV_OVF_I4_UN:
865 case CEE_CONV_OVF_U1_UN:
866 case CEE_CONV_OVF_U2_UN:
867 case CEE_CONV_OVF_U4_UN:
868 ins->type = STACK_I4;
869 ins->opcode += ovf2ops_op_map [src1->type];
870 break;
871 case CEE_CONV_U:
872 ins->type = STACK_PTR;
873 switch (src1->type) {
874 case STACK_I4:
875 ins->opcode = OP_ICONV_TO_U;
876 break;
877 case STACK_PTR:
878 case STACK_MP:
879 #if SIZEOF_REGISTER == 8
880 ins->opcode = OP_LCONV_TO_U;
881 #else
882 ins->opcode = OP_MOVE;
883 #endif
884 break;
885 case STACK_I8:
886 ins->opcode = OP_LCONV_TO_U;
887 break;
888 case STACK_R8:
889 ins->opcode = OP_FCONV_TO_U;
890 break;
892 break;
893 case CEE_CONV_I8:
894 case CEE_CONV_U8:
895 ins->type = STACK_I8;
896 ins->opcode += unops_op_map [src1->type];
897 break;
898 case CEE_CONV_OVF_I8:
899 case CEE_CONV_OVF_U8:
900 ins->type = STACK_I8;
901 ins->opcode += ovf3ops_op_map [src1->type];
902 break;
903 case CEE_CONV_OVF_U8_UN:
904 case CEE_CONV_OVF_I8_UN:
905 ins->type = STACK_I8;
906 ins->opcode += ovf2ops_op_map [src1->type];
907 break;
908 case CEE_CONV_R4:
909 case CEE_CONV_R8:
910 ins->type = STACK_R8;
911 ins->opcode += unops_op_map [src1->type];
912 break;
913 case OP_CKFINITE:
914 ins->type = STACK_R8;
915 break;
916 case CEE_CONV_U2:
917 case CEE_CONV_U1:
918 ins->type = STACK_I4;
919 ins->opcode += ovfops_op_map [src1->type];
920 break;
921 case CEE_CONV_I:
922 case CEE_CONV_OVF_I:
923 case CEE_CONV_OVF_U:
924 ins->type = STACK_PTR;
925 ins->opcode += ovfops_op_map [src1->type];
926 break;
927 case CEE_ADD_OVF:
928 case CEE_ADD_OVF_UN:
929 case CEE_MUL_OVF:
930 case CEE_MUL_OVF_UN:
931 case CEE_SUB_OVF:
932 case CEE_SUB_OVF_UN:
933 ins->type = bin_num_table [src1->type] [src2->type];
934 ins->opcode += ovfops_op_map [src1->type];
/* There are no overflow-checked float ops: mark the result invalid. */
935 if (ins->type == STACK_R8)
936 ins->type = STACK_INV;
937 break;
938 case OP_LOAD_MEMBASE:
939 ins->type = STACK_PTR;
940 break;
941 case OP_LOADI1_MEMBASE:
942 case OP_LOADU1_MEMBASE:
943 case OP_LOADI2_MEMBASE:
944 case OP_LOADU2_MEMBASE:
945 case OP_LOADI4_MEMBASE:
946 case OP_LOADU4_MEMBASE:
947 ins->type = STACK_PTR;
948 break;
949 case OP_LOADI8_MEMBASE:
950 ins->type = STACK_I8;
951 break;
952 case OP_LOADR4_MEMBASE:
953 case OP_LOADR8_MEMBASE:
954 ins->type = STACK_R8;
955 break;
956 default:
957 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
958 break;
/* Managed pointers carry object_class as a placeholder klass. */
961 if (ins->type == STACK_MP)
962 ins->klass = mono_defaults.object_class;
/* Eval-stack types produced by the ldind.* opcodes.
 * NOTE(review): index base assumed to be relative to the first CEE_LDIND_*
 * opcode — confirm at the use site. */
965 static const char
966 ldind_type [] = {
967 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Dead code below: compile-time disabled argument/signature validation helper. */
970 #if 0
972 static const char
973 param_table [STACK_MAX] [STACK_MAX] = {
974 {0},
/* Returns 1 when the eval-stack values in ARGS are compatible with SIG, 0 otherwise. */
977 static int
978 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
979 int i;
981 if (sig->hasthis) {
982 switch (args->type) {
983 case STACK_I4:
984 case STACK_I8:
985 case STACK_R8:
986 case STACK_VTYPE:
987 case STACK_INV:
988 return 0;
990 args++;
992 for (i = 0; i < sig->param_count; ++i) {
993 switch (args [i].type) {
994 case STACK_INV:
995 return 0;
996 case STACK_MP:
997 if (!sig->params [i]->byref)
998 return 0;
999 continue;
1000 case STACK_OBJ:
1001 if (sig->params [i]->byref)
1002 return 0;
1003 switch (sig->params [i]->type) {
1004 case MONO_TYPE_CLASS:
1005 case MONO_TYPE_STRING:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1009 break;
1010 default:
1011 return 0;
1013 continue;
1014 case STACK_R8:
1015 if (sig->params [i]->byref)
1016 return 0;
1017 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1018 return 0;
1019 continue;
1020 case STACK_PTR:
1021 case STACK_I4:
1022 case STACK_I8:
1023 case STACK_VTYPE:
1024 break;
1026 /*if (!param_table [args [i].type] [sig->params [i]->type])
1027 return 0;*/
1029 return 1;
1031 #endif
1034 * When we need a pointer to the current domain many times in a method, we
1035 * call mono_domain_get() once and we store the result in a local variable.
1036 * This function returns the variable that represents the MonoDomain*.
1038 inline static MonoInst *
1039 mono_get_domainvar (MonoCompile *cfg)
1041 if (!cfg->domainvar)
1042 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1043 return cfg->domainvar;
1047 * The got_var contains the address of the Global Offset Table when AOT
1048 * compiling.
1050 MonoInst *
1051 mono_get_got_var (MonoCompile *cfg)
1053 #ifdef MONO_ARCH_NEED_GOT_VAR
1054 if (!cfg->compile_aot)
1055 return NULL;
1056 if (!cfg->got_var) {
1057 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1059 return cfg->got_var;
1060 #else
1061 return NULL;
1062 #endif
1065 static MonoInst *
1066 mono_get_vtable_var (MonoCompile *cfg)
1068 g_assert (cfg->generic_sharing_context);
1070 if (!cfg->rgctx_var) {
1071 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1072 /* force the var to be stack allocated */
1073 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1076 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map the evaluation stack type of INS back to a MonoType*.
 * For STACK_MP and STACK_VTYPE the result depends on ins->klass,
 * so the caller must have set it correctly.
 */
static MonoType*
type_from_stack_type (MonoInst *ins) {
	switch (ins->type) {
	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
	case STACK_R8: return &mono_defaults.double_class->byval_arg;
	case STACK_MP:
		/* managed pointer: byref type of the pointed-to class */
		return &ins->klass->this_arg;
	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
	case STACK_VTYPE: return &ins->klass->byval_arg;
	default:
		g_error ("stack type %d to monotype not handled\n", ins->type);
	}
	return NULL;
}
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to the evaluation stack type (STACK_*) used to
 * represent values of that type. Inverse of type_from_stack_type ()
 * up to widening (all small ints map to STACK_I4).
 */
static G_GNUC_UNUSED int
type_to_stack_type (MonoType *t)
{
	/* strip enum wrappers etc. before dispatching */
	switch (mono_type_get_underlying_type (t)->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return STACK_I4;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return STACK_PTR;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return STACK_OBJ;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return STACK_I8;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return STACK_R8;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		return STACK_VTYPE;
	case MONO_TYPE_GENERICINST:
		/* generic instantiations: valuetype vs reference decides */
		if (mono_type_generic_inst_is_valuetype (t))
			return STACK_VTYPE;
		else
			return STACK_OBJ;
		break;
	default:
		g_assert_not_reached ();
	}
	return -1;
}
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode (e.g. CEE_LDELEM_I4 -> int32). Asserts on any other opcode.
 */
static MonoClass*
array_access_to_klass (int opcode)
{
	switch (opcode) {
	case CEE_LDELEM_U1:
		return mono_defaults.byte_class;
	case CEE_LDELEM_U2:
		return mono_defaults.uint16_class;
	case CEE_LDELEM_I:
	case CEE_STELEM_I:
		return mono_defaults.int_class;
	case CEE_LDELEM_I1:
	case CEE_STELEM_I1:
		return mono_defaults.sbyte_class;
	case CEE_LDELEM_I2:
	case CEE_STELEM_I2:
		return mono_defaults.int16_class;
	case CEE_LDELEM_I4:
	case CEE_STELEM_I4:
		return mono_defaults.int32_class;
	case CEE_LDELEM_U4:
		return mono_defaults.uint32_class;
	case CEE_LDELEM_I8:
	case CEE_STELEM_I8:
		return mono_defaults.int64_class;
	case CEE_LDELEM_R4:
	case CEE_STELEM_R4:
		return mono_defaults.single_class;
	case CEE_LDELEM_R8:
	case CEE_STELEM_R8:
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	default:
		g_assert_not_reached ();
	}
	return NULL;
}
/*
 * We try to share variables when possible.
 *
 * Returns a local variable used to carry stack slot SLOT across basic
 * block boundaries. Variables for the common stack types are cached in
 * cfg->intvars, keyed by (stack type, slot), so joins reuse the same var.
 */
static MonoInst *
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
{
	MonoInst *res;
	int pos, vnum;

	/* inlining can result in deeper stacks */
	if (slot >= mono_method_get_header (cfg->method)->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* index into the (type, slot) cache; stack types start at 1 */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
	case STACK_I4:
	case STACK_I8:
	case STACK_R8:
	case STACK_PTR:
	case STACK_MP:
	case STACK_OBJ:
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		break;
	default:
		/* STACK_VTYPE etc. are never shared */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
	}
	return res;
}
/*
 * mono_save_token_info:
 *
 *   When AOT compiling, record the image/token pair for KEY in
 * cfg->token_info_hash so the AOT compiler can emit a metadata-token
 * based reference instead of a direct pointer.
 */
static void
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
{
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
	}
}
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
static void
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
{
	int i, bindex;
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;
	gboolean found;

	if (!count)
		return;
	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		/* First time we see this bb with a non-empty stack: pick (or
		 * create) the set of variables used to carry the values. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		found = FALSE;
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
				continue;
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				/* a successor already has an in_stack: share it */
				found = TRUE;
				bb->out_stack = outb->in_stack;
				break;
			}
		}
		//printf ("\n");
		if (!found) {
			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
			for (i = 0; i < count; ++i) {
				/*
				 * try to reuse temps already allocated for this purpose, if they occupy the same
				 * stack slot and if they are of the same type.
				 * This won't cause conflicts since if 'local' is used to
				 * store one of the values in the in_stack of a bblock, then
				 * the same variable will be used for the same outgoing stack
				 * slot as well.
				 * This doesn't work when inlining methods, since the bblocks
				 * in the inlined methods do not inherit their in_stack from
				 * the bblock they are inlined to. See bug #58863 for an
				 * example.
				 */
				if (cfg->inlined_method)
					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				else
					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
			}
		}
	}

	/* Propagate our out_stack to successors that don't have one yet;
	 * a successor with a mismatching stack depth makes the IL unverifiable. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
			continue;
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				cfg->unverifiable = TRUE;
				return;
			}
			continue; /* check they are the same locals */
		}
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;
	}

	/* Emit the stores of the stack values into the shared variables */
	locals = bb->out_stack;
	cfg->cbb = bb;
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	}

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	found = TRUE;
	bindex = 0;
	while (found) {
		/* Find a bblock which has a different in_stack */
		found = FALSE;
		while (bindex < bb->out_count) {
			outb = bb->out_bb [bindex];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER) {
				bindex++;
				continue;
			}
			if (outb->in_stack != locals) {
				for (i = 0; i < count; ++i) {
					EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
					inst->cil_code = sp [i]->cil_code;
					sp [i] = locals [i];
					if (cfg->verbose_level > 3)
						printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
				}
				locals = outb->in_stack;
				found = TRUE;
				break;
			}
			bindex ++;
		}
	}
}
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
static void
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* interface_id is unknown at AOT compile time: load it via an
		 * (adjusted) AOT constant and index relative to the vtable */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
	}
	else {
		/* the slot lives at a negative offset from the vtable pointer */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 *
 * The test reads bit interface_id of klass->interface_bitmap.
 */
static void
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));

	if (cfg->compile_aot) {
		/* interface_id unknown at compile time: compute byte index
		 * (iid >> 3) and bit mask (1 << (iid & 7)) at runtime */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* byte index and mask are compile-time constants */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 *
 * Same bit test as mini_emit_load_intf_bit_reg_class (), but reads the
 * bitmap from MonoVTable::interface_bitmap instead of the MonoClass.
 */
static void
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
{
	int ibitmap_reg = alloc_preg (cfg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));

	if (cfg->compile_aot) {
		/* interface_id unknown at compile time: compute byte index
		 * (iid >> 3) and bit mask (1 << (iid & 7)) at runtime */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	} else {
		/* byte index and mask are compile-time constants */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
	}
}
/*
 * Emit code which checks whether the interface id of @klass is smaller
 * than the value given by max_iid_reg.
 *
 * On failure, branch to false_target if given, otherwise throw an
 * InvalidCastException.
 */
static void
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
{
	if (cfg->compile_aot) {
		/* interface_id unknown at compile time: compare against AOT constant */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
	}
	else
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	if (false_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
}
1463 /* Same as above, but obtains max_iid from a vtable */
1464 static void
1465 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1466 MonoBasicBlock *false_target)
1468 int max_iid_reg = alloc_preg (cfg);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1471 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1474 /* Same as above, but obtains max_iid from a klass */
1475 static void
1476 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1477 MonoBasicBlock *false_target)
1479 int max_iid_reg = alloc_preg (cfg);
1481 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1482 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an isinst check for a non-interface class: walk the supertypes
 * array of the class in klass_reg and branch to true_target if
 * supertypes [klass->idepth - 1] equals klass, to false_target if the
 * inheritance depth is too small.
 * (NOTE: "isninst" follows the historical spelling used at the call sites.)
 */
static void
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/* the supertypes table might be shorter than klass->idepth */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	}
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
	if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
}
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in vtable_reg:
 * first verify the interface id is in range, then test the interface
 * bitmap bit. Branches to true_target if it implements klass, otherwise
 * throws InvalidCastException (when true_target is NULL).
 */
static void
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
static void
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
{
	int intf_bit_reg = alloc_preg (cfg);

	/* range-check the interface id, then test the bitmap bit */
	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	if (true_target)
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	else
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
}
/*
 * mini_emit_class_check:
 *
 *   Emit code which throws InvalidCastException unless the class in
 * klass_reg is exactly KLASS.
 */
static inline void
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
{
	if (cfg->compile_aot) {
		/* the class pointer is not known at AOT compile time */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
}
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in klass_reg with KLASS and branch to TARGET
 * using the given branch opcode (e.g. OP_PBEQ / OP_PBNE_UN).
 */
static inline void
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
{
	if (cfg->compile_aot) {
		/* the class pointer is not known at AOT compile time */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
	} else {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	}
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
}
/*
 * mini_emit_castclass:
 *
 *   Emit a castclass check of the class in klass_reg against KLASS,
 * throwing InvalidCastException on failure. Array classes check rank
 * and recurse on the element (cast) class; non-array classes check the
 * supertypes table. obj_reg == -1 skips the vector check (used for the
 * recursive arrays-of-arrays case).
 */
static void
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
{
	if (klass->rank) {
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		/* the object must be an array of the same rank */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
		} else {
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		}

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		}
	} else {
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			/* the supertypes table might be shorter than klass->idepth */
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		}
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check (cfg, stype, klass);
	}
}
/*
 * mini_emit_memset:
 *
 *   Emit code to zero SIZE bytes at destreg+offset. Only val == 0 is
 * supported (asserted). Small aligned sizes use a single immediate
 * store; otherwise stores are emitted in the widest width the
 * alignment allows.
 */
static void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	g_assert (val == 0);

	if (align == 0)
		align = 4;

	if ((size <= 4) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < 4) {
		/* This could be optimized further if necessary */
		while (size >= 1) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
			offset += 1;
			size -= 1;
		}
		return;
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* realign to 8 bytes, then store a word at a time */
		if (offset % 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			offset += 4;
			size -= 4;
		}
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}
#endif

	/* store the tail in progressively narrower widths */
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
1696 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit code to copy SIZE bytes from srcreg+soffset to destreg+doffset,
 * using the widest load/store pairs the alignment allows.
 */
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	if (align == 0)
		align = 4;

	if (align < 4) {
		/* This could be optimized further if necessary */
		while (size >= 1) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 1;
			soffset += 1;
			size -= 1;
		}
	}

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* copy a machine word at a time on 64 bit */
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}
#endif

	/* copy the tail in progressively narrower widths */
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
1757 #ifndef DISABLE_JIT
/*
 * ret_type_to_call_opcode:
 *
 *   Pick the call opcode variant (OP_CALL / OP_FCALL / OP_LCALL /
 * OP_VCALL / OP_VOIDCALL, plus _REG / VIRT forms) matching the return
 * type TYPE. calli selects the indirect-call form, virt the virtual
 * form.
 */
static int
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
{
	if (type->byref)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;

handle_enum:
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums are handled as their underlying type */
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		} else
			return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	default:
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
	}
	return -1;
}
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
static int
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
{
	MonoType *simple_type;
	MonoClass *klass;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
			return 0;
		return 1;
	}

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* nothing can be stored in a void target */
		return 1;
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_PTR:
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
			return 1;
		return 0;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
			return 1;
		return 0;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
			return 1;
		/* FIXME: check type compatibility */
		return 0;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		if (arg->type != STACK_I8)
			return 1;
		return 0;
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
		if (arg->type != STACK_R8)
			return 1;
		return 0;
	case MONO_TYPE_VALUETYPE:
		/* valuetypes must match exactly by class */
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
			return 1;
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
			return 1;
		return 0;
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
				return 1;
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
				return 1;
			return 0;
		} else {
			if (arg->type != STACK_OBJ)
				return 1;
			/* FIXME: check type compatibility */
			return 0;
		}
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
			return 1;
		return 0;
	default:
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
	}
	return 1;
}
1929 * Prepare arguments for passing to a function call.
1930 * Return a non-zero value if the arguments can't be passed to the given
1931 * signature.
1932 * The type checks are not yet complete and some conversions may need
1933 * casts on 32 or 64 bit architectures.
1935 * FIXME: implement this using target_type_is_incompatible ()
1937 static int
1938 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1940 MonoType *simple_type;
1941 int i;
1943 if (sig->hasthis) {
1944 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1945 return 1;
1946 args++;
1948 for (i = 0; i < sig->param_count; ++i) {
1949 if (sig->params [i]->byref) {
1950 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1951 return 1;
1952 continue;
1954 simple_type = sig->params [i];
1955 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1956 handle_enum:
1957 switch (simple_type->type) {
1958 case MONO_TYPE_VOID:
1959 return 1;
1960 continue;
1961 case MONO_TYPE_I1:
1962 case MONO_TYPE_U1:
1963 case MONO_TYPE_BOOLEAN:
1964 case MONO_TYPE_I2:
1965 case MONO_TYPE_U2:
1966 case MONO_TYPE_CHAR:
1967 case MONO_TYPE_I4:
1968 case MONO_TYPE_U4:
1969 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1970 return 1;
1971 continue;
1972 case MONO_TYPE_I:
1973 case MONO_TYPE_U:
1974 case MONO_TYPE_PTR:
1975 case MONO_TYPE_FNPTR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1977 return 1;
1978 continue;
1979 case MONO_TYPE_CLASS:
1980 case MONO_TYPE_STRING:
1981 case MONO_TYPE_OBJECT:
1982 case MONO_TYPE_SZARRAY:
1983 case MONO_TYPE_ARRAY:
1984 if (args [i]->type != STACK_OBJ)
1985 return 1;
1986 continue;
1987 case MONO_TYPE_I8:
1988 case MONO_TYPE_U8:
1989 if (args [i]->type != STACK_I8)
1990 return 1;
1991 continue;
1992 case MONO_TYPE_R4:
1993 case MONO_TYPE_R8:
1994 if (args [i]->type != STACK_R8)
1995 return 1;
1996 continue;
1997 case MONO_TYPE_VALUETYPE:
1998 if (simple_type->data.klass->enumtype) {
1999 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2000 goto handle_enum;
2002 if (args [i]->type != STACK_VTYPE)
2003 return 1;
2004 continue;
2005 case MONO_TYPE_TYPEDBYREF:
2006 if (args [i]->type != STACK_VTYPE)
2007 return 1;
2008 continue;
2009 case MONO_TYPE_GENERICINST:
2010 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2011 goto handle_enum;
2013 default:
2014 g_error ("unknown type 0x%02x in check_call_signature",
2015 simple_type->type);
2018 return 0;
2021 static int
2022 callvirt_to_call (int opcode)
2024 switch (opcode) {
2025 case OP_CALLVIRT:
2026 return OP_CALL;
2027 case OP_VOIDCALLVIRT:
2028 return OP_VOIDCALL;
2029 case OP_FCALLVIRT:
2030 return OP_FCALL;
2031 case OP_VCALLVIRT:
2032 return OP_VCALL;
2033 case OP_LCALLVIRT:
2034 return OP_LCALL;
2035 default:
2036 g_assert_not_reached ();
2039 return -1;
2042 static int
2043 callvirt_to_call_membase (int opcode)
2045 switch (opcode) {
2046 case OP_CALLVIRT:
2047 return OP_CALL_MEMBASE;
2048 case OP_VOIDCALLVIRT:
2049 return OP_VOIDCALL_MEMBASE;
2050 case OP_FCALLVIRT:
2051 return OP_FCALL_MEMBASE;
2052 case OP_LCALLVIRT:
2053 return OP_LCALL_MEMBASE;
2054 case OP_VCALLVIRT:
2055 return OP_VCALL_MEMBASE;
2056 default:
2057 g_assert_not_reached ();
2060 return -1;
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Set up the hidden "IMT method" argument for an interface call.  If the
 * architecture defines MONO_ARCH_IMT_REG, materialize the method pointer
 * (either IMT_ARG, an AOT constant, or a plain pointer constant) into a vreg
 * and attach it to CALL as an out-argument in that register; otherwise defer
 * to the arch-specific helper.
 */
static void
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
#ifdef MONO_ARCH_IMT_REG
	int method_reg = alloc_preg (cfg);

	if (imt_arg) {
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		/* Under AOT the method address must go through a patchable constant. */
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
	} else {
		MonoInst *ins;
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	}

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
#else
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
#endif
}
#endif
2089 static MonoJumpInfo *
2090 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2092 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2094 ji->ip.i = ip;
2095 ji->type = type;
2096 ji->data.target = target;
2098 return ji;
2101 inline static MonoInst*
2102 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * choosing the opcode from the return type (or OP_TAILCALL), arranging the
 * return-value plumbing for vtype/void/scalar returns, and running the
 * arch-specific (or LLVM) argument lowering.  The instruction is NOT added
 * to a basic block here; callers do that.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail)
{
	MonoCallInst *call;
#ifdef MONO_ARCH_SOFT_FLOAT
	int i;
#endif

	if (tail)
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
	else
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->args = args;
	call->signature = sig;

	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);

	if (tail) {
		if (MONO_TYPE_ISSTRUCT (sig->ret)) {
			/* Tail calls returning a vtype reuse the caller's vret address. */
			call->vret_var = cfg->vret_addr;
			//g_assert_not_reached ();
		}
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		MonoInst *loada;

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT
	/*
	 * If the call has a float argument, we would need to do an r8->r4 conversion using
	 * an icall, but that cannot be done during the call sequence since it would clobber
	 * the call registers + the stack. So we do it before emitting the call.
	 */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		MonoType *t;
		MonoInst *in = call->args [i];

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = &mono_defaults.int_class->byval_arg;
		t = mono_type_get_underlying_type (t);

		if (!t->byref && t->type == MONO_TYPE_R4) {
			MonoInst *iargs [1];
			MonoInst *conv;

			iargs [0] = in;
			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

			/* The result will be in an int vreg */
			call->args [i] = conv;
		}
	}
#endif

#ifdef ENABLE_LLVM
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
	else
		mono_arch_emit_call (cfg, call);
#else
	mono_arch_emit_call (cfg, call);
#endif

	/* Track the largest outgoing-argument area any call in the method needs. */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;

	return call;
}
2199 inline static MonoInst*
2200 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2202 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2204 call->inst.sreg1 = addr->dreg;
2206 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2208 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but additionally pass RGCTX_ARG in the
 * architecture's RGCTX register.  Only available on architectures which
 * define MONO_ARCH_RGCTX_REG.
 */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
{
#ifdef MONO_ARCH_RGCTX_REG
	MonoCallInst *call;
	int rgctx_reg = -1;

	if (rgctx_arg) {
		/* Copy the rgctx value into its own vreg before the call sequence. */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	}
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	if (rgctx_arg) {
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
	}
	return (MonoInst*)call;
#else
	g_assert_not_reached ();
	return NULL;
#endif
}
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS being
 * non-NULL marks the call as virtual; IMT_ARG, when set, is the hidden IMT
 * method argument for generic interface calls.  Virtual calls are devirtualized
 * when possible (delegate Invoke, non-virtual targets, sealed targets),
 * otherwise dispatched through the vtable or the IMT.
 */
static MonoInst*
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
{
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	MonoCallInst *call;

	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;

		sig = ctor_sig;
	}

	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);

	/* Remoting: MarshalByRef (or object) non-virtual targets go through a
	 * check wrapper unless THIS is statically known to be local. */
	if (this && sig->hasthis &&
	    (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
	    !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
	} else {
		call->method = method;
	}
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;

	if (virtual) {
		int vtable_reg, slot_reg, this_reg;

		this_reg = this->dreg;

#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}
#endif

		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			}

			if (!method->string_ctor) {
				cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
				MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
				MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
			}

			call->inst.opcode = callvirt_to_call (call->inst.opcode);

			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
			MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
			MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);

			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

			return (MonoInst*)call;
		}

		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);

		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
			slot_reg = -1;
#ifdef MONO_ARCH_HAVE_IMT
			if (mono_use_imt) {
				/* IMT slots live at negative offsets from the vtable pointer. */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			}
#endif
			if (slot_reg == -1) {
				/* Non-IMT interface dispatch: load the interface vtable slot. */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			}
		} else {
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
#ifdef MONO_ARCH_HAVE_IMT
			if (imt_arg) {
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
			}
#endif
		}

		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	}

	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);

	return (MonoInst*)call;
}
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but additionally pass VTABLE_ARG in
 * the RGCTX register for shared generic code.  Requires MONO_ARCH_RGCTX_REG
 * when VTABLE_ARG is non-NULL.
 */
static MonoInst*
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
		MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
{
	int rgctx_reg;
	MonoInst *ins;
	MonoCallInst *call;

	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		/* Stage the rgctx value before the call sequence is emitted. */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
#else
		NOT_IMPLEMENTED;
#endif
	}
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);

	call = (MonoCallInst*)ins;
	if (vtable_arg) {
#ifdef MONO_ARCH_RGCTX_REG
		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
		cfg->uses_rgctx_reg = TRUE;
		call->rgctx_reg = TRUE;
#else
		NOT_IMPLEMENTED;
#endif
	}

	return ins;
}
2394 static inline MonoInst*
2395 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2397 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2400 MonoInst*
2401 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2402 MonoInst **args)
2404 MonoCallInst *call;
2406 g_assert (sig);
2408 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2409 call->fptr = func;
2411 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2413 return (MonoInst*)call;
2416 inline static MonoInst*
2417 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2419 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2421 g_assert (info);
2423 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
{
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	MonoInst *ins;

	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* fptr temporarily holds a MonoJumpInfo*, not a code address. */
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
	return ins;
}
2450 static MonoInst*
2451 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2453 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2454 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2455 int widen_op = -1;
2458 * Native code might return non register sized integers
2459 * without initializing the upper bits.
2461 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2462 case OP_LOADI1_MEMBASE:
2463 widen_op = OP_ICONV_TO_I1;
2464 break;
2465 case OP_LOADU1_MEMBASE:
2466 widen_op = OP_ICONV_TO_U1;
2467 break;
2468 case OP_LOADI2_MEMBASE:
2469 widen_op = OP_ICONV_TO_I2;
2470 break;
2471 case OP_LOADU2_MEMBASE:
2472 widen_op = OP_ICONV_TO_U2;
2473 break;
2474 default:
2475 break;
2478 if (widen_op != -1) {
2479 int dreg = alloc_preg (cfg);
2480 MonoInst *widen;
2482 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2483 widen->type = ins->type;
2484 ins = widen;
2489 return ins;
2492 static MonoMethod*
2493 get_memcpy_method (void)
2495 static MonoMethod *memcpy_method = NULL;
2496 if (!memcpy_method) {
2497 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2498 if (!memcpy_method)
2499 g_error ("Old corlib found. Install a new one");
2501 return memcpy_method;
2505 * Emit code to copy a valuetype of type @klass whose address is stored in
2506 * @src->dreg to memory whose address is stored at @dest->dreg.
2508 void
2509 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2511 MonoInst *iargs [3];
2512 int n;
2513 guint32 align = 0;
2514 MonoMethod *memcpy_method;
2516 g_assert (klass);
2518 * This check breaks with spilled vars... need to handle it during verification anyway.
2519 * g_assert (klass && klass == src->klass && klass == dest->klass);
2522 if (native)
2523 n = mono_class_native_size (klass, &align);
2524 else
2525 n = mono_class_value_size (klass, &align);
2527 #if HAVE_WRITE_BARRIERS
2528 /* if native is true there should be no references in the struct */
2529 if (klass->has_references && !native) {
2530 /* Avoid barriers when storing to the stack */
2531 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2532 (dest->opcode == OP_LDADDR))) {
2533 iargs [0] = dest;
2534 iargs [1] = src;
2535 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2537 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2540 #endif
2542 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2543 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2544 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2545 } else {
2546 iargs [0] = dest;
2547 iargs [1] = src;
2548 EMIT_NEW_ICONST (cfg, iargs [2], n);
2550 memcpy_method = get_memcpy_method ();
2551 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2555 static MonoMethod*
2556 get_memset_method (void)
2558 static MonoMethod *memset_method = NULL;
2559 if (!memset_method) {
2560 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2561 if (!memset_method)
2562 g_error ("Old corlib found. Install a new one");
2564 return memset_method;
2567 void
2568 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2570 MonoInst *iargs [3];
2571 int n;
2572 guint32 align;
2573 MonoMethod *memset_method;
2575 /* FIXME: Optimize this for the case when dest is an LDADDR */
2577 mono_class_init (klass);
2578 n = mono_class_value_size (klass, &align);
2580 if (n <= sizeof (gpointer) * 5) {
2581 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2583 else {
2584 memset_method = get_memset_method ();
2585 iargs [0] = dest;
2586 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2587 EMIT_NEW_ICONST (cfg, iargs [2], n);
2588 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR computing the runtime generic context for METHOD:
 * the MRGCTX for generic methods, the vtable (loaded from the saved vtable
 * var, possibly indirected through the MRGCTX) for static/valuetype methods,
 * or "this"'s vtable otherwise.
 */
static MonoInst*
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
{
	MonoInst *this = NULL;

	g_assert (cfg->generic_sharing_context);

	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_ARGLOAD (cfg, this, 0);

	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;

		g_assert (!this);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);

		/* The MRGCTX was saved into the vtable variable in the prolog. */
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);

		return mrgctx_var;
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;

		g_assert (!this);

		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);

		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The var actually holds an MRGCTX; load the vtable out of it. */
			MonoInst *mrgctx_var = vtable_var;
			int vtable_reg;

			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		}

		return vtable_var;
	} else {
		MonoInst *ins;
		int vtable_reg, res_reg;

		vtable_reg = alloc_preg (cfg);
		/* NOTE(review): res_reg is allocated but never used here. */
		res_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
		return ins;
	}
}
2643 static MonoJumpInfoRgctxEntry *
2644 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2646 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2647 res->method = method;
2648 res->in_mrgctx = in_mrgctx;
2649 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2650 res->data->type = patch_type;
2651 res->data->data.target = patch_data;
2652 res->info_type = info_type;
2654 return res;
2657 static inline MonoInst*
2658 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2660 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2663 static MonoInst*
2664 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2665 MonoClass *klass, int rgctx_type)
2667 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2668 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2670 return emit_rgctx_fetch (cfg, rgctx, entry);
2673 static MonoInst*
2674 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2675 MonoMethod *cmethod, int rgctx_type)
2677 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2678 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2680 return emit_rgctx_fetch (cfg, rgctx, entry);
2683 static MonoInst*
2684 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2685 MonoClassField *field, int rgctx_type)
2687 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2688 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2690 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an array of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise.  The comparison strategy
 * depends on domain sharing, shared generic code and AOT.
 */
static void
mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
{
	int vtable_reg = alloc_preg (cfg);
	int context_used = 0;

	if (cfg->generic_sharing_context)
		context_used = mono_class_check_context_used (array_class);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));

	if (cfg->opt & MONO_OPT_SHARED) {
		/* Vtables differ per domain under MONO_OPT_SHARED; compare klass pointers. */
		int class_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		if (cfg->compile_aot) {
			int klass_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
		}
	} else if (context_used) {
		/* Shared generic code: the expected vtable comes from the RGCTX. */
		MonoInst *vtable_ins;

		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
	} else {
		if (cfg->compile_aot) {
			int vt_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
		} else {
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
		}
	}

	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
}
2732 static void
2733 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2735 if (mini_get_debug_options ()->better_cast_details) {
2736 int to_klass_reg = alloc_preg (cfg);
2737 int vtable_reg = alloc_preg (cfg);
2738 int klass_reg = alloc_preg (cfg);
2739 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2741 if (!tls_get) {
2742 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2743 exit (1);
2746 MONO_ADD_INS (cfg->cbb, tls_get);
2747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2751 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2756 static void
2757 reset_cast_details (MonoCompile *cfg)
2759 /* Reset the variables holding the cast details */
2760 if (mini_get_debug_options ()->better_cast_details) {
2761 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2763 MONO_ADD_INS (cfg->cbb, tls_get);
2764 /* It is enough to reset the from field */
2765 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
static MonoInst*
handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
{
	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);

	if (context_used) {
		MonoInst *rgctx, *addr;

		/* FIXME: What if the class is shared?  We might not
		   have to get the address of the method from the
		   RGCTX. */
		addr = emit_get_rgctx_method (cfg, context_used, method,
				MONO_RGCTX_INFO_GENERIC_METHOD_CODE);

		rgctx = emit_get_rgctx (cfg, method, context_used);

		/* Indirect call through the per-instantiation method address. */
		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		return mono_emit_method_call (cfg, method, &val, NULL);
	}
}
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for KLASS on the object at the top of SP:
 * check rank == 0, check the vtable's element class matches, then return
 * the address of the value data (right after the MonoObject header).
 */
static MonoInst*
handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
{
	MonoInst *add;
	int obj_reg;
	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
	int rank_reg = alloc_dreg (cfg ,STACK_I4);

	obj_reg = sp [0]->dreg;
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));

	/* FIXME: generics */
	g_assert (klass->rank == 0);

	// Check rank == 0
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));

	if (context_used) {
		MonoInst *element_class;

		/* This assertion is from the unboxcast insn */
		g_assert (klass->rank == 0);

		element_class = emit_get_rgctx_klass (cfg, context_used,
				klass->element_class, MONO_RGCTX_INFO_KLASS);

		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
	} else {
		save_cast_details (cfg, klass->element_class, obj_reg);
		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
		reset_cast_details (cfg);
	}

	/* The unboxed value lives right after the MonoObject header. */
	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
	MONO_ADD_INS (cfg->cbb, add);
	add->type = STACK_MP;
	add->klass = klass;

	return add;
}
/*
 * handle_alloc:
 *
 *   Emit IR allocating an instance of KLASS, choosing between the shared
 * mono_object_new path, a specialized mscorlib helper (AOT out-of-line code),
 * a GC managed allocator, or the per-class allocation function.  FOR_BOX
 * selects the boxing variant of the managed allocator.
 */
static MonoInst*
handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
{
	MonoInst *iargs [2];
	void *alloc_ftn;

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);

		alloc_ftn = mono_object_new;
	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
		/* This happens often in argument checking code, eg. throw new FooException... */
		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
	} else {
		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
#ifdef MONO_CROSS_COMPILE
		MonoMethod *managed_alloc = NULL;
#else
		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
#endif
		gboolean pass_lw;

		if (managed_alloc) {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}
		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		if (pass_lw) {
			/* Pass the instance size in pointer-sized words as the first arg. */
			guint32 lw = vtable->klass->instance_size;
			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
			EMIT_NEW_ICONST (cfg, iargs [0], lw);
			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
		}
		else {
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
		}
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * only available at runtime as DATA_INST, so the per-class allocator and the
 * managed allocator cannot be chosen at compile time.
 */
static MonoInst*
handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
		gboolean for_box)
{
	MonoInst *iargs [2];
	MonoMethod *managed_alloc = NULL;
	void *alloc_ftn;

	/*
	  FIXME: we cannot get managed_alloc here because we can't get
	  the class's vtable (because it's not a closed class)

	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
	*/

	if (cfg->opt & MONO_OPT_SHARED) {
		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
		iargs [1] = data_inst;
		alloc_ftn = mono_object_new;
	} else {
		if (managed_alloc) {
			/* Currently unreachable, see the FIXME above. */
			iargs [0] = data_inst;
			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
		}

		iargs [0] = data_inst;
		alloc_ftn = mono_object_new_specific;
	}

	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
}
2921 static MonoInst*
2922 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2924 MonoInst *alloc, *ins;
2926 if (mono_class_is_nullable (klass)) {
2927 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2928 return mono_emit_method_call (cfg, method, &val, NULL);
2931 alloc = handle_alloc (cfg, klass, TRUE);
2933 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2935 return alloc;
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the allocation data
 * (vtable) arrives at runtime as DATA_INST, and Nullable<T>.Box is reached
 * through an RGCTX-resolved indirect call.
 */
static MonoInst *
handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
{
	MonoInst *alloc, *ins;

	if (mono_class_is_nullable (klass)) {
		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
		/* FIXME: What if the class is shared?  We might not
		   have to get the method address from the RGCTX. */
		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
				MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);

		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	} else {
		alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);

		/* Copy the value just past the MonoObject header. */
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);

		return alloc;
	}
}
/*
 * handle_castclass:
 *
 *   Emit the castclass sequence for SRC to KLASS: null passes through,
 * interfaces use the interface-cast check, sealed non-array classes compare
 * the klass pointer directly, everything else goes through the generic
 * castclass helper.  Throws InvalidCastException on mismatch; returns SRC.
 */
static MonoInst*
handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
{
	MonoBasicBlock *is_null_bb;
	int obj_reg = src->dreg;
	int vtable_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, is_null_bb);

	/* null always casts successfully */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);

	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
	} else {
		int klass_reg = alloc_preg (cfg);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));

		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
			/* Sealed class: an exact klass-pointer compare suffices. */
			/* the remoting code is broken, access the class for now */
			if (0) {
				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
			} else {
				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
			}
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		} else {
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
			mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
		}
	}

	MONO_START_BB (cfg, is_null_bb);

	reset_cast_details (cfg);

	return src;
}
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' opcode for SRC against KLASS.
 * The result register is initialized with the object reference up front
 * (so the move can be if-converted) and is overwritten with 0 only on the
 * false path; the null and success paths fall through with the object
 * reference intact.
 */
3006 static MonoInst*
3007 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3009 MonoInst *ins;
3010 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3011 int obj_reg = src->dreg;
3012 int vtable_reg = alloc_preg (cfg);
3013 int res_reg = alloc_preg (cfg);
3015 NEW_BBLOCK (cfg, is_null_bb);
3016 NEW_BBLOCK (cfg, false_bb);
3017 NEW_BBLOCK (cfg, end_bb);
3019 /* Do the assignment at the beginning, so the other assignment can be if converted */
3020 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3021 ins->type = STACK_OBJ;
3022 ins->klass = klass;
/* A null object is trivially "not an instance": branch straight to the exit with the (null) object as result. */
3024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3025 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3027 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3028 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3029 /* the is_null_bb target simply copies the input register to the output */
3030 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3031 } else {
3032 int klass_reg = alloc_preg (cfg);
3034 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3036 if (klass->rank) {
/* Array case: first the rank must match, then the element class is compared. */
3037 int rank_reg = alloc_preg (cfg);
3038 int eclass_reg = alloc_preg (cfg);
3040 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3042 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes (object, System.Enum's parent, System.Enum) with direct class-pointer checks. */
3045 if (klass->cast_class == mono_defaults.object_class) {
3046 int parent_reg = alloc_preg (cfg);
3047 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3048 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3049 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3050 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3051 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3052 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3053 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3055 } else if (klass->cast_class == mono_defaults.enum_class) {
3056 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3057 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3058 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3059 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3060 } else {
3061 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3062 /* Check that the object is a vector too */
3063 int bounds_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3069 /* the is_null_bb target simply copies the input register to the output */
3070 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3072 } else if (mono_class_is_nullable (klass)) {
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3074 /* the is_null_bb target simply copies the input register to the output */
3075 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3076 } else {
3077 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
/* Sealed class: a single class-pointer comparison suffices (no subclass can match). */
3078 /* the remoting code is broken, access the class for now */
3079 if (0) {
3080 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3082 } else {
3083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3086 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3087 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3088 } else {
3089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3090 /* the is_null_bb target simply copies the input register to the output */
3091 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* false path: overwrite the result with NULL, then join the exit block. */
3096 MONO_START_BB (cfg, false_bb);
3098 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3099 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3101 MONO_START_BB (cfg, is_null_bb);
3103 MONO_START_BB (cfg, end_bb);
3105 return ins;
/*
 * handle_cisinst:
 *
 *   Emit IR for the "checked isinst" operation used by the remoting
 * support: unlike handle_isinst () the result is a tri-state integer
 * (see the comment below) so the caller can distinguish transparent
 * proxies whose type cannot be decided at JIT time.
 */
3108 static MonoInst*
3109 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3111 /* This opcode takes as input an object reference and a class, and returns:
3112 0) if the object is an instance of the class,
3113 1) if the object is not instance of the class,
3114 2) if the object is a proxy whose type cannot be determined */
3116 MonoInst *ins;
3117 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3118 int obj_reg = src->dreg;
3119 int dreg = alloc_ireg (cfg);
3120 int tmp_reg;
3121 int klass_reg = alloc_preg (cfg);
3123 NEW_BBLOCK (cfg, true_bb);
3124 NEW_BBLOCK (cfg, false_bb);
3125 NEW_BBLOCK (cfg, false2_bb);
3126 NEW_BBLOCK (cfg, end_bb);
3127 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is treated as "not an instance" (result 1). */
3129 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3130 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3132 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface: try the interface bitmap first; on failure, a transparent
 * proxy with no custom type info yields result 2 ("unknown"). */
3133 NEW_BBLOCK (cfg, interface_fail_bb);
3135 tmp_reg = alloc_preg (cfg);
3136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3137 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3138 MONO_START_BB (cfg, interface_fail_bb);
3139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3141 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3143 tmp_reg = alloc_preg (cfg);
3144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3147 } else {
/* Non-interface: if the object is a transparent proxy, test against the
 * remote class's proxy_class; otherwise test the object's own class. */
3148 tmp_reg = alloc_preg (cfg);
3149 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3152 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3153 tmp_reg = alloc_preg (cfg);
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3157 tmp_reg = alloc_preg (cfg);
3158 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3162 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3163 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3165 MONO_START_BB (cfg, no_proxy_bb);
3167 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Join blocks: load the tri-state result constant on each path. */
3170 MONO_START_BB (cfg, false_bb);
3172 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3175 MONO_START_BB (cfg, false2_bb);
3177 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3180 MONO_START_BB (cfg, true_bb);
3182 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3184 MONO_START_BB (cfg, end_bb);
3186 /* FIXME: */
3187 MONO_INST_NEW (cfg, ins, OP_ICONST);
3188 ins->dreg = dreg;
3189 ins->type = STACK_I4;
3191 return ins;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the "checked castclass" operation used by the remoting
 * support.  Mirrors handle_cisinst () but throws InvalidCastException on
 * a definite mismatch instead of returning 1 for it; see the comment
 * below for the returned values.
 */
3194 static MonoInst*
3195 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3197 /* This opcode takes as input an object reference and a class, and returns:
3198 0) if the object is an instance of the class,
3199 1) if the object is a proxy whose type cannot be determined
3200 an InvalidCastException exception is thrown otherwise*/
3202 MonoInst *ins;
3203 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3204 int obj_reg = src->dreg;
3205 int dreg = alloc_ireg (cfg);
3206 int tmp_reg = alloc_preg (cfg);
3207 int klass_reg = alloc_preg (cfg);
3209 NEW_BBLOCK (cfg, end_bb);
3210 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts succeed, as with the regular castclass opcode. */
3212 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3213 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3215 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface: a failed bitmap check is only tolerated for a transparent
 * proxy carrying custom type info; anything else throws. */
3216 NEW_BBLOCK (cfg, interface_fail_bb);
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3219 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3220 MONO_START_BB (cfg, interface_fail_bb);
3221 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3223 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3225 tmp_reg = alloc_preg (cfg);
3226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3228 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3230 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3233 } else {
/* Non-interface: proxies with custom type info are tested against the
 * remote proxy_class (mismatch yields result 1); plain objects take the
 * normal castclass path, which throws on mismatch. */
3234 NEW_BBLOCK (cfg, no_proxy_bb);
3236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3238 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3240 tmp_reg = alloc_preg (cfg);
3241 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3244 tmp_reg = alloc_preg (cfg);
3245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3249 NEW_BBLOCK (cfg, fail_1_bb);
3251 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3253 MONO_START_BB (cfg, fail_1_bb);
3255 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3258 MONO_START_BB (cfg, no_proxy_bb);
3260 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3263 MONO_START_BB (cfg, ok_result_bb);
3265 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3267 MONO_START_BB (cfg, end_bb);
3269 /* FIXME: */
3270 MONO_INST_NEW (cfg, ins, OP_ICONST);
3271 ins->dreg = dreg;
3272 ins->type = STACK_I4;
3274 return ins;
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate object of type KLASS bound to METHOD with the given
 * TARGET, inlining the work of mono_delegate_ctor (): the target, method and
 * invoke_impl fields are stored directly, and (when possible) a per-domain
 * slot is reserved so the delegate trampoline can cache the compiled code of
 * METHOD.  Returns the MonoInst holding the new delegate object.
 */
3277 static G_GNUC_UNUSED MonoInst*
3278 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3280 gpointer *trampoline;
3281 MonoInst *obj, *method_ins, *tramp_ins;
3282 MonoDomain *domain;
3283 guint8 **code_slot;
3285 obj = handle_alloc (cfg, klass, FALSE);
3287 /* Inline the contents of mono_delegate_ctor */
3289 /* Set target field */
3290 /* Optimize away setting of NULL target */
3291 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3294 /* Set method field */
3295 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3299 * To avoid looking up the compiled code belonging to the target method
3300 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3301 * store it, and we fill it after the method has been compiled.
/* Skipped under AOT (the slot address can't be baked in) and for dynamic methods. */
3303 if (!cfg->compile_aot && !method->dynamic) {
3304 MonoInst *code_slot_ins;
3306 domain = mono_domain_get ();
3307 mono_domain_lock (domain);
/* Lazily create the method->code-slot hash for this domain; lookups and
 * inserts happen under the domain lock. */
3308 if (!domain_jit_info (domain)->method_code_hash)
3309 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3310 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3311 if (!code_slot) {
3312 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3313 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3315 mono_domain_unlock (domain);
3317 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3321 /* Set invoke_impl field */
3322 if (cfg->compile_aot) {
3323 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3324 } else {
3325 trampoline = mono_create_delegate_trampoline (klass);
3326 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3330 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3332 return obj;
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper for a
 * 'newobj' on a multi-dimensional array constructor.  SP holds the arguments
 * already on the evaluation stack; the call uses a vararg calling convention,
 * which also forces LLVM off for this method.
 */
3335 static MonoInst*
3336 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3338 MonoJitICallInfo *info;
3340 /* Need to register the icall so it gets an icall wrapper */
3341 info = mono_get_array_new_va_icall (rank);
3343 cfg->flags |= MONO_CFG_HAS_VARARGS;
3345 /* mono_array_new_va () needs a vararg calling convention */
3346 cfg->disable_llvm = TRUE;
3348 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3349 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block, exactly once per method (guarded by got_var_allocated).
 * A dummy use in the exit block keeps the variable live for the whole method.
 */
3352 static void
3353 mono_emit_load_got_addr (MonoCompile *cfg)
3355 MonoInst *getaddr, *dummy_use;
3357 if (!cfg->got_var || cfg->got_var_allocated)
3358 return;
3360 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3361 getaddr->dreg = cfg->got_var->dreg;
3363 /* Add it to the start of the first bblock */
/* Prepend (not append) so the GOT address is available before any other
 * instruction in the entry block. */
3364 if (cfg->bb_entry->code) {
3365 getaddr->next = cfg->bb_entry->code;
3366 cfg->bb_entry->code = getaddr;
3368 else
3369 MONO_ADD_INS (cfg->bb_entry, getaddr);
3371 cfg->got_var_allocated = TRUE;
3374 * Add a dummy use to keep the got_var alive, since real uses might
3375 * only be generated by the back ends.
3376 * Add it to end_bblock, so the variable's lifetime covers the whole
3377 * method.
3378 * It would be better to make the usage of the got var explicit in all
3379 * cases when the backend needs it (i.e. calls, throw etc.), so this
3380 * wouldn't be needed.
3382 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3383 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size (bytes) a method may have to be considered for
 * inlining; initialized lazily in mono_method_check_inlining () from the
 * MONO_INLINELIMIT environment variable or INLINE_LENGTH_LIMIT. */
3386 static int inline_limit;
3387 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Returns FALSE for generic sharing, runtime/internal/noinline/synchronized
 * methods, pinvokes, MarshalByRef classes, methods with exception clauses,
 * bodies over the inline size limit, classes whose cctor cannot be proven to
 * have run, methods with declarative security, and (on soft-float targets)
 * any R4 parameter or return.
 */
3389 static gboolean
3390 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3392 MonoMethodHeader *header;
3393 MonoVTable *vtable;
3394 #ifdef MONO_ARCH_SOFT_FLOAT
3395 MonoMethodSignature *sig = mono_method_signature (method);
3396 int i;
3397 #endif
3399 if (cfg->generic_sharing_context)
3400 return FALSE;
3402 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature' is not declared in this function (only 'sig',
 * and only under MONO_ARCH_SOFT_FLOAT) -- presumably this should be
 * mono_method_signature (method)->ret; verify on an LMF_OPS target. */
3403 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3404 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3405 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3406 return TRUE;
3407 #endif
3409 if (method->is_inflated)
3410 /* Avoid inflating the header */
3411 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3412 else
3413 header = mono_method_get_header (method);
3415 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3416 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3417 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3418 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3419 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3420 (method->klass->marshalbyref) ||
3421 !header || header->num_clauses)
3422 return FALSE;
3424 /* also consider num_locals? */
3425 /* Do the size check early to avoid creating vtables */
3426 if (!inline_limit_inited) {
3427 if (getenv ("MONO_INLINELIMIT"))
3428 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3429 else
3430 inline_limit = INLINE_LENGTH_LIMIT;
3431 inline_limit_inited = TRUE;
3433 if (header->code_size >= inline_limit)
3434 return FALSE;
3437 * if we can initialize the class of the method right away, we do,
3438 * otherwise we don't allow inlining if the class needs initialization,
3439 * since it would mean inserting a call to mono_runtime_class_init()
3440 * inside the inlined code
3442 if (!(cfg->opt & MONO_OPT_SHARED)) {
3443 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3444 if (cfg->run_cctors && method->klass->has_cctor) {
3445 if (!method->klass->runtime_info)
3446 /* No vtable created yet */
3447 return FALSE;
3448 vtable = mono_class_vtable (cfg->domain, method->klass);
3449 if (!vtable)
3450 return FALSE;
3451 /* This makes so that inline cannot trigger */
3452 /* .cctors: too many apps depend on them */
3453 /* running with a specific order... */
3454 if (! vtable->initialized)
3455 return FALSE;
3456 mono_runtime_class_init (vtable);
3458 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3459 if (!method->klass->runtime_info)
3460 /* No vtable created yet */
3461 return FALSE;
3462 vtable = mono_class_vtable (cfg->domain, method->klass);
3463 if (!vtable)
3464 return FALSE;
3465 if (!vtable->initialized)
3466 return FALSE;
3468 } else {
3470 * If we're compiling for shared code
3471 * the cctor will need to be run at aot method load time, for example,
3472 * or at the end of the compilation of the inlining method.
3474 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3475 return FALSE;
3479 * CAS - do not inline methods with declarative security
3480 * Note: this has to be before any possible return TRUE;
3482 if (mono_method_has_declsec (method))
3483 return FALSE;
3485 #ifdef MONO_ARCH_SOFT_FLOAT
3486 /* FIXME: */
3487 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3488 return FALSE;
3489 for (i = 0; i < sig->param_count; ++i)
3490 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3491 return FALSE;
3492 #endif
3494 return TRUE;
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return TRUE if a static field access compiled in METHOD requires an
 * explicit class-initialization check for VTABLE's class.  FALSE when the
 * vtable is already initialized (JIT only), the class is BeforeFieldInit,
 * no cctor run is needed, or the access is from an instance method of the
 * same class (the cctor ran before the instance could exist).
 */
3497 static gboolean
3498 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3500 if (vtable->initialized && !cfg->compile_aot)
3501 return FALSE;
3503 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3504 return FALSE;
3506 if (!mono_class_needs_cctor_run (vtable->klass, method))
3507 return FALSE;
3509 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3510 /* The initialization is already done before the method is called */
3511 return FALSE;
3513 return TRUE;
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for an element of a one-dimensional array:
 * bounds-check INDEX against the array length, then compute
 * &arr->vector [index * element_size].  On x86/amd64 a single LEA is used
 * for power-of-two element sizes.
 */
3516 static MonoInst*
3517 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3519 MonoInst *ins;
3520 guint32 size;
3521 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3523 mono_class_init (klass);
3524 size = mono_class_array_element_size (klass);
3526 mult_reg = alloc_preg (cfg);
3527 array_reg = arr->dreg;
3528 index_reg = index->dreg;
3530 #if SIZEOF_REGISTER == 8
3531 /* The array reg is 64 bits but the index reg is only 32 */
3532 index2_reg = alloc_preg (cfg);
3533 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3534 #else
3535 if (index->type == STACK_I8) {
3536 index2_reg = alloc_preg (cfg);
3537 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3538 } else {
3539 index2_reg = index_reg;
3541 #endif
3543 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3545 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the shift and the vector offset into one LEA. */
3546 if (size == 1 || size == 2 || size == 4 || size == 8) {
3547 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3549 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3550 ins->type = STACK_PTR;
3552 return ins;
3554 #endif
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3556 add_reg = alloc_preg (cfg);
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3559 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3560 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3561 ins->type = STACK_PTR;
3562 MONO_ADD_INS (cfg->cbb, ins);
3564 return ins;
3567 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a two-dimensional array:
 * subtract each dimension's lower bound, range-check both adjusted indices
 * against the per-dimension lengths (throwing IndexOutOfRangeException),
 * then compute &arr->vector [(realidx1 * len2 + realidx2) * element_size].
 * Only compiled when the target has real mul/div (depends on OP_LMUL).
 */
3568 static MonoInst*
3569 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3571 int bounds_reg = alloc_preg (cfg);
3572 int add_reg = alloc_preg (cfg);
3573 int mult_reg = alloc_preg (cfg);
3574 int mult2_reg = alloc_preg (cfg);
3575 int low1_reg = alloc_preg (cfg);
3576 int low2_reg = alloc_preg (cfg);
3577 int high1_reg = alloc_preg (cfg);
3578 int high2_reg = alloc_preg (cfg);
3579 int realidx1_reg = alloc_preg (cfg);
3580 int realidx2_reg = alloc_preg (cfg);
3581 int sum_reg = alloc_preg (cfg);
3582 int index1, index2;
3583 MonoInst *ins;
3584 guint32 size;
3586 mono_class_init (klass);
3587 size = mono_class_array_element_size (klass);
3589 index1 = index_ins1->dreg;
3590 index2 = index_ins2->dreg;
3592 /* range checking */
3593 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3594 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; unsigned compare
 * against length catches both negative and too-large indices. */
3596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3597 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3598 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3600 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3601 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3602 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: same pattern, one MonoArrayBounds further in. */
3604 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3605 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3606 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3607 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3608 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3609 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3610 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address: ((realidx1 * len2) + realidx2) * size + vector offset. */
3612 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3613 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3615 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3616 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3618 ins->type = STACK_MP;
3619 ins->klass = klass;
3620 MONO_ADD_INS (cfg->cbb, ins);
3622 return ins;
3624 #endif
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Address/Set call:
 * rank-1 and (optionally) rank-2 arrays get inline IR; higher ranks fall
 * back to a call to the marshal-generated array-address helper.  For a
 * setter the trailing value argument is excluded from the rank count.
 */
3626 static MonoInst*
3627 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3629 int rank;
3630 MonoInst *addr;
3631 MonoMethod *addr_method;
3632 int element_size;
3634 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3636 if (rank == 1)
3637 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3639 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3640 /* emit_ldelema_2 depends on OP_LMUL */
3641 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3642 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3644 #endif
/* General case: call the generated helper for this rank/element size. */
3646 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3647 addr_method = mono_marshal_get_array_address (rank, element_size);
3648 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3650 return addr;
3653 static MonoInst*
3654 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3656 MonoInst *ins = NULL;
3658 static MonoClass *runtime_helpers_class = NULL;
3659 if (! runtime_helpers_class)
3660 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3661 "System.Runtime.CompilerServices", "RuntimeHelpers");
3663 if (cmethod->klass == mono_defaults.string_class) {
3664 if (strcmp (cmethod->name, "get_Chars") == 0) {
3665 int dreg = alloc_ireg (cfg);
3666 int index_reg = alloc_preg (cfg);
3667 int mult_reg = alloc_preg (cfg);
3668 int add_reg = alloc_preg (cfg);
3670 #if SIZEOF_REGISTER == 8
3671 /* The array reg is 64 bits but the index reg is only 32 */
3672 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3673 #else
3674 index_reg = args [1]->dreg;
3675 #endif
3676 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3678 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3679 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3680 add_reg = ins->dreg;
3681 /* Avoid a warning */
3682 mult_reg = 0;
3683 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3684 add_reg, 0);
3685 #else
3686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3687 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3688 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3689 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3690 #endif
3691 type_from_op (ins, NULL, NULL);
3692 return ins;
3693 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3694 int dreg = alloc_ireg (cfg);
3695 /* Decompose later to allow more optimizations */
3696 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3697 ins->type = STACK_I4;
3698 cfg->cbb->has_array_access = TRUE;
3699 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3701 return ins;
3702 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3703 int mult_reg = alloc_preg (cfg);
3704 int add_reg = alloc_preg (cfg);
3706 /* The corlib functions check for oob already. */
3707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3708 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3710 } else
3711 return NULL;
3712 } else if (cmethod->klass == mono_defaults.object_class) {
3714 if (strcmp (cmethod->name, "GetType") == 0) {
3715 int dreg = alloc_preg (cfg);
3716 int vt_reg = alloc_preg (cfg);
3717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3718 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3719 type_from_op (ins, NULL, NULL);
3721 return ins;
3722 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3723 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3724 int dreg = alloc_ireg (cfg);
3725 int t1 = alloc_ireg (cfg);
3727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3728 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3729 ins->type = STACK_I4;
3731 return ins;
3732 #endif
3733 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3734 MONO_INST_NEW (cfg, ins, OP_NOP);
3735 MONO_ADD_INS (cfg->cbb, ins);
3736 return ins;
3737 } else
3738 return NULL;
3739 } else if (cmethod->klass == mono_defaults.array_class) {
3740 if (cmethod->name [0] != 'g')
3741 return NULL;
3743 if (strcmp (cmethod->name, "get_Rank") == 0) {
3744 int dreg = alloc_ireg (cfg);
3745 int vtable_reg = alloc_preg (cfg);
3746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3747 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3748 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3749 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3750 type_from_op (ins, NULL, NULL);
3752 return ins;
3753 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3754 int dreg = alloc_ireg (cfg);
3756 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3757 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3758 type_from_op (ins, NULL, NULL);
3760 return ins;
3761 } else
3762 return NULL;
3763 } else if (cmethod->klass == runtime_helpers_class) {
3765 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3766 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3767 return ins;
3768 } else
3769 return NULL;
3770 } else if (cmethod->klass == mono_defaults.thread_class) {
3771 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3772 ins->dreg = alloc_preg (cfg);
3773 ins->type = STACK_OBJ;
3774 MONO_ADD_INS (cfg->cbb, ins);
3775 return ins;
3776 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3777 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3778 MONO_ADD_INS (cfg->cbb, ins);
3779 return ins;
3780 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3781 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3782 MONO_ADD_INS (cfg->cbb, ins);
3783 return ins;
3785 } else if (cmethod->klass == mono_defaults.monitor_class) {
3786 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3787 if (strcmp (cmethod->name, "Enter") == 0) {
3788 MonoCallInst *call;
3790 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3791 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3792 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3793 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3795 return (MonoInst*)call;
3796 } else if (strcmp (cmethod->name, "Exit") == 0) {
3797 MonoCallInst *call;
3799 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3800 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3801 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3802 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3804 return (MonoInst*)call;
3806 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3807 MonoMethod *fast_method = NULL;
3809 /* Avoid infinite recursion */
3810 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3811 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3812 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3813 return NULL;
3815 if (strcmp (cmethod->name, "Enter") == 0 ||
3816 strcmp (cmethod->name, "Exit") == 0)
3817 fast_method = mono_monitor_get_fast_path (cmethod);
3818 if (!fast_method)
3819 return NULL;
3821 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3822 #endif
3823 } else if (mini_class_is_system_array (cmethod->klass) &&
3824 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3825 MonoInst *addr, *store, *load;
3826 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3828 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3829 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3830 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3831 return store;
3832 } else if (cmethod->klass->image == mono_defaults.corlib &&
3833 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3834 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3835 ins = NULL;
3837 #if SIZEOF_REGISTER == 8
3838 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3839 /* 64 bit reads are already atomic */
3840 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3841 ins->dreg = mono_alloc_preg (cfg);
3842 ins->inst_basereg = args [0]->dreg;
3843 ins->inst_offset = 0;
3844 MONO_ADD_INS (cfg->cbb, ins);
3846 #endif
3848 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3849 if (strcmp (cmethod->name, "Increment") == 0) {
3850 MonoInst *ins_iconst;
3851 guint32 opcode = 0;
3853 if (fsig->params [0]->type == MONO_TYPE_I4)
3854 opcode = OP_ATOMIC_ADD_NEW_I4;
3855 #if SIZEOF_REGISTER == 8
3856 else if (fsig->params [0]->type == MONO_TYPE_I8)
3857 opcode = OP_ATOMIC_ADD_NEW_I8;
3858 #endif
3859 if (opcode) {
3860 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3861 ins_iconst->inst_c0 = 1;
3862 ins_iconst->dreg = mono_alloc_ireg (cfg);
3863 MONO_ADD_INS (cfg->cbb, ins_iconst);
3865 MONO_INST_NEW (cfg, ins, opcode);
3866 ins->dreg = mono_alloc_ireg (cfg);
3867 ins->inst_basereg = args [0]->dreg;
3868 ins->inst_offset = 0;
3869 ins->sreg2 = ins_iconst->dreg;
3870 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3871 MONO_ADD_INS (cfg->cbb, ins);
3873 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3874 MonoInst *ins_iconst;
3875 guint32 opcode = 0;
3877 if (fsig->params [0]->type == MONO_TYPE_I4)
3878 opcode = OP_ATOMIC_ADD_NEW_I4;
3879 #if SIZEOF_REGISTER == 8
3880 else if (fsig->params [0]->type == MONO_TYPE_I8)
3881 opcode = OP_ATOMIC_ADD_NEW_I8;
3882 #endif
3883 if (opcode) {
3884 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3885 ins_iconst->inst_c0 = -1;
3886 ins_iconst->dreg = mono_alloc_ireg (cfg);
3887 MONO_ADD_INS (cfg->cbb, ins_iconst);
3889 MONO_INST_NEW (cfg, ins, opcode);
3890 ins->dreg = mono_alloc_ireg (cfg);
3891 ins->inst_basereg = args [0]->dreg;
3892 ins->inst_offset = 0;
3893 ins->sreg2 = ins_iconst->dreg;
3894 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3895 MONO_ADD_INS (cfg->cbb, ins);
3897 } else if (strcmp (cmethod->name, "Add") == 0) {
3898 guint32 opcode = 0;
3900 if (fsig->params [0]->type == MONO_TYPE_I4)
3901 opcode = OP_ATOMIC_ADD_NEW_I4;
3902 #if SIZEOF_REGISTER == 8
3903 else if (fsig->params [0]->type == MONO_TYPE_I8)
3904 opcode = OP_ATOMIC_ADD_NEW_I8;
3905 #endif
3907 if (opcode) {
3908 MONO_INST_NEW (cfg, ins, opcode);
3909 ins->dreg = mono_alloc_ireg (cfg);
3910 ins->inst_basereg = args [0]->dreg;
3911 ins->inst_offset = 0;
3912 ins->sreg2 = args [1]->dreg;
3913 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3914 MONO_ADD_INS (cfg->cbb, ins);
3917 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3919 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3920 if (strcmp (cmethod->name, "Exchange") == 0) {
3921 guint32 opcode;
3923 if (fsig->params [0]->type == MONO_TYPE_I4)
3924 opcode = OP_ATOMIC_EXCHANGE_I4;
3925 #if SIZEOF_REGISTER == 8
3926 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3927 (fsig->params [0]->type == MONO_TYPE_I) ||
3928 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3929 opcode = OP_ATOMIC_EXCHANGE_I8;
3930 #else
3931 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3932 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3933 opcode = OP_ATOMIC_EXCHANGE_I4;
3934 #endif
3935 else
3936 return NULL;
3938 MONO_INST_NEW (cfg, ins, opcode);
3939 ins->dreg = mono_alloc_ireg (cfg);
3940 ins->inst_basereg = args [0]->dreg;
3941 ins->inst_offset = 0;
3942 ins->sreg2 = args [1]->dreg;
3943 MONO_ADD_INS (cfg->cbb, ins);
3945 switch (fsig->params [0]->type) {
3946 case MONO_TYPE_I4:
3947 ins->type = STACK_I4;
3948 break;
3949 case MONO_TYPE_I8:
3950 case MONO_TYPE_I:
3951 ins->type = STACK_I8;
3952 break;
3953 case MONO_TYPE_OBJECT:
3954 ins->type = STACK_OBJ;
3955 break;
3956 default:
3957 g_assert_not_reached ();
3960 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3962 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3963 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3964 int size = 0;
3965 if (fsig->params [1]->type == MONO_TYPE_I4)
3966 size = 4;
3967 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3968 size = sizeof (gpointer);
3969 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
3970 size = 8;
3971 if (size == 4) {
3972 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3973 ins->dreg = alloc_ireg (cfg);
3974 ins->sreg1 = args [0]->dreg;
3975 ins->sreg2 = args [1]->dreg;
3976 ins->sreg3 = args [2]->dreg;
3977 ins->type = STACK_I4;
3978 MONO_ADD_INS (cfg->cbb, ins);
3979 } else if (size == 8) {
3980 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3981 ins->dreg = alloc_ireg (cfg);
3982 ins->sreg1 = args [0]->dreg;
3983 ins->sreg2 = args [1]->dreg;
3984 ins->sreg3 = args [2]->dreg;
3985 ins->type = STACK_I8;
3986 MONO_ADD_INS (cfg->cbb, ins);
3987 } else {
3988 /* g_assert_not_reached (); */
3991 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3993 if (ins)
3994 return ins;
3995 } else if (cmethod->klass->image == mono_defaults.corlib) {
3996 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3997 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3998 MONO_INST_NEW (cfg, ins, OP_BREAK);
3999 MONO_ADD_INS (cfg->cbb, ins);
4000 return ins;
4002 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4003 && strcmp (cmethod->klass->name, "Environment") == 0) {
4004 #ifdef PLATFORM_WIN32
4005 EMIT_NEW_ICONST (cfg, ins, 1);
4006 #else
4007 EMIT_NEW_ICONST (cfg, ins, 0);
4008 #endif
4009 return ins;
4011 } else if (cmethod->klass == mono_defaults.math_class) {
4013 * There is general branches code for Min/Max, but it does not work for
4014 * all inputs:
4015 * http://everything2.com/?node_id=1051618
4019 #ifdef MONO_ARCH_SIMD_INTRINSICS
4020 if (cfg->opt & MONO_OPT_SIMD) {
4021 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4022 if (ins)
4023 return ins;
4025 #endif
4027 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4031 * This entry point could be used later for arbitrary method
4032 * redirection.
4034 inline static MonoInst*
4035 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4036 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4038 if (method->klass == mono_defaults.string_class) {
4039 /* managed string allocation support */
4040 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4041 MonoInst *iargs [2];
4042 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4043 #ifdef MONO_CROSS_COMPILE
4044 MonoMethod *managed_alloc = NULL;
4045 #else
4046 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4047 #endif
4048 if (!managed_alloc)
4049 return NULL;
4050 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4051 iargs [1] = args [0];
4052 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4055 return NULL;
4058 static void
4059 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4061 MonoInst *store, *temp;
4062 int i;
4064 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4065 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4068 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4069 * would be different than the MonoInst's used to represent arguments, and
4070 * the ldelema implementation can't deal with that.
4071 * Solution: When ldelema is used on an inline argument, create a var for
4072 * it, emit ldelema on that var, and emit the saving code below in
4073 * inline_method () if needed.
4075 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4076 cfg->args [i] = temp;
4077 /* This uses cfg->args [i] which is set by the preceeding line */
4078 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4079 store->cil_code = sp [0]->cil_code;
4080 sp++;
/*
 * Debug switches: when enabled, inlining can be restricted by method name
 * through the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment
 * variables (see the check_inline_*_method_name_limit () helpers below).
 */
#define MONO_INLINE_CALLED_LIMITED_METHODS 1
#define MONO_INLINE_CALLER_LIMITED_METHODS 1
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only permit CALLED_METHOD to be inlined when its full name
 * begins with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  With the variable unset or empty, everything passes.
 */
static gboolean
check_inline_called_method_name_limit (MonoMethod *called_method)
{
	static char *limit = NULL;
	char *called_method_name;
	int strncmp_result;

	if (limit == NULL) {
		/* Cache the limit; getenv () is only consulted once */
		char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	called_method_name = mono_method_full_name (called_method, TRUE);
	strncmp_result = strncmp (called_method_name, limit, strlen (limit));
	g_free (called_method_name);

	//return (strncmp_result <= 0);
	return (strncmp_result == 0);
}
#endif
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only permit inlining into CALLER_METHOD when its full name
 * begins with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  With the variable unset or empty, everything passes.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	static char *limit = NULL;
	char *caller_method_name;
	int strncmp_result;

	if (limit == NULL) {
		/* Cache the limit; getenv () is only consulted once */
		char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");

		limit = limit_string ? limit_string : (char *) "";
	}

	if (limit [0] == '\0')
		return TRUE;

	caller_method_name = mono_method_full_name (caller_method, TRUE);
	strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
	g_free (caller_method_name);

	//return (strncmp_result <= 0);
	return (strncmp_result == 0);
}
#endif
4147 static int
4148 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4149 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4151 MonoInst *ins, *rvar = NULL;
4152 MonoMethodHeader *cheader;
4153 MonoBasicBlock *ebblock, *sbblock;
4154 int i, costs;
4155 MonoMethod *prev_inlined_method;
4156 MonoInst **prev_locals, **prev_args;
4157 MonoType **prev_arg_types;
4158 guint prev_real_offset;
4159 GHashTable *prev_cbb_hash;
4160 MonoBasicBlock **prev_cil_offset_to_bb;
4161 MonoBasicBlock *prev_cbb;
4162 unsigned char* prev_cil_start;
4163 guint32 prev_cil_offset_to_bb_len;
4164 MonoMethod *prev_current_method;
4165 MonoGenericContext *prev_generic_context;
4166 gboolean ret_var_set, prev_ret_var_set;
4168 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4170 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4171 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4172 return 0;
4173 #endif
4174 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4175 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4176 return 0;
4177 #endif
4179 if (cfg->verbose_level > 2)
4180 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4182 if (!cmethod->inline_info) {
4183 mono_jit_stats.inlineable_methods++;
4184 cmethod->inline_info = 1;
4186 /* allocate space to store the return value */
4187 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4188 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4191 /* allocate local variables */
4192 cheader = mono_method_get_header (cmethod);
4193 prev_locals = cfg->locals;
4194 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4195 for (i = 0; i < cheader->num_locals; ++i)
4196 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4198 /* allocate start and end blocks */
4199 /* This is needed so if the inline is aborted, we can clean up */
4200 NEW_BBLOCK (cfg, sbblock);
4201 sbblock->real_offset = real_offset;
4203 NEW_BBLOCK (cfg, ebblock);
4204 ebblock->block_num = cfg->num_bblocks++;
4205 ebblock->real_offset = real_offset;
4207 prev_args = cfg->args;
4208 prev_arg_types = cfg->arg_types;
4209 prev_inlined_method = cfg->inlined_method;
4210 cfg->inlined_method = cmethod;
4211 cfg->ret_var_set = FALSE;
4212 prev_real_offset = cfg->real_offset;
4213 prev_cbb_hash = cfg->cbb_hash;
4214 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4215 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4216 prev_cil_start = cfg->cil_start;
4217 prev_cbb = cfg->cbb;
4218 prev_current_method = cfg->current_method;
4219 prev_generic_context = cfg->generic_context;
4220 prev_ret_var_set = cfg->ret_var_set;
4222 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4224 ret_var_set = cfg->ret_var_set;
4226 cfg->inlined_method = prev_inlined_method;
4227 cfg->real_offset = prev_real_offset;
4228 cfg->cbb_hash = prev_cbb_hash;
4229 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4230 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4231 cfg->cil_start = prev_cil_start;
4232 cfg->locals = prev_locals;
4233 cfg->args = prev_args;
4234 cfg->arg_types = prev_arg_types;
4235 cfg->current_method = prev_current_method;
4236 cfg->generic_context = prev_generic_context;
4237 cfg->ret_var_set = prev_ret_var_set;
4239 if ((costs >= 0 && costs < 60) || inline_allways) {
4240 if (cfg->verbose_level > 2)
4241 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4243 mono_jit_stats.inlined_methods++;
4245 /* always add some code to avoid block split failures */
4246 MONO_INST_NEW (cfg, ins, OP_NOP);
4247 MONO_ADD_INS (prev_cbb, ins);
4249 prev_cbb->next_bb = sbblock;
4250 link_bblock (cfg, prev_cbb, sbblock);
4253 * Get rid of the begin and end bblocks if possible to aid local
4254 * optimizations.
4256 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4258 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4259 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4261 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4262 MonoBasicBlock *prev = ebblock->in_bb [0];
4263 mono_merge_basic_blocks (cfg, prev, ebblock);
4264 cfg->cbb = prev;
4265 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4266 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4267 cfg->cbb = prev_cbb;
4269 } else {
4270 cfg->cbb = ebblock;
4273 if (rvar) {
4275 * If the inlined method contains only a throw, then the ret var is not
4276 * set, so set it to a dummy value.
4278 if (!ret_var_set) {
4279 static double r8_0 = 0.0;
4281 switch (rvar->type) {
4282 case STACK_I4:
4283 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4284 break;
4285 case STACK_I8:
4286 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4287 break;
4288 case STACK_PTR:
4289 case STACK_MP:
4290 case STACK_OBJ:
4291 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4292 break;
4293 case STACK_R8:
4294 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4295 ins->type = STACK_R8;
4296 ins->inst_p0 = (void*)&r8_0;
4297 ins->dreg = rvar->dreg;
4298 MONO_ADD_INS (cfg->cbb, ins);
4299 break;
4300 case STACK_VTYPE:
4301 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4302 break;
4303 default:
4304 g_assert_not_reached ();
4308 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4309 *sp++ = ins;
4311 return costs + 1;
4312 } else {
4313 if (cfg->verbose_level > 2)
4314 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4315 cfg->exception_type = MONO_EXCEPTION_NONE;
4316 mono_loader_clear_error ();
4318 /* This gets rid of the newly added bblocks */
4319 cfg->cbb = prev_cbb;
4321 return 0;
/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed in simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with value bigger than OP_LAST.
 * At this point the IR can be handed over to an interpreter, a dumb code generator
 * or to the optimizing code generator that will translate it to SSA form.
 *
 * Profiling directed optimizations.
 * We may compile by default with few or no optimizations and instrument the code
 * or the user may indicate what methods to optimize the most either in a config file
 * or through repeated runs where the compiler applies offline the optimizations to
 * each method and then decides if it was worth it.
 */
/*
 * Verification helpers used while converting CIL to IR; each bails out via
 * UNVERIFIED (or load_error) when the bytecode being decoded is malformed.
 * They rely on locals (sp, stack_start, header, num_args, ip, end) of the
 * enclosing decode loop.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
4359 static gboolean
4360 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4362 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4364 return b == NULL || b == bb;
4367 static int
4368 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4370 unsigned char *ip = start;
4371 unsigned char *target;
4372 int i;
4373 guint cli_addr;
4374 MonoBasicBlock *bblock;
4375 const MonoOpcode *opcode;
4377 while (ip < end) {
4378 cli_addr = ip - start;
4379 i = mono_opcode_value ((const guint8 **)&ip, end);
4380 if (i < 0)
4381 UNVERIFIED;
4382 opcode = &mono_opcodes [i];
4383 switch (opcode->argument) {
4384 case MonoInlineNone:
4385 ip++;
4386 break;
4387 case MonoInlineString:
4388 case MonoInlineType:
4389 case MonoInlineField:
4390 case MonoInlineMethod:
4391 case MonoInlineTok:
4392 case MonoInlineSig:
4393 case MonoShortInlineR:
4394 case MonoInlineI:
4395 ip += 5;
4396 break;
4397 case MonoInlineVar:
4398 ip += 3;
4399 break;
4400 case MonoShortInlineVar:
4401 case MonoShortInlineI:
4402 ip += 2;
4403 break;
4404 case MonoShortInlineBrTarget:
4405 target = start + cli_addr + 2 + (signed char)ip [1];
4406 GET_BBLOCK (cfg, bblock, target);
4407 ip += 2;
4408 if (ip < end)
4409 GET_BBLOCK (cfg, bblock, ip);
4410 break;
4411 case MonoInlineBrTarget:
4412 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4413 GET_BBLOCK (cfg, bblock, target);
4414 ip += 5;
4415 if (ip < end)
4416 GET_BBLOCK (cfg, bblock, ip);
4417 break;
4418 case MonoInlineSwitch: {
4419 guint32 n = read32 (ip + 1);
4420 guint32 j;
4421 ip += 5;
4422 cli_addr += 5 + 4 * n;
4423 target = start + cli_addr;
4424 GET_BBLOCK (cfg, bblock, target);
4426 for (j = 0; j < n; ++j) {
4427 target = start + cli_addr + (gint32)read32 (ip);
4428 GET_BBLOCK (cfg, bblock, target);
4429 ip += 4;
4431 break;
4433 case MonoInlineR:
4434 case MonoInlineI8:
4435 ip += 9;
4436 break;
4437 default:
4438 g_assert_not_reached ();
4441 if (i == CEE_THROW) {
4442 unsigned char *bb_start = ip - 1;
4444 /* Find the start of the bblock containing the throw */
4445 bblock = NULL;
4446 while ((bb_start >= start) && !bblock) {
4447 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4448 bb_start --;
4450 if (bblock)
4451 bblock->out_of_line = 1;
4454 return 0;
4455 unverified:
4456 *pos = ip;
4457 return 1;
4460 static inline MonoMethod *
4461 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4463 MonoMethod *method;
4465 if (m->wrapper_type != MONO_WRAPPER_NONE)
4466 return mono_method_get_wrapper_data (m, token);
4468 method = mono_get_method_full (m->klass->image, token, klass, context);
4470 return method;
4473 static inline MonoMethod *
4474 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4476 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4478 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4479 return NULL;
4481 return method;
4484 static inline MonoClass*
4485 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4487 MonoClass *klass;
4489 if (method->wrapper_type != MONO_WRAPPER_NONE)
4490 klass = mono_method_get_wrapper_data (method, token);
4491 else
4492 klass = mono_class_get_full (method->klass->image, token, context);
4493 if (klass)
4494 mono_class_init (klass);
4495 return klass;
4499 * Returns TRUE if the JIT should abort inlining because "callee"
4500 * is influenced by security attributes.
4502 static
4503 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4505 guint32 result;
4507 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4508 return TRUE;
4511 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4512 if (result == MONO_JIT_SECURITY_OK)
4513 return FALSE;
4515 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4516 /* Generate code to throw a SecurityException before the actual call/link */
4517 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4518 MonoInst *args [2];
4520 NEW_ICONST (cfg, args [0], 4);
4521 NEW_METHODCONST (cfg, args [1], caller);
4522 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4523 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4524 /* don't hide previous results */
4525 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4526 cfg->exception_data = result;
4527 return TRUE;
4530 return FALSE;
4533 static MonoMethod*
4534 method_access_exception (void)
4536 static MonoMethod *method = NULL;
4538 if (!method) {
4539 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4540 method = mono_class_get_method_from_name (secman->securitymanager,
4541 "MethodAccessException", 2);
4543 g_assert (method);
4544 return method;
4547 static void
4548 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4549 MonoBasicBlock *bblock, unsigned char *ip)
4551 MonoMethod *thrower = method_access_exception ();
4552 MonoInst *args [2];
4554 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4555 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4556 mono_emit_method_call (cfg, thrower, args, NULL);
4559 static MonoMethod*
4560 field_access_exception (void)
4562 static MonoMethod *method = NULL;
4564 if (!method) {
4565 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4566 method = mono_class_get_method_from_name (secman->securitymanager,
4567 "FieldAccessException", 2);
4569 g_assert (method);
4570 return method;
4573 static void
4574 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4575 MonoBasicBlock *bblock, unsigned char *ip)
4577 MonoMethod *thrower = field_access_exception ();
4578 MonoInst *args [2];
4580 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4581 EMIT_NEW_METHODCONST (cfg, args [1], field);
4582 mono_emit_method_call (cfg, thrower, args, NULL);
4586 * Return the original method is a wrapper is specified. We can only access
4587 * the custom attributes from the original method.
4589 static MonoMethod*
4590 get_original_method (MonoMethod *method)
4592 if (method->wrapper_type == MONO_WRAPPER_NONE)
4593 return method;
4595 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4596 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4597 return NULL;
4599 /* in other cases we need to find the original method */
4600 return mono_marshal_method_from_wrapper (method);
4603 static void
4604 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4605 MonoBasicBlock *bblock, unsigned char *ip)
4607 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4608 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4609 return;
4611 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4612 caller = get_original_method (caller);
4613 if (!caller)
4614 return;
4616 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4617 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4618 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
4621 static void
4622 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4623 MonoBasicBlock *bblock, unsigned char *ip)
4625 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4626 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4627 return;
4629 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4630 caller = get_original_method (caller);
4631 if (!caller)
4632 return;
4634 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4635 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4636 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4640 * Check that the IL instructions at ip are the array initialization
4641 * sequence and return the pointer to the data and the size.
4643 static const char*
4644 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4647 * newarr[System.Int32]
4648 * dup
4649 * ldtoken field valuetype ...
4650 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4652 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4653 guint32 token = read32 (ip + 7);
4654 guint32 field_token = read32 (ip + 2);
4655 guint32 field_index = field_token & 0xffffff;
4656 guint32 rva;
4657 const char *data_ptr;
4658 int size = 0;
4659 MonoMethod *cmethod;
4660 MonoClass *dummy_class;
4661 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4662 int dummy_align;
4664 if (!field)
4665 return NULL;
4667 *out_field_token = field_token;
4669 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4670 if (!cmethod)
4671 return NULL;
4672 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4673 return NULL;
4674 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4675 case MONO_TYPE_BOOLEAN:
4676 case MONO_TYPE_I1:
4677 case MONO_TYPE_U1:
4678 size = 1; break;
4679 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4680 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4681 case MONO_TYPE_CHAR:
4682 case MONO_TYPE_I2:
4683 case MONO_TYPE_U2:
4684 size = 2; break;
4685 case MONO_TYPE_I4:
4686 case MONO_TYPE_U4:
4687 case MONO_TYPE_R4:
4688 size = 4; break;
4689 case MONO_TYPE_R8:
4690 #ifdef ARM_FPU_FPA
4691 return NULL; /* stupid ARM FP swapped format */
4692 #endif
4693 case MONO_TYPE_I8:
4694 case MONO_TYPE_U8:
4695 size = 8; break;
4696 #endif
4697 default:
4698 return NULL;
4700 size *= len;
4701 if (size > mono_type_size (field->type, &dummy_align))
4702 return NULL;
4703 *out_size = size;
4704 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4705 if (!method->klass->image->dynamic) {
4706 field_index = read32 (ip + 2) & 0xffffff;
4707 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4708 data_ptr = mono_image_rva_map (method->klass->image, rva);
4709 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4710 /* for aot code we do the lookup on load */
4711 if (aot && data_ptr)
4712 return GUINT_TO_POINTER (rva);
4713 } else {
4714 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4715 g_assert (!aot);
4716 data_ptr = mono_field_get_data (field);
4718 return data_ptr;
4720 return NULL;
4723 static void
4724 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4726 char *method_fname = mono_method_full_name (method, TRUE);
4727 char *method_code;
4729 if (mono_method_get_header (method)->code_size == 0)
4730 method_code = g_strdup ("method body is empty.");
4731 else
4732 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4733 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4734 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4735 g_free (method_fname);
4736 g_free (method_code);
4739 static void
4740 set_exception_object (MonoCompile *cfg, MonoException *exception)
4742 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4743 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4744 cfg->exception_ptr = exception;
4747 static gboolean
4748 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4750 MonoType *type;
4752 if (cfg->generic_sharing_context)
4753 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4754 else
4755 type = &klass->byval_arg;
4756 return MONO_TYPE_IS_REFERENCE (type);
4760 * mono_decompose_array_access_opts:
4762 * Decompose array access opcodes.
4763 * This should be in decompose.c, but it emits calls so it has to stay here until
4764 * the old JIT is gone.
4766 void
4767 mono_decompose_array_access_opts (MonoCompile *cfg)
4769 MonoBasicBlock *bb, *first_bb;
4772 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4773 * can be executed anytime. It should be run before decompose_long
4777 * Create a dummy bblock and emit code into it so we can use the normal
4778 * code generation macros.
4780 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4781 first_bb = cfg->cbb;
4783 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4784 MonoInst *ins;
4785 MonoInst *prev = NULL;
4786 MonoInst *dest;
4787 MonoInst *iargs [3];
4788 gboolean restart;
4790 if (!bb->has_array_access)
4791 continue;
4793 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4795 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4796 restart = TRUE;
4798 while (restart) {
4799 restart = FALSE;
4801 for (ins = bb->code; ins; ins = ins->next) {
4802 switch (ins->opcode) {
4803 case OP_LDLEN:
4804 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4805 G_STRUCT_OFFSET (MonoArray, max_length));
4806 MONO_ADD_INS (cfg->cbb, dest);
4807 break;
4808 case OP_BOUNDS_CHECK:
4809 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
4810 break;
4811 case OP_NEWARR:
4812 if (cfg->opt & MONO_OPT_SHARED) {
4813 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4814 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4815 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4816 iargs [2]->dreg = ins->sreg1;
4818 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4819 dest->dreg = ins->dreg;
4820 } else {
4821 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4823 g_assert (vtable);
4824 NEW_VTABLECONST (cfg, iargs [0], vtable);
4825 MONO_ADD_INS (cfg->cbb, iargs [0]);
4826 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4827 iargs [1]->dreg = ins->sreg1;
4829 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4830 dest->dreg = ins->dreg;
4832 break;
4833 case OP_STRLEN:
4834 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4835 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4836 MONO_ADD_INS (cfg->cbb, dest);
4837 break;
4838 default:
4839 break;
4842 g_assert (cfg->cbb == first_bb);
4844 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4845 /* Replace the original instruction with the new code sequence */
4847 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4848 first_bb->code = first_bb->last_ins = NULL;
4849 first_bb->in_count = first_bb->out_count = 0;
4850 cfg->cbb = first_bb;
4852 else
4853 prev = ins;
4857 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
/*
 * DVal: type-punning union used by the soft-float pass below to
 * reinterpret the bit pattern of a double as one 64-bit integer
 * (or as two 32-bit words), so fp constants can be materialized
 * into integer vreg pairs.
 */
4861 typedef union {
4862 guint32 vali [2];
4863 gint64 vall;
4864 double vald;
4865 } DVal;
4867 #ifdef MONO_ARCH_SOFT_FLOAT
/*
4870 * mono_decompose_soft_float:
4872 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4873 * similar to long support on 32 bit platforms. 32 bit float values require special
4874 * handling when used as locals, arguments, and in calls.
4875 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 */
4877 void
4878 mono_decompose_soft_float (MonoCompile *cfg)
4880 MonoBasicBlock *bb, *first_bb;
/*
4883 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
 */
/*
4887 * Create a dummy bblock and emit code into it so we can use the normal
4888 * code generation macros.
 */
4890 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4891 first_bb = cfg->cbb;
/* Iterate over all bblocks; 'restart' forces a re-scan of the current bblock
 * whenever a replacement sequence may itself contain fp opcodes. */
4893 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4894 MonoInst *ins;
4895 MonoInst *prev = NULL;
4896 gboolean restart;
4898 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4900 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4901 restart = TRUE;
4903 while (restart) {
4904 restart = FALSE;
4906 for (ins = bb->code; ins; ins = ins->next) {
4907 const char *spec = INS_INFO (ins->opcode);
4909 /* Most fp operations are handled automatically by opcode emulation */
4911 switch (ins->opcode) {
4912 case OP_R8CONST: {
4913 DVal d;
/* Reinterpret the double's bit pattern as a 64 bit integer constant. */
4914 d.vald = *(double*)ins->inst_p0;
4915 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4916 break;
4918 case OP_R4CONST: {
4919 DVal d;
4920 /* We load the r8 value */
4921 d.vald = *(float*)ins->inst_p0;
4922 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4923 break;
4925 case OP_FMOVE:
4926 ins->opcode = OP_LMOVE;
4927 break;
/* An fp vreg R is decomposed into the int vreg pair R+1 (low word) and
 * R+2 (high word); FGETLOW32/FGETHIGH32 become plain moves of one half. */
4928 case OP_FGETLOW32:
4929 ins->opcode = OP_MOVE;
4930 ins->sreg1 = ins->sreg1 + 1;
4931 break;
4932 case OP_FGETHIGH32:
4933 ins->opcode = OP_MOVE;
4934 ins->sreg1 = ins->sreg1 + 2;
4935 break;
4936 case OP_SETFRET: {
4937 int reg = ins->sreg1;
/* Return the double in the two integer return registers, like a long. */
4939 ins->opcode = OP_SETLRET;
4940 ins->dreg = -1;
4941 ins->sreg1 = reg + 1;
4942 ins->sreg2 = reg + 2;
4943 break;
4945 case OP_LOADR8_MEMBASE:
4946 ins->opcode = OP_LOADI8_MEMBASE;
4947 break;
4948 case OP_STORER8_MEMBASE_REG:
4949 ins->opcode = OP_STOREI8_MEMBASE_REG;
4950 break;
/* r4 loads/stores need an r8<->r4 conversion, done through a jit icall. */
4951 case OP_STORER4_MEMBASE_REG: {
4952 MonoInst *iargs [2];
4953 int addr_reg;
4955 /* Arg 1 is the double value */
4956 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4957 iargs [0]->dreg = ins->sreg1;
4959 /* Arg 2 is the address to store to */
4960 addr_reg = mono_alloc_preg (cfg);
4961 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4962 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4963 restart = TRUE;
4964 break;
4966 case OP_LOADR4_MEMBASE: {
4967 MonoInst *iargs [1];
4968 MonoInst *conv;
4969 int addr_reg;
4971 addr_reg = mono_alloc_preg (cfg);
4972 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4973 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4974 conv->dreg = ins->dreg;
4975 break;
4977 case OP_FCALL:
4978 case OP_FCALL_REG:
4979 case OP_FCALL_MEMBASE: {
4980 MonoCallInst *call = (MonoCallInst*)ins;
4981 if (call->signature->ret->type == MONO_TYPE_R4) {
4982 MonoCallInst *call2;
4983 MonoInst *iargs [1];
4984 MonoInst *conv;
4986 /* Convert the call into a call returning an int */
4987 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4988 memcpy (call2, call, sizeof (MonoCallInst));
4989 switch (ins->opcode) {
4990 case OP_FCALL:
4991 call2->inst.opcode = OP_CALL;
4992 break;
4993 case OP_FCALL_REG:
4994 call2->inst.opcode = OP_CALL_REG;
4995 break;
4996 case OP_FCALL_MEMBASE:
4997 call2->inst.opcode = OP_CALL_MEMBASE;
4998 break;
4999 default:
5000 g_assert_not_reached ();
5002 call2->inst.dreg = mono_alloc_ireg (cfg);
5003 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5005 /* FIXME: Optimize this */
5007 /* Emit an r4->r8 conversion */
5008 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5009 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5010 conv->dreg = ins->dreg;
5011 } else {
/* r8 returns are handled like longs: just retype the call opcode. */
5012 switch (ins->opcode) {
5013 case OP_FCALL:
5014 ins->opcode = OP_LCALL;
5015 break;
5016 case OP_FCALL_REG:
5017 ins->opcode = OP_LCALL_REG;
5018 break;
5019 case OP_FCALL_MEMBASE:
5020 ins->opcode = OP_LCALL_MEMBASE;
5021 break;
5022 default:
5023 g_assert_not_reached ();
5026 break;
5028 case OP_FCOMPARE: {
5029 MonoJitICallInfo *info;
5030 MonoInst *iargs [2];
5031 MonoInst *call, *cmp, *br;
5033 /* Convert fcompare+fbcc to icall+icompare+beq */
/* The emulation icall is looked up from the FOLLOWING branch opcode. */
5035 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5036 g_assert (info);
5038 /* Create dummy MonoInst's for the arguments */
5039 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5040 iargs [0]->dreg = ins->sreg1;
5041 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5042 iargs [1]->dreg = ins->sreg2;
5044 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5046 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5047 cmp->sreg1 = call->dreg;
5048 cmp->inst_imm = 0;
5049 MONO_ADD_INS (cfg->cbb, cmp);
/* Branch on the icall's nonzero result, reusing the original targets. */
5051 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5052 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5053 br->inst_true_bb = ins->next->inst_true_bb;
5054 br->inst_false_bb = ins->next->inst_false_bb;
5055 MONO_ADD_INS (cfg->cbb, br);
5057 /* The call sequence might include fp ins */
5058 restart = TRUE;
5060 /* Skip fbcc or fccc */
5061 NULLIFY_INS (ins->next);
5062 break;
5064 case OP_FCEQ:
5065 case OP_FCGT:
5066 case OP_FCGT_UN:
5067 case OP_FCLT:
5068 case OP_FCLT_UN: {
5069 MonoJitICallInfo *info;
5070 MonoInst *iargs [2];
5071 MonoInst *call;
5073 /* Convert fccc to icall+icompare+iceq */
5075 info = mono_find_jit_opcode_emulation (ins->opcode);
5076 g_assert (info);
5078 /* Create dummy MonoInst's for the arguments */
5079 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5080 iargs [0]->dreg = ins->sreg1;
5081 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5082 iargs [1]->dreg = ins->sreg2;
5084 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5087 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5089 /* The call sequence might include fp ins */
5090 restart = TRUE;
5091 break;
5093 case OP_CKFINITE: {
5094 MonoInst *iargs [2];
5095 MonoInst *call, *cmp;
5097 /* Convert to icall+icompare+cond_exc+move */
5099 /* Create dummy MonoInst's for the arguments */
5100 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5101 iargs [0]->dreg = ins->sreg1;
5103 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5105 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5106 cmp->sreg1 = call->dreg;
5107 cmp->inst_imm = 1;
5108 MONO_ADD_INS (cfg->cbb, cmp);
/* Throw if mono_isfinite () did not return 1. */
5110 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5112 /* Do the assignment if the value is finite */
5113 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
5115 restart = TRUE;
5116 break;
5118 default:
/* Any fp opcode still reaching here was neither decomposed above nor
 * emulated: that is a bug, so dump the ins and abort. */
5119 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5120 mono_print_ins (ins);
5121 g_assert_not_reached ();
5123 break;
5126 g_assert (cfg->cbb == first_bb);
5128 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5129 /* Replace the original instruction with the new code sequence */
5131 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* Reset the scratch bblock for the next instruction. */
5132 first_bb->code = first_bb->last_ins = NULL;
5133 first_bb->in_count = first_bb->out_count = 0;
5134 cfg->cbb = first_bb;
5136 else
5137 prev = ins;
5141 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* This pass emitted long opcodes (I8CONST, LMOVE, LCALL, ...); lower them now. */
5144 mono_decompose_long_opts (cfg);
5147 #endif
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *sp into local variable N.
 * When the value is an ICONST/I8CONST that is also the last instruction
 * emitted into the current bblock, the reg-reg move is elided by
 * retargeting that instruction's dreg directly at the local's vreg.
 */
5149 static void
5150 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5152 MonoInst *ins;
/* OP_MOVE here means the local is a scalar held in one vreg. */
5153 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5154 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5155 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5156 /* Optimize reg-reg moves away */
/*
5158 * Can't optimize other opcodes, since sp[0] might point to
5159 * the last ins of a decomposed opcode.
 */
5161 sp [0]->dreg = (cfg)->locals [n]->dreg;
5162 } else {
5163 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
5168 * ldloca inhibits many optimizations so try to get rid of it in common
5169 * cases.
 *
 *   Peephole for 'ldloca <local>' immediately followed by 'initobj <type>'
 * in the same bblock: emit the initialization directly into the local
 * (null pconst for reference types, VZERO for valuetypes) and return the
 * ip just past the initobj operand. Returns NULL when the pattern does
 * not apply, in which case the caller emits a plain ldloca.
 * SIZE selects the short (1-byte index) or fat (2-byte index) encoding.
 * NOTE(review): CHECK_TYPELOAD presumably jumps to load_error on a type
 * load failure -- confirm against the macro definition.
 */
5171 static inline unsigned char *
5172 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5174 int local, token;
5175 MonoClass *klass;
5177 if (size == 1) {
5178 local = ip [1];
5179 ip += 2;
5180 } else {
5181 local = read16 (ip + 2);
5182 ip += 4;
5185 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5186 gboolean skip = FALSE;
5188 /* From the INITOBJ case */
5189 token = read32 (ip + 2);
5190 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5191 CHECK_TYPELOAD (klass);
/* Reference types are zeroed with a null pconst, valuetypes with VZERO;
 * anything else falls back to the generic ldloca path. */
5192 if (generic_class_is_reference_type (cfg, klass)) {
5193 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5194 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5195 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5196 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5197 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5198 } else {
5199 skip = TRUE;
5202 if (!skip)
5203 return ip + 6;
5205 load_error:
5206 return NULL;
5209 static gboolean
5210 is_exception_class (MonoClass *class)
5212 while (class) {
5213 if (class == mono_defaults.exception_class)
5214 return TRUE;
5215 class = class->parent;
5217 return FALSE;
5221 * mono_method_to_ir:
5223 * Translate the .net IL into linear IR.
5226 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5227 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5228 guint inline_offset, gboolean is_virtual_call)
5230 MonoInst *ins, **sp, **stack_start;
5231 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5232 MonoMethod *cmethod, *method_definition;
5233 MonoInst **arg_array;
5234 MonoMethodHeader *header;
5235 MonoImage *image;
5236 guint32 token, ins_flag;
5237 MonoClass *klass;
5238 MonoClass *constrained_call = NULL;
5239 unsigned char *ip, *end, *target, *err_pos;
5240 static double r8_0 = 0.0;
5241 MonoMethodSignature *sig;
5242 MonoGenericContext *generic_context = NULL;
5243 MonoGenericContainer *generic_container = NULL;
5244 MonoType **param_types;
5245 int i, n, start_new_bblock, dreg;
5246 int num_calls = 0, inline_costs = 0;
5247 int breakpoint_id = 0;
5248 guint num_args;
5249 MonoBoolean security, pinvoke;
5250 MonoSecurityManager* secman = NULL;
5251 MonoDeclSecurityActions actions;
5252 GSList *class_inits = NULL;
5253 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5254 int context_used;
5255 gboolean init_locals;
5257 /* serialization and xdomain stuff may need access to private fields and methods */
5258 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5259 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5260 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5261 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5262 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5263 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5265 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5267 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5268 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5269 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5270 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5272 image = method->klass->image;
5273 header = mono_method_get_header (method);
5274 generic_container = mono_method_get_generic_container (method);
5275 sig = mono_method_signature (method);
5276 num_args = sig->hasthis + sig->param_count;
5277 ip = (unsigned char*)header->code;
5278 cfg->cil_start = ip;
5279 end = ip + header->code_size;
5280 mono_jit_stats.cil_code_size += header->code_size;
5281 init_locals = header->init_locals;
5284 * Methods without init_locals set could cause asserts in various passes
5285 * (#497220).
5287 init_locals = TRUE;
5289 method_definition = method;
5290 while (method_definition->is_inflated) {
5291 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5292 method_definition = imethod->declaring;
5295 /* SkipVerification is not allowed if core-clr is enabled */
5296 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5297 dont_verify = TRUE;
5298 dont_verify_stloc = TRUE;
5301 if (!dont_verify && mini_method_verify (cfg, method_definition))
5302 goto exception_exit;
5304 if (mono_debug_using_mono_debugger ())
5305 cfg->keep_cil_nops = TRUE;
5307 if (sig->is_inflated)
5308 generic_context = mono_method_get_context (method);
5309 else if (generic_container)
5310 generic_context = &generic_container->context;
5311 cfg->generic_context = generic_context;
5313 if (!cfg->generic_sharing_context)
5314 g_assert (!sig->has_type_parameters);
5316 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5317 g_assert (method->is_inflated);
5318 g_assert (mono_method_get_context (method)->method_inst);
5320 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5321 g_assert (sig->generic_param_count);
5323 if (cfg->method == method) {
5324 cfg->real_offset = 0;
5325 } else {
5326 cfg->real_offset = inline_offset;
5329 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5330 cfg->cil_offset_to_bb_len = header->code_size;
5332 cfg->current_method = method;
5334 if (cfg->verbose_level > 2)
5335 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5337 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5338 if (sig->hasthis)
5339 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5340 for (n = 0; n < sig->param_count; ++n)
5341 param_types [n + sig->hasthis] = sig->params [n];
5342 cfg->arg_types = param_types;
5344 dont_inline = g_list_prepend (dont_inline, method);
5345 if (cfg->method == method) {
5347 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5348 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5350 /* ENTRY BLOCK */
5351 NEW_BBLOCK (cfg, start_bblock);
5352 cfg->bb_entry = start_bblock;
5353 start_bblock->cil_code = NULL;
5354 start_bblock->cil_length = 0;
5356 /* EXIT BLOCK */
5357 NEW_BBLOCK (cfg, end_bblock);
5358 cfg->bb_exit = end_bblock;
5359 end_bblock->cil_code = NULL;
5360 end_bblock->cil_length = 0;
5361 g_assert (cfg->num_bblocks == 2);
5363 arg_array = cfg->args;
5365 if (header->num_clauses) {
5366 cfg->spvars = g_hash_table_new (NULL, NULL);
5367 cfg->exvars = g_hash_table_new (NULL, NULL);
5369 /* handle exception clauses */
5370 for (i = 0; i < header->num_clauses; ++i) {
5371 MonoBasicBlock *try_bb;
5372 MonoExceptionClause *clause = &header->clauses [i];
5373 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5374 try_bb->real_offset = clause->try_offset;
5375 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5376 tblock->real_offset = clause->handler_offset;
5377 tblock->flags |= BB_EXCEPTION_HANDLER;
5379 link_bblock (cfg, try_bb, tblock);
5381 if (*(ip + clause->handler_offset) == CEE_POP)
5382 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5384 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5385 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5386 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5387 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5388 MONO_ADD_INS (tblock, ins);
5390 /* todo: is a fault block unsafe to optimize? */
5391 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5392 tblock->flags |= BB_EXCEPTION_UNSAFE;
5396 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5397 while (p < end) {
5398 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5400 /* catch and filter blocks get the exception object on the stack */
5401 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5402 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5403 MonoInst *dummy_use;
5405 /* mostly like handle_stack_args (), but just sets the input args */
5406 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5407 tblock->in_scount = 1;
5408 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5409 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5412 * Add a dummy use for the exvar so its liveness info will be
5413 * correct.
5415 cfg->cbb = tblock;
5416 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5418 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5419 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5420 tblock->flags |= BB_EXCEPTION_HANDLER;
5421 tblock->real_offset = clause->data.filter_offset;
5422 tblock->in_scount = 1;
5423 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5424 /* The filter block shares the exvar with the handler block */
5425 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5426 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5427 MONO_ADD_INS (tblock, ins);
5431 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5432 clause->data.catch_class &&
5433 cfg->generic_sharing_context &&
5434 mono_class_check_context_used (clause->data.catch_class)) {
5436 * In shared generic code with catch
5437 * clauses containing type variables
5438 * the exception handling code has to
5439 * be able to get to the rgctx.
5440 * Therefore we have to make sure that
5441 * the vtable/mrgctx argument (for
5442 * static or generic methods) or the
5443 * "this" argument (for non-static
5444 * methods) are live.
5446 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5447 mini_method_get_context (method)->method_inst ||
5448 method->klass->valuetype) {
5449 mono_get_vtable_var (cfg);
5450 } else {
5451 MonoInst *dummy_use;
5453 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5457 } else {
5458 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5459 cfg->cbb = start_bblock;
5460 cfg->args = arg_array;
5461 mono_save_args (cfg, sig, inline_args);
5464 /* FIRST CODE BLOCK */
5465 NEW_BBLOCK (cfg, bblock);
5466 bblock->cil_code = ip;
5467 cfg->cbb = bblock;
5468 cfg->ip = ip;
5470 ADD_BBLOCK (cfg, bblock);
5472 if (cfg->method == method) {
5473 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5474 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5475 MONO_INST_NEW (cfg, ins, OP_BREAK);
5476 MONO_ADD_INS (bblock, ins);
5480 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5481 secman = mono_security_manager_get_methods ();
5483 security = (secman && mono_method_has_declsec (method));
5484 /* at this point having security doesn't mean we have any code to generate */
5485 if (security && (cfg->method == method)) {
5486 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5487 * And we do not want to enter the next section (with allocation) if we
5488 * have nothing to generate */
5489 security = mono_declsec_get_demands (method, &actions);
5492 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5493 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5494 if (pinvoke) {
5495 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5496 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5497 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5499 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5500 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5501 pinvoke = FALSE;
5503 if (custom)
5504 mono_custom_attrs_free (custom);
5506 if (pinvoke) {
5507 custom = mono_custom_attrs_from_class (wrapped->klass);
5508 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5509 pinvoke = FALSE;
5511 if (custom)
5512 mono_custom_attrs_free (custom);
5514 } else {
5515 /* not a P/Invoke after all */
5516 pinvoke = FALSE;
5520 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5521 /* we use a separate basic block for the initialization code */
5522 NEW_BBLOCK (cfg, init_localsbb);
5523 cfg->bb_init = init_localsbb;
5524 init_localsbb->real_offset = cfg->real_offset;
5525 start_bblock->next_bb = init_localsbb;
5526 init_localsbb->next_bb = bblock;
5527 link_bblock (cfg, start_bblock, init_localsbb);
5528 link_bblock (cfg, init_localsbb, bblock);
5530 cfg->cbb = init_localsbb;
5531 } else {
5532 start_bblock->next_bb = bblock;
5533 link_bblock (cfg, start_bblock, bblock);
5536 /* at this point we know, if security is TRUE, that some code needs to be generated */
5537 if (security && (cfg->method == method)) {
5538 MonoInst *args [2];
5540 mono_jit_stats.cas_demand_generation++;
5542 if (actions.demand.blob) {
5543 /* Add code for SecurityAction.Demand */
5544 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5545 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5546 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5547 mono_emit_method_call (cfg, secman->demand, args, NULL);
5549 if (actions.noncasdemand.blob) {
5550 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5551 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5552 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5553 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5554 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5555 mono_emit_method_call (cfg, secman->demand, args, NULL);
5557 if (actions.demandchoice.blob) {
5558 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5559 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5560 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5561 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5562 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5566 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5567 if (pinvoke) {
5568 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5571 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5572 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5573 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5574 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5575 if (!(method->klass && method->klass->image &&
5576 mono_security_core_clr_is_platform_image (method->klass->image))) {
5577 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5583 if (header->code_size == 0)
5584 UNVERIFIED;
5586 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5587 ip = err_pos;
5588 UNVERIFIED;
5591 if (cfg->method == method)
5592 mono_debug_init_method (cfg, bblock, breakpoint_id);
5594 for (n = 0; n < header->num_locals; ++n) {
5595 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5596 UNVERIFIED;
5598 class_inits = NULL;
5600 /* We force the vtable variable here for all shared methods
5601 for the possibility that they might show up in a stack
5602 trace where their exact instantiation is needed. */
5603 if (cfg->generic_sharing_context && method == cfg->method) {
5604 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5605 mini_method_get_context (method)->method_inst ||
5606 method->klass->valuetype) {
5607 mono_get_vtable_var (cfg);
5608 } else {
5609 /* FIXME: Is there a better way to do this?
5610 We need the variable live for the duration
5611 of the whole method. */
5612 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5616 /* add a check for this != NULL to inlined methods */
5617 if (is_virtual_call) {
5618 MonoInst *arg_ins;
5620 NEW_ARGLOAD (cfg, arg_ins, 0);
5621 MONO_ADD_INS (cfg->cbb, arg_ins);
5622 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5623 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5624 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5627 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5628 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5630 ins_flag = 0;
5631 start_new_bblock = 0;
5632 cfg->cbb = bblock;
5633 while (ip < end) {
5635 if (cfg->method == method)
5636 cfg->real_offset = ip - header->code;
5637 else
5638 cfg->real_offset = inline_offset;
5639 cfg->ip = ip;
5641 context_used = 0;
5643 if (start_new_bblock) {
5644 bblock->cil_length = ip - bblock->cil_code;
5645 if (start_new_bblock == 2) {
5646 g_assert (ip == tblock->cil_code);
5647 } else {
5648 GET_BBLOCK (cfg, tblock, ip);
5650 bblock->next_bb = tblock;
5651 bblock = tblock;
5652 cfg->cbb = bblock;
5653 start_new_bblock = 0;
5654 for (i = 0; i < bblock->in_scount; ++i) {
5655 if (cfg->verbose_level > 3)
5656 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5657 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5658 *sp++ = ins;
5660 if (class_inits)
5661 g_slist_free (class_inits);
5662 class_inits = NULL;
5663 } else {
5664 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5665 link_bblock (cfg, bblock, tblock);
5666 if (sp != stack_start) {
5667 handle_stack_args (cfg, stack_start, sp - stack_start);
5668 sp = stack_start;
5669 CHECK_UNVERIFIABLE (cfg);
5671 bblock->next_bb = tblock;
5672 bblock = tblock;
5673 cfg->cbb = bblock;
5674 for (i = 0; i < bblock->in_scount; ++i) {
5675 if (cfg->verbose_level > 3)
5676 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5677 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5678 *sp++ = ins;
5680 g_slist_free (class_inits);
5681 class_inits = NULL;
5685 bblock->real_offset = cfg->real_offset;
5687 if ((cfg->method == method) && cfg->coverage_info) {
5688 guint32 cil_offset = ip - header->code;
5689 cfg->coverage_info->data [cil_offset].cil_code = ip;
5691 /* TODO: Use an increment here */
5692 #if defined(TARGET_X86)
5693 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5694 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5695 ins->inst_imm = 1;
5696 MONO_ADD_INS (cfg->cbb, ins);
5697 #else
5698 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5699 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5700 #endif
5703 if (cfg->verbose_level > 3)
5704 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5706 switch (*ip) {
5707 case CEE_NOP:
5708 if (cfg->keep_cil_nops)
5709 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5710 else
5711 MONO_INST_NEW (cfg, ins, OP_NOP);
5712 ip++;
5713 MONO_ADD_INS (bblock, ins);
5714 break;
5715 case CEE_BREAK:
5716 MONO_INST_NEW (cfg, ins, OP_BREAK);
5717 ip++;
5718 MONO_ADD_INS (bblock, ins);
5719 break;
5720 case CEE_LDARG_0:
5721 case CEE_LDARG_1:
5722 case CEE_LDARG_2:
5723 case CEE_LDARG_3:
5724 CHECK_STACK_OVF (1);
5725 n = (*ip)-CEE_LDARG_0;
5726 CHECK_ARG (n);
5727 EMIT_NEW_ARGLOAD (cfg, ins, n);
5728 ip++;
5729 *sp++ = ins;
5730 break;
5731 case CEE_LDLOC_0:
5732 case CEE_LDLOC_1:
5733 case CEE_LDLOC_2:
5734 case CEE_LDLOC_3:
5735 CHECK_STACK_OVF (1);
5736 n = (*ip)-CEE_LDLOC_0;
5737 CHECK_LOCAL (n);
5738 EMIT_NEW_LOCLOAD (cfg, ins, n);
5739 ip++;
5740 *sp++ = ins;
5741 break;
5742 case CEE_STLOC_0:
5743 case CEE_STLOC_1:
5744 case CEE_STLOC_2:
5745 case CEE_STLOC_3: {
5746 CHECK_STACK (1);
5747 n = (*ip)-CEE_STLOC_0;
5748 CHECK_LOCAL (n);
5749 --sp;
5750 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5751 UNVERIFIED;
5752 emit_stloc_ir (cfg, sp, header, n);
5753 ++ip;
5754 inline_costs += 1;
5755 break;
5757 case CEE_LDARG_S:
5758 CHECK_OPSIZE (2);
5759 CHECK_STACK_OVF (1);
5760 n = ip [1];
5761 CHECK_ARG (n);
5762 EMIT_NEW_ARGLOAD (cfg, ins, n);
5763 *sp++ = ins;
5764 ip += 2;
5765 break;
5766 case CEE_LDARGA_S:
5767 CHECK_OPSIZE (2);
5768 CHECK_STACK_OVF (1);
5769 n = ip [1];
5770 CHECK_ARG (n);
5771 NEW_ARGLOADA (cfg, ins, n);
5772 MONO_ADD_INS (cfg->cbb, ins);
5773 *sp++ = ins;
5774 ip += 2;
5775 break;
5776 case CEE_STARG_S:
5777 CHECK_OPSIZE (2);
5778 CHECK_STACK (1);
5779 --sp;
5780 n = ip [1];
5781 CHECK_ARG (n);
5782 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5783 UNVERIFIED;
5784 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5785 ip += 2;
5786 break;
5787 case CEE_LDLOC_S:
5788 CHECK_OPSIZE (2);
5789 CHECK_STACK_OVF (1);
5790 n = ip [1];
5791 CHECK_LOCAL (n);
5792 EMIT_NEW_LOCLOAD (cfg, ins, n);
5793 *sp++ = ins;
5794 ip += 2;
5795 break;
5796 case CEE_LDLOCA_S: {
5797 unsigned char *tmp_ip;
5798 CHECK_OPSIZE (2);
5799 CHECK_STACK_OVF (1);
5800 CHECK_LOCAL (ip [1]);
5802 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5803 ip = tmp_ip;
5804 inline_costs += 1;
5805 break;
5808 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5809 *sp++ = ins;
5810 ip += 2;
5811 break;
5813 case CEE_STLOC_S:
5814 CHECK_OPSIZE (2);
5815 CHECK_STACK (1);
5816 --sp;
5817 CHECK_LOCAL (ip [1]);
5818 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5819 UNVERIFIED;
5820 emit_stloc_ir (cfg, sp, header, ip [1]);
5821 ip += 2;
5822 inline_costs += 1;
5823 break;
5824 case CEE_LDNULL:
5825 CHECK_STACK_OVF (1);
5826 EMIT_NEW_PCONST (cfg, ins, NULL);
5827 ins->type = STACK_OBJ;
5828 ++ip;
5829 *sp++ = ins;
5830 break;
5831 case CEE_LDC_I4_M1:
5832 CHECK_STACK_OVF (1);
5833 EMIT_NEW_ICONST (cfg, ins, -1);
5834 ++ip;
5835 *sp++ = ins;
5836 break;
5837 case CEE_LDC_I4_0:
5838 case CEE_LDC_I4_1:
5839 case CEE_LDC_I4_2:
5840 case CEE_LDC_I4_3:
5841 case CEE_LDC_I4_4:
5842 case CEE_LDC_I4_5:
5843 case CEE_LDC_I4_6:
5844 case CEE_LDC_I4_7:
5845 case CEE_LDC_I4_8:
5846 CHECK_STACK_OVF (1);
5847 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5848 ++ip;
5849 *sp++ = ins;
5850 break;
5851 case CEE_LDC_I4_S:
5852 CHECK_OPSIZE (2);
5853 CHECK_STACK_OVF (1);
5854 ++ip;
5855 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5856 ++ip;
5857 *sp++ = ins;
5858 break;
5859 case CEE_LDC_I4:
5860 CHECK_OPSIZE (5);
5861 CHECK_STACK_OVF (1);
5862 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5863 ip += 5;
5864 *sp++ = ins;
5865 break;
5866 case CEE_LDC_I8:
5867 CHECK_OPSIZE (9);
5868 CHECK_STACK_OVF (1);
5869 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5870 ins->type = STACK_I8;
5871 ins->dreg = alloc_dreg (cfg, STACK_I8);
5872 ++ip;
5873 ins->inst_l = (gint64)read64 (ip);
5874 MONO_ADD_INS (bblock, ins);
5875 ip += 8;
5876 *sp++ = ins;
5877 break;
5878 case CEE_LDC_R4: {
5879 float *f;
5880 gboolean use_aotconst = FALSE;
5882 #ifdef TARGET_POWERPC
5883 /* FIXME: Clean this up */
5884 if (cfg->compile_aot)
5885 use_aotconst = TRUE;
5886 #endif
5888 /* FIXME: we should really allocate this only late in the compilation process */
5889 f = mono_domain_alloc (cfg->domain, sizeof (float));
5890 CHECK_OPSIZE (5);
5891 CHECK_STACK_OVF (1);
5893 if (use_aotconst) {
5894 MonoInst *cons;
5895 int dreg;
5897 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5899 dreg = alloc_freg (cfg);
5900 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5901 ins->type = STACK_R8;
5902 } else {
5903 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5904 ins->type = STACK_R8;
5905 ins->dreg = alloc_dreg (cfg, STACK_R8);
5906 ins->inst_p0 = f;
5907 MONO_ADD_INS (bblock, ins);
5909 ++ip;
5910 readr4 (ip, f);
5911 ip += 4;
5912 *sp++ = ins;
5913 break;
5915 case CEE_LDC_R8: {
5916 double *d;
5917 gboolean use_aotconst = FALSE;
5919 #ifdef TARGET_POWERPC
5920 /* FIXME: Clean this up */
5921 if (cfg->compile_aot)
5922 use_aotconst = TRUE;
5923 #endif
5925 /* FIXME: we should really allocate this only late in the compilation process */
5926 d = mono_domain_alloc (cfg->domain, sizeof (double));
5927 CHECK_OPSIZE (9);
5928 CHECK_STACK_OVF (1);
5930 if (use_aotconst) {
5931 MonoInst *cons;
5932 int dreg;
5934 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5936 dreg = alloc_freg (cfg);
5937 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5938 ins->type = STACK_R8;
5939 } else {
5940 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5941 ins->type = STACK_R8;
5942 ins->dreg = alloc_dreg (cfg, STACK_R8);
5943 ins->inst_p0 = d;
5944 MONO_ADD_INS (bblock, ins);
5946 ++ip;
5947 readr8 (ip, d);
5948 ip += 8;
5949 *sp++ = ins;
5950 break;
5952 case CEE_DUP: {
5953 MonoInst *temp, *store;
5954 CHECK_STACK (1);
5955 CHECK_STACK_OVF (1);
5956 sp--;
5957 ins = *sp;
5959 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5960 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5962 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5963 *sp++ = ins;
5965 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5966 *sp++ = ins;
5968 ++ip;
5969 inline_costs += 2;
5970 break;
5972 case CEE_POP:
5973 CHECK_STACK (1);
5974 ip++;
5975 --sp;
5977 #ifdef TARGET_X86
5978 if (sp [0]->type == STACK_R8)
5979 /* we need to pop the value from the x86 FP stack */
5980 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5981 #endif
5982 break;
5983 case CEE_JMP: {
5984 MonoCallInst *call;
5986 INLINE_FAILURE;
5988 CHECK_OPSIZE (5);
5989 if (stack_start != sp)
5990 UNVERIFIED;
5991 token = read32 (ip + 1);
5992 /* FIXME: check the signature matches */
5993 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5995 if (!cmethod)
5996 goto load_error;
5998 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5999 GENERIC_SHARING_FAILURE (CEE_JMP);
6001 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6002 CHECK_CFG_EXCEPTION;
6004 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6006 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6007 int i, n;
6009 /* Handle tail calls similarly to calls */
6010 n = fsig->param_count + fsig->hasthis;
6012 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6013 call->method = cmethod;
6014 call->tail_call = TRUE;
6015 call->signature = mono_method_signature (cmethod);
6016 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6017 call->inst.inst_p0 = cmethod;
6018 for (i = 0; i < n; ++i)
6019 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6021 mono_arch_emit_call (cfg, call);
6022 MONO_ADD_INS (bblock, (MonoInst*)call);
6024 #else
6025 for (i = 0; i < num_args; ++i)
6026 /* Prevent arguments from being optimized away */
6027 arg_array [i]->flags |= MONO_INST_VOLATILE;
6029 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6030 ins = (MonoInst*)call;
6031 ins->inst_p0 = cmethod;
6032 MONO_ADD_INS (bblock, ins);
6033 #endif
6035 ip += 5;
6036 start_new_bblock = 1;
6037 break;
6039 case CEE_CALLI:
6040 case CEE_CALL:
6041 case CEE_CALLVIRT: {
6042 MonoInst *addr = NULL;
6043 MonoMethodSignature *fsig = NULL;
6044 int array_rank = 0;
6045 int virtual = *ip == CEE_CALLVIRT;
6046 int calli = *ip == CEE_CALLI;
6047 gboolean pass_imt_from_rgctx = FALSE;
6048 MonoInst *imt_arg = NULL;
6049 gboolean pass_vtable = FALSE;
6050 gboolean pass_mrgctx = FALSE;
6051 MonoInst *vtable_arg = NULL;
6052 gboolean check_this = FALSE;
6053 gboolean supported_tail_call = FALSE;
6055 CHECK_OPSIZE (5);
6056 token = read32 (ip + 1);
6058 if (calli) {
6059 cmethod = NULL;
6060 CHECK_STACK (1);
6061 --sp;
6062 addr = *sp;
6063 if (method->wrapper_type != MONO_WRAPPER_NONE)
6064 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6065 else
6066 fsig = mono_metadata_parse_signature (image, token);
6068 n = fsig->param_count + fsig->hasthis;
6069 } else {
6070 MonoMethod *cil_method;
6072 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6073 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6074 cil_method = cmethod;
6075 } else if (constrained_call) {
6076 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6078 * This is needed since get_method_constrained can't find
6079 * the method in klass representing a type var.
6080 * The type var is guaranteed to be a reference type in this
6081 * case.
6083 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6084 cil_method = cmethod;
6085 g_assert (!cmethod->klass->valuetype);
6086 } else {
6087 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6089 } else {
6090 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6091 cil_method = cmethod;
6094 if (!cmethod)
6095 goto load_error;
6096 if (!dont_verify && !cfg->skip_visibility) {
6097 MonoMethod *target_method = cil_method;
6098 if (method->is_inflated) {
6099 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6101 if (!mono_method_can_access_method (method_definition, target_method) &&
6102 !mono_method_can_access_method (method, cil_method))
6103 METHOD_ACCESS_FAILURE;
6106 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6107 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6109 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6110 /* MS.NET seems to silently convert this to a callvirt */
6111 virtual = 1;
6113 if (!cmethod->klass->inited)
6114 if (!mono_class_init (cmethod->klass))
6115 goto load_error;
6117 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6118 mini_class_is_system_array (cmethod->klass)) {
6119 array_rank = cmethod->klass->rank;
6120 fsig = mono_method_signature (cmethod);
6121 } else {
6122 if (mono_method_signature (cmethod)->pinvoke) {
6123 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6124 check_for_pending_exc, FALSE);
6125 fsig = mono_method_signature (wrapper);
6126 } else if (constrained_call) {
6127 fsig = mono_method_signature (cmethod);
6128 } else {
6129 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6133 mono_save_token_info (cfg, image, token, cil_method);
6135 n = fsig->param_count + fsig->hasthis;
6137 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6138 if (check_linkdemand (cfg, method, cmethod))
6139 INLINE_FAILURE;
6140 CHECK_CFG_EXCEPTION;
6143 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6144 g_assert_not_reached ();
6147 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6148 UNVERIFIED;
6150 if (!cfg->generic_sharing_context && cmethod)
6151 g_assert (!mono_method_check_context_used (cmethod));
6153 CHECK_STACK (n);
6155 //g_assert (!virtual || fsig->hasthis);
6157 sp -= n;
6159 if (constrained_call) {
6161 * We have the `constrained.' prefix opcode.
6163 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6164 int dreg;
6167 * The type parameter is instantiated as a valuetype,
6168 * but that type doesn't override the method we're
6169 * calling, so we need to box `this'.
6171 dreg = alloc_dreg (cfg, STACK_VTYPE);
6172 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6173 ins->klass = constrained_call;
6174 sp [0] = handle_box (cfg, ins, constrained_call);
6175 } else if (!constrained_call->valuetype) {
6176 int dreg = alloc_preg (cfg);
6179 * The type parameter is instantiated as a reference
6180 * type. We have a managed pointer on the stack, so
6181 * we need to dereference it here.
6183 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6184 ins->type = STACK_OBJ;
6185 sp [0] = ins;
6186 } else if (cmethod->klass->valuetype)
6187 virtual = 0;
6188 constrained_call = NULL;
6191 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6192 UNVERIFIED;
6195 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6196 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6197 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6198 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6199 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6202 * Pass vtable iff target method might
6203 * be shared, which means that sharing
6204 * is enabled for its class and its
6205 * context is sharable (and it's not a
6206 * generic method).
6208 if (sharing_enabled && context_sharable &&
6209 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6210 pass_vtable = TRUE;
6213 if (cmethod && mini_method_get_context (cmethod) &&
6214 mini_method_get_context (cmethod)->method_inst) {
6215 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6216 MonoGenericContext *context = mini_method_get_context (cmethod);
6217 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6219 g_assert (!pass_vtable);
6221 if (sharing_enabled && context_sharable)
6222 pass_mrgctx = TRUE;
6225 if (cfg->generic_sharing_context && cmethod) {
6226 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6228 context_used = mono_method_check_context_used (cmethod);
6230 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6231 /* Generic method interface
6232 calls are resolved via a
6233 helper function and don't
6234 need an imt. */
6235 if (!cmethod_context || !cmethod_context->method_inst)
6236 pass_imt_from_rgctx = TRUE;
6240 * If a shared method calls another
6241 * shared method then the caller must
6242 * have a generic sharing context
6243 * because the magic trampoline
6244 * requires it. FIXME: We shouldn't
6245 * have to force the vtable/mrgctx
6246 * variable here. Instead there
6247 * should be a flag in the cfg to
6248 * request a generic sharing context.
6250 if (context_used &&
6251 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6252 mono_get_vtable_var (cfg);
6255 if (pass_vtable) {
6256 if (context_used) {
6257 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6258 } else {
6259 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6261 CHECK_TYPELOAD (cmethod->klass);
6262 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6266 if (pass_mrgctx) {
6267 g_assert (!vtable_arg);
6269 if (context_used) {
6270 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6271 } else {
6272 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6275 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6276 MONO_METHOD_IS_FINAL (cmethod)) {
6277 if (virtual)
6278 check_this = TRUE;
6279 virtual = 0;
6283 if (pass_imt_from_rgctx) {
6284 g_assert (!pass_vtable);
6285 g_assert (cmethod);
6287 imt_arg = emit_get_rgctx_method (cfg, context_used,
6288 cmethod, MONO_RGCTX_INFO_METHOD);
6291 if (check_this) {
6292 MonoInst *check;
6294 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6295 check->sreg1 = sp [0]->dreg;
6296 MONO_ADD_INS (cfg->cbb, check);
6299 /* Calling virtual generic methods */
6300 if (cmethod && virtual &&
6301 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6302 !(MONO_METHOD_IS_FINAL (cmethod) &&
6303 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6304 mono_method_signature (cmethod)->generic_param_count) {
6305 MonoInst *this_temp, *this_arg_temp, *store;
6306 MonoInst *iargs [4];
6308 g_assert (mono_method_signature (cmethod)->is_inflated);
6310 /* Prevent inlining of methods that contain indirect calls */
6311 INLINE_FAILURE;
6313 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && !defined(ENABLE_LLVM)
6314 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6315 g_assert (!imt_arg);
6316 if (context_used) {
6317 imt_arg = emit_get_rgctx_method (cfg, context_used,
6318 cmethod, MONO_RGCTX_INFO_METHOD);
6320 } else {
6321 g_assert (cmethod->is_inflated);
6322 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6324 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6325 } else
6326 #endif
6328 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6329 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6330 MONO_ADD_INS (bblock, store);
6332 /* FIXME: This should be a managed pointer */
6333 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6335 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6336 if (context_used) {
6337 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6338 cmethod, MONO_RGCTX_INFO_METHOD);
6339 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6340 addr = mono_emit_jit_icall (cfg,
6341 mono_helper_compile_generic_method, iargs);
6342 } else {
6343 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6344 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6345 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6348 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6350 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6353 if (!MONO_TYPE_IS_VOID (fsig->ret))
6354 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6356 ip += 5;
6357 ins_flag = 0;
6358 break;
6361 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6362 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6363 #else
6364 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6365 #endif
6367 /* Tail prefix */
6368 /* FIXME: runtime generic context pointer for jumps? */
6369 /* FIXME: handle this for generic sharing eventually */
6370 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6371 MonoCallInst *call;
6373 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6374 INLINE_FAILURE;
6376 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6377 /* Handle tail calls similarly to calls */
6378 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6379 #else
6380 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6381 call->tail_call = TRUE;
6382 call->method = cmethod;
6383 call->signature = mono_method_signature (cmethod);
6386 * We implement tail calls by storing the actual arguments into the
6387 * argument variables, then emitting a CEE_JMP.
6389 for (i = 0; i < n; ++i) {
6390 /* Prevent argument from being register allocated */
6391 arg_array [i]->flags |= MONO_INST_VOLATILE;
6392 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6394 #endif
6396 ins = (MonoInst*)call;
6397 ins->inst_p0 = cmethod;
6398 ins->inst_p1 = arg_array [0];
6399 MONO_ADD_INS (bblock, ins);
6400 link_bblock (cfg, bblock, end_bblock);
6401 start_new_bblock = 1;
6402 /* skip CEE_RET as well */
6403 ip += 6;
6404 ins_flag = 0;
6405 break;
6408 /* Conversion to a JIT intrinsic */
6409 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6410 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6411 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6412 *sp = ins;
6413 sp++;
6416 ip += 5;
6417 ins_flag = 0;
6418 break;
6421 /* Inlining */
6422 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6423 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6424 mono_method_check_inlining (cfg, cmethod) &&
6425 !g_list_find (dont_inline, cmethod)) {
6426 int costs;
6427 gboolean allways = FALSE;
6429 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6430 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6431 /* Prevent inlining of methods that call wrappers */
6432 INLINE_FAILURE;
6433 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6434 allways = TRUE;
6437 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6438 ip += 5;
6439 cfg->real_offset += 5;
6440 bblock = cfg->cbb;
6442 if (!MONO_TYPE_IS_VOID (fsig->ret))
6443 /* *sp is already set by inline_method */
6444 sp++;
6446 inline_costs += costs;
6447 ins_flag = 0;
6448 break;
6452 inline_costs += 10 * num_calls++;
6454 /* Tail recursion elimination */
6455 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6456 gboolean has_vtargs = FALSE;
6457 int i;
6459 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6460 INLINE_FAILURE;
6462 /* keep it simple */
6463 for (i = fsig->param_count - 1; i >= 0; i--) {
6464 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6465 has_vtargs = TRUE;
6468 if (!has_vtargs) {
6469 for (i = 0; i < n; ++i)
6470 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6471 MONO_INST_NEW (cfg, ins, OP_BR);
6472 MONO_ADD_INS (bblock, ins);
6473 tblock = start_bblock->out_bb [0];
6474 link_bblock (cfg, bblock, tblock);
6475 ins->inst_target_bb = tblock;
6476 start_new_bblock = 1;
6478 /* skip the CEE_RET, too */
6479 if (ip_in_bb (cfg, bblock, ip + 5))
6480 ip += 6;
6481 else
6482 ip += 5;
6484 ins_flag = 0;
6485 break;
6489 /* Generic sharing */
6490 /* FIXME: only do this for generic methods if
6491 they are not shared! */
6492 if (context_used && !imt_arg && !array_rank &&
6493 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6494 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6495 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6496 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6497 INLINE_FAILURE;
6499 g_assert (cfg->generic_sharing_context && cmethod);
6500 g_assert (!addr);
6503 * We are compiling a call to a
6504 * generic method from shared code,
6505 * which means that we have to look up
6506 * the method in the rgctx and do an
6507 * indirect call.
6509 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6512 /* Indirect calls */
6513 if (addr) {
6514 g_assert (!imt_arg);
6516 if (*ip == CEE_CALL)
6517 g_assert (context_used);
6518 else if (*ip == CEE_CALLI)
6519 g_assert (!vtable_arg);
6520 else
6521 /* FIXME: what the hell is this??? */
6522 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6523 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6525 /* Prevent inlining of methods with indirect calls */
6526 INLINE_FAILURE;
6528 if (vtable_arg) {
6529 #ifdef MONO_ARCH_RGCTX_REG
6530 MonoCallInst *call;
6531 int rgctx_reg = mono_alloc_preg (cfg);
6533 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6534 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6535 call = (MonoCallInst*)ins;
6536 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6537 cfg->uses_rgctx_reg = TRUE;
6538 call->rgctx_reg = TRUE;
6539 #else
6540 NOT_IMPLEMENTED;
6541 #endif
6542 } else {
6543 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6545 * Instead of emitting an indirect call, emit a direct call
6546 * with the contents of the aotconst as the patch info.
6548 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6549 NULLIFY_INS (addr);
6550 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6551 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6552 NULLIFY_INS (addr);
6553 } else {
6554 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6557 if (!MONO_TYPE_IS_VOID (fsig->ret))
6558 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6560 ip += 5;
6561 ins_flag = 0;
6562 break;
6565 /* Array methods */
6566 if (array_rank) {
6567 MonoInst *addr;
6569 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6570 if (sp [fsig->param_count]->type == STACK_OBJ) {
6571 MonoInst *iargs [2];
6573 iargs [0] = sp [0];
6574 iargs [1] = sp [fsig->param_count];
6576 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6579 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6580 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6581 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6582 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6584 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6586 *sp++ = ins;
6587 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6588 if (!cmethod->klass->element_class->valuetype && !readonly)
6589 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6591 readonly = FALSE;
6592 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6593 *sp++ = addr;
6594 } else {
6595 g_assert_not_reached ();
6598 ip += 5;
6599 ins_flag = 0;
6600 break;
6603 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6604 if (ins) {
6605 if (!MONO_TYPE_IS_VOID (fsig->ret))
6606 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6608 ip += 5;
6609 ins_flag = 0;
6610 break;
6613 /* Common call */
6614 INLINE_FAILURE;
6615 if (vtable_arg) {
6616 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6617 NULL, vtable_arg);
6618 } else if (imt_arg) {
6619 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6620 } else {
6621 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6624 if (!MONO_TYPE_IS_VOID (fsig->ret))
6625 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6627 ip += 5;
6628 ins_flag = 0;
6629 break;
6631 case CEE_RET:
6632 if (cfg->method != method) {
6633 /* return from inlined method */
6635 * If in_count == 0, that means the ret is unreachable due to
6636 * being preceded by a throw. In that case, inline_method () will
6637 * handle setting the return value
6638 * (test case: test_0_inline_throw ()).
6640 if (return_var && cfg->cbb->in_count) {
6641 MonoInst *store;
6642 CHECK_STACK (1);
6643 --sp;
6644 //g_assert (returnvar != -1);
6645 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6646 cfg->ret_var_set = TRUE;
6648 } else {
6649 if (cfg->ret) {
6650 MonoType *ret_type = mono_method_signature (method)->ret;
6652 g_assert (!return_var);
6653 CHECK_STACK (1);
6654 --sp;
6655 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6656 MonoInst *ret_addr;
6658 if (!cfg->vret_addr) {
6659 MonoInst *ins;
6661 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6662 } else {
6663 EMIT_NEW_RETLOADA (cfg, ret_addr);
6665 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6666 ins->klass = mono_class_from_mono_type (ret_type);
6668 } else {
6669 #ifdef MONO_ARCH_SOFT_FLOAT
6670 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6671 MonoInst *iargs [1];
6672 MonoInst *conv;
6674 iargs [0] = *sp;
6675 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6676 mono_arch_emit_setret (cfg, method, conv);
6677 } else {
6678 mono_arch_emit_setret (cfg, method, *sp);
6680 #else
6681 mono_arch_emit_setret (cfg, method, *sp);
6682 #endif
6686 if (sp != stack_start)
6687 UNVERIFIED;
6688 MONO_INST_NEW (cfg, ins, OP_BR);
6689 ip++;
6690 ins->inst_target_bb = end_bblock;
6691 MONO_ADD_INS (bblock, ins);
6692 link_bblock (cfg, bblock, end_bblock);
6693 start_new_bblock = 1;
6694 break;
6695 case CEE_BR_S:
6696 CHECK_OPSIZE (2);
6697 MONO_INST_NEW (cfg, ins, OP_BR);
6698 ip++;
6699 target = ip + 1 + (signed char)(*ip);
6700 ++ip;
6701 GET_BBLOCK (cfg, tblock, target);
6702 link_bblock (cfg, bblock, tblock);
6703 ins->inst_target_bb = tblock;
6704 if (sp != stack_start) {
6705 handle_stack_args (cfg, stack_start, sp - stack_start);
6706 sp = stack_start;
6707 CHECK_UNVERIFIABLE (cfg);
6709 MONO_ADD_INS (bblock, ins);
6710 start_new_bblock = 1;
6711 inline_costs += BRANCH_COST;
6712 break;
6713 case CEE_BEQ_S:
6714 case CEE_BGE_S:
6715 case CEE_BGT_S:
6716 case CEE_BLE_S:
6717 case CEE_BLT_S:
6718 case CEE_BNE_UN_S:
6719 case CEE_BGE_UN_S:
6720 case CEE_BGT_UN_S:
6721 case CEE_BLE_UN_S:
6722 case CEE_BLT_UN_S:
6723 CHECK_OPSIZE (2);
6724 CHECK_STACK (2);
6725 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6726 ip++;
6727 target = ip + 1 + *(signed char*)ip;
6728 ip++;
6730 ADD_BINCOND (NULL);
6732 sp = stack_start;
6733 inline_costs += BRANCH_COST;
6734 break;
6735 case CEE_BR:
6736 CHECK_OPSIZE (5);
6737 MONO_INST_NEW (cfg, ins, OP_BR);
6738 ip++;
6740 target = ip + 4 + (gint32)read32(ip);
6741 ip += 4;
6742 GET_BBLOCK (cfg, tblock, target);
6743 link_bblock (cfg, bblock, tblock);
6744 ins->inst_target_bb = tblock;
6745 if (sp != stack_start) {
6746 handle_stack_args (cfg, stack_start, sp - stack_start);
6747 sp = stack_start;
6748 CHECK_UNVERIFIABLE (cfg);
6751 MONO_ADD_INS (bblock, ins);
6753 start_new_bblock = 1;
6754 inline_costs += BRANCH_COST;
6755 break;
6756 case CEE_BRFALSE_S:
6757 case CEE_BRTRUE_S:
6758 case CEE_BRFALSE:
6759 case CEE_BRTRUE: {
6760 MonoInst *cmp;
6761 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6762 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6763 guint32 opsize = is_short ? 1 : 4;
6765 CHECK_OPSIZE (opsize);
6766 CHECK_STACK (1);
6767 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6768 UNVERIFIED;
6769 ip ++;
6770 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6771 ip += opsize;
6773 sp--;
6775 GET_BBLOCK (cfg, tblock, target);
6776 link_bblock (cfg, bblock, tblock);
6777 GET_BBLOCK (cfg, tblock, ip);
6778 link_bblock (cfg, bblock, tblock);
6780 if (sp != stack_start) {
6781 handle_stack_args (cfg, stack_start, sp - stack_start);
6782 CHECK_UNVERIFIABLE (cfg);
6785 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6786 cmp->sreg1 = sp [0]->dreg;
6787 type_from_op (cmp, sp [0], NULL);
6788 CHECK_TYPE (cmp);
6790 #if SIZEOF_REGISTER == 4
6791 if (cmp->opcode == OP_LCOMPARE_IMM) {
6792 /* Convert it to OP_LCOMPARE */
6793 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6794 ins->type = STACK_I8;
6795 ins->dreg = alloc_dreg (cfg, STACK_I8);
6796 ins->inst_l = 0;
6797 MONO_ADD_INS (bblock, ins);
6798 cmp->opcode = OP_LCOMPARE;
6799 cmp->sreg2 = ins->dreg;
6801 #endif
6802 MONO_ADD_INS (bblock, cmp);
6804 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6805 type_from_op (ins, sp [0], NULL);
6806 MONO_ADD_INS (bblock, ins);
6807 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6808 GET_BBLOCK (cfg, tblock, target);
6809 ins->inst_true_bb = tblock;
6810 GET_BBLOCK (cfg, tblock, ip);
6811 ins->inst_false_bb = tblock;
6812 start_new_bblock = 2;
6814 sp = stack_start;
6815 inline_costs += BRANCH_COST;
6816 break;
6818 case CEE_BEQ:
6819 case CEE_BGE:
6820 case CEE_BGT:
6821 case CEE_BLE:
6822 case CEE_BLT:
6823 case CEE_BNE_UN:
6824 case CEE_BGE_UN:
6825 case CEE_BGT_UN:
6826 case CEE_BLE_UN:
6827 case CEE_BLT_UN:
6828 CHECK_OPSIZE (5);
6829 CHECK_STACK (2);
6830 MONO_INST_NEW (cfg, ins, *ip);
6831 ip++;
6832 target = ip + 4 + (gint32)read32(ip);
6833 ip += 4;
6835 ADD_BINCOND (NULL);
6837 sp = stack_start;
6838 inline_costs += BRANCH_COST;
6839 break;
6840 case CEE_SWITCH: {
6841 MonoInst *src1;
6842 MonoBasicBlock **targets;
6843 MonoBasicBlock *default_bblock;
6844 MonoJumpInfoBBTable *table;
6845 int offset_reg = alloc_preg (cfg);
6846 int target_reg = alloc_preg (cfg);
6847 int table_reg = alloc_preg (cfg);
6848 int sum_reg = alloc_preg (cfg);
6849 gboolean use_op_switch;
6851 CHECK_OPSIZE (5);
6852 CHECK_STACK (1);
6853 n = read32 (ip + 1);
6854 --sp;
6855 src1 = sp [0];
6856 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6857 UNVERIFIED;
6859 ip += 5;
6860 CHECK_OPSIZE (n * sizeof (guint32));
6861 target = ip + n * sizeof (guint32);
6863 GET_BBLOCK (cfg, default_bblock, target);
6865 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6866 for (i = 0; i < n; ++i) {
6867 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6868 targets [i] = tblock;
6869 ip += 4;
6872 if (sp != stack_start) {
6874 * Link the current bb with the targets as well, so handle_stack_args
6875 * will set their in_stack correctly.
6877 link_bblock (cfg, bblock, default_bblock);
6878 for (i = 0; i < n; ++i)
6879 link_bblock (cfg, bblock, targets [i]);
6881 handle_stack_args (cfg, stack_start, sp - stack_start);
6882 sp = stack_start;
6883 CHECK_UNVERIFIABLE (cfg);
6886 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6887 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6888 bblock = cfg->cbb;
6890 for (i = 0; i < n; ++i)
6891 link_bblock (cfg, bblock, targets [i]);
6893 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6894 table->table = targets;
6895 table->table_size = n;
6897 use_op_switch = FALSE;
6898 #ifdef TARGET_ARM
6899 /* ARM implements SWITCH statements differently */
6900 /* FIXME: Make it use the generic implementation */
6901 if (!cfg->compile_aot)
6902 use_op_switch = TRUE;
6903 #endif
6905 if (COMPILE_LLVM (cfg))
6906 use_op_switch = TRUE;
6908 cfg->cbb->has_jump_table = 1;
6910 if (use_op_switch) {
6911 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6912 ins->sreg1 = src1->dreg;
6913 ins->inst_p0 = table;
6914 ins->inst_many_bb = targets;
6915 ins->klass = GUINT_TO_POINTER (n);
6916 MONO_ADD_INS (cfg->cbb, ins);
6917 } else {
6918 if (sizeof (gpointer) == 8)
6919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6920 else
6921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6923 #if SIZEOF_REGISTER == 8
6924 /* The upper word might not be zero, and we add it to a 64 bit address later */
6925 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6926 #endif
6928 if (cfg->compile_aot) {
6929 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6930 } else {
6931 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6932 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6933 ins->inst_p0 = table;
6934 ins->dreg = table_reg;
6935 MONO_ADD_INS (cfg->cbb, ins);
6938 /* FIXME: Use load_memindex */
6939 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6941 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6943 start_new_bblock = 1;
6944 inline_costs += (BRANCH_COST * 2);
6945 break;
6947 case CEE_LDIND_I1:
6948 case CEE_LDIND_U1:
6949 case CEE_LDIND_I2:
6950 case CEE_LDIND_U2:
6951 case CEE_LDIND_I4:
6952 case CEE_LDIND_U4:
6953 case CEE_LDIND_I8:
6954 case CEE_LDIND_I:
6955 case CEE_LDIND_R4:
6956 case CEE_LDIND_R8:
6957 case CEE_LDIND_REF:
6958 CHECK_STACK (1);
6959 --sp;
6961 switch (*ip) {
6962 case CEE_LDIND_R4:
6963 case CEE_LDIND_R8:
6964 dreg = alloc_freg (cfg);
6965 break;
6966 case CEE_LDIND_I8:
6967 dreg = alloc_lreg (cfg);
6968 break;
6969 default:
6970 dreg = alloc_preg (cfg);
6973 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6974 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6975 ins->flags |= ins_flag;
6976 ins_flag = 0;
6977 MONO_ADD_INS (bblock, ins);
6978 *sp++ = ins;
6979 ++ip;
6980 break;
6981 case CEE_STIND_REF:
6982 case CEE_STIND_I1:
6983 case CEE_STIND_I2:
6984 case CEE_STIND_I4:
6985 case CEE_STIND_I8:
6986 case CEE_STIND_R4:
6987 case CEE_STIND_R8:
6988 case CEE_STIND_I:
6989 CHECK_STACK (2);
6990 sp -= 2;
6992 #if HAVE_WRITE_BARRIERS
6993 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6994 /* insert call to write barrier */
6995 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6996 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6997 ins_flag = 0;
6998 ip++;
6999 break;
7001 #endif
7003 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7004 ins->flags |= ins_flag;
7005 ins_flag = 0;
7006 MONO_ADD_INS (bblock, ins);
7007 inline_costs += 1;
7008 ++ip;
7009 break;
7011 case CEE_MUL:
7012 CHECK_STACK (2);
7014 MONO_INST_NEW (cfg, ins, (*ip));
7015 sp -= 2;
7016 ins->sreg1 = sp [0]->dreg;
7017 ins->sreg2 = sp [1]->dreg;
7018 type_from_op (ins, sp [0], sp [1]);
7019 CHECK_TYPE (ins);
7020 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7022 /* Use the immediate opcodes if possible */
7023 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7024 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7025 if (imm_opcode != -1) {
7026 ins->opcode = imm_opcode;
7027 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7028 ins->sreg2 = -1;
7030 sp [1]->opcode = OP_NOP;
7034 MONO_ADD_INS ((cfg)->cbb, (ins));
7036 *sp++ = mono_decompose_opcode (cfg, ins);
7037 ip++;
7038 break;
7039 case CEE_ADD:
7040 case CEE_SUB:
7041 case CEE_DIV:
7042 case CEE_DIV_UN:
7043 case CEE_REM:
7044 case CEE_REM_UN:
7045 case CEE_AND:
7046 case CEE_OR:
7047 case CEE_XOR:
7048 case CEE_SHL:
7049 case CEE_SHR:
7050 case CEE_SHR_UN:
7051 CHECK_STACK (2);
7053 MONO_INST_NEW (cfg, ins, (*ip));
7054 sp -= 2;
7055 ins->sreg1 = sp [0]->dreg;
7056 ins->sreg2 = sp [1]->dreg;
7057 type_from_op (ins, sp [0], sp [1]);
7058 CHECK_TYPE (ins);
7059 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7060 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7062 /* FIXME: Pass opcode to is_inst_imm */
7064 /* Use the immediate opcodes if possible */
7065 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7066 int imm_opcode;
7068 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7069 if (imm_opcode != -1) {
7070 ins->opcode = imm_opcode;
7071 if (sp [1]->opcode == OP_I8CONST) {
7072 #if SIZEOF_REGISTER == 8
7073 ins->inst_imm = sp [1]->inst_l;
7074 #else
7075 ins->inst_ls_word = sp [1]->inst_ls_word;
7076 ins->inst_ms_word = sp [1]->inst_ms_word;
7077 #endif
7079 else
7080 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7081 ins->sreg2 = -1;
7083 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7084 if (sp [1]->next == NULL)
7085 sp [1]->opcode = OP_NOP;
7088 MONO_ADD_INS ((cfg)->cbb, (ins));
7090 *sp++ = mono_decompose_opcode (cfg, ins);
7091 ip++;
7092 break;
7093 case CEE_NEG:
7094 case CEE_NOT:
7095 case CEE_CONV_I1:
7096 case CEE_CONV_I2:
7097 case CEE_CONV_I4:
7098 case CEE_CONV_R4:
7099 case CEE_CONV_R8:
7100 case CEE_CONV_U4:
7101 case CEE_CONV_I8:
7102 case CEE_CONV_U8:
7103 case CEE_CONV_OVF_I8:
7104 case CEE_CONV_OVF_U8:
7105 case CEE_CONV_R_UN:
7106 CHECK_STACK (1);
7108 /* Special case this earlier so we have long constants in the IR */
7109 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7110 int data = sp [-1]->inst_c0;
7111 sp [-1]->opcode = OP_I8CONST;
7112 sp [-1]->type = STACK_I8;
7113 #if SIZEOF_REGISTER == 8
7114 if ((*ip) == CEE_CONV_U8)
7115 sp [-1]->inst_c0 = (guint32)data;
7116 else
7117 sp [-1]->inst_c0 = data;
7118 #else
7119 sp [-1]->inst_ls_word = data;
7120 if ((*ip) == CEE_CONV_U8)
7121 sp [-1]->inst_ms_word = 0;
7122 else
7123 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7124 #endif
7125 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7127 else {
7128 ADD_UNOP (*ip);
7130 ip++;
7131 break;
7132 case CEE_CONV_OVF_I4:
7133 case CEE_CONV_OVF_I1:
7134 case CEE_CONV_OVF_I2:
7135 case CEE_CONV_OVF_I:
7136 case CEE_CONV_OVF_U:
7137 CHECK_STACK (1);
7139 if (sp [-1]->type == STACK_R8) {
7140 ADD_UNOP (CEE_CONV_OVF_I8);
7141 ADD_UNOP (*ip);
7142 } else {
7143 ADD_UNOP (*ip);
7145 ip++;
7146 break;
7147 case CEE_CONV_OVF_U1:
7148 case CEE_CONV_OVF_U2:
7149 case CEE_CONV_OVF_U4:
7150 CHECK_STACK (1);
7152 if (sp [-1]->type == STACK_R8) {
7153 ADD_UNOP (CEE_CONV_OVF_U8);
7154 ADD_UNOP (*ip);
7155 } else {
7156 ADD_UNOP (*ip);
7158 ip++;
7159 break;
7160 case CEE_CONV_OVF_I1_UN:
7161 case CEE_CONV_OVF_I2_UN:
7162 case CEE_CONV_OVF_I4_UN:
7163 case CEE_CONV_OVF_I8_UN:
7164 case CEE_CONV_OVF_U1_UN:
7165 case CEE_CONV_OVF_U2_UN:
7166 case CEE_CONV_OVF_U4_UN:
7167 case CEE_CONV_OVF_U8_UN:
7168 case CEE_CONV_OVF_I_UN:
7169 case CEE_CONV_OVF_U_UN:
7170 case CEE_CONV_U2:
7171 case CEE_CONV_U1:
7172 case CEE_CONV_I:
7173 case CEE_CONV_U:
7174 CHECK_STACK (1);
7175 ADD_UNOP (*ip);
7176 ip++;
7177 break;
7178 case CEE_ADD_OVF:
7179 case CEE_ADD_OVF_UN:
7180 case CEE_MUL_OVF:
7181 case CEE_MUL_OVF_UN:
7182 case CEE_SUB_OVF:
7183 case CEE_SUB_OVF_UN:
7184 CHECK_STACK (2);
7185 ADD_BINOP (*ip);
7186 ip++;
7187 break;
7188 case CEE_CPOBJ:
7189 CHECK_OPSIZE (5);
7190 CHECK_STACK (2);
7191 token = read32 (ip + 1);
7192 klass = mini_get_class (method, token, generic_context);
7193 CHECK_TYPELOAD (klass);
7194 sp -= 2;
7195 if (generic_class_is_reference_type (cfg, klass)) {
7196 MonoInst *store, *load;
7197 int dreg = alloc_preg (cfg);
7199 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7200 load->flags |= ins_flag;
7201 MONO_ADD_INS (cfg->cbb, load);
7203 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7204 store->flags |= ins_flag;
7205 MONO_ADD_INS (cfg->cbb, store);
7206 } else {
7207 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7209 ins_flag = 0;
7210 ip += 5;
7211 break;
7212 case CEE_LDOBJ: {
7213 int loc_index = -1;
7214 int stloc_len = 0;
7216 CHECK_OPSIZE (5);
7217 CHECK_STACK (1);
7218 --sp;
7219 token = read32 (ip + 1);
7220 klass = mini_get_class (method, token, generic_context);
7221 CHECK_TYPELOAD (klass);
7223 /* Optimize the common ldobj+stloc combination */
7224 switch (ip [5]) {
7225 case CEE_STLOC_S:
7226 loc_index = ip [6];
7227 stloc_len = 2;
7228 break;
7229 case CEE_STLOC_0:
7230 case CEE_STLOC_1:
7231 case CEE_STLOC_2:
7232 case CEE_STLOC_3:
7233 loc_index = ip [5] - CEE_STLOC_0;
7234 stloc_len = 1;
7235 break;
7236 default:
7237 break;
7240 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7241 CHECK_LOCAL (loc_index);
7243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7244 ins->dreg = cfg->locals [loc_index]->dreg;
7245 ip += 5;
7246 ip += stloc_len;
7247 break;
7250 /* Optimize the ldobj+stobj combination */
7251 /* The reference case ends up being a load+store anyway */
7252 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7253 CHECK_STACK (1);
7255 sp --;
7257 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7259 ip += 5 + 5;
7260 ins_flag = 0;
7261 break;
7264 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7265 *sp++ = ins;
7267 ip += 5;
7268 ins_flag = 0;
7269 inline_costs += 1;
7270 break;
7272 case CEE_LDSTR:
7273 CHECK_STACK_OVF (1);
7274 CHECK_OPSIZE (5);
7275 n = read32 (ip + 1);
7277 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7278 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7279 ins->type = STACK_OBJ;
7280 *sp = ins;
7282 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7283 MonoInst *iargs [1];
7285 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7286 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7287 } else {
7288 if (cfg->opt & MONO_OPT_SHARED) {
7289 MonoInst *iargs [3];
7291 if (cfg->compile_aot) {
7292 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7294 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7295 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7296 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7297 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7298 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7299 } else {
7300 if (bblock->out_of_line) {
7301 MonoInst *iargs [2];
7303 if (image == mono_defaults.corlib) {
7305 * Avoid relocations in AOT and save some space by using a
7306 * version of helper_ldstr specialized to mscorlib.
7308 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7309 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7310 } else {
7311 /* Avoid creating the string object */
7312 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7313 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7314 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7317 else
7318 if (cfg->compile_aot) {
7319 NEW_LDSTRCONST (cfg, ins, image, n);
7320 *sp = ins;
7321 MONO_ADD_INS (bblock, ins);
7323 else {
7324 NEW_PCONST (cfg, ins, NULL);
7325 ins->type = STACK_OBJ;
7326 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7327 *sp = ins;
7328 MONO_ADD_INS (bblock, ins);
7333 sp++;
7334 ip += 5;
7335 break;
7336 case CEE_NEWOBJ: {
7337 MonoInst *iargs [2];
7338 MonoMethodSignature *fsig;
7339 MonoInst this_ins;
7340 MonoInst *alloc;
7341 MonoInst *vtable_arg = NULL;
7343 CHECK_OPSIZE (5);
7344 token = read32 (ip + 1);
7345 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7346 if (!cmethod)
7347 goto load_error;
7348 fsig = mono_method_get_signature (cmethod, image, token);
7350 mono_save_token_info (cfg, image, token, cmethod);
7352 if (!mono_class_init (cmethod->klass))
7353 goto load_error;
7355 if (cfg->generic_sharing_context)
7356 context_used = mono_method_check_context_used (cmethod);
7358 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7359 if (check_linkdemand (cfg, method, cmethod))
7360 INLINE_FAILURE;
7361 CHECK_CFG_EXCEPTION;
7362 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7363 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7366 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7367 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7368 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7369 if (context_used) {
7370 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7371 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7372 } else {
7373 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7375 } else {
7376 if (context_used) {
7377 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7378 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7379 } else {
7380 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7382 CHECK_TYPELOAD (cmethod->klass);
7383 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7388 n = fsig->param_count;
7389 CHECK_STACK (n);
7392 * Generate smaller code for the common newobj <exception> instruction in
7393 * argument checking code.
7395 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7396 is_exception_class (cmethod->klass) && n <= 2 &&
7397 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7398 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7399 MonoInst *iargs [3];
7401 g_assert (!vtable_arg);
7403 sp -= n;
7405 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7406 switch (n) {
7407 case 0:
7408 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7409 break;
7410 case 1:
7411 iargs [1] = sp [0];
7412 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7413 break;
7414 case 2:
7415 iargs [1] = sp [0];
7416 iargs [2] = sp [1];
7417 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7418 break;
7419 default:
7420 g_assert_not_reached ();
7423 ip += 5;
7424 inline_costs += 5;
7425 break;
7428 /* move the args to allow room for 'this' in the first position */
7429 while (n--) {
7430 --sp;
7431 sp [1] = sp [0];
7434 /* check_call_signature () requires sp[0] to be set */
7435 this_ins.type = STACK_OBJ;
7436 sp [0] = &this_ins;
7437 if (check_call_signature (cfg, fsig, sp))
7438 UNVERIFIED;
7440 iargs [0] = NULL;
7442 if (mini_class_is_system_array (cmethod->klass)) {
7443 g_assert (!vtable_arg);
7445 if (context_used) {
7446 *sp = emit_get_rgctx_method (cfg, context_used,
7447 cmethod, MONO_RGCTX_INFO_METHOD);
7448 } else {
7449 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7452 /* Avoid varargs in the common case */
7453 if (fsig->param_count == 1)
7454 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7455 else if (fsig->param_count == 2)
7456 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7457 else
7458 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7459 } else if (cmethod->string_ctor) {
7460 g_assert (!context_used);
7461 g_assert (!vtable_arg);
7462 /* we simply pass a null pointer */
7463 EMIT_NEW_PCONST (cfg, *sp, NULL);
7464 /* now call the string ctor */
7465 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7466 } else {
7467 MonoInst* callvirt_this_arg = NULL;
7469 if (cmethod->klass->valuetype) {
7470 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7471 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7472 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7474 alloc = NULL;
7477 * The code generated by mini_emit_virtual_call () expects
7478 * iargs [0] to be a boxed instance, but luckily the vcall
7479 * will be transformed into a normal call there.
7481 } else if (context_used) {
7482 MonoInst *data;
7483 int rgctx_info;
7485 if (cfg->opt & MONO_OPT_SHARED)
7486 rgctx_info = MONO_RGCTX_INFO_KLASS;
7487 else
7488 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7489 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7491 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7492 *sp = alloc;
7493 } else {
7494 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7496 CHECK_TYPELOAD (cmethod->klass);
7499 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7500 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7501 * As a workaround, we call class cctors before allocating objects.
7503 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7504 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7505 if (cfg->verbose_level > 2)
7506 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7507 class_inits = g_slist_prepend (class_inits, vtable);
7510 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7511 *sp = alloc;
7514 if (alloc)
7515 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7517 /* Now call the actual ctor */
7518 /* Avoid virtual calls to ctors if possible */
7519 if (cmethod->klass->marshalbyref)
7520 callvirt_this_arg = sp [0];
7522 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7523 mono_method_check_inlining (cfg, cmethod) &&
7524 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7525 !g_list_find (dont_inline, cmethod)) {
7526 int costs;
7528 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7529 cfg->real_offset += 5;
7530 bblock = cfg->cbb;
7532 inline_costs += costs - 5;
7533 } else {
7534 INLINE_FAILURE;
7535 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7537 } else if (context_used &&
7538 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7539 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7540 MonoInst *cmethod_addr;
7542 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7543 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7545 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7546 } else {
7547 INLINE_FAILURE;
7548 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7549 callvirt_this_arg, NULL, vtable_arg);
7550 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7551 GENERIC_SHARING_FAILURE (*ip);
7555 if (alloc == NULL) {
7556 /* Valuetype */
7557 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7558 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7559 *sp++= ins;
7561 else
7562 *sp++ = alloc;
7564 ip += 5;
7565 inline_costs += 5;
7566 break;
7568 case CEE_CASTCLASS:
7569 CHECK_STACK (1);
7570 --sp;
7571 CHECK_OPSIZE (5);
7572 token = read32 (ip + 1);
7573 klass = mini_get_class (method, token, generic_context);
7574 CHECK_TYPELOAD (klass);
7575 if (sp [0]->type != STACK_OBJ)
7576 UNVERIFIED;
7578 if (cfg->generic_sharing_context)
7579 context_used = mono_class_check_context_used (klass);
7581 if (context_used) {
7582 MonoInst *args [2];
7584 /* obj */
7585 args [0] = *sp;
7587 /* klass */
7588 args [1] = emit_get_rgctx_klass (cfg, context_used,
7589 klass, MONO_RGCTX_INFO_KLASS);
7591 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7592 *sp ++ = ins;
7593 ip += 5;
7594 inline_costs += 2;
7595 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7596 MonoMethod *mono_castclass;
7597 MonoInst *iargs [1];
7598 int costs;
7600 mono_castclass = mono_marshal_get_castclass (klass);
7601 iargs [0] = sp [0];
7603 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7604 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7605 g_assert (costs > 0);
7607 ip += 5;
7608 cfg->real_offset += 5;
7609 bblock = cfg->cbb;
7611 *sp++ = iargs [0];
7613 inline_costs += costs;
7615 else {
7616 ins = handle_castclass (cfg, klass, *sp);
7617 bblock = cfg->cbb;
7618 *sp ++ = ins;
7619 ip += 5;
7621 break;
7622 case CEE_ISINST: {
7623 CHECK_STACK (1);
7624 --sp;
7625 CHECK_OPSIZE (5);
7626 token = read32 (ip + 1);
7627 klass = mini_get_class (method, token, generic_context);
7628 CHECK_TYPELOAD (klass);
7629 if (sp [0]->type != STACK_OBJ)
7630 UNVERIFIED;
7632 if (cfg->generic_sharing_context)
7633 context_used = mono_class_check_context_used (klass);
7635 if (context_used) {
7636 MonoInst *args [2];
7638 /* obj */
7639 args [0] = *sp;
7641 /* klass */
7642 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7644 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7645 sp++;
7646 ip += 5;
7647 inline_costs += 2;
7648 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7649 MonoMethod *mono_isinst;
7650 MonoInst *iargs [1];
7651 int costs;
7653 mono_isinst = mono_marshal_get_isinst (klass);
7654 iargs [0] = sp [0];
7656 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7657 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7658 g_assert (costs > 0);
7660 ip += 5;
7661 cfg->real_offset += 5;
7662 bblock = cfg->cbb;
7664 *sp++= iargs [0];
7666 inline_costs += costs;
7668 else {
7669 ins = handle_isinst (cfg, klass, *sp);
7670 bblock = cfg->cbb;
7671 *sp ++ = ins;
7672 ip += 5;
7674 break;
7676 case CEE_UNBOX_ANY: {
7677 CHECK_STACK (1);
7678 --sp;
7679 CHECK_OPSIZE (5);
7680 token = read32 (ip + 1);
7681 klass = mini_get_class (method, token, generic_context);
7682 CHECK_TYPELOAD (klass);
7684 mono_save_token_info (cfg, image, token, klass);
7686 if (cfg->generic_sharing_context)
7687 context_used = mono_class_check_context_used (klass);
7689 if (generic_class_is_reference_type (cfg, klass)) {
7690 /* CASTCLASS */
7691 if (context_used) {
7692 MonoInst *iargs [2];
7694 /* obj */
7695 iargs [0] = *sp;
7696 /* klass */
7697 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7698 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7699 *sp ++ = ins;
7700 ip += 5;
7701 inline_costs += 2;
7702 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7703 MonoMethod *mono_castclass;
7704 MonoInst *iargs [1];
7705 int costs;
7707 mono_castclass = mono_marshal_get_castclass (klass);
7708 iargs [0] = sp [0];
7710 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7711 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7713 g_assert (costs > 0);
7715 ip += 5;
7716 cfg->real_offset += 5;
7717 bblock = cfg->cbb;
7719 *sp++ = iargs [0];
7720 inline_costs += costs;
7721 } else {
7722 ins = handle_castclass (cfg, klass, *sp);
7723 bblock = cfg->cbb;
7724 *sp ++ = ins;
7725 ip += 5;
7727 break;
7730 if (mono_class_is_nullable (klass)) {
7731 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7732 *sp++= ins;
7733 ip += 5;
7734 break;
7737 /* UNBOX */
7738 ins = handle_unbox (cfg, klass, sp, context_used);
7739 *sp = ins;
7741 ip += 5;
7743 /* LDOBJ */
7744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7745 *sp++ = ins;
7747 inline_costs += 2;
7748 break;
7750 case CEE_BOX: {
7751 MonoInst *val;
7753 CHECK_STACK (1);
7754 --sp;
7755 val = *sp;
7756 CHECK_OPSIZE (5);
7757 token = read32 (ip + 1);
7758 klass = mini_get_class (method, token, generic_context);
7759 CHECK_TYPELOAD (klass);
7761 mono_save_token_info (cfg, image, token, klass);
7763 if (cfg->generic_sharing_context)
7764 context_used = mono_class_check_context_used (klass);
7766 if (generic_class_is_reference_type (cfg, klass)) {
7767 *sp++ = val;
7768 ip += 5;
7769 break;
7772 if (klass == mono_defaults.void_class)
7773 UNVERIFIED;
7774 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7775 UNVERIFIED;
7776 /* frequent check in generic code: box (struct), brtrue */
7777 if (!mono_class_is_nullable (klass) &&
7778 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7779 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7780 ip += 5;
7781 MONO_INST_NEW (cfg, ins, OP_BR);
7782 if (*ip == CEE_BRTRUE_S) {
7783 CHECK_OPSIZE (2);
7784 ip++;
7785 target = ip + 1 + (signed char)(*ip);
7786 ip++;
7787 } else {
7788 CHECK_OPSIZE (5);
7789 ip++;
7790 target = ip + 4 + (gint)(read32 (ip));
7791 ip += 4;
7793 GET_BBLOCK (cfg, tblock, target);
7794 link_bblock (cfg, bblock, tblock);
7795 ins->inst_target_bb = tblock;
7796 GET_BBLOCK (cfg, tblock, ip);
7798 * This leads to some inconsistency, since the two bblocks are
7799 * not really connected, but it is needed for handling stack
7800 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7801 * FIXME: This should only be needed if sp != stack_start, but that
7802 * doesn't work for some reason (test failure in mcs/tests on x86).
7804 link_bblock (cfg, bblock, tblock);
7805 if (sp != stack_start) {
7806 handle_stack_args (cfg, stack_start, sp - stack_start);
7807 sp = stack_start;
7808 CHECK_UNVERIFIABLE (cfg);
7810 MONO_ADD_INS (bblock, ins);
7811 start_new_bblock = 1;
7812 break;
7815 if (context_used) {
7816 MonoInst *data;
7817 int rgctx_info;
7819 if (cfg->opt & MONO_OPT_SHARED)
7820 rgctx_info = MONO_RGCTX_INFO_KLASS;
7821 else
7822 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7823 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7824 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7825 } else {
7826 *sp++ = handle_box (cfg, val, klass);
7829 ip += 5;
7830 inline_costs += 1;
7831 break;
7833 case CEE_UNBOX: {
7834 CHECK_STACK (1);
7835 --sp;
7836 CHECK_OPSIZE (5);
7837 token = read32 (ip + 1);
7838 klass = mini_get_class (method, token, generic_context);
7839 CHECK_TYPELOAD (klass);
7841 mono_save_token_info (cfg, image, token, klass);
7843 if (cfg->generic_sharing_context)
7844 context_used = mono_class_check_context_used (klass);
7846 if (mono_class_is_nullable (klass)) {
7847 MonoInst *val;
7849 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7850 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7852 *sp++= ins;
7853 } else {
7854 ins = handle_unbox (cfg, klass, sp, context_used);
7855 *sp++ = ins;
7857 ip += 5;
7858 inline_costs += 2;
7859 break;
7861 case CEE_LDFLD:
7862 case CEE_LDFLDA:
7863 case CEE_STFLD: {
7864 MonoClassField *field;
7865 int costs;
7866 guint foffset;
7868 if (*ip == CEE_STFLD) {
7869 CHECK_STACK (2);
7870 sp -= 2;
7871 } else {
7872 CHECK_STACK (1);
7873 --sp;
7875 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7876 UNVERIFIED;
7877 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7878 UNVERIFIED;
7879 CHECK_OPSIZE (5);
7880 token = read32 (ip + 1);
7881 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7882 field = mono_method_get_wrapper_data (method, token);
7883 klass = field->parent;
7885 else {
7886 field = mono_field_from_token (image, token, &klass, generic_context);
7888 if (!field)
7889 goto load_error;
7890 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7891 FIELD_ACCESS_FAILURE;
7892 mono_class_init (klass);
7894 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7895 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7896 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7897 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7900 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7901 if (*ip == CEE_STFLD) {
7902 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7903 UNVERIFIED;
7904 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7905 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7906 MonoInst *iargs [5];
7908 iargs [0] = sp [0];
7909 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7910 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7911 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7912 field->offset);
7913 iargs [4] = sp [1];
7915 if (cfg->opt & MONO_OPT_INLINE) {
7916 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7917 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7918 g_assert (costs > 0);
7920 cfg->real_offset += 5;
7921 bblock = cfg->cbb;
7923 inline_costs += costs;
7924 } else {
7925 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7927 } else {
7928 MonoInst *store;
7930 #if HAVE_WRITE_BARRIERS
7931 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7932 /* insert call to write barrier */
7933 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7934 MonoInst *iargs [2];
7935 int dreg;
7937 dreg = alloc_preg (cfg);
7938 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7939 iargs [1] = sp [1];
7940 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7942 #endif
7944 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7946 store->flags |= ins_flag;
7948 ins_flag = 0;
7949 ip += 5;
7950 break;
7953 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7954 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7955 MonoInst *iargs [4];
7957 iargs [0] = sp [0];
7958 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7959 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7960 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7961 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7962 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7963 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7964 bblock = cfg->cbb;
7965 g_assert (costs > 0);
7967 cfg->real_offset += 5;
7969 *sp++ = iargs [0];
7971 inline_costs += costs;
7972 } else {
7973 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7974 *sp++ = ins;
7976 } else {
7977 if (sp [0]->type == STACK_VTYPE) {
7978 MonoInst *var;
7980 /* Have to compute the address of the variable */
7982 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7983 if (!var)
7984 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7985 else
7986 g_assert (var->klass == klass);
7988 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7989 sp [0] = ins;
7992 if (*ip == CEE_LDFLDA) {
7993 dreg = alloc_preg (cfg);
7995 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7996 ins->klass = mono_class_from_mono_type (field->type);
7997 ins->type = STACK_MP;
7998 *sp++ = ins;
7999 } else {
8000 MonoInst *load;
8002 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8003 load->flags |= ins_flag;
8004 *sp++ = load;
8007 ins_flag = 0;
8008 ip += 5;
8009 break;
8011 case CEE_LDSFLD:
8012 case CEE_LDSFLDA:
8013 case CEE_STSFLD: {
8014 MonoClassField *field;
8015 gpointer addr = NULL;
8016 gboolean is_special_static;
8018 CHECK_OPSIZE (5);
8019 token = read32 (ip + 1);
8021 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8022 field = mono_method_get_wrapper_data (method, token);
8023 klass = field->parent;
8025 else
8026 field = mono_field_from_token (image, token, &klass, generic_context);
8027 if (!field)
8028 goto load_error;
8029 mono_class_init (klass);
8030 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8031 FIELD_ACCESS_FAILURE;
8033 /* if the class is Critical then transparent code cannot access it's fields */
8034 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8035 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8038 * We can only support shared generic static
8039 * field access on architectures where the
8040 * trampoline code has been extended to handle
8041 * the generic class init.
8043 #ifndef MONO_ARCH_VTABLE_REG
8044 GENERIC_SHARING_FAILURE (*ip);
8045 #endif
8047 if (cfg->generic_sharing_context)
8048 context_used = mono_class_check_context_used (klass);
8050 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8052 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8053 * to be called here.
8055 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8056 mono_class_vtable (cfg->domain, klass);
8057 CHECK_TYPELOAD (klass);
8059 mono_domain_lock (cfg->domain);
8060 if (cfg->domain->special_static_fields)
8061 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8062 mono_domain_unlock (cfg->domain);
8064 is_special_static = mono_class_field_is_special_static (field);
8066 /* Generate IR to compute the field address */
8068 if ((cfg->opt & MONO_OPT_SHARED) ||
8069 (cfg->compile_aot && is_special_static) ||
8070 (context_used && is_special_static)) {
8071 MonoInst *iargs [2];
8073 g_assert (field->parent);
8074 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8075 if (context_used) {
8076 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8077 field, MONO_RGCTX_INFO_CLASS_FIELD);
8078 } else {
8079 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8081 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8082 } else if (context_used) {
8083 MonoInst *static_data;
8086 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8087 method->klass->name_space, method->klass->name, method->name,
8088 depth, field->offset);
8091 if (mono_class_needs_cctor_run (klass, method)) {
8092 MonoCallInst *call;
8093 MonoInst *vtable;
8095 vtable = emit_get_rgctx_klass (cfg, context_used,
8096 klass, MONO_RGCTX_INFO_VTABLE);
8098 // FIXME: This doesn't work since it tries to pass the argument
8099 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8101 * The vtable pointer is always passed in a register regardless of
8102 * the calling convention, so assign it manually, and make a call
8103 * using a signature without parameters.
8105 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8106 #ifdef MONO_ARCH_VTABLE_REG
8107 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8108 cfg->uses_vtable_reg = TRUE;
8109 #else
8110 NOT_IMPLEMENTED;
8111 #endif
8115 * The pointer we're computing here is
8117 * super_info.static_data + field->offset
8119 static_data = emit_get_rgctx_klass (cfg, context_used,
8120 klass, MONO_RGCTX_INFO_STATIC_DATA);
8122 if (field->offset == 0) {
8123 ins = static_data;
8124 } else {
8125 int addr_reg = mono_alloc_preg (cfg);
8126 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8128 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8129 MonoInst *iargs [2];
8131 g_assert (field->parent);
8132 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8133 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8134 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8135 } else {
8136 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8138 CHECK_TYPELOAD (klass);
8139 if (!addr) {
8140 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8141 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8142 if (cfg->verbose_level > 2)
8143 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8144 class_inits = g_slist_prepend (class_inits, vtable);
8145 } else {
8146 if (cfg->run_cctors) {
8147 MonoException *ex;
8148 /* This makes it so that inlining cannot trigger */
8149 /* .cctors: too many apps depend on them */
8150 /* running with a specific order... */
8151 if (! vtable->initialized)
8152 INLINE_FAILURE;
8153 ex = mono_runtime_class_init_full (vtable, FALSE);
8154 if (ex) {
8155 set_exception_object (cfg, ex);
8156 goto exception_exit;
8160 addr = (char*)vtable->data + field->offset;
8162 if (cfg->compile_aot)
8163 EMIT_NEW_SFLDACONST (cfg, ins, field);
8164 else
8165 EMIT_NEW_PCONST (cfg, ins, addr);
8166 } else {
8168 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8169 * This could be later optimized to do just a couple of
8170 * memory dereferences with constant offsets.
8172 MonoInst *iargs [1];
8173 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8174 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8178 /* Generate IR to do the actual load/store operation */
8180 if (*ip == CEE_LDSFLDA) {
8181 ins->klass = mono_class_from_mono_type (field->type);
8182 ins->type = STACK_PTR;
8183 *sp++ = ins;
8184 } else if (*ip == CEE_STSFLD) {
8185 MonoInst *store;
8186 CHECK_STACK (1);
8187 sp--;
8189 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8190 store->flags |= ins_flag;
8191 } else {
8192 gboolean is_const = FALSE;
8193 MonoVTable *vtable = NULL;
8195 if (!context_used) {
8196 vtable = mono_class_vtable (cfg->domain, klass);
8197 CHECK_TYPELOAD (klass);
8199 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8200 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8201 gpointer addr = (char*)vtable->data + field->offset;
8202 int ro_type = field->type->type;
8203 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8204 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8206 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8207 is_const = TRUE;
8208 switch (ro_type) {
8209 case MONO_TYPE_BOOLEAN:
8210 case MONO_TYPE_U1:
8211 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8212 sp++;
8213 break;
8214 case MONO_TYPE_I1:
8215 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8216 sp++;
8217 break;
8218 case MONO_TYPE_CHAR:
8219 case MONO_TYPE_U2:
8220 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8221 sp++;
8222 break;
8223 case MONO_TYPE_I2:
8224 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8225 sp++;
8226 break;
8227 break;
8228 case MONO_TYPE_I4:
8229 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8230 sp++;
8231 break;
8232 case MONO_TYPE_U4:
8233 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8234 sp++;
8235 break;
8236 #ifndef HAVE_MOVING_COLLECTOR
8237 case MONO_TYPE_I:
8238 case MONO_TYPE_U:
8239 case MONO_TYPE_STRING:
8240 case MONO_TYPE_OBJECT:
8241 case MONO_TYPE_CLASS:
8242 case MONO_TYPE_SZARRAY:
8243 case MONO_TYPE_PTR:
8244 case MONO_TYPE_FNPTR:
8245 case MONO_TYPE_ARRAY:
8246 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8247 type_to_eval_stack_type ((cfg), field->type, *sp);
8248 sp++;
8249 break;
8250 #endif
8251 case MONO_TYPE_I8:
8252 case MONO_TYPE_U8:
8253 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8254 sp++;
8255 break;
8256 case MONO_TYPE_R4:
8257 case MONO_TYPE_R8:
8258 case MONO_TYPE_VALUETYPE:
8259 default:
8260 is_const = FALSE;
8261 break;
8265 if (!is_const) {
8266 MonoInst *load;
8268 CHECK_STACK_OVF (1);
8270 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8271 load->flags |= ins_flag;
8272 ins_flag = 0;
8273 *sp++ = load;
8276 ins_flag = 0;
8277 ip += 5;
8278 break;
8280 case CEE_STOBJ:
8281 CHECK_STACK (2);
8282 sp -= 2;
8283 CHECK_OPSIZE (5);
8284 token = read32 (ip + 1);
8285 klass = mini_get_class (method, token, generic_context);
8286 CHECK_TYPELOAD (klass);
8287 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8288 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8289 ins_flag = 0;
8290 ip += 5;
8291 inline_costs += 1;
8292 break;
8295 * Array opcodes
8297 case CEE_NEWARR: {
8298 MonoInst *len_ins;
8299 const char *data_ptr;
8300 int data_size = 0;
8301 guint32 field_token;
8303 CHECK_STACK (1);
8304 --sp;
8306 CHECK_OPSIZE (5);
8307 token = read32 (ip + 1);
8309 klass = mini_get_class (method, token, generic_context);
8310 CHECK_TYPELOAD (klass);
8312 if (cfg->generic_sharing_context)
8313 context_used = mono_class_check_context_used (klass);
8315 if (context_used) {
8316 MonoInst *args [2];
8318 /* FIXME: Decompose later to help abcrem */
8320 /* vtable */
8321 args [0] = emit_get_rgctx_klass (cfg, context_used,
8322 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8324 /* array len */
8325 args [1] = sp [0];
8327 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8328 } else {
8329 if (cfg->opt & MONO_OPT_SHARED) {
8330 /* Decompose now to avoid problems with references to the domainvar */
8331 MonoInst *iargs [3];
8333 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8334 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8335 iargs [2] = sp [0];
8337 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8338 } else {
8339 /* Decompose later since it is needed by abcrem */
8340 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8341 ins->dreg = alloc_preg (cfg);
8342 ins->sreg1 = sp [0]->dreg;
8343 ins->inst_newa_class = klass;
8344 ins->type = STACK_OBJ;
8345 ins->klass = klass;
8346 MONO_ADD_INS (cfg->cbb, ins);
8347 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8348 cfg->cbb->has_array_access = TRUE;
8350 /* Needed so mono_emit_load_get_addr () gets called */
8351 mono_get_got_var (cfg);
8355 len_ins = sp [0];
8356 ip += 5;
8357 *sp++ = ins;
8358 inline_costs += 1;
8361 * we inline/optimize the initialization sequence if possible.
8362 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8363 * for small sizes open code the memcpy
8364 * ensure the rva field is big enough
8366 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8367 MonoMethod *memcpy_method = get_memcpy_method ();
8368 MonoInst *iargs [3];
8369 int add_reg = alloc_preg (cfg);
8371 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8372 if (cfg->compile_aot) {
8373 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8374 } else {
8375 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8377 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8378 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8379 ip += 11;
8382 break;
8384 case CEE_LDLEN:
8385 CHECK_STACK (1);
8386 --sp;
8387 if (sp [0]->type != STACK_OBJ)
8388 UNVERIFIED;
8390 dreg = alloc_preg (cfg);
8391 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8392 ins->dreg = alloc_preg (cfg);
8393 ins->sreg1 = sp [0]->dreg;
8394 ins->type = STACK_I4;
8395 MONO_ADD_INS (cfg->cbb, ins);
8396 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8397 cfg->cbb->has_array_access = TRUE;
8398 ip ++;
8399 *sp++ = ins;
8400 break;
8401 case CEE_LDELEMA:
8402 CHECK_STACK (2);
8403 sp -= 2;
8404 CHECK_OPSIZE (5);
8405 if (sp [0]->type != STACK_OBJ)
8406 UNVERIFIED;
8408 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8410 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8411 CHECK_TYPELOAD (klass);
8412 /* we need to make sure that this array is exactly the type it needs
8413 * to be for correctness. the wrappers are lax with their usage
8414 * so we need to ignore them here
8416 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8417 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8419 readonly = FALSE;
8420 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8421 *sp++ = ins;
8422 ip += 5;
8423 break;
8424 case CEE_LDELEM_ANY:
8425 case CEE_LDELEM_I1:
8426 case CEE_LDELEM_U1:
8427 case CEE_LDELEM_I2:
8428 case CEE_LDELEM_U2:
8429 case CEE_LDELEM_I4:
8430 case CEE_LDELEM_U4:
8431 case CEE_LDELEM_I8:
8432 case CEE_LDELEM_I:
8433 case CEE_LDELEM_R4:
8434 case CEE_LDELEM_R8:
8435 case CEE_LDELEM_REF: {
8436 MonoInst *addr;
8438 CHECK_STACK (2);
8439 sp -= 2;
8441 if (*ip == CEE_LDELEM_ANY) {
8442 CHECK_OPSIZE (5);
8443 token = read32 (ip + 1);
8444 klass = mini_get_class (method, token, generic_context);
8445 CHECK_TYPELOAD (klass);
8446 mono_class_init (klass);
8448 else
8449 klass = array_access_to_klass (*ip);
8451 if (sp [0]->type != STACK_OBJ)
8452 UNVERIFIED;
8454 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8456 if (sp [1]->opcode == OP_ICONST) {
8457 int array_reg = sp [0]->dreg;
8458 int index_reg = sp [1]->dreg;
8459 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8461 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8462 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8463 } else {
8464 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8465 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8467 *sp++ = ins;
8468 if (*ip == CEE_LDELEM_ANY)
8469 ip += 5;
8470 else
8471 ++ip;
8472 break;
8474 case CEE_STELEM_I:
8475 case CEE_STELEM_I1:
8476 case CEE_STELEM_I2:
8477 case CEE_STELEM_I4:
8478 case CEE_STELEM_I8:
8479 case CEE_STELEM_R4:
8480 case CEE_STELEM_R8:
8481 case CEE_STELEM_REF:
8482 case CEE_STELEM_ANY: {
8483 MonoInst *addr;
8485 CHECK_STACK (3);
8486 sp -= 3;
8488 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8490 if (*ip == CEE_STELEM_ANY) {
8491 CHECK_OPSIZE (5);
8492 token = read32 (ip + 1);
8493 klass = mini_get_class (method, token, generic_context);
8494 CHECK_TYPELOAD (klass);
8495 mono_class_init (klass);
8497 else
8498 klass = array_access_to_klass (*ip);
8500 if (sp [0]->type != STACK_OBJ)
8501 UNVERIFIED;
8503 /* storing a NULL doesn't need any of the complex checks in stelemref */
8504 if (generic_class_is_reference_type (cfg, klass) &&
8505 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8506 MonoMethod* helper = mono_marshal_get_stelemref ();
8507 MonoInst *iargs [3];
8509 if (sp [0]->type != STACK_OBJ)
8510 UNVERIFIED;
8511 if (sp [2]->type != STACK_OBJ)
8512 UNVERIFIED;
8514 iargs [2] = sp [2];
8515 iargs [1] = sp [1];
8516 iargs [0] = sp [0];
8518 mono_emit_method_call (cfg, helper, iargs, NULL);
8519 } else {
8520 if (sp [1]->opcode == OP_ICONST) {
8521 int array_reg = sp [0]->dreg;
8522 int index_reg = sp [1]->dreg;
8523 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8525 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8526 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8527 } else {
8528 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8529 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8533 if (*ip == CEE_STELEM_ANY)
8534 ip += 5;
8535 else
8536 ++ip;
8537 inline_costs += 1;
8538 break;
8540 case CEE_CKFINITE: {
8541 CHECK_STACK (1);
8542 --sp;
8544 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8545 ins->sreg1 = sp [0]->dreg;
8546 ins->dreg = alloc_freg (cfg);
8547 ins->type = STACK_R8;
8548 MONO_ADD_INS (bblock, ins);
8550 *sp++ = mono_decompose_opcode (cfg, ins);
8552 ++ip;
8553 break;
8555 case CEE_REFANYVAL: {
8556 MonoInst *src_var, *src;
8558 int klass_reg = alloc_preg (cfg);
8559 int dreg = alloc_preg (cfg);
8561 CHECK_STACK (1);
8562 MONO_INST_NEW (cfg, ins, *ip);
8563 --sp;
8564 CHECK_OPSIZE (5);
8565 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8566 CHECK_TYPELOAD (klass);
8567 mono_class_init (klass);
8569 if (cfg->generic_sharing_context)
8570 context_used = mono_class_check_context_used (klass);
8572 // FIXME:
8573 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8574 if (!src_var)
8575 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8576 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8579 if (context_used) {
8580 MonoInst *klass_ins;
8582 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8583 klass, MONO_RGCTX_INFO_KLASS);
8585 // FIXME:
8586 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8587 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8588 } else {
8589 mini_emit_class_check (cfg, klass_reg, klass);
8591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8592 ins->type = STACK_MP;
8593 *sp++ = ins;
8594 ip += 5;
8595 break;
8597 case CEE_MKREFANY: {
8598 MonoInst *loc, *addr;
8600 CHECK_STACK (1);
8601 MONO_INST_NEW (cfg, ins, *ip);
8602 --sp;
8603 CHECK_OPSIZE (5);
8604 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8605 CHECK_TYPELOAD (klass);
8606 mono_class_init (klass);
8608 if (cfg->generic_sharing_context)
8609 context_used = mono_class_check_context_used (klass);
8611 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8612 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8614 if (context_used) {
8615 MonoInst *const_ins;
8616 int type_reg = alloc_preg (cfg);
8618 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8619 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8621 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8622 } else if (cfg->compile_aot) {
8623 int const_reg = alloc_preg (cfg);
8624 int type_reg = alloc_preg (cfg);
8626 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8630 } else {
8631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8632 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8634 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8636 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8637 ins->type = STACK_VTYPE;
8638 ins->klass = mono_defaults.typed_reference_class;
8639 *sp++ = ins;
8640 ip += 5;
8641 break;
8643 case CEE_LDTOKEN: {
8644 gpointer handle;
8645 MonoClass *handle_class;
8647 CHECK_STACK_OVF (1);
8649 CHECK_OPSIZE (5);
8650 n = read32 (ip + 1);
8652 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8653 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8654 handle = mono_method_get_wrapper_data (method, n);
8655 handle_class = mono_method_get_wrapper_data (method, n + 1);
8656 if (handle_class == mono_defaults.typehandle_class)
8657 handle = &((MonoClass*)handle)->byval_arg;
8659 else {
8660 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8662 if (!handle)
8663 goto load_error;
8664 mono_class_init (handle_class);
8665 if (cfg->generic_sharing_context) {
8666 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8667 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8668 /* This case handles ldtoken
8669 of an open type, like for
8670 typeof(Gen<>). */
8671 context_used = 0;
8672 } else if (handle_class == mono_defaults.typehandle_class) {
8673 /* If we get a MONO_TYPE_CLASS
8674 then we need to provide the
8675 open type, not an
8676 instantiation of it. */
8677 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8678 context_used = 0;
8679 else
8680 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8681 } else if (handle_class == mono_defaults.fieldhandle_class)
8682 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8683 else if (handle_class == mono_defaults.methodhandle_class)
8684 context_used = mono_method_check_context_used (handle);
8685 else
8686 g_assert_not_reached ();
8689 if ((cfg->opt & MONO_OPT_SHARED) &&
8690 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8691 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8692 MonoInst *addr, *vtvar, *iargs [3];
8693 int method_context_used;
8695 if (cfg->generic_sharing_context)
8696 method_context_used = mono_method_check_context_used (method);
8697 else
8698 method_context_used = 0;
8700 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8702 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8703 EMIT_NEW_ICONST (cfg, iargs [1], n);
8704 if (method_context_used) {
8705 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8706 method, MONO_RGCTX_INFO_METHOD);
8707 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8708 } else {
8709 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8710 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8712 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8716 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8717 } else {
8718 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8719 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8720 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8721 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8722 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8723 MonoClass *tclass = mono_class_from_mono_type (handle);
8725 mono_class_init (tclass);
8726 if (context_used) {
8727 ins = emit_get_rgctx_klass (cfg, context_used,
8728 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8729 } else if (cfg->compile_aot) {
8730 if (method->wrapper_type) {
8731 /* FIXME: n is not a normal token */
8732 cfg->disable_aot = TRUE;
8733 EMIT_NEW_PCONST (cfg, ins, NULL);
8734 } else {
8735 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8737 } else {
8738 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8740 ins->type = STACK_OBJ;
8741 ins->klass = cmethod->klass;
8742 ip += 5;
8743 } else {
8744 MonoInst *addr, *vtvar;
8746 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8748 if (context_used) {
8749 if (handle_class == mono_defaults.typehandle_class) {
8750 ins = emit_get_rgctx_klass (cfg, context_used,
8751 mono_class_from_mono_type (handle),
8752 MONO_RGCTX_INFO_TYPE);
8753 } else if (handle_class == mono_defaults.methodhandle_class) {
8754 ins = emit_get_rgctx_method (cfg, context_used,
8755 handle, MONO_RGCTX_INFO_METHOD);
8756 } else if (handle_class == mono_defaults.fieldhandle_class) {
8757 ins = emit_get_rgctx_field (cfg, context_used,
8758 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8759 } else {
8760 g_assert_not_reached ();
8762 } else if (cfg->compile_aot) {
8763 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8764 } else {
8765 EMIT_NEW_PCONST (cfg, ins, handle);
8767 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8769 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8773 *sp++ = ins;
8774 ip += 5;
8775 break;
8777 case CEE_THROW:
8778 CHECK_STACK (1);
8779 MONO_INST_NEW (cfg, ins, OP_THROW);
8780 --sp;
8781 ins->sreg1 = sp [0]->dreg;
8782 ip++;
8783 bblock->out_of_line = TRUE;
8784 MONO_ADD_INS (bblock, ins);
8785 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8786 MONO_ADD_INS (bblock, ins);
8787 sp = stack_start;
8789 link_bblock (cfg, bblock, end_bblock);
8790 start_new_bblock = 1;
8791 break;
8792 case CEE_ENDFINALLY:
8793 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8794 MONO_ADD_INS (bblock, ins);
8795 ip++;
8796 start_new_bblock = 1;
8799 * Control will leave the method so empty the stack, otherwise
8800 * the next basic block will start with a nonempty stack.
8802 while (sp != stack_start) {
8803 sp--;
8805 break;
8806 case CEE_LEAVE:
8807 case CEE_LEAVE_S: {
8808 GList *handlers;
8810 if (*ip == CEE_LEAVE) {
8811 CHECK_OPSIZE (5);
8812 target = ip + 5 + (gint32)read32(ip + 1);
8813 } else {
8814 CHECK_OPSIZE (2);
8815 target = ip + 2 + (signed char)(ip [1]);
8818 /* empty the stack */
8819 while (sp != stack_start) {
8820 sp--;
8824 * If this leave statement is in a catch block, check for a
8825 * pending exception, and rethrow it if necessary.
8827 for (i = 0; i < header->num_clauses; ++i) {
8828 MonoExceptionClause *clause = &header->clauses [i];
8831 * Use <= in the final comparison to handle clauses with multiple
8832 * leave statements, like in bug #78024.
8833 * The ordering of the exception clauses guarantees that we find the
8834 * innermost clause.
8836 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8837 MonoInst *exc_ins;
8838 MonoBasicBlock *dont_throw;
8841 MonoInst *load;
8843 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8846 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8848 NEW_BBLOCK (cfg, dont_throw);
8851 * Currently, we always rethrow the abort exception, despite the
8852 * fact that this is not correct. See thread6.cs for an example.
8853 * But propagating the abort exception is more important than
8854 * getting the semantics right.
8856 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8857 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8858 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8860 MONO_START_BB (cfg, dont_throw);
8861 bblock = cfg->cbb;
8865 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8866 GList *tmp;
8867 for (tmp = handlers; tmp; tmp = tmp->next) {
8868 tblock = tmp->data;
8869 link_bblock (cfg, bblock, tblock);
8870 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8871 ins->inst_target_bb = tblock;
8872 MONO_ADD_INS (bblock, ins);
8873 bblock->has_call_handler = 1;
8875 g_list_free (handlers);
8878 MONO_INST_NEW (cfg, ins, OP_BR);
8879 MONO_ADD_INS (bblock, ins);
8880 GET_BBLOCK (cfg, tblock, target);
8881 link_bblock (cfg, bblock, tblock);
8882 ins->inst_target_bb = tblock;
8883 start_new_bblock = 1;
8885 if (*ip == CEE_LEAVE)
8886 ip += 5;
8887 else
8888 ip += 2;
8890 break;
8894 * Mono specific opcodes
8896 case MONO_CUSTOM_PREFIX: {
8898 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8900 CHECK_OPSIZE (2);
8901 switch (ip [1]) {
8902 case CEE_MONO_ICALL: {
8903 gpointer func;
8904 MonoJitICallInfo *info;
8906 token = read32 (ip + 2);
8907 func = mono_method_get_wrapper_data (method, token);
8908 info = mono_find_jit_icall_by_addr (func);
8909 g_assert (info);
8911 CHECK_STACK (info->sig->param_count);
8912 sp -= info->sig->param_count;
8914 ins = mono_emit_jit_icall (cfg, info->func, sp);
8915 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8916 *sp++ = ins;
8918 ip += 6;
8919 inline_costs += 10 * num_calls++;
8921 break;
8923 case CEE_MONO_LDPTR: {
8924 gpointer ptr;
8926 CHECK_STACK_OVF (1);
8927 CHECK_OPSIZE (6);
8928 token = read32 (ip + 2);
8930 ptr = mono_method_get_wrapper_data (method, token);
8931 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8932 MonoJitICallInfo *callinfo;
8933 const char *icall_name;
8935 icall_name = method->name + strlen ("__icall_wrapper_");
8936 g_assert (icall_name);
8937 callinfo = mono_find_jit_icall_by_name (icall_name);
8938 g_assert (callinfo);
8940 if (ptr == callinfo->func) {
8941 /* Will be transformed into an AOTCONST later */
8942 EMIT_NEW_PCONST (cfg, ins, ptr);
8943 *sp++ = ins;
8944 ip += 6;
8945 break;
8948 /* FIXME: Generalize this */
8949 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8950 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8951 *sp++ = ins;
8952 ip += 6;
8953 break;
8955 EMIT_NEW_PCONST (cfg, ins, ptr);
8956 *sp++ = ins;
8957 ip += 6;
8958 inline_costs += 10 * num_calls++;
8959 /* Can't embed random pointers into AOT code */
8960 cfg->disable_aot = 1;
8961 break;
8963 case CEE_MONO_ICALL_ADDR: {
8964 MonoMethod *cmethod;
8965 gpointer ptr;
8967 CHECK_STACK_OVF (1);
8968 CHECK_OPSIZE (6);
8969 token = read32 (ip + 2);
8971 cmethod = mono_method_get_wrapper_data (method, token);
8973 if (cfg->compile_aot) {
8974 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8975 } else {
8976 ptr = mono_lookup_internal_call (cmethod);
8977 g_assert (ptr);
8978 EMIT_NEW_PCONST (cfg, ins, ptr);
8980 *sp++ = ins;
8981 ip += 6;
8982 break;
8984 case CEE_MONO_VTADDR: {
8985 MonoInst *src_var, *src;
8987 CHECK_STACK (1);
8988 --sp;
8990 // FIXME:
8991 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8992 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8993 *sp++ = src;
8994 ip += 2;
8995 break;
8997 case CEE_MONO_NEWOBJ: {
8998 MonoInst *iargs [2];
9000 CHECK_STACK_OVF (1);
9001 CHECK_OPSIZE (6);
9002 token = read32 (ip + 2);
9003 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9004 mono_class_init (klass);
9005 NEW_DOMAINCONST (cfg, iargs [0]);
9006 MONO_ADD_INS (cfg->cbb, iargs [0]);
9007 NEW_CLASSCONST (cfg, iargs [1], klass);
9008 MONO_ADD_INS (cfg->cbb, iargs [1]);
9009 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9010 ip += 6;
9011 inline_costs += 10 * num_calls++;
9012 break;
9014 case CEE_MONO_OBJADDR:
9015 CHECK_STACK (1);
9016 --sp;
9017 MONO_INST_NEW (cfg, ins, OP_MOVE);
9018 ins->dreg = alloc_preg (cfg);
9019 ins->sreg1 = sp [0]->dreg;
9020 ins->type = STACK_MP;
9021 MONO_ADD_INS (cfg->cbb, ins);
9022 *sp++ = ins;
9023 ip += 2;
9024 break;
9025 case CEE_MONO_LDNATIVEOBJ:
9027 * Similar to LDOBJ, but instead load the unmanaged
9028 * representation of the vtype to the stack.
9030 CHECK_STACK (1);
9031 CHECK_OPSIZE (6);
9032 --sp;
9033 token = read32 (ip + 2);
9034 klass = mono_method_get_wrapper_data (method, token);
9035 g_assert (klass->valuetype);
9036 mono_class_init (klass);
9039 MonoInst *src, *dest, *temp;
9041 src = sp [0];
9042 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9043 temp->backend.is_pinvoke = 1;
9044 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9045 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9047 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9048 dest->type = STACK_VTYPE;
9049 dest->klass = klass;
9051 *sp ++ = dest;
9052 ip += 6;
9054 break;
9055 case CEE_MONO_RETOBJ: {
9057 * Same as RET, but return the native representation of a vtype
9058 * to the caller.
9060 g_assert (cfg->ret);
9061 g_assert (mono_method_signature (method)->pinvoke);
9062 CHECK_STACK (1);
9063 --sp;
9065 CHECK_OPSIZE (6);
9066 token = read32 (ip + 2);
9067 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9069 if (!cfg->vret_addr) {
9070 g_assert (cfg->ret_var_is_local);
9072 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9073 } else {
9074 EMIT_NEW_RETLOADA (cfg, ins);
9076 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9078 if (sp != stack_start)
9079 UNVERIFIED;
9081 MONO_INST_NEW (cfg, ins, OP_BR);
9082 ins->inst_target_bb = end_bblock;
9083 MONO_ADD_INS (bblock, ins);
9084 link_bblock (cfg, bblock, end_bblock);
9085 start_new_bblock = 1;
9086 ip += 6;
9087 break;
9089 case CEE_MONO_CISINST:
9090 case CEE_MONO_CCASTCLASS: {
9091 int token;
9092 CHECK_STACK (1);
9093 --sp;
9094 CHECK_OPSIZE (6);
9095 token = read32 (ip + 2);
9096 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9097 if (ip [1] == CEE_MONO_CISINST)
9098 ins = handle_cisinst (cfg, klass, sp [0]);
9099 else
9100 ins = handle_ccastclass (cfg, klass, sp [0]);
9101 bblock = cfg->cbb;
9102 *sp++ = ins;
9103 ip += 6;
9104 break;
9106 case CEE_MONO_SAVE_LMF:
9107 case CEE_MONO_RESTORE_LMF:
9108 #ifdef MONO_ARCH_HAVE_LMF_OPS
9109 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9110 MONO_ADD_INS (bblock, ins);
9111 cfg->need_lmf_area = TRUE;
9112 #endif
9113 ip += 2;
9114 break;
9115 case CEE_MONO_CLASSCONST:
9116 CHECK_STACK_OVF (1);
9117 CHECK_OPSIZE (6);
9118 token = read32 (ip + 2);
9119 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9120 *sp++ = ins;
9121 ip += 6;
9122 inline_costs += 10 * num_calls++;
9123 break;
9124 case CEE_MONO_NOT_TAKEN:
9125 bblock->out_of_line = TRUE;
9126 ip += 2;
9127 break;
9128 case CEE_MONO_TLS:
9129 CHECK_STACK_OVF (1);
9130 CHECK_OPSIZE (6);
9131 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9132 ins->dreg = alloc_preg (cfg);
9133 ins->inst_offset = (gint32)read32 (ip + 2);
9134 ins->type = STACK_PTR;
9135 MONO_ADD_INS (bblock, ins);
9136 *sp++ = ins;
9137 ip += 6;
9138 break;
9139 default:
9140 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9141 break;
9143 break;
9146 case CEE_PREFIX1: {
9147 CHECK_OPSIZE (2);
9148 switch (ip [1]) {
9149 case CEE_ARGLIST: {
9150 /* somewhat similar to LDTOKEN */
9151 MonoInst *addr, *vtvar;
9152 CHECK_STACK_OVF (1);
9153 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9155 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9156 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9158 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9159 ins->type = STACK_VTYPE;
9160 ins->klass = mono_defaults.argumenthandle_class;
9161 *sp++ = ins;
9162 ip += 2;
9163 break;
9165 case CEE_CEQ:
9166 case CEE_CGT:
9167 case CEE_CGT_UN:
9168 case CEE_CLT:
9169 case CEE_CLT_UN: {
9170 MonoInst *cmp;
9171 CHECK_STACK (2);
9173 * The following transforms:
9174 * CEE_CEQ into OP_CEQ
9175 * CEE_CGT into OP_CGT
9176 * CEE_CGT_UN into OP_CGT_UN
9177 * CEE_CLT into OP_CLT
9178 * CEE_CLT_UN into OP_CLT_UN
9180 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9182 MONO_INST_NEW (cfg, ins, cmp->opcode);
9183 sp -= 2;
9184 cmp->sreg1 = sp [0]->dreg;
9185 cmp->sreg2 = sp [1]->dreg;
9186 type_from_op (cmp, sp [0], sp [1]);
9187 CHECK_TYPE (cmp);
9188 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9189 cmp->opcode = OP_LCOMPARE;
9190 else if (sp [0]->type == STACK_R8)
9191 cmp->opcode = OP_FCOMPARE;
9192 else
9193 cmp->opcode = OP_ICOMPARE;
9194 MONO_ADD_INS (bblock, cmp);
9195 ins->type = STACK_I4;
9196 ins->dreg = alloc_dreg (cfg, ins->type);
9197 type_from_op (ins, sp [0], sp [1]);
9199 if (cmp->opcode == OP_FCOMPARE) {
9201 * The backends expect the fceq opcodes to do the
9202 * comparison too.
9204 cmp->opcode = OP_NOP;
9205 ins->sreg1 = cmp->sreg1;
9206 ins->sreg2 = cmp->sreg2;
9208 MONO_ADD_INS (bblock, ins);
9209 *sp++ = ins;
9210 ip += 2;
9211 break;
9213 case CEE_LDFTN: {
9214 MonoInst *argconst;
9215 MonoMethod *cil_method;
9216 gboolean needs_static_rgctx_invoke;
9218 CHECK_STACK_OVF (1);
9219 CHECK_OPSIZE (6);
9220 n = read32 (ip + 2);
9221 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9222 if (!cmethod)
9223 goto load_error;
9224 mono_class_init (cmethod->klass);
9226 mono_save_token_info (cfg, image, n, cmethod);
9228 if (cfg->generic_sharing_context)
9229 context_used = mono_method_check_context_used (cmethod);
9231 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9233 cil_method = cmethod;
9234 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9235 METHOD_ACCESS_FAILURE;
9237 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9238 if (check_linkdemand (cfg, method, cmethod))
9239 INLINE_FAILURE;
9240 CHECK_CFG_EXCEPTION;
9241 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9242 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9246 * Optimize the common case of ldftn+delegate creation
9248 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9249 /* FIXME: SGEN support */
9250 /* FIXME: handle shared static generic methods */
9251 /* FIXME: handle this in shared code */
9252 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9253 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9254 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9255 MonoInst *target_ins;
9256 MonoMethod *invoke;
9258 invoke = mono_get_delegate_invoke (ctor_method->klass);
9259 if (!invoke || !mono_method_signature (invoke))
9260 goto load_error;
9262 ip += 6;
9263 if (cfg->verbose_level > 3)
9264 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9265 target_ins = sp [-1];
9266 sp --;
9267 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9268 ip += 5;
9269 sp ++;
9270 break;
9273 #endif
9275 if (context_used) {
9276 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9277 } else {
9278 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9280 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9281 *sp++ = ins;
9283 ip += 6;
9284 inline_costs += 10 * num_calls++;
9285 break;
9287 case CEE_LDVIRTFTN: {
9288 MonoInst *args [2];
9290 CHECK_STACK (1);
9291 CHECK_OPSIZE (6);
9292 n = read32 (ip + 2);
9293 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9294 if (!cmethod)
9295 goto load_error;
9296 mono_class_init (cmethod->klass);
9298 if (cfg->generic_sharing_context)
9299 context_used = mono_method_check_context_used (cmethod);
9301 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9302 if (check_linkdemand (cfg, method, cmethod))
9303 INLINE_FAILURE;
9304 CHECK_CFG_EXCEPTION;
9305 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9306 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9309 --sp;
9310 args [0] = *sp;
9312 if (context_used) {
9313 args [1] = emit_get_rgctx_method (cfg, context_used,
9314 cmethod, MONO_RGCTX_INFO_METHOD);
9315 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9316 } else {
9317 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9318 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9321 ip += 6;
9322 inline_costs += 10 * num_calls++;
9323 break;
9325 case CEE_LDARG:
9326 CHECK_STACK_OVF (1);
9327 CHECK_OPSIZE (4);
9328 n = read16 (ip + 2);
9329 CHECK_ARG (n);
9330 EMIT_NEW_ARGLOAD (cfg, ins, n);
9331 *sp++ = ins;
9332 ip += 4;
9333 break;
9334 case CEE_LDARGA:
9335 CHECK_STACK_OVF (1);
9336 CHECK_OPSIZE (4);
9337 n = read16 (ip + 2);
9338 CHECK_ARG (n);
9339 NEW_ARGLOADA (cfg, ins, n);
9340 MONO_ADD_INS (cfg->cbb, ins);
9341 *sp++ = ins;
9342 ip += 4;
9343 break;
9344 case CEE_STARG:
9345 CHECK_STACK (1);
9346 --sp;
9347 CHECK_OPSIZE (4);
9348 n = read16 (ip + 2);
9349 CHECK_ARG (n);
9350 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9351 UNVERIFIED;
9352 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9353 ip += 4;
9354 break;
9355 case CEE_LDLOC:
9356 CHECK_STACK_OVF (1);
9357 CHECK_OPSIZE (4);
9358 n = read16 (ip + 2);
9359 CHECK_LOCAL (n);
9360 EMIT_NEW_LOCLOAD (cfg, ins, n);
9361 *sp++ = ins;
9362 ip += 4;
9363 break;
9364 case CEE_LDLOCA: {
9365 unsigned char *tmp_ip;
9366 CHECK_STACK_OVF (1);
9367 CHECK_OPSIZE (4);
9368 n = read16 (ip + 2);
9369 CHECK_LOCAL (n);
9371 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9372 ip = tmp_ip;
9373 inline_costs += 1;
9374 break;
9377 EMIT_NEW_LOCLOADA (cfg, ins, n);
9378 *sp++ = ins;
9379 ip += 4;
9380 break;
9382 case CEE_STLOC:
9383 CHECK_STACK (1);
9384 --sp;
9385 CHECK_OPSIZE (4);
9386 n = read16 (ip + 2);
9387 CHECK_LOCAL (n);
9388 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9389 UNVERIFIED;
9390 emit_stloc_ir (cfg, sp, header, n);
9391 ip += 4;
9392 inline_costs += 1;
9393 break;
9394 case CEE_LOCALLOC:
9395 CHECK_STACK (1);
9396 --sp;
9397 if (sp != stack_start)
9398 UNVERIFIED;
9399 if (cfg->method != method)
9401 * Inlining this into a loop in a parent could lead to
9402 * stack overflows which is different behavior than the
9403 * non-inlined case, thus disable inlining in this case.
9405 goto inline_failure;
9407 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9408 ins->dreg = alloc_preg (cfg);
9409 ins->sreg1 = sp [0]->dreg;
9410 ins->type = STACK_PTR;
9411 MONO_ADD_INS (cfg->cbb, ins);
9413 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9414 if (init_locals)
9415 ins->flags |= MONO_INST_INIT;
9417 *sp++ = ins;
9418 ip += 2;
9419 break;
9420 case CEE_ENDFILTER: {
9421 MonoExceptionClause *clause, *nearest;
9422 int cc, nearest_num;
9424 CHECK_STACK (1);
9425 --sp;
9426 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9427 UNVERIFIED;
9428 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9429 ins->sreg1 = (*sp)->dreg;
9430 MONO_ADD_INS (bblock, ins);
9431 start_new_bblock = 1;
9432 ip += 2;
9434 nearest = NULL;
9435 nearest_num = 0;
9436 for (cc = 0; cc < header->num_clauses; ++cc) {
9437 clause = &header->clauses [cc];
9438 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9439 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9440 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9441 nearest = clause;
9442 nearest_num = cc;
9445 g_assert (nearest);
9446 if ((ip - header->code) != nearest->handler_offset)
9447 UNVERIFIED;
9449 break;
9451 case CEE_UNALIGNED_:
9452 ins_flag |= MONO_INST_UNALIGNED;
9453 /* FIXME: record alignment? we can assume 1 for now */
9454 CHECK_OPSIZE (3);
9455 ip += 3;
9456 break;
9457 case CEE_VOLATILE_:
9458 ins_flag |= MONO_INST_VOLATILE;
9459 ip += 2;
9460 break;
9461 case CEE_TAIL_:
9462 ins_flag |= MONO_INST_TAILCALL;
9463 cfg->flags |= MONO_CFG_HAS_TAIL;
9464 /* Can't inline tail calls at this time */
9465 inline_costs += 100000;
9466 ip += 2;
9467 break;
9468 case CEE_INITOBJ:
9469 CHECK_STACK (1);
9470 --sp;
9471 CHECK_OPSIZE (6);
9472 token = read32 (ip + 2);
9473 klass = mini_get_class (method, token, generic_context);
9474 CHECK_TYPELOAD (klass);
9475 if (generic_class_is_reference_type (cfg, klass))
9476 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9477 else
9478 mini_emit_initobj (cfg, *sp, NULL, klass);
9479 ip += 6;
9480 inline_costs += 1;
9481 break;
9482 case CEE_CONSTRAINED_:
9483 CHECK_OPSIZE (6);
9484 token = read32 (ip + 2);
9485 constrained_call = mono_class_get_full (image, token, generic_context);
9486 CHECK_TYPELOAD (constrained_call);
9487 ip += 6;
9488 break;
9489 case CEE_CPBLK:
9490 case CEE_INITBLK: {
9491 MonoInst *iargs [3];
9492 CHECK_STACK (3);
9493 sp -= 3;
9495 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9496 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9497 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9498 /* emit_memset only works when val == 0 */
9499 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9500 } else {
9501 iargs [0] = sp [0];
9502 iargs [1] = sp [1];
9503 iargs [2] = sp [2];
9504 if (ip [1] == CEE_CPBLK) {
9505 MonoMethod *memcpy_method = get_memcpy_method ();
9506 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9507 } else {
9508 MonoMethod *memset_method = get_memset_method ();
9509 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9512 ip += 2;
9513 inline_costs += 1;
9514 break;
9516 case CEE_NO_:
9517 CHECK_OPSIZE (3);
9518 if (ip [2] & 0x1)
9519 ins_flag |= MONO_INST_NOTYPECHECK;
9520 if (ip [2] & 0x2)
9521 ins_flag |= MONO_INST_NORANGECHECK;
9522 /* we ignore the no-nullcheck for now since we
9523 * really do it explicitly only when doing callvirt->call
9525 ip += 3;
9526 break;
9527 case CEE_RETHROW: {
9528 MonoInst *load;
9529 int handler_offset = -1;
9531 for (i = 0; i < header->num_clauses; ++i) {
9532 MonoExceptionClause *clause = &header->clauses [i];
9533 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9534 handler_offset = clause->handler_offset;
9535 break;
9539 bblock->flags |= BB_EXCEPTION_UNSAFE;
9541 g_assert (handler_offset != -1);
9543 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9544 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9545 ins->sreg1 = load->dreg;
9546 MONO_ADD_INS (bblock, ins);
9547 sp = stack_start;
9548 link_bblock (cfg, bblock, end_bblock);
9549 start_new_bblock = 1;
9550 ip += 2;
9551 break;
9553 case CEE_SIZEOF: {
9554 guint32 align;
9555 int ialign;
9557 CHECK_STACK_OVF (1);
9558 CHECK_OPSIZE (6);
9559 token = read32 (ip + 2);
9560 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9561 MonoType *type = mono_type_create_from_typespec (image, token);
9562 token = mono_type_size (type, &ialign);
9563 } else {
9564 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9565 CHECK_TYPELOAD (klass);
9566 mono_class_init (klass);
9567 token = mono_class_value_size (klass, &align);
9569 EMIT_NEW_ICONST (cfg, ins, token);
9570 *sp++= ins;
9571 ip += 6;
9572 break;
9574 case CEE_REFANYTYPE: {
9575 MonoInst *src_var, *src;
9577 CHECK_STACK (1);
9578 --sp;
9580 // FIXME:
9581 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9582 if (!src_var)
9583 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9584 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9585 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9586 *sp++ = ins;
9587 ip += 2;
9588 break;
9590 case CEE_READONLY_:
9591 readonly = TRUE;
9592 ip += 2;
9593 break;
9594 default:
9595 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9597 break;
9599 default:
9600 g_error ("opcode 0x%02x not handled", *ip);
9603 if (start_new_bblock != 1)
9604 UNVERIFIED;
9606 bblock->cil_length = ip - bblock->cil_code;
9607 bblock->next_bb = end_bblock;
9609 if (cfg->method == method && cfg->domainvar) {
9610 MonoInst *store;
9611 MonoInst *get_domain;
9613 cfg->cbb = init_localsbb;
9615 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9616 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9618 else {
9619 get_domain->dreg = alloc_preg (cfg);
9620 MONO_ADD_INS (cfg->cbb, get_domain);
9622 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9623 MONO_ADD_INS (cfg->cbb, store);
9626 #ifdef TARGET_POWERPC
9627 if (cfg->compile_aot)
9628 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9629 mono_get_got_var (cfg);
9630 #endif
9632 if (cfg->method == method && cfg->got_var)
9633 mono_emit_load_got_addr (cfg);
9635 if (init_locals) {
9636 MonoInst *store;
9638 cfg->cbb = init_localsbb;
9639 cfg->ip = NULL;
9640 for (i = 0; i < header->num_locals; ++i) {
9641 MonoType *ptype = header->locals [i];
9642 int t = ptype->type;
9643 dreg = cfg->locals [i]->dreg;
9645 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9646 t = mono_class_enum_basetype (ptype->data.klass)->type;
9647 if (ptype->byref) {
9648 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9649 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9650 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9651 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9652 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9653 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9654 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9655 ins->type = STACK_R8;
9656 ins->inst_p0 = (void*)&r8_0;
9657 ins->dreg = alloc_dreg (cfg, STACK_R8);
9658 MONO_ADD_INS (init_localsbb, ins);
9659 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9660 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9661 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9662 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9663 } else {
9664 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9669 cfg->ip = NULL;
9671 if (cfg->method == method) {
9672 MonoBasicBlock *bb;
9673 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9674 bb->region = mono_find_block_region (cfg, bb->real_offset);
9675 if (cfg->spvars)
9676 mono_create_spvar_for_region (cfg, bb->region);
9677 if (cfg->verbose_level > 2)
9678 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9682 g_slist_free (class_inits);
9683 dont_inline = g_list_remove (dont_inline, method);
9685 if (inline_costs < 0) {
9686 char *mname;
9688 /* Method is too large */
9689 mname = mono_method_full_name (method, TRUE);
9690 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9691 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9692 g_free (mname);
9693 return -1;
9696 if ((cfg->verbose_level > 2) && (cfg->method == method))
9697 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9699 return inline_costs;
9701 exception_exit:
9702 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9703 g_slist_free (class_inits);
9704 dont_inline = g_list_remove (dont_inline, method);
9705 return -1;
9707 inline_failure:
9708 g_slist_free (class_inits);
9709 dont_inline = g_list_remove (dont_inline, method);
9710 return -1;
9712 load_error:
9713 g_slist_free (class_inits);
9714 dont_inline = g_list_remove (dont_inline, method);
9715 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9716 return -1;
9718 unverified:
9719 g_slist_free (class_inits);
9720 dont_inline = g_list_remove (dont_inline, method);
9721 set_exception_type_from_invalid_il (cfg, method, ip);
9722 return -1;
9725 static int
9726 store_membase_reg_to_store_membase_imm (int opcode)
9728 switch (opcode) {
9729 case OP_STORE_MEMBASE_REG:
9730 return OP_STORE_MEMBASE_IMM;
9731 case OP_STOREI1_MEMBASE_REG:
9732 return OP_STOREI1_MEMBASE_IMM;
9733 case OP_STOREI2_MEMBASE_REG:
9734 return OP_STOREI2_MEMBASE_IMM;
9735 case OP_STOREI4_MEMBASE_REG:
9736 return OP_STOREI4_MEMBASE_IMM;
9737 case OP_STOREI8_MEMBASE_REG:
9738 return OP_STOREI8_MEMBASE_IMM;
9739 default:
9740 g_assert_not_reached ();
9743 return -1;
9746 #endif /* DISABLE_JIT */
9749 mono_op_to_op_imm (int opcode)
9751 switch (opcode) {
9752 case OP_IADD:
9753 return OP_IADD_IMM;
9754 case OP_ISUB:
9755 return OP_ISUB_IMM;
9756 case OP_IDIV:
9757 return OP_IDIV_IMM;
9758 case OP_IDIV_UN:
9759 return OP_IDIV_UN_IMM;
9760 case OP_IREM:
9761 return OP_IREM_IMM;
9762 case OP_IREM_UN:
9763 return OP_IREM_UN_IMM;
9764 case OP_IMUL:
9765 return OP_IMUL_IMM;
9766 case OP_IAND:
9767 return OP_IAND_IMM;
9768 case OP_IOR:
9769 return OP_IOR_IMM;
9770 case OP_IXOR:
9771 return OP_IXOR_IMM;
9772 case OP_ISHL:
9773 return OP_ISHL_IMM;
9774 case OP_ISHR:
9775 return OP_ISHR_IMM;
9776 case OP_ISHR_UN:
9777 return OP_ISHR_UN_IMM;
9779 case OP_LADD:
9780 return OP_LADD_IMM;
9781 case OP_LSUB:
9782 return OP_LSUB_IMM;
9783 case OP_LAND:
9784 return OP_LAND_IMM;
9785 case OP_LOR:
9786 return OP_LOR_IMM;
9787 case OP_LXOR:
9788 return OP_LXOR_IMM;
9789 case OP_LSHL:
9790 return OP_LSHL_IMM;
9791 case OP_LSHR:
9792 return OP_LSHR_IMM;
9793 case OP_LSHR_UN:
9794 return OP_LSHR_UN_IMM;
9796 case OP_COMPARE:
9797 return OP_COMPARE_IMM;
9798 case OP_ICOMPARE:
9799 return OP_ICOMPARE_IMM;
9800 case OP_LCOMPARE:
9801 return OP_LCOMPARE_IMM;
9803 case OP_STORE_MEMBASE_REG:
9804 return OP_STORE_MEMBASE_IMM;
9805 case OP_STOREI1_MEMBASE_REG:
9806 return OP_STOREI1_MEMBASE_IMM;
9807 case OP_STOREI2_MEMBASE_REG:
9808 return OP_STOREI2_MEMBASE_IMM;
9809 case OP_STOREI4_MEMBASE_REG:
9810 return OP_STOREI4_MEMBASE_IMM;
9812 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9813 case OP_X86_PUSH:
9814 return OP_X86_PUSH_IMM;
9815 case OP_X86_COMPARE_MEMBASE_REG:
9816 return OP_X86_COMPARE_MEMBASE_IMM;
9817 #endif
9818 #if defined(TARGET_AMD64)
9819 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9820 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9821 #endif
9822 case OP_VOIDCALL_REG:
9823 return OP_VOIDCALL;
9824 case OP_CALL_REG:
9825 return OP_CALL;
9826 case OP_LCALL_REG:
9827 return OP_LCALL;
9828 case OP_FCALL_REG:
9829 return OP_FCALL;
9830 case OP_LOCALLOC:
9831 return OP_LOCALLOC_IMM;
9834 return -1;
9837 static int
9838 ldind_to_load_membase (int opcode)
9840 switch (opcode) {
9841 case CEE_LDIND_I1:
9842 return OP_LOADI1_MEMBASE;
9843 case CEE_LDIND_U1:
9844 return OP_LOADU1_MEMBASE;
9845 case CEE_LDIND_I2:
9846 return OP_LOADI2_MEMBASE;
9847 case CEE_LDIND_U2:
9848 return OP_LOADU2_MEMBASE;
9849 case CEE_LDIND_I4:
9850 return OP_LOADI4_MEMBASE;
9851 case CEE_LDIND_U4:
9852 return OP_LOADU4_MEMBASE;
9853 case CEE_LDIND_I:
9854 return OP_LOAD_MEMBASE;
9855 case CEE_LDIND_REF:
9856 return OP_LOAD_MEMBASE;
9857 case CEE_LDIND_I8:
9858 return OP_LOADI8_MEMBASE;
9859 case CEE_LDIND_R4:
9860 return OP_LOADR4_MEMBASE;
9861 case CEE_LDIND_R8:
9862 return OP_LOADR8_MEMBASE;
9863 default:
9864 g_assert_not_reached ();
9867 return -1;
9870 static int
9871 stind_to_store_membase (int opcode)
9873 switch (opcode) {
9874 case CEE_STIND_I1:
9875 return OP_STOREI1_MEMBASE_REG;
9876 case CEE_STIND_I2:
9877 return OP_STOREI2_MEMBASE_REG;
9878 case CEE_STIND_I4:
9879 return OP_STOREI4_MEMBASE_REG;
9880 case CEE_STIND_I:
9881 case CEE_STIND_REF:
9882 return OP_STORE_MEMBASE_REG;
9883 case CEE_STIND_I8:
9884 return OP_STOREI8_MEMBASE_REG;
9885 case CEE_STIND_R4:
9886 return OP_STORER4_MEMBASE_REG;
9887 case CEE_STIND_R8:
9888 return OP_STORER8_MEMBASE_REG;
9889 default:
9890 g_assert_not_reached ();
9893 return -1;
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to its absolute-address LOAD*_MEM variant,
 * or return -1 when the architecture has no such instruction.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	/* FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro */
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE: return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE: return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE: return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE: return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE: return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE: return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 read-modify-write opcode equivalent to OPCODE
 * which operates directly on its destination memory location, given that
 * the result would otherwise be written back with STORE_OPCODE. Returns
 * -1 if the combination cannot be fused; OP_MOVE folds away entirely
 * (the fused store performs the move), hence OP_NOP.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer sized / 32 bit stores can be folded. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM: case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM: case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM: case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM: case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM: case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE: return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* Pointer sized, 32 bit or 64 bit stores can be folded. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD: return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB: return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND: return OP_X86_AND_MEMBASE_REG;
	case OP_IOR: return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR: return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM: return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM: return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM: return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM: return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM: return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD: return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB: return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND: return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR: return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR: return OP_AMD64_XOR_MEMBASE_REG;
	/* the generic *_IMM forms are pointer sized, so use the 64 bit ops */
	case OP_ADD_IMM: case OP_LADD_IMM: return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM: case OP_LSUB_IMM: return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM: case OP_LAND_IMM: return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM: case OP_LOR_IMM: return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM: case OP_LXOR_IMM: return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE: return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the boolean result of OPCODE directly
 * into memory (x86/amd64 SETcc with a membase destination), or -1 if the
 * combination cannot be fused. Only byte sized stores qualify, since
 * SETcc writes a single byte.
 *
 * The original code was missing the 'break' statements, so OP_ICEQ fell
 * through into the OP_CNE case. That was harmless only because both
 * guards test the identical condition (which had just failed); the
 * explicit breaks below preserve behavior while removing the implicit
 * fallthrough.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode equivalent to OPCODE which reads its first source
 * operand directly from memory (the value produced by LOAD_OPCODE), or
 * -1 if the load cannot be folded into the instruction.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode equivalent to OPCODE which reads its second source
 * operand directly from memory (the value produced by LOAD_OPCODE), or
 * -1 if the load cannot be folded into the instruction.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD: return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB: return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND: return OP_X86_AND_REG_MEMBASE;
	case OP_IOR: return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR: return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	switch (opcode) {
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_COMPARE_REG_MEMBASE;
		break;
	/*
	 * NOTE(review): the cases below intentionally reproduce the original's
	 * missing 'break's: an i4 opcode paired with an i8 load falls through
	 * until the matching 64 bit check succeeds. This looks accidental but
	 * is preserved to keep behavior identical -- TODO confirm upstream.
	 */
	case OP_IADD:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_ADD_REG_MEMBASE;
		/* fall through */
	case OP_ISUB:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_SUB_REG_MEMBASE;
		/* fall through */
	case OP_IAND:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_AND_REG_MEMBASE;
		/* fall through */
	case OP_IOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_OR_REG_MEMBASE;
		/* fall through */
	case OP_IXOR:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_X86_XOR_REG_MEMBASE;
		/* fall through */
	case OP_LADD:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_ADD_REG_MEMBASE;
		/* fall through */
	case OP_LSUB:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_SUB_REG_MEMBASE;
		/* fall through */
	case OP_LAND:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_AND_REG_MEMBASE;
		/* fall through */
	case OP_LOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_OR_REG_MEMBASE;
		/* fall through */
	case OP_LXOR:
		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
			return OP_AMD64_XOR_REG_MEMBASE;
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which are
 * emulated with helper calls on this architecture, so the caller keeps
 * the register form instead of the immediate form.
 */
int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	/* 64 bit shifts are emulated on 32 bit targets */
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	/* division/remainder may be emulated as well */
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
#endif
		return -1;
	default:
		return mono_op_to_op_imm (opcode);
	}
}
10189 #ifndef DISABLE_JIT
10192 * mono_handle_global_vregs:
10194 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10195 * for them.
10197 void
10198 mono_handle_global_vregs (MonoCompile *cfg)
10200 gint32 *vreg_to_bb;
10201 MonoBasicBlock *bb;
10202 int i, pos;
10204 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10206 #ifdef MONO_ARCH_SIMD_INTRINSICS
10207 if (cfg->uses_simd_intrinsics)
10208 mono_simd_simplify_indirection (cfg);
10209 #endif
10211 /* Find local vregs used in more than one bb */
10212 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10213 MonoInst *ins = bb->code;
10214 int block_num = bb->block_num;
10216 if (cfg->verbose_level > 2)
10217 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10219 cfg->cbb = bb;
10220 for (; ins; ins = ins->next) {
10221 const char *spec = INS_INFO (ins->opcode);
10222 int regtype, regindex;
10223 gint32 prev_bb;
10225 if (G_UNLIKELY (cfg->verbose_level > 2))
10226 mono_print_ins (ins);
10228 g_assert (ins->opcode >= MONO_CEE_LAST);
10230 for (regindex = 0; regindex < 4; regindex ++) {
10231 int vreg;
10233 if (regindex == 0) {
10234 regtype = spec [MONO_INST_DEST];
10235 if (regtype == ' ')
10236 continue;
10237 vreg = ins->dreg;
10238 } else if (regindex == 1) {
10239 regtype = spec [MONO_INST_SRC1];
10240 if (regtype == ' ')
10241 continue;
10242 vreg = ins->sreg1;
10243 } else if (regindex == 2) {
10244 regtype = spec [MONO_INST_SRC2];
10245 if (regtype == ' ')
10246 continue;
10247 vreg = ins->sreg2;
10248 } else if (regindex == 3) {
10249 regtype = spec [MONO_INST_SRC3];
10250 if (regtype == ' ')
10251 continue;
10252 vreg = ins->sreg3;
10255 #if SIZEOF_REGISTER == 4
10256 /* In the LLVM case, the long opcodes are not decomposed */
10257 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10259 * Since some instructions reference the original long vreg,
10260 * and some reference the two component vregs, it is quite hard
10261 * to determine when it needs to be global. So be conservative.
10263 if (!get_vreg_to_inst (cfg, vreg)) {
10264 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10266 if (cfg->verbose_level > 2)
10267 printf ("LONG VREG R%d made global.\n", vreg);
10271 * Make the component vregs volatile since the optimizations can
10272 * get confused otherwise.
10274 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10275 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10277 #endif
10279 g_assert (vreg != -1);
10281 prev_bb = vreg_to_bb [vreg];
10282 if (prev_bb == 0) {
10283 /* 0 is a valid block num */
10284 vreg_to_bb [vreg] = block_num + 1;
10285 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10286 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10287 continue;
10289 if (!get_vreg_to_inst (cfg, vreg)) {
10290 if (G_UNLIKELY (cfg->verbose_level > 2))
10291 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10293 switch (regtype) {
10294 case 'i':
10295 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10296 break;
10297 case 'f':
10298 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10299 break;
10300 case 'v':
10301 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10302 break;
10303 default:
10304 g_assert_not_reached ();
10308 /* Flag as having been used in more than one bb */
10309 vreg_to_bb [vreg] = -1;
10315 /* If a variable is used in only one bblock, convert it into a local vreg */
10316 for (i = 0; i < cfg->num_varinfo; i++) {
10317 MonoInst *var = cfg->varinfo [i];
10318 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10320 switch (var->type) {
10321 case STACK_I4:
10322 case STACK_OBJ:
10323 case STACK_PTR:
10324 case STACK_MP:
10325 case STACK_VTYPE:
10326 #if SIZEOF_REGISTER == 8
10327 case STACK_I8:
10328 #endif
10329 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10330 /* Enabling this screws up the fp stack on x86 */
10331 case STACK_R8:
10332 #endif
10333 /* Arguments are implicitly global */
10334 /* Putting R4 vars into registers doesn't work currently */
10335 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10337 * Make sure that the variable's liveness interval doesn't contain a call, since
10338 * that would cause the lvreg to be spilled, making the whole optimization
10339 * useless.
10341 /* This is too slow for JIT compilation */
10342 #if 0
10343 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10344 MonoInst *ins;
10345 int def_index, call_index, ins_index;
10346 gboolean spilled = FALSE;
10348 def_index = -1;
10349 call_index = -1;
10350 ins_index = 0;
10351 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10352 const char *spec = INS_INFO (ins->opcode);
10354 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10355 def_index = ins_index;
10357 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10358 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10359 if (call_index > def_index) {
10360 spilled = TRUE;
10361 break;
10365 if (MONO_IS_CALL (ins))
10366 call_index = ins_index;
10368 ins_index ++;
10371 if (spilled)
10372 break;
10374 #endif
10376 if (G_UNLIKELY (cfg->verbose_level > 2))
10377 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10378 var->flags |= MONO_INST_IS_DEAD;
10379 cfg->vreg_to_inst [var->dreg] = NULL;
10381 break;
10386 * Compress the varinfo and vars tables so the liveness computation is faster and
10387 * takes up less space.
10389 pos = 0;
10390 for (i = 0; i < cfg->num_varinfo; ++i) {
10391 MonoInst *var = cfg->varinfo [i];
10392 if (pos < i && cfg->locals_start == i)
10393 cfg->locals_start = pos;
10394 if (!(var->flags & MONO_INST_IS_DEAD)) {
10395 if (pos < i) {
10396 cfg->varinfo [pos] = cfg->varinfo [i];
10397 cfg->varinfo [pos]->inst_c0 = pos;
10398 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10399 cfg->vars [pos].idx = pos;
10400 #if SIZEOF_REGISTER == 4
10401 if (cfg->varinfo [pos]->type == STACK_I8) {
10402 /* Modify the two component vars too */
10403 MonoInst *var1;
10405 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10406 var1->inst_c0 = pos;
10407 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10408 var1->inst_c0 = pos;
10410 #endif
10412 pos ++;
10415 cfg->num_varinfo = pos;
10416 if (cfg->locals_start > cfg->num_varinfo)
10417 cfg->locals_start = cfg->num_varinfo;
10421 * mono_spill_global_vars:
10423 * Generate spill code for variables which are not allocated to registers,
10424 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10425 * code is generated which could be optimized by the local optimization passes.
10427 void
10428 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10430 MonoBasicBlock *bb;
10431 char spec2 [16];
10432 int orig_next_vreg;
10433 guint32 *vreg_to_lvreg;
10434 guint32 *lvregs;
10435 guint32 i, lvregs_len;
10436 gboolean dest_has_lvreg = FALSE;
10437 guint32 stacktypes [128];
10438 MonoInst **live_range_start, **live_range_end;
10439 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10441 *need_local_opts = FALSE;
10443 memset (spec2, 0, sizeof (spec2));
/* Map the spec regtype characters ('i'/'l'/'f'/'x') to the stack types used by alloc_dreg (). */
10445 /* FIXME: Move this function to mini.c */
10446 stacktypes ['i'] = STACK_PTR;
10447 stacktypes ['l'] = STACK_I8;
10448 stacktypes ['f'] = STACK_R8;
10449 #ifdef MONO_ARCH_SIMD_INTRINSICS
10450 stacktypes ['x'] = STACK_VTYPE;
10451 #endif
10453 #if SIZEOF_REGISTER == 4
10454 /* Create MonoInsts for longs */
/*
 * On 32 bit targets, each 64 bit variable has two component vregs
 * (dreg + 1 = LS word, dreg + 2 = MS word); give each component a
 * REGOFFSET slot derived from the parent variable's stack slot.
 */
10455 for (i = 0; i < cfg->num_varinfo; i++) {
10456 MonoInst *ins = cfg->varinfo [i];
10458 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10459 switch (ins->type) {
10460 #ifdef MONO_ARCH_SOFT_FLOAT
10461 case STACK_R8:
10462 #endif
10463 case STACK_I8: {
10464 MonoInst *tree;
10466 g_assert (ins->opcode == OP_REGOFFSET);
10468 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10469 g_assert (tree);
10470 tree->opcode = OP_REGOFFSET;
10471 tree->inst_basereg = ins->inst_basereg;
10472 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10474 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10475 g_assert (tree);
10476 tree->opcode = OP_REGOFFSET;
10477 tree->inst_basereg = ins->inst_basereg;
10478 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10479 break;
10481 default:
10482 break;
10486 #endif
10488 /* FIXME: widening and truncation */
10491 * As an optimization, when a variable allocated to the stack is first loaded into
10492 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10493 * the variable again.
10495 orig_next_vreg = cfg->next_vreg;
10496 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10497 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10498 lvregs_len = 0;
10501 * These arrays contain the first and last instructions accessing a given
10502 * variable.
10503 * Since we emit bblocks in the same order we process them here, and we
10504 * don't split live ranges, these will precisely describe the live range of
10505 * the variable, i.e. the instruction range where a valid value can be found
10506 * in the variables location.
10508 /* FIXME: Only do this if debugging info is requested */
10509 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10510 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10511 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10512 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10514 /* Add spill loads/stores */
10515 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10516 MonoInst *ins;
10518 if (cfg->verbose_level > 2)
10519 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is per-bblock: lvregs do not survive across block boundaries. */
10521 /* Clear vreg_to_lvreg array */
10522 for (i = 0; i < lvregs_len; i++)
10523 vreg_to_lvreg [lvregs [i]] = 0;
10524 lvregs_len = 0;
10526 cfg->cbb = bb;
10527 MONO_BB_FOR_EACH_INS (bb, ins) {
10528 const char *spec = INS_INFO (ins->opcode);
10529 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10530 gboolean store, no_lvreg;
10531 int sregs [MONO_MAX_SRC_REGS];
10533 if (G_UNLIKELY (cfg->verbose_level > 2))
10534 mono_print_ins (ins);
10536 if (ins->opcode == OP_NOP)
10537 continue;
10540 * We handle LDADDR here as well, since it can only be decomposed
10541 * when variable addresses are known.
10543 if (ins->opcode == OP_LDADDR) {
10544 MonoInst *var = ins->inst_p0;
10546 if (var->opcode == OP_VTARG_ADDR) {
10547 /* Happens on SPARC/S390 where vtypes are passed by reference */
10548 MonoInst *vtaddr = var->inst_left;
10549 if (vtaddr->opcode == OP_REGVAR) {
10550 ins->opcode = OP_MOVE;
10551 ins->sreg1 = vtaddr->dreg;
10553 else if (var->inst_left->opcode == OP_REGOFFSET) {
10554 ins->opcode = OP_LOAD_MEMBASE;
10555 ins->inst_basereg = vtaddr->inst_basereg;
10556 ins->inst_offset = vtaddr->inst_offset;
10557 } else
10558 NOT_IMPLEMENTED;
10559 } else {
10560 g_assert (var->opcode == OP_REGOFFSET);
/* Materialize the variable's address as basereg + offset. */
10562 ins->opcode = OP_ADD_IMM;
10563 ins->sreg1 = var->inst_basereg;
10564 ins->inst_imm = var->inst_offset;
10567 *need_local_opts = TRUE;
10568 spec = INS_INFO (ins->opcode);
/* By this point all CIL-level opcodes must have been lowered to machine-level ones. */
10571 if (ins->opcode < MONO_CEE_LAST) {
10572 mono_print_ins (ins);
10573 g_assert_not_reached ();
10577 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10578 * src register.
10579 * FIXME:
10581 if (MONO_IS_STORE_MEMBASE (ins)) {
10582 tmp_reg = ins->dreg;
10583 ins->dreg = ins->sreg2;
10584 ins->sreg2 = tmp_reg;
10585 store = TRUE;
/* Build a fixed-up spec so the dreg is treated as a source below. */
10587 spec2 [MONO_INST_DEST] = ' ';
10588 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10589 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10590 spec2 [MONO_INST_SRC3] = ' ';
10591 spec = spec2;
10592 } else if (MONO_IS_STORE_MEMINDEX (ins))
10593 g_assert_not_reached ();
10594 else
10595 store = FALSE;
10596 no_lvreg = FALSE;
10598 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10599 printf ("\t %.3s %d", spec, ins->dreg);
10600 num_sregs = mono_inst_get_src_registers (ins, sregs);
10601 for (srcindex = 0; srcindex < 3; ++srcindex)
10602 printf (" %d", sregs [srcindex]);
10603 printf ("\n");
10606 /***************/
10607 /* DREG */
10608 /***************/
/*
 * Rewrite the destination: either rename it to the allocated hreg,
 * fuse the store into the opcode itself, or emit an explicit spill
 * store after the instruction.
 */
10609 regtype = spec [MONO_INST_DEST];
10610 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10611 prev_dreg = -1;
10613 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10614 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10615 MonoInst *store_ins;
10616 int store_opcode;
10617 MonoInst *def_ins = ins;
10618 int dreg = ins->dreg; /* The original vreg */
10620 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10622 if (var->opcode == OP_REGVAR) {
10623 ins->dreg = var->dreg;
10624 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10626 * Instead of emitting a load+store, use a _membase opcode.
10628 g_assert (var->opcode == OP_REGOFFSET);
10629 if (ins->opcode == OP_MOVE) {
10630 NULLIFY_INS (ins);
10631 def_ins = NULL;
10632 } else {
10633 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10634 ins->inst_basereg = var->inst_basereg;
10635 ins->inst_offset = var->inst_offset;
10636 ins->dreg = -1;
10638 spec = INS_INFO (ins->opcode);
10639 } else {
10640 guint32 lvreg;
10642 g_assert (var->opcode == OP_REGOFFSET);
10644 prev_dreg = ins->dreg;
10646 /* Invalidate any previous lvreg for this vreg */
10647 vreg_to_lvreg [ins->dreg] = 0;
10649 lvreg = 0;
10651 #ifdef MONO_ARCH_SOFT_FLOAT
/* With soft float, R8 values live in integer register pairs. */
10652 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10653 regtype = 'l';
10654 store_opcode = OP_STOREI8_MEMBASE_REG;
10656 #endif
10658 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10660 if (regtype == 'l') {
10661 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10662 mono_bblock_insert_after_ins (bb, ins, store_ins);
10663 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10664 mono_bblock_insert_after_ins (bb, ins, store_ins);
10665 def_ins = store_ins;
10667 else {
10668 g_assert (store_opcode != OP_STOREV_MEMBASE);
10670 /* Try to fuse the store into the instruction itself */
10671 /* FIXME: Add more instructions */
10672 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10673 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10674 ins->inst_imm = ins->inst_c0;
10675 ins->inst_destbasereg = var->inst_basereg;
10676 ins->inst_offset = var->inst_offset;
10677 spec = INS_INFO (ins->opcode);
10678 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10679 ins->opcode = store_opcode;
10680 ins->inst_destbasereg = var->inst_basereg;
10681 ins->inst_offset = var->inst_offset;
10683 no_lvreg = TRUE;
/* The instruction became a store; swap dreg/sreg2 like MONO_IS_STORE_MEMBASE above. */
10685 tmp_reg = ins->dreg;
10686 ins->dreg = ins->sreg2;
10687 ins->sreg2 = tmp_reg;
10688 store = TRUE;
10690 spec2 [MONO_INST_DEST] = ' ';
10691 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10692 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10693 spec2 [MONO_INST_SRC3] = ' ';
10694 spec = spec2;
10695 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10696 // FIXME: The backends expect the base reg to be in inst_basereg
10697 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10698 ins->dreg = -1;
10699 ins->inst_basereg = var->inst_basereg;
10700 ins->inst_offset = var->inst_offset;
10701 spec = INS_INFO (ins->opcode);
10702 } else {
10703 /* printf ("INS: "); mono_print_ins (ins); */
10704 /* Create a store instruction */
10705 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10707 /* Insert it after the instruction */
10708 mono_bblock_insert_after_ins (bb, ins, store_ins);
10710 def_ins = store_ins;
10713 * We can't assign ins->dreg to var->dreg here, since the
10714 * sregs could use it. So set a flag, and do it after
10715 * the sregs.
10717 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10718 dest_has_lvreg = TRUE;
/* Record the first definition point for the live range info. */
10723 if (def_ins && !live_range_start [dreg]) {
10724 live_range_start [dreg] = def_ins;
10725 live_range_start_bb [dreg] = bb;
10729 /************/
10730 /* SREGS */
10731 /************/
/*
 * Rewrite the source registers: rename to the allocated hreg, reuse a
 * cached lvreg, fuse the load into the opcode, or emit an explicit
 * spill load before the instruction.
 */
10732 num_sregs = mono_inst_get_src_registers (ins, sregs);
10733 for (srcindex = 0; srcindex < 3; ++srcindex) {
10734 regtype = spec [MONO_INST_SRC1 + srcindex];
10735 sreg = sregs [srcindex];
10737 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10738 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10739 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10740 MonoInst *use_ins = ins;
10741 MonoInst *load_ins;
10742 guint32 load_opcode;
10744 if (var->opcode == OP_REGVAR) {
10745 sregs [srcindex] = var->dreg;
10746 //mono_inst_set_src_registers (ins, sregs);
10747 live_range_end [sreg] = use_ins;
10748 live_range_end_bb [sreg] = bb;
10749 continue;
10752 g_assert (var->opcode == OP_REGOFFSET);
10754 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10756 g_assert (load_opcode != OP_LOADV_MEMBASE);
10758 if (vreg_to_lvreg [sreg]) {
10759 g_assert (vreg_to_lvreg [sreg] != -1);
10761 /* The variable is already loaded to an lvreg */
10762 if (G_UNLIKELY (cfg->verbose_level > 2))
10763 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10764 sregs [srcindex] = vreg_to_lvreg [sreg];
10765 //mono_inst_set_src_registers (ins, sregs);
10766 continue;
10769 /* Try to fuse the load into the instruction */
10770 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10771 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10772 sregs [0] = var->inst_basereg;
10773 //mono_inst_set_src_registers (ins, sregs);
10774 ins->inst_offset = var->inst_offset;
10775 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10776 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10777 sregs [1] = var->inst_basereg;
10778 //mono_inst_set_src_registers (ins, sregs);
10779 ins->inst_offset = var->inst_offset;
10780 } else {
10781 if (MONO_IS_REAL_MOVE (ins)) {
10782 ins->opcode = OP_NOP;
10783 sreg = ins->dreg;
10784 } else {
10785 //printf ("%d ", srcindex); mono_print_ins (ins);
10787 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10789 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10790 if (var->dreg == prev_dreg) {
10792 * sreg refers to the value loaded by the load
10793 * emitted below, but we need to use ins->dreg
10794 * since it refers to the store emitted earlier.
10796 sreg = ins->dreg;
10798 g_assert (sreg != -1);
10799 vreg_to_lvreg [var->dreg] = sreg;
10800 g_assert (lvregs_len < 1024);
10801 lvregs [lvregs_len ++] = var->dreg;
10805 sregs [srcindex] = sreg;
10806 //mono_inst_set_src_registers (ins, sregs);
10808 if (regtype == 'l') {
10809 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10810 mono_bblock_insert_before_ins (bb, ins, load_ins);
10811 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10812 mono_bblock_insert_before_ins (bb, ins, load_ins);
10813 use_ins = load_ins;
10815 else {
10816 #if SIZEOF_REGISTER == 4
10817 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10818 #endif
10819 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10820 mono_bblock_insert_before_ins (bb, ins, load_ins);
10821 use_ins = load_ins;
/* Record the last use point for the live range info. */
10825 if (var->dreg < orig_next_vreg) {
10826 live_range_end [var->dreg] = use_ins;
10827 live_range_end_bb [var->dreg] = bb;
10831 mono_inst_set_src_registers (ins, sregs);
/* Now that the sregs have been processed, it is safe to cache the dreg's lvreg. */
10833 if (dest_has_lvreg) {
10834 g_assert (ins->dreg != -1);
10835 vreg_to_lvreg [prev_dreg] = ins->dreg;
10836 g_assert (lvregs_len < 1024);
10837 lvregs [lvregs_len ++] = prev_dreg;
10838 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above. */
10841 if (store) {
10842 tmp_reg = ins->dreg;
10843 ins->dreg = ins->sreg2;
10844 ins->sreg2 = tmp_reg;
/* Calls clobber all hregs, so any cached lvregs are invalid after them. */
10847 if (MONO_IS_CALL (ins)) {
10848 /* Clear vreg_to_lvreg array */
10849 for (i = 0; i < lvregs_len; i++)
10850 vreg_to_lvreg [lvregs [i]] = 0;
10851 lvregs_len = 0;
10852 } else if (ins->opcode == OP_NOP) {
10853 ins->dreg = -1;
10854 MONO_INST_NULLIFY_SREGS (ins);
10857 if (cfg->verbose_level > 2)
10858 mono_print_ins_index (1, ins);
10862 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10864 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10865 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10867 for (i = 0; i < cfg->num_varinfo; ++i) {
10868 int vreg = MONO_VARINFO (cfg, i)->vreg;
10869 MonoInst *ins;
10871 if (live_range_start [vreg]) {
10872 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10873 ins->inst_c0 = i;
10874 ins->inst_c1 = vreg;
10875 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10877 if (live_range_end [vreg]) {
10878 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10879 ins->inst_c0 = i;
10880 ins->inst_c1 = vreg;
10881 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10884 #endif
10886 g_free (live_range_start);
10887 g_free (live_range_end);
10888 g_free (live_range_start_bb);
10889 g_free (live_range_end_bb);
10893 * FIXME:
10894 * - use 'iadd' instead of 'int_add'
10895 * - handling ovf opcodes: decompose in method_to_ir.
10896 * - unify iregs/fregs
10897 * -> partly done, the missing parts are:
10898 * - a more complete unification would involve unifying the hregs as well, so
10899 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10900 * would no longer map to the machine hregs, so the code generators would need to
10901 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10902 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10903 * fp/non-fp branches speeds it up by about 15%.
10904 * - use sext/zext opcodes instead of shifts
10905 * - add OP_ICALL
10906 * - get rid of TEMPLOADs if possible and use vregs instead
10907 * - clean up usage of OP_P/OP_ opcodes
10908 * - cleanup usage of DUMMY_USE
10909 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10910 * stack
10911 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10912 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10913 * - make sure handle_stack_args () is called before the branch is emitted
10914 * - when the new IR is done, get rid of all unused stuff
10915 * - COMPARE/BEQ as separate instructions or unify them ?
10916 * - keeping them separate allows specialized compare instructions like
10917 * compare_imm, compare_membase
10918 * - most back ends unify fp compare+branch, fp compare+ceq
10919 * - integrate mono_save_args into inline_method
10920 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10921 * - handle long shift opts on 32 bit platforms somehow: they require
10922 * 3 sregs (2 for arg1 and 1 for arg2)
10923 * - make byref a 'normal' type.
10924 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10925 * variable if needed.
10926 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10927 * like inline_method.
10928 * - remove inlining restrictions
10929 * - fix LNEG and enable cfold of INEG
10930 * - generalize x86 optimizations like ldelema as a peephole optimization
10931 * - add store_mem_imm for amd64
10932 * - optimize the loading of the interruption flag in the managed->native wrappers
10933 * - avoid special handling of OP_NOP in passes
10934 * - move code inserting instructions into one function/macro.
10935 * - try a coalescing phase after liveness analysis
10936 * - add float -> vreg conversion + local optimizations on !x86
10937 * - figure out how to handle decomposed branches during optimizations, ie.
10938 * compare+branch, op_jump_table+op_br etc.
10939 * - promote RuntimeXHandles to vregs
10940 * - vtype cleanups:
10941 * - add a NEW_VARLOADA_VREG macro
10942 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10943 * accessing vtype fields.
10944 * - get rid of I8CONST on 64 bit platforms
10945 * - dealing with the increase in code size due to branches created during opcode
10946 * decomposition:
10947 * - use extended basic blocks
10948 * - all parts of the JIT
10949 * - handle_global_vregs () && local regalloc
10950 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10951 * - sources of increase in code size:
10952 * - vtypes
10953 * - long compares
10954 * - isinst and castclass
10955 * - lvregs not allocated to global registers even if used multiple times
10956 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10957 * meaningful.
10958 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10959 * - add all micro optimizations from the old JIT
10960 * - put tree optimizations into the deadce pass
10961 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10962 * specific function.
10963 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10964 * fcompare + branchCC.
10965 * - create a helper function for allocating a stack slot, taking into account
10966 * MONO_CFG_HAS_SPILLUP.
10967 * - merge r68207.
10968 * - merge the ia64 switch changes.
10969 * - optimize mono_regstate2_alloc_int/float.
10970 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10971 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10972 * parts of the tree could be separated by other instructions, killing the tree
10973 * arguments, or stores killing loads etc. Also, should we fold loads into other
10974 * instructions if the result of the load is used multiple times ?
10975 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10976 * - LAST MERGE: 108395.
10977 * - when returning vtypes in registers, generate IR and append it to the end of the
10978 * last bb instead of doing it in the epilog.
10979 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10984 NOTES
10985 -----
10987 - When to decompose opcodes:
10988 - earlier: this makes some optimizations hard to implement, since the low level IR
10989 no longer contains the necessary information. But it is easier to do.
10990 - later: harder to implement, enables more optimizations.
10991 - Branches inside bblocks:
10992 - created when decomposing complex opcodes.
10993 - branches to another bblock: harmless, but not tracked by the branch
10994 optimizations, so need to branch to a label at the start of the bblock.
10995 - branches to inside the same bblock: very problematic, trips up the local
10996 reg allocator. Can be fixed by splitting the current bblock, but that is a
10997 complex operation, since some local vregs can become global vregs etc.
10998 - Local/global vregs:
10999 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11000 local register allocator.
11001 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11002 structure, created by mono_create_var (). Assigned to hregs or the stack by
11003 the global register allocator.
11004 - When to do optimizations like alu->alu_imm:
11005 - earlier -> saves work later on since the IR will be smaller/simpler
11006 - later -> can work on more instructions
11007 - Handling of valuetypes:
11008 - When a vtype is pushed on the stack, a new temporary is created, an
11009 instruction computing its address (LDADDR) is emitted and pushed on
11010 the stack. Need to optimize cases when the vtype is used immediately as in
11011 argument passing, stloc etc.
11012 - Instead of the to_end stuff in the old JIT, simply call the function handling
11013 the values on the stack before emitting the last instruction of the bb.
11016 #endif /* DISABLE_JIT */