/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Author:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
#include <config.h>
#include <signal.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <math.h>
#include <string.h>
#include <ctype.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <mono/metadata/assembly.h>
#include <mono/metadata/loader.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/class.h>
#include <mono/metadata/object.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/opcodes.h>
#include <mono/metadata/mono-endian.h>
#include <mono/metadata/tokentype.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/security-manager.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/monitor.h>
#include <mono/utils/mono-compiler.h>

#include "mini.h"
#include "trace.h"

#include "ir-emit.h"

#include "jit-icalls.h"
#define BRANCH_COST 100
#define INLINE_LENGTH_LIMIT 20
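
/*
 * INLINE_FAILURE aborts inlining of the current callee (only when we are actually
 * inlining, i.e. cfg->method != method, and the callee is not a wrapper) and falls
 * back to emitting a regular call; CHECK_CFG_EXCEPTION bails out of the method once
 * an exception has been recorded on the MonoCompile.
 */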
#define INLINE_FAILURE do {\
		if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
			goto inline_failure;\
	} while (0)
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE)\
			goto exception_exit;\
	} while (0)
#define METHOD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
		cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
		cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
		g_free (method_fname); \
		g_free (cil_method_fname); \
		goto exception_exit; \
	} while (0)
#define FIELD_ACCESS_FAILURE do { \
		char *method_fname = mono_method_full_name (method, TRUE); \
		char *field_fname = mono_field_full_name (field); \
		cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
		cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
		g_free (method_fname); \
		g_free (field_fname); \
		goto exception_exit; \
	} while (0)
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->generic_sharing_context) { \
			if (cfg->verbose_level > 2) \
				printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
			cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
			goto exception_exit; \
		} \
	} while (0)
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))

static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);

int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);

MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);

/* helper methods signature */
extern MonoMethodSignature *helper_sig_class_init_trampoline;
extern MonoMethodSignature *helper_sig_domain_get;
extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
/*
 * Instruction metadata
 */
#ifdef MINI_OP
#undef MINI_OP
#endif
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2,
#define NONE ' '
#define IREG 'i'
#define FREG 'f'
#define VREG 'v'
#define XREG 'x'
#if SIZEOF_REGISTER == 8
#define LREG IREG
#else
#define LREG 'l'
#endif
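
/*
 * ins_info holds three entries per opcode (the register kinds of dreg, sreg1 and
 * sreg2, using the characters defined above); it is generated from mini-ops.h
 * through the MINI_OP macro.
 */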
/* keep in sync with the enum in mini.h */
const char
ins_info[] = {
#include "mini-ops.h"
};
#undef MINI_OP
extern GHashTable *jit_icall_name_hash;

#define MONO_INIT_VARINFO(vi,id) do { \
	(vi)->range.first_use.pos.bid = 0xffff; \
	(vi)->reg = -1; \
	(vi)->idx = (id); \
} while (0)
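
/*
 * Exported wrappers around the alloc_*reg () helpers so that vregs can also be
 * allocated outside this file.
 */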
guint32
mono_alloc_ireg (MonoCompile *cfg)
{
	return alloc_ireg (cfg);
}

guint32
mono_alloc_freg (MonoCompile *cfg)
{
	return alloc_freg (cfg);
}

guint32
mono_alloc_preg (MonoCompile *cfg)
{
	return alloc_preg (cfg);
}

guint32
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
{
	return alloc_dreg (cfg, stack_type);
}
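
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/OP_VMOVE/OP_XMOVE) needed to
 * copy a value of type TYPE between two vregs.
 */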
guint
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
{
	if (type->byref)
		return OP_MOVE;

handle_enum:
	switch (type->type) {
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		return OP_MOVE;
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		return OP_MOVE;
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
		return OP_MOVE;
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
	case MONO_TYPE_FNPTR:
		return OP_MOVE;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return OP_MOVE;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
#if SIZEOF_REGISTER == 8
		return OP_MOVE;
#else
		return OP_LMOVE;
#endif
	case MONO_TYPE_R4:
		return OP_FMOVE;
	case MONO_TYPE_R8:
		return OP_FMOVE;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
			goto handle_enum;
		}
		if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
			return OP_XMOVE;
		return OP_VMOVE;
	case MONO_TYPE_TYPEDBYREF:
		return OP_VMOVE;
	case MONO_TYPE_GENERICINST:
		type = &type->data.generic_class->container_class->byval_arg;
		goto handle_enum;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		g_assert (cfg->generic_sharing_context);
		return OP_MOVE;
	default:
		g_error ("unknown type 0x%02x in type_to_regstore", type->type);
	}
	return -1;
}
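
/*
 * mono_print_bb:
 *
 *   Debugging helper: print the in/out edges of BB and every instruction it contains.
 */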
void
mono_print_bb (MonoBasicBlock *bb, const char *msg)
{
	int i;
	MonoInst *tree;

	printf ("\n%s %d: [IN: ", msg, bb->block_num);
	for (i = 0; i < bb->in_count; ++i)
		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
	printf (", OUT: ");
	for (i = 0; i < bb->out_count; ++i)
		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
	printf (" ]\n");
	for (tree = bb->code; tree; tree = tree->next)
		mono_print_ins_index (-1, tree);
}
/*
 * Can't put this at the beginning, since other files reference stuff from this
 * file.
 */
#ifndef DISABLE_JIT

#define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
#define GET_BBLOCK(cfg,tblock,ip) do { \
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) { \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \
		} \
	} while (0)
273 #if defined(__i386__) || defined(__x86_64__)
274 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
275 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
276 (dest)->dreg = alloc_preg ((cfg)); \
277 (dest)->sreg1 = (sr1); \
278 (dest)->sreg2 = (sr2); \
279 (dest)->inst_imm = (imm); \
280 (dest)->backend.shift_amount = (shift); \
281 MONO_ADD_INS ((cfg)->cbb, (dest)); \
282 } while (0)
283 #endif
#if SIZEOF_REGISTER == 8
#define ADD_WIDEN_OP(ins, arg1, arg2) do { \
		/* FIXME: Need to add many more cases */ \
		if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
			MonoInst *widen; \
			int dr = alloc_preg (cfg); \
			EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
			(ins)->sreg2 = widen->dreg; \
		} \
	} while (0)
#else
#define ADD_WIDEN_OP(ins, arg1, arg2)
#endif
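
/*
 * ADD_BINOP/ADD_UNOP pop their operands off the simulated evaluation stack, pick the
 * type-specific opcode through type_from_op (), allocate a dreg and push the result
 * back, then let mono_decompose_opcode () lower the instruction if needed.
 */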
299 #define ADD_BINOP(op) do { \
300 MONO_INST_NEW (cfg, ins, (op)); \
301 sp -= 2; \
302 ins->sreg1 = sp [0]->dreg; \
303 ins->sreg2 = sp [1]->dreg; \
304 type_from_op (ins, sp [0], sp [1]); \
305 CHECK_TYPE (ins); \
306 /* Have to insert a widening op */ \
307 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
308 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
309 MONO_ADD_INS ((cfg)->cbb, (ins)); \
310 *sp++ = ins; \
311 mono_decompose_opcode ((cfg), (ins)); \
312 } while (0)
314 #define ADD_UNOP(op) do { \
315 MONO_INST_NEW (cfg, ins, (op)); \
316 sp--; \
317 ins->sreg1 = sp [0]->dreg; \
318 type_from_op (ins, sp [0], NULL); \
319 CHECK_TYPE (ins); \
320 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
321 MONO_ADD_INS ((cfg)->cbb, (ins)); \
322 *sp++ = ins; \
323 mono_decompose_opcode (cfg, ins); \
324 } while (0)
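
/*
 * ADD_BINCOND emits an OP_COMPARE followed by the conditional branch in 'ins',
 * linking the current bblock to both the branch target and the fall-through block,
 * and flushes the evaluation stack with handle_stack_args () when it is not empty.
 */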
326 #define ADD_BINCOND(next_block) do { \
327 MonoInst *cmp; \
328 sp -= 2; \
329 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
330 cmp->sreg1 = sp [0]->dreg; \
331 cmp->sreg2 = sp [1]->dreg; \
332 type_from_op (cmp, sp [0], sp [1]); \
333 CHECK_TYPE (cmp); \
334 type_from_op (ins, sp [0], sp [1]); \
335 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
336 GET_BBLOCK (cfg, tblock, target); \
337 link_bblock (cfg, bblock, tblock); \
338 ins->inst_true_bb = tblock; \
339 if ((next_block)) { \
340 link_bblock (cfg, bblock, (next_block)); \
341 ins->inst_false_bb = (next_block); \
342 start_new_bblock = 1; \
343 } else { \
344 GET_BBLOCK (cfg, tblock, ip); \
345 link_bblock (cfg, bblock, tblock); \
346 ins->inst_false_bb = tblock; \
347 start_new_bblock = 2; \
349 if (sp != stack_start) { \
350 handle_stack_args (cfg, stack_start, sp - stack_start); \
351 CHECK_UNVERIFIABLE (cfg); \
353 MONO_ADD_INS (bblock, cmp); \
354 MONO_ADD_INS (bblock, ins); \
355 } while (0)
/**
 * link_bblock: Links two basic blocks
 *
 * Links two basic blocks in the control flow graph: the 'from'
 * argument is the starting block and the 'to' argument is the block
 * control flow reaches after 'from'.
 */
364 static void
365 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
367 MonoBasicBlock **newa;
368 int i, found;
370 #if 0
371 if (from->cil_code) {
372 if (to->cil_code)
373 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
374 else
375 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
376 } else {
377 if (to->cil_code)
378 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
379 else
380 printf ("edge from entry to exit\n");
382 #endif
384 found = FALSE;
385 for (i = 0; i < from->out_count; ++i) {
386 if (to == from->out_bb [i]) {
387 found = TRUE;
388 break;
391 if (!found) {
392 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
393 for (i = 0; i < from->out_count; ++i) {
394 newa [i] = from->out_bb [i];
396 newa [i] = to;
397 from->out_count++;
398 from->out_bb = newa;
401 found = FALSE;
402 for (i = 0; i < to->in_count; ++i) {
403 if (from == to->in_bb [i]) {
404 found = TRUE;
405 break;
408 if (!found) {
409 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
410 for (i = 0; i < to->in_count; ++i) {
411 newa [i] = to->in_bb [i];
413 newa [i] = from;
414 to->in_count++;
415 to->in_bb = newa;
419 void
420 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
422 link_bblock (cfg, from, to);
/*
 * mono_find_block_region:
 *
 * We mark each basic block with a region ID. We use that to avoid BB
 * optimizations when blocks are in different regions.
 *
 * Returns:
 *   A region token that encodes where this region is, and information
 *   about the clause owner for this block.
 *
 * The region encodes the try/catch/filter clause that owns this block
 * as well as the type. -1 is a special value that represents a block
 * that is in none of try/catch/filter.
 */
439 static int
440 mono_find_block_region (MonoCompile *cfg, int offset)
442 MonoMethod *method = cfg->method;
443 MonoMethodHeader *header = mono_method_get_header (method);
444 MonoExceptionClause *clause;
445 int i;
447 /* first search for handlers and filters */
448 for (i = 0; i < header->num_clauses; ++i) {
449 clause = &header->clauses [i];
450 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
451 (offset < (clause->handler_offset)))
452 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
454 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
455 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
456 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
457 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
458 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
459 else
460 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
464 /* search the try blocks */
465 for (i = 0; i < header->num_clauses; ++i) {
466 clause = &header->clauses [i];
467 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
468 return ((i + 1) << 8) | clause->flags;
471 return -1;
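
/*
 * mono_find_final_block:
 *
 *   Collect the handler bblocks of the clauses of type TYPE (e.g. finally clauses)
 * that enclose IP but not TARGET, i.e. the handlers that must run when control
 * leaves their protected region through this branch. Returns them as a GList.
 */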
474 static GList*
475 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
477 MonoMethod *method = cfg->method;
478 MonoMethodHeader *header = mono_method_get_header (method);
479 MonoExceptionClause *clause;
480 MonoBasicBlock *handler;
481 int i;
482 GList *res = NULL;
484 for (i = 0; i < header->num_clauses; ++i) {
485 clause = &header->clauses [i];
486 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
487 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
488 if (clause->flags == type) {
489 handler = cfg->cil_offset_to_bb [clause->handler_offset];
490 g_assert (handler);
491 res = g_list_append (res, handler);
495 return res;
498 static void
499 mono_create_spvar_for_region (MonoCompile *cfg, int region)
501 MonoInst *var;
503 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
504 if (var)
505 return;
507 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
508 /* prevent it from being register allocated */
509 var->flags |= MONO_INST_INDIRECT;
511 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
514 static MonoInst *
515 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
517 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
520 static MonoInst*
521 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
523 MonoInst *var;
525 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
526 if (var)
527 return var;
529 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
530 /* prevent it from being register allocated */
531 var->flags |= MONO_INST_INDIRECT;
533 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
535 return var;
/*
 * Returns the type used in the eval stack when @type is loaded.
 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
 */
542 void
543 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
545 MonoClass *klass;
547 inst->klass = klass = mono_class_from_mono_type (type);
548 if (type->byref) {
549 inst->type = STACK_MP;
550 return;
553 handle_enum:
554 switch (type->type) {
555 case MONO_TYPE_VOID:
556 inst->type = STACK_INV;
557 return;
558 case MONO_TYPE_I1:
559 case MONO_TYPE_U1:
560 case MONO_TYPE_BOOLEAN:
561 case MONO_TYPE_I2:
562 case MONO_TYPE_U2:
563 case MONO_TYPE_CHAR:
564 case MONO_TYPE_I4:
565 case MONO_TYPE_U4:
566 inst->type = STACK_I4;
567 return;
568 case MONO_TYPE_I:
569 case MONO_TYPE_U:
570 case MONO_TYPE_PTR:
571 case MONO_TYPE_FNPTR:
572 inst->type = STACK_PTR;
573 return;
574 case MONO_TYPE_CLASS:
575 case MONO_TYPE_STRING:
576 case MONO_TYPE_OBJECT:
577 case MONO_TYPE_SZARRAY:
578 case MONO_TYPE_ARRAY:
579 inst->type = STACK_OBJ;
580 return;
581 case MONO_TYPE_I8:
582 case MONO_TYPE_U8:
583 inst->type = STACK_I8;
584 return;
585 case MONO_TYPE_R4:
586 case MONO_TYPE_R8:
587 inst->type = STACK_R8;
588 return;
589 case MONO_TYPE_VALUETYPE:
590 if (type->data.klass->enumtype) {
591 type = mono_class_enum_basetype (type->data.klass);
592 goto handle_enum;
593 } else {
594 inst->klass = klass;
595 inst->type = STACK_VTYPE;
596 return;
598 case MONO_TYPE_TYPEDBYREF:
599 inst->klass = mono_defaults.typed_reference_class;
600 inst->type = STACK_VTYPE;
601 return;
602 case MONO_TYPE_GENERICINST:
603 type = &type->data.generic_class->container_class->byval_arg;
604 goto handle_enum;
605 case MONO_TYPE_VAR :
606 case MONO_TYPE_MVAR :
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
611 g_assert (cfg->generic_sharing_context);
612 inst->type = STACK_OBJ;
613 return;
614 default:
615 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * The following tables are used to quickly validate the IL code in type_from_op ().
 */
622 static const char
623 bin_num_table [STACK_MAX] [STACK_MAX] = {
624 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
625 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
626 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
627 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
628 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
629 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
630 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
631 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
634 static const char
635 neg_table [] = {
636 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
639 /* reduce the size of this table */
640 static const char
641 bin_int_table [STACK_MAX] [STACK_MAX] = {
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
652 static const char
653 bin_comp_table [STACK_MAX] [STACK_MAX] = {
654 /* Inv i L p F & O vt */
655 {0},
656 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
657 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
658 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
659 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
660 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
661 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
662 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
665 /* reduce the size of this table */
666 static const char
667 shift_table [STACK_MAX] [STACK_MAX] = {
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/*
 * Tables to map from the non-specific opcode to the matching
 * type-specific opcode.
 */
682 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
683 static const guint16
684 binops_op_map [STACK_MAX] = {
685 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
688 /* handles from CEE_NEG to CEE_CONV_U8 */
689 static const guint16
690 unops_op_map [STACK_MAX] = {
691 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
694 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
695 static const guint16
696 ovfops_op_map [STACK_MAX] = {
697 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
700 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
701 static const guint16
702 ovf2ops_op_map [STACK_MAX] = {
703 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
706 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
707 static const guint16
708 ovf3ops_op_map [STACK_MAX] = {
709 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
712 /* handles from CEE_BEQ to CEE_BLT_UN */
713 static const guint16
714 beqops_op_map [STACK_MAX] = {
715 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
718 /* handles from CEE_CEQ to CEE_CLT_UN */
719 static const guint16
720 ceqops_op_map [STACK_MAX] = {
721 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * Sets ins->type (the type on the eval stack) according to the
 * type of the opcode and the arguments to it.
 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
 *
 * FIXME: this function sets ins->type unconditionally in some cases, but
 * it should set it to invalid for some types (a conv.x on an object)
 */
732 static void
733 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
735 switch (ins->opcode) {
736 /* binops */
737 case CEE_ADD:
738 case CEE_SUB:
739 case CEE_MUL:
740 case CEE_DIV:
741 case CEE_REM:
742 /* FIXME: check unverifiable args for STACK_MP */
743 ins->type = bin_num_table [src1->type] [src2->type];
744 ins->opcode += binops_op_map [ins->type];
745 break;
746 case CEE_DIV_UN:
747 case CEE_REM_UN:
748 case CEE_AND:
749 case CEE_OR:
750 case CEE_XOR:
751 ins->type = bin_int_table [src1->type] [src2->type];
752 ins->opcode += binops_op_map [ins->type];
753 break;
754 case CEE_SHL:
755 case CEE_SHR:
756 case CEE_SHR_UN:
757 ins->type = shift_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
759 break;
760 case OP_COMPARE:
761 case OP_LCOMPARE:
762 case OP_ICOMPARE:
763 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
764 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
765 ins->opcode = OP_LCOMPARE;
766 else if (src1->type == STACK_R8)
767 ins->opcode = OP_FCOMPARE;
768 else
769 ins->opcode = OP_ICOMPARE;
770 break;
771 case OP_ICOMPARE_IMM:
772 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
773 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
774 ins->opcode = OP_LCOMPARE_IMM;
775 break;
776 case CEE_BEQ:
777 case CEE_BGE:
778 case CEE_BGT:
779 case CEE_BLE:
780 case CEE_BLT:
781 case CEE_BNE_UN:
782 case CEE_BGE_UN:
783 case CEE_BGT_UN:
784 case CEE_BLE_UN:
785 case CEE_BLT_UN:
786 ins->opcode += beqops_op_map [src1->type];
787 break;
788 case OP_CEQ:
789 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
790 ins->opcode += ceqops_op_map [src1->type];
791 break;
792 case OP_CGT:
793 case OP_CGT_UN:
794 case OP_CLT:
795 case OP_CLT_UN:
796 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
797 ins->opcode += ceqops_op_map [src1->type];
798 break;
799 /* unops */
800 case CEE_NEG:
801 ins->type = neg_table [src1->type];
802 ins->opcode += unops_op_map [ins->type];
803 break;
804 case CEE_NOT:
805 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
806 ins->type = src1->type;
807 else
808 ins->type = STACK_INV;
809 ins->opcode += unops_op_map [ins->type];
810 break;
811 case CEE_CONV_I1:
812 case CEE_CONV_I2:
813 case CEE_CONV_I4:
814 case CEE_CONV_U4:
815 ins->type = STACK_I4;
816 ins->opcode += unops_op_map [src1->type];
817 break;
818 case CEE_CONV_R_UN:
819 ins->type = STACK_R8;
820 switch (src1->type) {
821 case STACK_I4:
822 case STACK_PTR:
823 ins->opcode = OP_ICONV_TO_R_UN;
824 break;
825 case STACK_I8:
826 ins->opcode = OP_LCONV_TO_R_UN;
827 break;
829 break;
830 case CEE_CONV_OVF_I1:
831 case CEE_CONV_OVF_U1:
832 case CEE_CONV_OVF_I2:
833 case CEE_CONV_OVF_U2:
834 case CEE_CONV_OVF_I4:
835 case CEE_CONV_OVF_U4:
836 ins->type = STACK_I4;
837 ins->opcode += ovf3ops_op_map [src1->type];
838 break;
839 case CEE_CONV_OVF_I_UN:
840 case CEE_CONV_OVF_U_UN:
841 ins->type = STACK_PTR;
842 ins->opcode += ovf2ops_op_map [src1->type];
843 break;
844 case CEE_CONV_OVF_I1_UN:
845 case CEE_CONV_OVF_I2_UN:
846 case CEE_CONV_OVF_I4_UN:
847 case CEE_CONV_OVF_U1_UN:
848 case CEE_CONV_OVF_U2_UN:
849 case CEE_CONV_OVF_U4_UN:
850 ins->type = STACK_I4;
851 ins->opcode += ovf2ops_op_map [src1->type];
852 break;
853 case CEE_CONV_U:
854 ins->type = STACK_PTR;
855 switch (src1->type) {
856 case STACK_I4:
857 ins->opcode = OP_ICONV_TO_U;
858 break;
859 case STACK_PTR:
860 case STACK_MP:
861 #if SIZEOF_REGISTER == 8
862 ins->opcode = OP_LCONV_TO_U;
863 #else
864 ins->opcode = OP_MOVE;
865 #endif
866 break;
867 case STACK_I8:
868 ins->opcode = OP_LCONV_TO_U;
869 break;
870 case STACK_R8:
871 ins->opcode = OP_FCONV_TO_U;
872 break;
874 break;
875 case CEE_CONV_I8:
876 case CEE_CONV_U8:
877 ins->type = STACK_I8;
878 ins->opcode += unops_op_map [src1->type];
879 break;
880 case CEE_CONV_OVF_I8:
881 case CEE_CONV_OVF_U8:
882 ins->type = STACK_I8;
883 ins->opcode += ovf3ops_op_map [src1->type];
884 break;
885 case CEE_CONV_OVF_U8_UN:
886 case CEE_CONV_OVF_I8_UN:
887 ins->type = STACK_I8;
888 ins->opcode += ovf2ops_op_map [src1->type];
889 break;
890 case CEE_CONV_R4:
891 case CEE_CONV_R8:
892 ins->type = STACK_R8;
893 ins->opcode += unops_op_map [src1->type];
894 break;
895 case OP_CKFINITE:
896 ins->type = STACK_R8;
897 break;
898 case CEE_CONV_U2:
899 case CEE_CONV_U1:
900 ins->type = STACK_I4;
901 ins->opcode += ovfops_op_map [src1->type];
902 break;
903 case CEE_CONV_I:
904 case CEE_CONV_OVF_I:
905 case CEE_CONV_OVF_U:
906 ins->type = STACK_PTR;
907 ins->opcode += ovfops_op_map [src1->type];
908 break;
909 case CEE_ADD_OVF:
910 case CEE_ADD_OVF_UN:
911 case CEE_MUL_OVF:
912 case CEE_MUL_OVF_UN:
913 case CEE_SUB_OVF:
914 case CEE_SUB_OVF_UN:
915 ins->type = bin_num_table [src1->type] [src2->type];
916 ins->opcode += ovfops_op_map [src1->type];
917 if (ins->type == STACK_R8)
918 ins->type = STACK_INV;
919 break;
920 case OP_LOAD_MEMBASE:
921 ins->type = STACK_PTR;
922 break;
923 case OP_LOADI1_MEMBASE:
924 case OP_LOADU1_MEMBASE:
925 case OP_LOADI2_MEMBASE:
926 case OP_LOADU2_MEMBASE:
927 case OP_LOADI4_MEMBASE:
928 case OP_LOADU4_MEMBASE:
929 ins->type = STACK_PTR;
930 break;
931 case OP_LOADI8_MEMBASE:
932 ins->type = STACK_I8;
933 break;
934 case OP_LOADR4_MEMBASE:
935 case OP_LOADR8_MEMBASE:
936 ins->type = STACK_R8;
937 break;
938 default:
939 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
940 break;
943 if (ins->type == STACK_MP)
944 ins->klass = mono_defaults.object_class;
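
/* Maps the ldind.* opcodes (CEE_LDIND_I1 ... CEE_LDIND_REF) to the eval stack type of the loaded value */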
947 static const char
948 ldind_type [] = {
949 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
952 #if 0
954 static const char
955 param_table [STACK_MAX] [STACK_MAX] = {
956 {0},
959 static int
960 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
961 int i;
963 if (sig->hasthis) {
964 switch (args->type) {
965 case STACK_I4:
966 case STACK_I8:
967 case STACK_R8:
968 case STACK_VTYPE:
969 case STACK_INV:
970 return 0;
972 args++;
974 for (i = 0; i < sig->param_count; ++i) {
975 switch (args [i].type) {
976 case STACK_INV:
977 return 0;
978 case STACK_MP:
979 if (!sig->params [i]->byref)
980 return 0;
981 continue;
982 case STACK_OBJ:
983 if (sig->params [i]->byref)
984 return 0;
985 switch (sig->params [i]->type) {
986 case MONO_TYPE_CLASS:
987 case MONO_TYPE_STRING:
988 case MONO_TYPE_OBJECT:
989 case MONO_TYPE_SZARRAY:
990 case MONO_TYPE_ARRAY:
991 break;
992 default:
993 return 0;
995 continue;
996 case STACK_R8:
997 if (sig->params [i]->byref)
998 return 0;
999 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1000 return 0;
1001 continue;
1002 case STACK_PTR:
1003 case STACK_I4:
1004 case STACK_I8:
1005 case STACK_VTYPE:
1006 break;
1008 /*if (!param_table [args [i].type] [sig->params [i]->type])
1009 return 0;*/
1011 return 1;
1013 #endif
/*
 * When we need a pointer to the current domain many times in a method, we
 * call mono_domain_get() once and we store the result in a local variable.
 * This function returns the variable that represents the MonoDomain*.
 */
1020 inline static MonoInst *
1021 mono_get_domainvar (MonoCompile *cfg)
1023 if (!cfg->domainvar)
1024 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1025 return cfg->domainvar;
/*
 * The got_var contains the address of the Global Offset Table when AOT
 * compiling.
 */
1032 inline static MonoInst *
1033 mono_get_got_var (MonoCompile *cfg)
1035 #ifdef MONO_ARCH_NEED_GOT_VAR
1036 if (!cfg->compile_aot)
1037 return NULL;
1038 if (!cfg->got_var) {
1039 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1041 return cfg->got_var;
1042 #else
1043 return NULL;
1044 #endif
1047 static MonoInst *
1048 mono_get_vtable_var (MonoCompile *cfg)
1050 g_assert (cfg->generic_sharing_context);
1052 if (!cfg->rgctx_var) {
1053 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1054 /* force the var to be stack allocated */
1055 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1058 return cfg->rgctx_var;
1061 static MonoType*
1062 type_from_stack_type (MonoInst *ins) {
1063 switch (ins->type) {
1064 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1065 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1066 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1067 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1068 case STACK_MP:
1069 return &ins->klass->this_arg;
1070 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1071 case STACK_VTYPE: return &ins->klass->byval_arg;
1072 default:
1073 g_error ("stack type %d to monotype not handled\n", ins->type);
1075 return NULL;
1078 static G_GNUC_UNUSED int
1079 type_to_stack_type (MonoType *t)
1081 switch (mono_type_get_underlying_type (t)->type) {
1082 case MONO_TYPE_I1:
1083 case MONO_TYPE_U1:
1084 case MONO_TYPE_BOOLEAN:
1085 case MONO_TYPE_I2:
1086 case MONO_TYPE_U2:
1087 case MONO_TYPE_CHAR:
1088 case MONO_TYPE_I4:
1089 case MONO_TYPE_U4:
1090 return STACK_I4;
1091 case MONO_TYPE_I:
1092 case MONO_TYPE_U:
1093 case MONO_TYPE_PTR:
1094 case MONO_TYPE_FNPTR:
1095 return STACK_PTR;
1096 case MONO_TYPE_CLASS:
1097 case MONO_TYPE_STRING:
1098 case MONO_TYPE_OBJECT:
1099 case MONO_TYPE_SZARRAY:
1100 case MONO_TYPE_ARRAY:
1101 return STACK_OBJ;
1102 case MONO_TYPE_I8:
1103 case MONO_TYPE_U8:
1104 return STACK_I8;
1105 case MONO_TYPE_R4:
1106 case MONO_TYPE_R8:
1107 return STACK_R8;
1108 case MONO_TYPE_VALUETYPE:
1109 case MONO_TYPE_TYPEDBYREF:
1110 return STACK_VTYPE;
1111 case MONO_TYPE_GENERICINST:
1112 if (mono_type_generic_inst_is_valuetype (t))
1113 return STACK_VTYPE;
1114 else
1115 return STACK_OBJ;
1116 break;
1117 default:
1118 g_assert_not_reached ();
1121 return -1;
1124 static MonoClass*
1125 array_access_to_klass (int opcode)
1127 switch (opcode) {
1128 case CEE_LDELEM_U1:
1129 return mono_defaults.byte_class;
1130 case CEE_LDELEM_U2:
1131 return mono_defaults.uint16_class;
1132 case CEE_LDELEM_I:
1133 case CEE_STELEM_I:
1134 return mono_defaults.int_class;
1135 case CEE_LDELEM_I1:
1136 case CEE_STELEM_I1:
1137 return mono_defaults.sbyte_class;
1138 case CEE_LDELEM_I2:
1139 case CEE_STELEM_I2:
1140 return mono_defaults.int16_class;
1141 case CEE_LDELEM_I4:
1142 case CEE_STELEM_I4:
1143 return mono_defaults.int32_class;
1144 case CEE_LDELEM_U4:
1145 return mono_defaults.uint32_class;
1146 case CEE_LDELEM_I8:
1147 case CEE_STELEM_I8:
1148 return mono_defaults.int64_class;
1149 case CEE_LDELEM_R4:
1150 case CEE_STELEM_R4:
1151 return mono_defaults.single_class;
1152 case CEE_LDELEM_R8:
1153 case CEE_STELEM_R8:
1154 return mono_defaults.double_class;
1155 case CEE_LDELEM_REF:
1156 case CEE_STELEM_REF:
1157 return mono_defaults.object_class;
1158 default:
1159 g_assert_not_reached ();
1161 return NULL;
/*
 * We try to share variables when possible
 */
1167 static MonoInst *
1168 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1170 MonoInst *res;
1171 int pos, vnum;
1173 /* inlining can result in deeper stacks */
1174 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1175 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1177 pos = ins->type - 1 + slot * STACK_MAX;
1179 switch (ins->type) {
1180 case STACK_I4:
1181 case STACK_I8:
1182 case STACK_R8:
1183 case STACK_PTR:
1184 case STACK_MP:
1185 case STACK_OBJ:
1186 if ((vnum = cfg->intvars [pos]))
1187 return cfg->varinfo [vnum];
1188 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1189 cfg->intvars [pos] = res->inst_c0;
1190 break;
1191 default:
1192 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1194 return res;
1197 static void
1198 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
1205 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1206 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1207 jump_info_token->image = image;
1208 jump_info_token->token = token;
1209 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single join point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the join point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
1225 static void
1226 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1228 int i, bindex;
1229 MonoBasicBlock *bb = cfg->cbb;
1230 MonoBasicBlock *outb;
1231 MonoInst *inst, **locals;
1232 gboolean found;
1234 if (!count)
1235 return;
1236 if (cfg->verbose_level > 3)
1237 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1238 if (!bb->out_scount) {
1239 bb->out_scount = count;
1240 //printf ("bblock %d has out:", bb->block_num);
1241 found = FALSE;
1242 for (i = 0; i < bb->out_count; ++i) {
1243 outb = bb->out_bb [i];
1244 /* exception handlers are linked, but they should not be considered for stack args */
1245 if (outb->flags & BB_EXCEPTION_HANDLER)
1246 continue;
1247 //printf (" %d", outb->block_num);
1248 if (outb->in_stack) {
1249 found = TRUE;
1250 bb->out_stack = outb->in_stack;
1251 break;
1254 //printf ("\n");
1255 if (!found) {
1256 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1257 for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpose, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * slot as well.
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 * example.
			 */
1270 if (cfg->inlined_method)
1271 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1272 else
1273 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1278 for (i = 0; i < bb->out_count; ++i) {
1279 outb = bb->out_bb [i];
1280 /* exception handlers are linked, but they should not be considered for stack args */
1281 if (outb->flags & BB_EXCEPTION_HANDLER)
1282 continue;
1283 if (outb->in_scount) {
1284 if (outb->in_scount != bb->out_scount) {
1285 cfg->unverifiable = TRUE;
1286 return;
1288 continue; /* check they are the same locals */
1290 outb->in_scount = count;
1291 outb->in_stack = bb->out_stack;
1294 locals = bb->out_stack;
1295 cfg->cbb = bb;
1296 for (i = 0; i < count; ++i) {
1297 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1298 inst->cil_code = sp [i]->cil_code;
1299 sp [i] = locals [i];
1300 if (cfg->verbose_level > 3)
1301 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
1310 found = TRUE;
1311 bindex = 0;
1312 while (found) {
1313 /* Find a bblock which has a different in_stack */
1314 found = FALSE;
1315 while (bindex < bb->out_count) {
1316 outb = bb->out_bb [bindex];
1317 /* exception handlers are linked, but they should not be considered for stack args */
1318 if (outb->flags & BB_EXCEPTION_HANDLER) {
1319 bindex++;
1320 continue;
1322 if (outb->in_stack != locals) {
1323 for (i = 0; i < count; ++i) {
1324 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1325 inst->cil_code = sp [i]->cil_code;
1326 sp [i] = locals [i];
1327 if (cfg->verbose_level > 3)
1328 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1330 locals = outb->in_stack;
1331 found = TRUE;
1332 break;
1334 bindex ++;
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
1342 static void
1343 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1345 if (cfg->compile_aot) {
1346 int ioffset_reg = alloc_preg (cfg);
1347 int iid_reg = alloc_preg (cfg);
1349 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1350 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1353 else {
1354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
1362 static void
1363 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1365 int ibitmap_reg = alloc_preg (cfg);
1366 int ibitmap_byte_reg = alloc_preg (cfg);
1368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1370 if (cfg->compile_aot) {
1371 int iid_reg = alloc_preg (cfg);
1372 int shifted_iid_reg = alloc_preg (cfg);
1373 int ibitmap_byte_address_reg = alloc_preg (cfg);
1374 int masked_iid_reg = alloc_preg (cfg);
1375 int iid_one_bit_reg = alloc_preg (cfg);
1376 int iid_bit_reg = alloc_preg (cfg);
1377 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1378 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1379 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1380 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1382 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1383 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1384 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1385 } else {
1386 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
1395 static void
1396 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1398 int ibitmap_reg = alloc_preg (cfg);
1399 int ibitmap_byte_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1403 if (cfg->compile_aot) {
1404 int iid_reg = alloc_preg (cfg);
1405 int shifted_iid_reg = alloc_preg (cfg);
1406 int ibitmap_byte_address_reg = alloc_preg (cfg);
1407 int masked_iid_reg = alloc_preg (cfg);
1408 int iid_one_bit_reg = alloc_preg (cfg);
1409 int iid_bit_reg = alloc_preg (cfg);
1410 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1412 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1413 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1415 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1416 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1417 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1418 } else {
1419 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Emit code which checks whether the interface id of @klass is smaller than
 * the value given by max_iid_reg.
 */
1428 static void
1429 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1430 MonoBasicBlock *false_target)
1432 if (cfg->compile_aot) {
1433 int iid_reg = alloc_preg (cfg);
1434 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1435 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1437 else
1438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1439 if (false_target)
1440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1441 else
1442 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1445 /* Same as above, but obtains max_iid from a vtable */
1446 static void
1447 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1448 MonoBasicBlock *false_target)
1450 int max_iid_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1453 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1456 /* Same as above, but obtains max_iid from a klass */
1457 static void
1458 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1459 MonoBasicBlock *false_target)
1461 int max_iid_reg = alloc_preg (cfg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1464 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
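
/*
 * Emit an inlined subclass check on the MonoClass stored in "klass_reg": branch to
 * "true_target" when it is a subclass of "klass" (by comparing the entry in its
 * supertypes array), and to "false_target" when the supertype table is too shallow
 * for a match.
 */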
1467 static void
1468 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1470 int idepth_reg = alloc_preg (cfg);
1471 int stypes_reg = alloc_preg (cfg);
1472 int stype = alloc_preg (cfg);
1474 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1477 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1479 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1480 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1481 if (cfg->compile_aot) {
1482 int const_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1484 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1485 } else {
1486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1488 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1491 static void
1492 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1494 int intf_reg = alloc_preg (cfg);
1496 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1497 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1499 if (true_target)
1500 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1501 else
1502 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
1508 static void
1509 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1511 int intf_bit_reg = alloc_preg (cfg);
1513 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1514 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1516 if (true_target)
1517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1518 else
1519 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1522 static inline void
1523 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1525 if (cfg->compile_aot) {
1526 int const_reg = alloc_preg (cfg);
1527 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1528 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1529 } else {
1530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1532 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1535 static inline void
1536 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1538 if (cfg->compile_aot) {
1539 int const_reg = alloc_preg (cfg);
1540 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1541 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1542 } else {
1543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1545 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1548 static void
1549 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1551 if (klass->rank) {
1552 int rank_reg = alloc_preg (cfg);
1553 int eclass_reg = alloc_preg (cfg);
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1557 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1558 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1560 if (klass->cast_class == mono_defaults.object_class) {
1561 int parent_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1563 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1564 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1565 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1566 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1567 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1568 } else if (klass->cast_class == mono_defaults.enum_class) {
1569 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1570 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1571 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1572 } else {
1573 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1574 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1577 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1578 /* Check that the object is a vector too */
1579 int bounds_reg = alloc_preg (cfg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1582 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1584 } else {
1585 int idepth_reg = alloc_preg (cfg);
1586 int stypes_reg = alloc_preg (cfg);
1587 int stype = alloc_preg (cfg);
1589 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1590 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1592 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1596 mini_emit_class_check (cfg, stype, klass);
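
/*
 * mini_emit_memset:
 *
 *   Inline a memset of SIZE bytes at DESTREG+OFFSET with value VAL (only 0 is
 * supported), using the widest stores that ALIGN permits.
 */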
1600 static void
1601 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1603 int val_reg;
1605 g_assert (val == 0);
1607 if (align == 0)
1608 align = 4;
1610 if ((size <= 4) && (size <= align)) {
1611 switch (size) {
1612 case 1:
1613 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1614 return;
1615 case 2:
1616 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1617 return;
1618 case 4:
1619 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1620 return;
1621 #if SIZEOF_REGISTER == 8
1622 case 8:
1623 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1624 return;
1625 #endif
1629 val_reg = alloc_preg (cfg);
1631 if (SIZEOF_REGISTER == 8)
1632 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1633 else
1634 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1636 if (align < 4) {
		/* This could be optimized further if necessary */
1638 while (size >= 1) {
1639 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1640 offset += 1;
1641 size -= 1;
1643 return;
1646 #if !NO_UNALIGNED_ACCESS
1647 if (SIZEOF_REGISTER == 8) {
1648 if (offset % 8) {
1649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1650 offset += 4;
1651 size -= 4;
1653 while (size >= 8) {
1654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1655 offset += 8;
1656 size -= 8;
1659 #endif
1661 while (size >= 4) {
1662 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1663 offset += 4;
1664 size -= 4;
1666 while (size >= 2) {
1667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1668 offset += 2;
1669 size -= 2;
1671 while (size >= 1) {
1672 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1673 offset += 1;
1674 size -= 1;
1678 #endif /* DISABLE_JIT */
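
/*
 * mini_emit_memcpy:
 *
 *   Inline a copy of SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET, using loads
 * and stores as wide as ALIGN allows (byte-by-byte when ALIGN < 4).
 */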
1680 void
1681 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1683 int cur_reg;
1685 if (align == 0)
1686 align = 4;
1688 if (align < 4) {
		/* This could be optimized further if necessary */
1690 while (size >= 1) {
1691 cur_reg = alloc_preg (cfg);
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1694 doffset += 1;
1695 soffset += 1;
1696 size -= 1;
1700 #if !NO_UNALIGNED_ACCESS
1701 if (SIZEOF_REGISTER == 8) {
1702 while (size >= 8) {
1703 cur_reg = alloc_preg (cfg);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1706 doffset += 8;
1707 soffset += 8;
1708 size -= 8;
1711 #endif
1713 while (size >= 4) {
1714 cur_reg = alloc_preg (cfg);
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1716 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1717 doffset += 4;
1718 soffset += 4;
1719 size -= 4;
1721 while (size >= 2) {
1722 cur_reg = alloc_preg (cfg);
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1725 doffset += 2;
1726 soffset += 2;
1727 size -= 2;
1729 while (size >= 1) {
1730 cur_reg = alloc_preg (cfg);
1731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1733 doffset += 1;
1734 soffset += 1;
1735 size -= 1;
1739 #ifndef DISABLE_JIT
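
/*
 * ret_type_to_call_opcode:
 *
 *   Pick the call opcode variant matching the return type: the _REG forms for calli,
 * the VIRT forms for virtual calls, and the VOID/L/F/V families for void, long,
 * floating point and valuetype returns respectively.
 */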
1741 static int
1742 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1744 if (type->byref)
1745 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1747 handle_enum:
1748 type = mini_get_basic_type_from_generic (gsctx, type);
1749 switch (type->type) {
1750 case MONO_TYPE_VOID:
1751 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1752 case MONO_TYPE_I1:
1753 case MONO_TYPE_U1:
1754 case MONO_TYPE_BOOLEAN:
1755 case MONO_TYPE_I2:
1756 case MONO_TYPE_U2:
1757 case MONO_TYPE_CHAR:
1758 case MONO_TYPE_I4:
1759 case MONO_TYPE_U4:
1760 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1761 case MONO_TYPE_I:
1762 case MONO_TYPE_U:
1763 case MONO_TYPE_PTR:
1764 case MONO_TYPE_FNPTR:
1765 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1766 case MONO_TYPE_CLASS:
1767 case MONO_TYPE_STRING:
1768 case MONO_TYPE_OBJECT:
1769 case MONO_TYPE_SZARRAY:
1770 case MONO_TYPE_ARRAY:
1771 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1772 case MONO_TYPE_I8:
1773 case MONO_TYPE_U8:
1774 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1775 case MONO_TYPE_R4:
1776 case MONO_TYPE_R8:
1777 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1778 case MONO_TYPE_VALUETYPE:
1779 if (type->data.klass->enumtype) {
1780 type = mono_class_enum_basetype (type->data.klass);
1781 goto handle_enum;
1782 } else
1783 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1784 case MONO_TYPE_TYPEDBYREF:
1785 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1786 case MONO_TYPE_GENERICINST:
1787 type = &type->data.generic_class->container_class->byval_arg;
1788 goto handle_enum;
1789 default:
1790 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1792 return -1;
1796 * target_type_is_incompatible:
1797 * @cfg: MonoCompile context
1799 * Check that the item @arg on the evaluation stack can be stored
1800 * in the target type (can be a local, or field, etc).
1801 * The cfg arg can be used to check if we need verification or just
1802 * validity checks.
1804 * Returns: non-0 value if arg can't be stored in the target.
1806 static int
1807 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1809 MonoType *simple_type;
1810 MonoClass *klass;
1812 if (target->byref) {
1813 /* FIXME: check that the pointed to types match */
1814 if (arg->type == STACK_MP)
1815 return arg->klass != mono_class_from_mono_type (target);
1816 if (arg->type == STACK_PTR)
1817 return 0;
1818 return 1;
1821 simple_type = mono_type_get_underlying_type (target);
1822 switch (simple_type->type) {
1823 case MONO_TYPE_VOID:
1824 return 1;
1825 case MONO_TYPE_I1:
1826 case MONO_TYPE_U1:
1827 case MONO_TYPE_BOOLEAN:
1828 case MONO_TYPE_I2:
1829 case MONO_TYPE_U2:
1830 case MONO_TYPE_CHAR:
1831 case MONO_TYPE_I4:
1832 case MONO_TYPE_U4:
1833 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1834 return 1;
1835 return 0;
1836 case MONO_TYPE_PTR:
1837 /* STACK_MP is needed when setting pinned locals */
1838 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1839 return 1;
1840 return 0;
1841 case MONO_TYPE_I:
1842 case MONO_TYPE_U:
1843 case MONO_TYPE_FNPTR:
1844 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1845 return 1;
1846 return 0;
1847 case MONO_TYPE_CLASS:
1848 case MONO_TYPE_STRING:
1849 case MONO_TYPE_OBJECT:
1850 case MONO_TYPE_SZARRAY:
1851 case MONO_TYPE_ARRAY:
1852 if (arg->type != STACK_OBJ)
1853 return 1;
1854 /* FIXME: check type compatibility */
1855 return 0;
1856 case MONO_TYPE_I8:
1857 case MONO_TYPE_U8:
1858 if (arg->type != STACK_I8)
1859 return 1;
1860 return 0;
1861 case MONO_TYPE_R4:
1862 case MONO_TYPE_R8:
1863 if (arg->type != STACK_R8)
1864 return 1;
1865 return 0;
1866 case MONO_TYPE_VALUETYPE:
1867 if (arg->type != STACK_VTYPE)
1868 return 1;
1869 klass = mono_class_from_mono_type (simple_type);
1870 if (klass != arg->klass)
1871 return 1;
1872 return 0;
1873 case MONO_TYPE_TYPEDBYREF:
1874 if (arg->type != STACK_VTYPE)
1875 return 1;
1876 klass = mono_class_from_mono_type (simple_type);
1877 if (klass != arg->klass)
1878 return 1;
1879 return 0;
1880 case MONO_TYPE_GENERICINST:
1881 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1882 if (arg->type != STACK_VTYPE)
1883 return 1;
1884 klass = mono_class_from_mono_type (simple_type);
1885 if (klass != arg->klass)
1886 return 1;
1887 return 0;
1888 } else {
1889 if (arg->type != STACK_OBJ)
1890 return 1;
1891 /* FIXME: check type compatibility */
1892 return 0;
1894 case MONO_TYPE_VAR:
1895 case MONO_TYPE_MVAR:
1896 /* FIXME: all the arguments must be references for now,
1897 * later look inside cfg and see if the arg num is
1898 * really a reference
1900 g_assert (cfg->generic_sharing_context);
1901 if (arg->type != STACK_OBJ)
1902 return 1;
1903 return 0;
1904 default:
1905 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1907 return 1;
1911 * Prepare arguments for passing to a function call.
1912 * Return a non-zero value if the arguments can't be passed to the given
1913 * signature.
1914 * The type checks are not yet complete and some conversions may need
1915 * casts on 32 or 64 bit architectures.
1917 * FIXME: implement this using target_type_is_incompatible ()
1919 static int
1920 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1922 MonoType *simple_type;
1923 int i;
1925 if (sig->hasthis) {
1926 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1927 return 1;
1928 args++;
1930 for (i = 0; i < sig->param_count; ++i) {
1931 if (sig->params [i]->byref) {
1932 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1933 return 1;
1934 continue;
1936 simple_type = sig->params [i];
1937 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1938 handle_enum:
1939 switch (simple_type->type) {
1940 case MONO_TYPE_VOID:
1941 return 1;
1942 continue;
1943 case MONO_TYPE_I1:
1944 case MONO_TYPE_U1:
1945 case MONO_TYPE_BOOLEAN:
1946 case MONO_TYPE_I2:
1947 case MONO_TYPE_U2:
1948 case MONO_TYPE_CHAR:
1949 case MONO_TYPE_I4:
1950 case MONO_TYPE_U4:
1951 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1952 return 1;
1953 continue;
1954 case MONO_TYPE_I:
1955 case MONO_TYPE_U:
1956 case MONO_TYPE_PTR:
1957 case MONO_TYPE_FNPTR:
1958 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1959 return 1;
1960 continue;
1961 case MONO_TYPE_CLASS:
1962 case MONO_TYPE_STRING:
1963 case MONO_TYPE_OBJECT:
1964 case MONO_TYPE_SZARRAY:
1965 case MONO_TYPE_ARRAY:
1966 if (args [i]->type != STACK_OBJ)
1967 return 1;
1968 continue;
1969 case MONO_TYPE_I8:
1970 case MONO_TYPE_U8:
1971 if (args [i]->type != STACK_I8)
1972 return 1;
1973 continue;
1974 case MONO_TYPE_R4:
1975 case MONO_TYPE_R8:
1976 if (args [i]->type != STACK_R8)
1977 return 1;
1978 continue;
1979 case MONO_TYPE_VALUETYPE:
1980 if (simple_type->data.klass->enumtype) {
1981 simple_type = mono_class_enum_basetype (simple_type->data.klass);
1982 goto handle_enum;
1984 if (args [i]->type != STACK_VTYPE)
1985 return 1;
1986 continue;
1987 case MONO_TYPE_TYPEDBYREF:
1988 if (args [i]->type != STACK_VTYPE)
1989 return 1;
1990 continue;
1991 case MONO_TYPE_GENERICINST:
1992 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
1993 goto handle_enum;
1995 default:
1996 g_error ("unknown type 0x%02x in check_call_signature",
1997 simple_type->type);
2000 return 0;
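/*
 * callvirt_to_call:
 * callvirt_to_call_membase:
 *
 *   Convert an OP_*CALLVIRT opcode into the corresponding direct call opcode,
 * or into the *_MEMBASE form used when the target is loaded from a vtable slot.
 */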
2003 static int
2004 callvirt_to_call (int opcode)
2006 switch (opcode) {
2007 case OP_CALLVIRT:
2008 return OP_CALL;
2009 case OP_VOIDCALLVIRT:
2010 return OP_VOIDCALL;
2011 case OP_FCALLVIRT:
2012 return OP_FCALL;
2013 case OP_VCALLVIRT:
2014 return OP_VCALL;
2015 case OP_LCALLVIRT:
2016 return OP_LCALL;
2017 default:
2018 g_assert_not_reached ();
2021 return -1;
2024 static int
2025 callvirt_to_call_membase (int opcode)
2027 switch (opcode) {
2028 case OP_CALLVIRT:
2029 return OP_CALL_MEMBASE;
2030 case OP_VOIDCALLVIRT:
2031 return OP_VOIDCALL_MEMBASE;
2032 case OP_FCALLVIRT:
2033 return OP_FCALL_MEMBASE;
2034 case OP_LCALLVIRT:
2035 return OP_LCALL_MEMBASE;
2036 case OP_VCALLVIRT:
2037 return OP_VCALL_MEMBASE;
2038 default:
2039 g_assert_not_reached ();
2042 return -1;
2045 #ifdef MONO_ARCH_HAVE_IMT
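/*
 * emit_imt_argument:
 *
 *   Load the IMT argument for an interface call: either IMT_ARG or a constant
 * describing CALL->method is moved into MONO_ARCH_IMT_REG. On architectures
 * without a dedicated IMT register this defers to mono_arch_emit_imt_argument ().
 */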
2046 static void
2047 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2049 #ifdef MONO_ARCH_IMT_REG
2050 int method_reg = alloc_preg (cfg);
2052 if (imt_arg) {
2053 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2054 } else if (cfg->compile_aot) {
2055 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2056 } else {
2057 MonoInst *ins;
2058 MONO_INST_NEW (cfg, ins, OP_PCONST);
2059 ins->inst_p0 = call->method;
2060 ins->dreg = method_reg;
2061 MONO_ADD_INS (cfg->cbb, ins);
2064 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2065 #else
2066 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2067 #endif
2069 #endif
2071 static MonoJumpInfo *
2072 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2074 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2076 ji->ip.i = ip;
2077 ji->type = type;
2078 ji->data.target = target;
2080 return ji;
2083 inline static MonoInst*
2084 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
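/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG: pick the call opcode from the return type,
 * allocate a vret variable plus an OP_OUTARG_VTRETADDR for valuetype returns,
 * convert R4 arguments through an icall on soft-float targets, and let
 * mono_arch_emit_call () lower the outgoing arguments.
 */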
2086 inline static MonoCallInst *
2087 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2088 MonoInst **args, int calli, int virtual)
2090 MonoCallInst *call;
2091 #ifdef MONO_ARCH_SOFT_FLOAT
2092 int i;
2093 #endif
2095 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2097 call->args = args;
2098 call->signature = sig;
2100 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2102 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2103 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2104 MonoInst *loada;
2106 temp->backend.is_pinvoke = sig->pinvoke;
2109 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2110 * address of return value to increase optimization opportunities.
2111 * Before vtype decomposition, the dreg of the call ins itself represents the
2112 * fact that the call modifies the return value. After decomposition, the call will
2113 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2114 * will be transformed into an LDADDR.
2116 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2117 loada->dreg = alloc_preg (cfg);
2118 loada->inst_p0 = temp;
2119 /* We reference the call too since call->dreg could change during optimization */
2120 loada->inst_p1 = call;
2121 MONO_ADD_INS (cfg->cbb, loada);
2123 call->inst.dreg = temp->dreg;
2125 call->vret_var = loada;
2126 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2127 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2131 * If the call has a float argument, we would need to do an r8->r4 conversion using
2132 * an icall, but that cannot be done during the call sequence since it would clobber
2133 * the call registers + the stack. So we do it before emitting the call.
2135 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2136 MonoType *t;
2137 MonoInst *in = call->args [i];
2139 if (i >= sig->hasthis)
2140 t = sig->params [i - sig->hasthis];
2141 else
2142 t = &mono_defaults.int_class->byval_arg;
2143 t = mono_type_get_underlying_type (t);
2145 if (!t->byref && t->type == MONO_TYPE_R4) {
2146 MonoInst *iargs [1];
2147 MonoInst *conv;
2149 iargs [0] = in;
2150 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2152 /* The result will be in an int vreg */
2153 call->args [i] = conv;
2156 #endif
2158 mono_arch_emit_call (cfg, call);
2160 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2161 cfg->flags |= MONO_CFG_HAS_CALLS;
2163 return call;
2166 inline static MonoInst*
2167 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2169 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2171 call->inst.sreg1 = addr->dreg;
2173 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2175 return (MonoInst*)call;
2178 inline static MonoInst*
2179 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2181 #ifdef MONO_ARCH_RGCTX_REG
2182 MonoCallInst *call;
2183 int rgctx_reg = -1;
2185 if (rgctx_arg) {
2186 rgctx_reg = mono_alloc_preg (cfg);
2187 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2189 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2190 if (rgctx_arg) {
2191 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2192 cfg->uses_rgctx_reg = TRUE;
2194 return (MonoInst*)call;
2195 #else
2196 g_assert_not_reached ();
2197 return NULL;
2198 #endif
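/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD. Calls to non-virtual or sealed methods are turned
 * into direct calls after a null check of the 'this' argument, delegate Invoke
 * is dispatched through delegate->invoke_impl, interface calls go through the
 * IMT or the interface slot of the vtable, and the remaining virtual calls
 * load their target from the vtable slot. Methods of marshalbyref classes are
 * routed through the remoting invoke-with-check wrapper.
 */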
2201 static MonoInst*
2202 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2203 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2205 gboolean virtual = this != NULL;
2206 gboolean enable_for_aot = TRUE;
2207 MonoCallInst *call;
2209 if (method->string_ctor) {
2210 /* Create the real signature */
2211 /* FIXME: Cache these */
2212 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2213 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2215 sig = ctor_sig;
2218 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2220 if (this && sig->hasthis &&
2221 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2222 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2223 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2224 } else {
2225 call->method = method;
2227 call->inst.flags |= MONO_INST_HAS_METHOD;
2228 call->inst.inst_left = this;
2230 if (virtual) {
2231 int vtable_reg, slot_reg, this_reg;
2233 this_reg = this->dreg;
2235 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2236 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2237 /* Make a call to delegate->invoke_impl */
2238 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2239 call->inst.inst_basereg = this_reg;
2240 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2241 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2243 return (MonoInst*)call;
2245 #endif
2247 if ((!cfg->compile_aot || enable_for_aot) &&
2248 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2249 (MONO_METHOD_IS_FINAL (method) &&
2250 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2252 * the method is not virtual, so we just need to ensure 'this' is not null
2253 * and then we can call the method directly.
2255 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2256 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2259 if (!method->string_ctor) {
2260 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2261 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2262 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2265 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2267 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2269 return (MonoInst*)call;
2272 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2274 * the method is virtual, but we can statically dispatch since either
2275 * its class or the method itself is sealed.
2276 * But first we need to ensure it's not a null reference.
2278 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2279 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2280 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2282 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2283 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2285 return (MonoInst*)call;
2288 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2290 vtable_reg = alloc_preg (cfg);
2291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2292 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2293 slot_reg = -1;
2294 #ifdef MONO_ARCH_HAVE_IMT
2295 if (mono_use_imt) {
2296 guint32 imt_slot = mono_method_get_imt_slot (method);
2297 emit_imt_argument (cfg, call, imt_arg);
2298 slot_reg = vtable_reg;
2299 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2301 #endif
2302 if (slot_reg == -1) {
2303 slot_reg = alloc_preg (cfg);
2304 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2305 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2307 } else {
2308 slot_reg = vtable_reg;
2309 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2310 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2311 #ifdef MONO_ARCH_HAVE_IMT
2312 if (imt_arg) {
2313 g_assert (mono_method_signature (method)->generic_param_count);
2314 emit_imt_argument (cfg, call, imt_arg);
2316 #endif
2319 call->inst.sreg1 = slot_reg;
2320 call->virtual = TRUE;
2323 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2325 return (MonoInst*)call;
2328 static MonoInst*
2329 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2330 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2332 int rgctx_reg;
2333 MonoInst *ins;
2334 MonoCallInst *call;
2336 if (vtable_arg) {
2337 #ifdef MONO_ARCH_RGCTX_REG
2338 rgctx_reg = mono_alloc_preg (cfg);
2339 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2340 #else
2341 NOT_IMPLEMENTED;
2342 #endif
2344 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2346 call = (MonoCallInst*)ins;
2347 if (vtable_arg) {
2348 #ifdef MONO_ARCH_RGCTX_REG
2349 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2350 cfg->uses_rgctx_reg = TRUE;
2351 #else
2352 NOT_IMPLEMENTED;
2353 #endif
2356 return ins;
2359 static inline MonoInst*
2360 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2362 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2365 MonoInst*
2366 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2367 MonoInst **args)
2369 MonoCallInst *call;
2371 g_assert (sig);
2373 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2374 call->fptr = func;
2376 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2378 return (MonoInst*)call;
2381 inline static MonoInst*
2382 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2384 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2386 g_assert (info);
2388 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2392 * mono_emit_abs_call:
2394 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2396 inline static MonoInst*
2397 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2398 MonoMethodSignature *sig, MonoInst **args)
2400 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2401 MonoInst *ins;
2404 * We pass ji as the call address; the PATCH_INFO_ABS resolving code will
2405 * handle it.
2407 if (cfg->abs_patches == NULL)
2408 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2409 g_hash_table_insert (cfg->abs_patches, ji, ji);
2410 ins = mono_emit_native_call (cfg, ji, sig, args);
2411 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2412 return ins;
2415 static MonoMethod*
2416 get_memcpy_method (void)
2418 static MonoMethod *memcpy_method = NULL;
2419 if (!memcpy_method) {
2420 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2421 if (!memcpy_method)
2422 g_error ("Old corlib found. Install a new one");
2424 return memcpy_method;
2428 * Emit code to copy a valuetype of type @klass whose address is stored in
2429 * @src->dreg to memory whose address is stored at @dest->dreg.
2431 void
2432 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2434 MonoInst *iargs [3];
2435 int n;
2436 guint32 align = 0;
2437 MonoMethod *memcpy_method;
2439 g_assert (klass);
2441 * This check breaks with spilled vars... need to handle it during verification anyway.
2442 * g_assert (klass && klass == src->klass && klass == dest->klass);
2445 if (native)
2446 n = mono_class_native_size (klass, &align);
2447 else
2448 n = mono_class_value_size (klass, &align);
2450 #if HAVE_WRITE_BARRIERS
2451 /* if native is true there should be no references in the struct */
2452 if (klass->has_references && !native) {
2453 /* Avoid barriers when storing to the stack */
2454 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2455 (dest->opcode == OP_LDADDR))) {
2456 iargs [0] = dest;
2457 iargs [1] = src;
2458 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2460 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2463 #endif
2465 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2466 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2467 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2468 } else {
2469 iargs [0] = dest;
2470 iargs [1] = src;
2471 EMIT_NEW_ICONST (cfg, iargs [2], n);
2473 memcpy_method = get_memcpy_method ();
2474 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2478 static MonoMethod*
2479 get_memset_method (void)
2481 static MonoMethod *memset_method = NULL;
2482 if (!memset_method) {
2483 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2484 if (!memset_method)
2485 g_error ("Old corlib found. Install a new one");
2487 return memset_method;
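/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype KLASS at DEST->dreg, using an
 * inline memset for small sizes and a call to the managed memset helper
 * otherwise.
 */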
2490 void
2491 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2493 MonoInst *iargs [3];
2494 int n;
2495 guint32 align;
2496 MonoMethod *memset_method;
2498 /* FIXME: Optimize this for the case when dest is an LDADDR */
2500 mono_class_init (klass);
2501 n = mono_class_value_size (klass, &align);
2503 if (n <= sizeof (gpointer) * 5) {
2504 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2506 else {
2507 memset_method = get_memset_method ();
2508 iargs [0] = dest;
2509 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2510 EMIT_NEW_ICONST (cfg, iargs [2], n);
2511 mono_emit_method_call (cfg, memset_method, iargs, NULL);
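/*
 * emit_get_rgctx:
 *
 *   Return an instruction computing the runtime generic context for METHOD:
 * the mrgctx variable when the method's own generic context is used, the
 * vtable variable for static or valuetype methods, or the vtable loaded from
 * the 'this' argument otherwise.
 */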
2515 static MonoInst*
2516 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2518 MonoInst *this = NULL;
2520 g_assert (cfg->generic_sharing_context);
2522 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2523 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2524 !method->klass->valuetype)
2525 EMIT_NEW_ARGLOAD (cfg, this, 0);
2527 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2528 MonoInst *mrgctx_loc, *mrgctx_var;
2530 g_assert (!this);
2531 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2533 mrgctx_loc = mono_get_vtable_var (cfg);
2534 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2536 return mrgctx_var;
2537 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2538 MonoInst *vtable_loc, *vtable_var;
2540 g_assert (!this);
2542 vtable_loc = mono_get_vtable_var (cfg);
2543 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2545 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2546 MonoInst *mrgctx_var = vtable_var;
2547 int vtable_reg;
2549 vtable_reg = alloc_preg (cfg);
2550 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2551 vtable_var->type = STACK_PTR;
2554 return vtable_var;
2555 } else {
2556 MonoInst *ins;
2557 int vtable_reg, res_reg;
2559 vtable_reg = alloc_preg (cfg);
2560 res_reg = alloc_preg (cfg);
2561 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2562 return ins;
2566 static MonoJumpInfoRgctxEntry *
2567 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2569 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2570 res->method = method;
2571 res->in_mrgctx = in_mrgctx;
2572 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2573 res->data->type = patch_type;
2574 res->data->data.target = patch_data;
2575 res->info_type = info_type;
2577 return res;
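/*
 * emit_rgctx_fetch:
 *
 *   Emit a call through the lazy-fetch trampoline which loads ENTRY from the
 * runtime generic context RGCTX. The emit_get_rgctx_klass/method/field
 * helpers below build the corresponding rgctx entry and fetch it.
 */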
2580 static inline MonoInst*
2581 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2583 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2586 static MonoInst*
2587 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2588 MonoClass *klass, int rgctx_type)
2590 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2591 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2593 return emit_rgctx_fetch (cfg, rgctx, entry);
2596 static MonoInst*
2597 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2598 MonoMethod *cmethod, int rgctx_type)
2600 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2601 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2603 return emit_rgctx_fetch (cfg, rgctx, entry);
2606 static MonoInst*
2607 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2608 MonoClassField *field, int rgctx_type)
2610 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2611 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2613 return emit_rgctx_fetch (cfg, rgctx, entry);
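/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ is an instance of ARRAY_CLASS by comparing its
 * vtable (or its class under MONO_OPT_SHARED) with the expected one, which may
 * come from a constant, an AOT constant or the rgctx, throwing
 * ArrayTypeMismatchException on mismatch.
 */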
2616 static void
2617 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2619 int vtable_reg = alloc_preg (cfg);
2620 int context_used = 0;
2622 if (cfg->generic_sharing_context)
2623 context_used = mono_class_check_context_used (array_class);
2625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2627 if (cfg->opt & MONO_OPT_SHARED) {
2628 int class_reg = alloc_preg (cfg);
2629 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2630 if (cfg->compile_aot) {
2631 int klass_reg = alloc_preg (cfg);
2632 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2633 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2634 } else {
2635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2637 } else if (context_used) {
2638 MonoInst *vtable_ins;
2640 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2641 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2642 } else {
2643 if (cfg->compile_aot) {
2644 int vt_reg = alloc_preg (cfg);
2645 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2646 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2647 } else {
2648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2652 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
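/*
 * save_cast_details:
 *
 *   When the --debug=casts option is active, store the class of the object in
 * OBJ_REG and the target class KLASS into the JIT TLS structure so that a
 * failing cast can report both types.
 */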
2655 static void
2656 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2658 if (mini_get_debug_options ()->better_cast_details) {
2659 int to_klass_reg = alloc_preg (cfg);
2660 int vtable_reg = alloc_preg (cfg);
2661 int klass_reg = alloc_preg (cfg);
2662 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2664 if (!tls_get) {
2665 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2666 exit (1);
2669 MONO_ADD_INS (cfg->cbb, tls_get);
2670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2674 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2679 static void
2680 reset_cast_details (MonoCompile *cfg)
2682 /* Reset the variables holding the cast details */
2683 if (mini_get_debug_options ()->better_cast_details) {
2684 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2686 MONO_ADD_INS (cfg->cbb, tls_get);
2687 /* It is enough to reset the from field */
2688 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2693 * Handles the unbox of a Nullable<T>. If context_used is non-zero, shared
2694 * generic code is generated.
2696 static MonoInst*
2697 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2699 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2701 if (context_used) {
2702 MonoInst *rgctx, *addr;
2704 /* FIXME: What if the class is shared? We might not
2705 have to get the address of the method from the
2706 RGCTX. */
2707 addr = emit_get_rgctx_method (cfg, context_used, method,
2708 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2710 rgctx = emit_get_rgctx (cfg, method, context_used);
2712 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2713 } else {
2714 return mono_emit_method_call (cfg, method, &val, NULL);
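/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for KLASS: check that the vtable of the object has
 * rank 0 and that its element class matches, then return the address right
 * after the MonoObject header.
 */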
2718 static MonoInst*
2719 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2721 MonoInst *add;
2722 int obj_reg;
2723 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2724 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2725 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2726 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2728 obj_reg = sp [0]->dreg;
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2732 /* FIXME: generics */
2733 g_assert (klass->rank == 0);
2735 // Check rank == 0
2736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2737 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2742 if (context_used) {
2743 MonoInst *element_class;
2745 /* This assertion is from the unboxcast insn */
2746 g_assert (klass->rank == 0);
2748 element_class = emit_get_rgctx_klass (cfg, context_used,
2749 klass->element_class, MONO_RGCTX_INFO_KLASS);
2751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2752 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2753 } else {
2754 save_cast_details (cfg, klass->element_class, obj_reg);
2755 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2756 reset_cast_details (cfg);
2759 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2760 MONO_ADD_INS (cfg->cbb, add);
2761 add->type = STACK_MP;
2762 add->klass = klass;
2764 return add;
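/*
 * handle_alloc:
 *
 *   Emit the allocation of an instance of KLASS: under MONO_OPT_SHARED this
 * calls mono_object_new with the domain, out-of-line AOT code allocating
 * corlib types uses a specialized helper, and otherwise the managed allocator
 * or the allocation function returned by mono_class_get_allocation_ftn () is
 * used.
 */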
2767 static MonoInst*
2768 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2770 MonoInst *iargs [2];
2771 void *alloc_ftn;
2773 if (cfg->opt & MONO_OPT_SHARED) {
2774 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2775 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2777 alloc_ftn = mono_object_new;
2778 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2779 /* This happens often in argument checking code, e.g. throw new FooException... */
2780 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2781 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2782 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2783 } else {
2784 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2785 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2786 gboolean pass_lw;
2788 if (managed_alloc) {
2789 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2790 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2792 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2793 if (pass_lw) {
2794 guint32 lw = vtable->klass->instance_size;
2795 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2796 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2797 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2799 else {
2800 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2804 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
2807 static MonoInst*
2808 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2809 gboolean for_box)
2811 MonoInst *iargs [2];
2812 MonoMethod *managed_alloc = NULL;
2813 void *alloc_ftn;
2816 FIXME: we cannot get managed_alloc here because we can't get
2817 the class's vtable (because it's not a closed class)
2819 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2820 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2823 if (cfg->opt & MONO_OPT_SHARED) {
2824 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2825 iargs [1] = data_inst;
2826 alloc_ftn = mono_object_new;
2827 } else {
2828 if (managed_alloc) {
2829 iargs [0] = data_inst;
2830 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2833 iargs [0] = data_inst;
2834 alloc_ftn = mono_object_new_specific;
2837 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
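/*
 * handle_box:
 *
 *   Box VAL into an object of type KLASS: Nullable types go through their
 * managed Box method, everything else allocates an object and stores the
 * value right after the MonoObject header.
 */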
2840 static MonoInst*
2841 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2843 MonoInst *alloc, *ins;
2845 if (mono_class_is_nullable (klass)) {
2846 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2847 return mono_emit_method_call (cfg, method, &val, NULL);
2850 alloc = handle_alloc (cfg, klass, TRUE);
2852 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2854 return alloc;
2857 static MonoInst *
2858 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2860 MonoInst *alloc, *ins;
2862 if (mono_class_is_nullable (klass)) {
2863 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2864 /* FIXME: What if the class is shared? We might not
2865 have to get the method address from the RGCTX. */
2866 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2867 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2868 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2870 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2871 } else {
2872 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2874 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2876 return alloc;
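/*
 * handle_castclass:
 *
 *   Emit the castclass sequence for KLASS: null references pass through,
 * interfaces are checked with an interface cast, sealed classes are compared
 * directly against the expected class, and the remaining cases go through
 * mini_emit_castclass (), throwing InvalidCastException on failure.
 */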
2880 static MonoInst*
2881 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2883 MonoBasicBlock *is_null_bb;
2884 int obj_reg = src->dreg;
2885 int vtable_reg = alloc_preg (cfg);
2887 NEW_BBLOCK (cfg, is_null_bb);
2889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2890 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2892 save_cast_details (cfg, klass, obj_reg);
2894 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2896 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2897 } else {
2898 int klass_reg = alloc_preg (cfg);
2900 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2902 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2903 /* the remoting code is broken, so access the class for now */
2904 if (0) {
2905 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2907 } else {
2908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2911 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2912 } else {
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2914 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2918 MONO_START_BB (cfg, is_null_bb);
2920 reset_cast_details (cfg);
2922 return src;
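/*
 * handle_isinst:
 *
 *   Emit the isinst sequence for KLASS: the result holds the original object
 * when it is an instance of KLASS and NULL otherwise. Array classes also
 * check the rank and the element class, and SZARRAY additionally checks that
 * the object has no bounds.
 */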
2925 static MonoInst*
2926 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2928 MonoInst *ins;
2929 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2930 int obj_reg = src->dreg;
2931 int vtable_reg = alloc_preg (cfg);
2932 int res_reg = alloc_preg (cfg);
2934 NEW_BBLOCK (cfg, is_null_bb);
2935 NEW_BBLOCK (cfg, false_bb);
2936 NEW_BBLOCK (cfg, end_bb);
2938 /* Do the assignment at the beginning, so the other assignment can be if-converted */
2939 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2940 ins->type = STACK_OBJ;
2941 ins->klass = klass;
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2946 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2947 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2948 /* the is_null_bb target simply copies the input register to the output */
2949 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2950 } else {
2951 int klass_reg = alloc_preg (cfg);
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2955 if (klass->rank) {
2956 int rank_reg = alloc_preg (cfg);
2957 int eclass_reg = alloc_preg (cfg);
2959 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2961 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2963 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
2964 if (klass->cast_class == mono_defaults.object_class) {
2965 int parent_reg = alloc_preg (cfg);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2967 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2968 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2969 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2970 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2971 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2972 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2973 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2974 } else if (klass->cast_class == mono_defaults.enum_class) {
2975 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2976 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2977 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2978 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2979 } else {
2980 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2981 /* Check that the object is a vector too */
2982 int bounds_reg = alloc_preg (cfg);
2983 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
2984 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
2985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2988 /* the is_null_bb target simply copies the input register to the output */
2989 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2991 } else if (mono_class_is_nullable (klass)) {
2992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2993 /* the is_null_bb target simply copies the input register to the output */
2994 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
2995 } else {
2996 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2997 /* the remoting code is broken, so access the class for now */
2998 if (0) {
2999 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3001 } else {
3002 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3005 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3007 } else {
3008 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3009 /* the is_null_bb target simply copies the input register to the output */
3010 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3015 MONO_START_BB (cfg, false_bb);
3017 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3018 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3020 MONO_START_BB (cfg, is_null_bb);
3022 MONO_START_BB (cfg, end_bb);
3024 return ins;
3027 static MonoInst*
3028 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3030 /* This opcode takes as input an object reference and a class, and returns:
3031 0) if the object is an instance of the class,
3032 1) if the object is not an instance of the class,
3033 2) if the object is a proxy whose type cannot be determined */
3035 MonoInst *ins;
3036 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3037 int obj_reg = src->dreg;
3038 int dreg = alloc_ireg (cfg);
3039 int tmp_reg;
3040 int klass_reg = alloc_preg (cfg);
3042 NEW_BBLOCK (cfg, true_bb);
3043 NEW_BBLOCK (cfg, false_bb);
3044 NEW_BBLOCK (cfg, false2_bb);
3045 NEW_BBLOCK (cfg, end_bb);
3046 NEW_BBLOCK (cfg, no_proxy_bb);
3048 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3049 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3051 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3052 NEW_BBLOCK (cfg, interface_fail_bb);
3054 tmp_reg = alloc_preg (cfg);
3055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3056 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3057 MONO_START_BB (cfg, interface_fail_bb);
3058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3060 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3062 tmp_reg = alloc_preg (cfg);
3063 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3065 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3066 } else {
3067 tmp_reg = alloc_preg (cfg);
3068 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3071 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3072 tmp_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3074 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3076 tmp_reg = alloc_preg (cfg);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3079 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3081 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3082 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3084 MONO_START_BB (cfg, no_proxy_bb);
3086 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3089 MONO_START_BB (cfg, false_bb);
3091 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3094 MONO_START_BB (cfg, false2_bb);
3096 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3099 MONO_START_BB (cfg, true_bb);
3101 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3103 MONO_START_BB (cfg, end_bb);
3105 /* FIXME: */
3106 MONO_INST_NEW (cfg, ins, OP_ICONST);
3107 ins->dreg = dreg;
3108 ins->type = STACK_I4;
3110 return ins;
3113 static MonoInst*
3114 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3116 /* This opcode takes as input an object reference and a class, and returns:
3117 0) if the object is an instance of the class,
3118 1) if the object is a proxy whose type cannot be determined
3119 otherwise an InvalidCastException is thrown */
3121 MonoInst *ins;
3122 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3123 int obj_reg = src->dreg;
3124 int dreg = alloc_ireg (cfg);
3125 int tmp_reg = alloc_preg (cfg);
3126 int klass_reg = alloc_preg (cfg);
3128 NEW_BBLOCK (cfg, end_bb);
3129 NEW_BBLOCK (cfg, ok_result_bb);
3131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3134 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3135 NEW_BBLOCK (cfg, interface_fail_bb);
3137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3138 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3139 MONO_START_BB (cfg, interface_fail_bb);
3140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3142 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3144 tmp_reg = alloc_preg (cfg);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3147 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3149 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3152 } else {
3153 NEW_BBLOCK (cfg, no_proxy_bb);
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3157 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3159 tmp_reg = alloc_preg (cfg);
3160 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3163 tmp_reg = alloc_preg (cfg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3168 NEW_BBLOCK (cfg, fail_1_bb);
3170 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3172 MONO_START_BB (cfg, fail_1_bb);
3174 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3177 MONO_START_BB (cfg, no_proxy_bb);
3179 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3182 MONO_START_BB (cfg, ok_result_bb);
3184 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3186 MONO_START_BB (cfg, end_bb);
3188 /* FIXME: */
3189 MONO_INST_NEW (cfg, ins, OP_ICONST);
3190 ins->dreg = dreg;
3191 ins->type = STACK_I4;
3193 return ins;
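/*
 * handle_delegate_ctor:
 *
 *   Inline the work done by mono_delegate_ctor (): allocate the delegate,
 * store the target and the method, cache a per-domain slot which will receive
 * the compiled code of the target method, and install the delegate trampoline
 * as invoke_impl.
 */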
3196 static G_GNUC_UNUSED MonoInst*
3197 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3199 gpointer *trampoline;
3200 MonoInst *obj, *method_ins, *tramp_ins;
3201 MonoDomain *domain;
3202 guint8 **code_slot;
3204 obj = handle_alloc (cfg, klass, FALSE);
3206 /* Inline the contents of mono_delegate_ctor */
3208 /* Set target field */
3209 /* Optimize away setting of NULL target */
3210 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3211 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3213 /* Set method field */
3214 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3215 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3218 * To avoid looking up the compiled code belonging to the target method
3219 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3220 * store it, and we fill it after the method has been compiled.
3222 if (!cfg->compile_aot && !method->dynamic) {
3223 MonoInst *code_slot_ins;
3225 domain = mono_domain_get ();
3226 mono_domain_lock (domain);
3227 if (!domain_jit_info (domain)->method_code_hash)
3228 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3229 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3230 if (!code_slot) {
3231 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3232 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3234 mono_domain_unlock (domain);
3236 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3237 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3240 /* Set invoke_impl field */
3241 if (cfg->compile_aot) {
3242 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3243 } else {
3244 trampoline = mono_create_delegate_trampoline (klass);
3245 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3247 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3249 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3251 return obj;
3254 static MonoInst*
3255 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3257 MonoJitICallInfo *info;
3259 /* Need to register the icall so it gets an icall wrapper */
3260 info = mono_get_array_new_va_icall (rank);
3262 cfg->flags |= MONO_CFG_HAS_VARARGS;
3264 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3265 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3268 static void
3269 mono_emit_load_got_addr (MonoCompile *cfg)
3271 MonoInst *getaddr, *dummy_use;
3273 if (!cfg->got_var || cfg->got_var_allocated)
3274 return;
3276 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3277 getaddr->dreg = cfg->got_var->dreg;
3279 /* Add it to the start of the first bblock */
3280 if (cfg->bb_entry->code) {
3281 getaddr->next = cfg->bb_entry->code;
3282 cfg->bb_entry->code = getaddr;
3284 else
3285 MONO_ADD_INS (cfg->bb_entry, getaddr);
3287 cfg->got_var_allocated = TRUE;
3290 * Add a dummy use to keep the got_var alive, since real uses might
3291 * only be generated by the back ends.
3292 * Add it to end_bblock, so the variable's lifetime covers the whole
3293 * method.
3294 * It would be better to make the usage of the got var explicit in all
3295 * cases when the backend needs it (i.e. calls, throw etc.), so this
3296 * wouldn't be needed.
3298 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3299 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3302 static int inline_limit;
3303 static gboolean inline_limit_inited;
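/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD can be inlined into CFG. Runtime, internal-call,
 * pinvoke, synchronized and noinline methods, methods with exception clauses,
 * bodies larger than the inline limit (MONO_INLINELIMIT or
 * INLINE_LENGTH_LIMIT), methods with declarative security, and methods whose
 * class would still need its cctor run are all rejected.
 */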
3305 static gboolean
3306 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3308 MonoMethodHeader *header;
3309 MonoVTable *vtable;
3310 #ifdef MONO_ARCH_SOFT_FLOAT
3311 MonoMethodSignature *sig = mono_method_signature (method);
3312 int i;
3313 #endif
3315 if (cfg->generic_sharing_context)
3316 return FALSE;
3318 #ifdef MONO_ARCH_HAVE_LMF_OPS
3319 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3320 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3321 !MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret) && !mini_class_is_system_array (method->klass))
3322 return TRUE;
3323 #endif
3325 if (method->is_inflated)
3326 /* Avoid inflating the header */
3327 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3328 else
3329 header = mono_method_get_header (method);
3331 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3332 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3333 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3334 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3335 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3336 (method->klass->marshalbyref) ||
3337 !header || header->num_clauses)
3338 return FALSE;
3340 /* also consider num_locals? */
3341 /* Do the size check early to avoid creating vtables */
3342 if (!inline_limit_inited) {
3343 if (getenv ("MONO_INLINELIMIT"))
3344 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3345 else
3346 inline_limit = INLINE_LENGTH_LIMIT;
3347 inline_limit_inited = TRUE;
3349 if (header->code_size >= inline_limit)
3350 return FALSE;
3353 * If we can initialize the class of the method right away, we do.
3354 * Otherwise we don't allow inlining when the class needs initialization,
3355 * since that would mean inserting a call to mono_runtime_class_init()
3356 * inside the inlined code.
3358 if (!(cfg->opt & MONO_OPT_SHARED)) {
3359 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3360 if (cfg->run_cctors && method->klass->has_cctor) {
3361 if (!method->klass->runtime_info)
3362 /* No vtable created yet */
3363 return FALSE;
3364 vtable = mono_class_vtable (cfg->domain, method->klass);
3365 if (!vtable)
3366 return FALSE;
3367 /* This ensures that inlining cannot trigger */
3368 /* .cctors: too many apps depend on them */
3369 /* running in a specific order... */
3370 if (! vtable->initialized)
3371 return FALSE;
3372 mono_runtime_class_init (vtable);
3374 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3375 if (!method->klass->runtime_info)
3376 /* No vtable created yet */
3377 return FALSE;
3378 vtable = mono_class_vtable (cfg->domain, method->klass);
3379 if (!vtable)
3380 return FALSE;
3381 if (!vtable->initialized)
3382 return FALSE;
3384 } else {
3386 * If we're compiling shared code, the cctor will need to be run at
3387 * AOT method load time, for example, or at the end of the compilation
3388 * of the inlining method.
3390 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3391 return FALSE;
3395 * CAS - do not inline methods with declarative security
3396 * Note: this has to be before any possible return TRUE;
3398 if (mono_method_has_declsec (method))
3399 return FALSE;
3401 #ifdef MONO_ARCH_SOFT_FLOAT
3402 /* FIXME: */
3403 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3404 return FALSE;
3405 for (i = 0; i < sig->param_count; ++i)
3406 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3407 return FALSE;
3408 #endif
3410 return TRUE;
3413 static gboolean
3414 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3416 if (vtable->initialized && !cfg->compile_aot)
3417 return FALSE;
3419 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3420 return FALSE;
3422 if (!mono_class_needs_cctor_run (vtable->klass, method))
3423 return FALSE;
3425 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3426 /* The initialization is already done before the method is called */
3427 return FALSE;
3429 return TRUE;
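/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for an element of the one-dimensional array
 * ARR: widen or narrow the index as needed, bounds check it against
 * max_length, then compute vector + index * element_size, using an x86 LEA
 * when the element size is 1, 2, 4 or 8.
 */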
3432 static MonoInst*
3433 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3435 MonoInst *ins;
3436 guint32 size;
3437 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3439 mono_class_init (klass);
3440 size = mono_class_array_element_size (klass);
3442 mult_reg = alloc_preg (cfg);
3443 array_reg = arr->dreg;
3444 index_reg = index->dreg;
3446 #if SIZEOF_REGISTER == 8
3447 /* The array reg is 64 bits but the index reg is only 32 */
3448 index2_reg = alloc_preg (cfg);
3449 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3450 #else
3451 if (index->type == STACK_I8) {
3452 index2_reg = alloc_preg (cfg);
3453 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3454 } else {
3455 index2_reg = index_reg;
3457 #endif
3459 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3461 #if defined(__i386__) || defined(__x86_64__)
3462 if (size == 1 || size == 2 || size == 4 || size == 8) {
3463 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3465 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3466 ins->type = STACK_PTR;
3468 return ins;
3470 #endif
3472 add_reg = alloc_preg (cfg);
3474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3475 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3476 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3477 ins->type = STACK_PTR;
3478 MONO_ADD_INS (cfg->cbb, ins);
3480 return ins;
3483 #ifndef MONO_ARCH_EMULATE_MUL_DIV
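/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] in the two
 * dimensional array ARR, range checking both indexes against the per-dimension
 * lower bound and length stored in the array's bounds.
 */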
3484 static MonoInst*
3485 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3487 int bounds_reg = alloc_preg (cfg);
3488 int add_reg = alloc_preg (cfg);
3489 int mult_reg = alloc_preg (cfg);
3490 int mult2_reg = alloc_preg (cfg);
3491 int low1_reg = alloc_preg (cfg);
3492 int low2_reg = alloc_preg (cfg);
3493 int high1_reg = alloc_preg (cfg);
3494 int high2_reg = alloc_preg (cfg);
3495 int realidx1_reg = alloc_preg (cfg);
3496 int realidx2_reg = alloc_preg (cfg);
3497 int sum_reg = alloc_preg (cfg);
3498 int index1, index2;
3499 MonoInst *ins;
3500 guint32 size;
3502 mono_class_init (klass);
3503 size = mono_class_array_element_size (klass);
3505 index1 = index_ins1->dreg;
3506 index2 = index_ins2->dreg;
3508 /* range checking */
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3510 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3513 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3514 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3516 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3517 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3518 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3521 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3522 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3523 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3524 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3525 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3526 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3528 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3529 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3531 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3532 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3534 ins->type = STACK_MP;
3535 ins->klass = klass;
3536 MONO_ADD_INS (cfg->cbb, ins);
3538 return ins;
3540 #endif
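/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address computation for the array accessor CMETHOD: rank 1
 * arrays (and, when possible, rank 2 arrays with the intrinsics optimization
 * enabled) are handled inline, other ranks go through the managed array address
 * wrapper.
 */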
3542 static MonoInst*
3543 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3545 int rank;
3546 MonoInst *addr;
3547 MonoMethod *addr_method;
3548 int element_size;
3550 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3552 if (rank == 1)
3553 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3555 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3556 /* emit_ldelema_2 depends on OP_LMUL */
3557 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3558 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3560 #endif
3562 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3563 addr_method = mono_marshal_get_array_address (rank, element_size);
3564 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3566 return addr;
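/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD with an inline IR sequence (an intrinsic),
 * covering selected methods of String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked and a few others. Returns the instruction producing the
 * result, or NULL if the call should be emitted normally.
 */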
3569 static MonoInst*
3570 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3572 MonoInst *ins = NULL;
3574 static MonoClass *runtime_helpers_class = NULL;
3575 if (! runtime_helpers_class)
3576 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3577 "System.Runtime.CompilerServices", "RuntimeHelpers");
3579 if (cmethod->klass == mono_defaults.string_class) {
3580 if (strcmp (cmethod->name, "get_Chars") == 0) {
3581 int dreg = alloc_ireg (cfg);
3582 int index_reg = alloc_preg (cfg);
3583 int mult_reg = alloc_preg (cfg);
3584 int add_reg = alloc_preg (cfg);
3586 #if SIZEOF_REGISTER == 8
3587 /* The array reg is 64 bits but the index reg is only 32 */
3588 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3589 #else
3590 index_reg = args [1]->dreg;
3591 #endif
3592 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3594 #if defined(__i386__) || defined(__x86_64__)
3595 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3596 add_reg = ins->dreg;
3597 /* Avoid a warning */
3598 mult_reg = 0;
3599 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3600 add_reg, 0);
3601 #else
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3603 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3604 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3605 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3606 #endif
3607 type_from_op (ins, NULL, NULL);
3608 return ins;
3609 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3610 int dreg = alloc_ireg (cfg);
3611 /* Decompose later to allow more optimizations */
3612 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3613 ins->type = STACK_I4;
3614 cfg->cbb->has_array_access = TRUE;
3615 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3617 return ins;
3618 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3619 int mult_reg = alloc_preg (cfg);
3620 int add_reg = alloc_preg (cfg);
3622 /* The corlib functions check for oob already. */
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3624 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3625 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3626 } else
3627 return NULL;
3628 } else if (cmethod->klass == mono_defaults.object_class) {
3630 if (strcmp (cmethod->name, "GetType") == 0) {
3631 int dreg = alloc_preg (cfg);
3632 int vt_reg = alloc_preg (cfg);
3633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3634 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3635 type_from_op (ins, NULL, NULL);
3637 return ins;
3638 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3639 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3640 int dreg = alloc_ireg (cfg);
3641 int t1 = alloc_ireg (cfg);
3643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3644 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3645 ins->type = STACK_I4;
3647 return ins;
3648 #endif
3649 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3650 MONO_INST_NEW (cfg, ins, OP_NOP);
3651 MONO_ADD_INS (cfg->cbb, ins);
3652 return ins;
3653 } else
3654 return NULL;
3655 } else if (cmethod->klass == mono_defaults.array_class) {
3656 if (cmethod->name [0] != 'g')
3657 return NULL;
3659 if (strcmp (cmethod->name, "get_Rank") == 0) {
3660 int dreg = alloc_ireg (cfg);
3661 int vtable_reg = alloc_preg (cfg);
3662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3663 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3664 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3665 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3666 type_from_op (ins, NULL, NULL);
3668 return ins;
3669 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3670 int dreg = alloc_ireg (cfg);
3672 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3673 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3674 type_from_op (ins, NULL, NULL);
3676 return ins;
3677 } else
3678 return NULL;
3679 } else if (cmethod->klass == runtime_helpers_class) {
3681 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3682 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3683 return ins;
3684 } else
3685 return NULL;
3686 } else if (cmethod->klass == mono_defaults.thread_class) {
3687 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3688 ins->dreg = alloc_preg (cfg);
3689 ins->type = STACK_OBJ;
3690 MONO_ADD_INS (cfg->cbb, ins);
3691 return ins;
3692 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3693 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3694 MONO_ADD_INS (cfg->cbb, ins);
3695 return ins;
3696 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3697 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3698 MONO_ADD_INS (cfg->cbb, ins);
3699 return ins;
3701 } else if (cmethod->klass == mono_defaults.monitor_class) {
3702 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3703 if (strcmp (cmethod->name, "Enter") == 0) {
3704 MonoCallInst *call;
3706 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3707 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3708 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3709 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3711 return (MonoInst*)call;
3712 } else if (strcmp (cmethod->name, "Exit") == 0) {
3713 MonoCallInst *call;
3715 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3716 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3717 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3718 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3720 return (MonoInst*)call;
3722 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3723 MonoMethod *fast_method = NULL;
3725 /* Avoid infinite recursion */
3726 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3727 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3728 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3729 return NULL;
3731 if (strcmp (cmethod->name, "Enter") == 0 ||
3732 strcmp (cmethod->name, "Exit") == 0)
3733 fast_method = mono_monitor_get_fast_path (cmethod);
3734 if (!fast_method)
3735 return NULL;
3737 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3738 #endif
3739 } else if (mini_class_is_system_array (cmethod->klass) &&
3740 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3741 MonoInst *addr, *store, *load;
3742 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3744 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3745 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3746 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3747 return store;
3748 } else if (cmethod->klass->image == mono_defaults.corlib &&
3749 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3750 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3751 ins = NULL;
3753 #if SIZEOF_REGISTER == 8
3754 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3755 /* 64 bit reads are already atomic */
3756 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3757 ins->dreg = mono_alloc_preg (cfg);
3758 ins->inst_basereg = args [0]->dreg;
3759 ins->inst_offset = 0;
3760 MONO_ADD_INS (cfg->cbb, ins);
3762 #endif
3764 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3765 if (strcmp (cmethod->name, "Increment") == 0) {
3766 MonoInst *ins_iconst;
3767 guint32 opcode = 0;
3769 if (fsig->params [0]->type == MONO_TYPE_I4)
3770 opcode = OP_ATOMIC_ADD_NEW_I4;
3771 #if SIZEOF_REGISTER == 8
3772 else if (fsig->params [0]->type == MONO_TYPE_I8)
3773 opcode = OP_ATOMIC_ADD_NEW_I8;
3774 #endif
3775 if (opcode) {
3776 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3777 ins_iconst->inst_c0 = 1;
3778 ins_iconst->dreg = mono_alloc_ireg (cfg);
3779 MONO_ADD_INS (cfg->cbb, ins_iconst);
3781 MONO_INST_NEW (cfg, ins, opcode);
3782 ins->dreg = mono_alloc_ireg (cfg);
3783 ins->inst_basereg = args [0]->dreg;
3784 ins->inst_offset = 0;
3785 ins->sreg2 = ins_iconst->dreg;
3786 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3787 MONO_ADD_INS (cfg->cbb, ins);
3789 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3790 MonoInst *ins_iconst;
3791 guint32 opcode = 0;
3793 if (fsig->params [0]->type == MONO_TYPE_I4)
3794 opcode = OP_ATOMIC_ADD_NEW_I4;
3795 #if SIZEOF_REGISTER == 8
3796 else if (fsig->params [0]->type == MONO_TYPE_I8)
3797 opcode = OP_ATOMIC_ADD_NEW_I8;
3798 #endif
3799 if (opcode) {
3800 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3801 ins_iconst->inst_c0 = -1;
3802 ins_iconst->dreg = mono_alloc_ireg (cfg);
3803 MONO_ADD_INS (cfg->cbb, ins_iconst);
3805 MONO_INST_NEW (cfg, ins, opcode);
3806 ins->dreg = mono_alloc_ireg (cfg);
3807 ins->inst_basereg = args [0]->dreg;
3808 ins->inst_offset = 0;
3809 ins->sreg2 = ins_iconst->dreg;
3810 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3811 MONO_ADD_INS (cfg->cbb, ins);
3813 } else if (strcmp (cmethod->name, "Add") == 0) {
3814 guint32 opcode = 0;
3816 if (fsig->params [0]->type == MONO_TYPE_I4)
3817 opcode = OP_ATOMIC_ADD_NEW_I4;
3818 #if SIZEOF_REGISTER == 8
3819 else if (fsig->params [0]->type == MONO_TYPE_I8)
3820 opcode = OP_ATOMIC_ADD_NEW_I8;
3821 #endif
3823 if (opcode) {
3824 MONO_INST_NEW (cfg, ins, opcode);
3825 ins->dreg = mono_alloc_ireg (cfg);
3826 ins->inst_basereg = args [0]->dreg;
3827 ins->inst_offset = 0;
3828 ins->sreg2 = args [1]->dreg;
3829 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3830 MONO_ADD_INS (cfg->cbb, ins);
3833 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3835 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3836 if (strcmp (cmethod->name, "Exchange") == 0) {
3837 guint32 opcode;
3839 if (fsig->params [0]->type == MONO_TYPE_I4)
3840 opcode = OP_ATOMIC_EXCHANGE_I4;
3841 #if SIZEOF_REGISTER == 8
3842 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3843 (fsig->params [0]->type == MONO_TYPE_I) ||
3844 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3845 opcode = OP_ATOMIC_EXCHANGE_I8;
3846 #else
3847 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3848 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3849 opcode = OP_ATOMIC_EXCHANGE_I4;
3850 #endif
3851 else
3852 return NULL;
3854 MONO_INST_NEW (cfg, ins, opcode);
3855 ins->dreg = mono_alloc_ireg (cfg);
3856 ins->inst_basereg = args [0]->dreg;
3857 ins->inst_offset = 0;
3858 ins->sreg2 = args [1]->dreg;
3859 MONO_ADD_INS (cfg->cbb, ins);
3861 switch (fsig->params [0]->type) {
3862 case MONO_TYPE_I4:
3863 ins->type = STACK_I4;
3864 break;
3865 case MONO_TYPE_I8:
3866 case MONO_TYPE_I:
3867 ins->type = STACK_I8;
3868 break;
3869 case MONO_TYPE_OBJECT:
3870 ins->type = STACK_OBJ;
3871 break;
3872 default:
3873 g_assert_not_reached ();
3876 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3878 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS_IMM
3880 /* Can't implement CompareExchange methods this way since they have
3881 * three arguments. We can implement one of the common cases, where the new
3882 * value is a constant. */
3884 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3885 if ((fsig->params [1]->type == MONO_TYPE_I4 ||
3886 (sizeof (gpointer) == 4 && fsig->params [1]->type == MONO_TYPE_I))
3887 && args [2]->opcode == OP_ICONST) {
3888 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_IMM_I4);
3889 ins->dreg = alloc_ireg (cfg);
3890 ins->sreg1 = args [0]->dreg;
3891 ins->sreg2 = args [1]->dreg;
3892 ins->backend.data = GINT_TO_POINTER (args [2]->inst_c0);
3893 ins->type = STACK_I4;
3894 MONO_ADD_INS (cfg->cbb, ins);
3896 /* The I8 case is hard to detect, since the arg might be a conv.i8 (iconst) tree */
3898 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS_IMM */
3900 if (ins)
3901 return ins;
3902 } else if (cmethod->klass->image == mono_defaults.corlib) {
3903 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3904 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3905 MONO_INST_NEW (cfg, ins, OP_BREAK);
3906 MONO_ADD_INS (cfg->cbb, ins);
3907 return ins;
3909 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3910 && strcmp (cmethod->klass->name, "Environment") == 0) {
3911 #ifdef PLATFORM_WIN32
3912 EMIT_NEW_ICONST (cfg, ins, 1);
3913 #else
3914 EMIT_NEW_ICONST (cfg, ins, 0);
3915 #endif
3916 return ins;
3918 } else if (cmethod->klass == mono_defaults.math_class) {
3920 /* There is generic branch-based code for Min/Max, but it does not work for
3921 * all inputs:
3922 * http://everything2.com/?node_id=1051618 */
3926 #ifdef MONO_ARCH_SIMD_INTRINSICS
3927 if (cfg->opt & MONO_OPT_SIMD) {
3928 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3929 if (ins)
3930 return ins;
3932 #endif
3934 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3938 /* This entry point could be used later for arbitrary method
3939 * redirection. */
3941 inline static MonoInst*
3942 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3943 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3945 if (method->klass == mono_defaults.string_class) {
3946 /* managed string allocation support */
3947 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3948 MonoInst *iargs [2];
3949 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3950 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3951 if (!managed_alloc)
3952 return NULL;
3953 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3954 iargs [1] = args [0];
3955 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3958 return NULL;
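/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created variables, which become
 * cfg->args for the method about to be inlined.
 */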
3961 static void
3962 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3964 MonoInst *store, *temp;
3965 int i;
3967 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3968 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3971 /* FIXME: We should use *args++ = sp [0], but that would mean the arg
3972 * would be different from the MonoInst's used to represent arguments, and
3973 * the ldelema implementation can't deal with that.
3974 * Solution: When ldelema is used on an inline argument, create a var for
3975 * it, emit ldelema on that var, and emit the saving code below in
3976 * inline_method () if needed. */
3978 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
3979 cfg->args [i] = temp;
3980 /* This uses cfg->args [i], which is set by the preceding line */
3981 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
3982 store->cil_code = sp [0]->cil_code;
3983 sp++;
3987 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
3988 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
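/*
 * When enabled, the two helpers below restrict inlining to called/caller methods
 * whose full name starts with the prefix given in the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables; an unset or empty variable allows everything.
 */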
3990 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
3991 static gboolean
3992 check_inline_called_method_name_limit (MonoMethod *called_method)
3994 int strncmp_result;
3995 static char *limit = NULL;
3997 if (limit == NULL) {
3998 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4000 if (limit_string != NULL)
4001 limit = limit_string;
4002 else
4003 limit = (char *) "";
4006 if (limit [0] != '\0') {
4007 char *called_method_name = mono_method_full_name (called_method, TRUE);
4009 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4010 g_free (called_method_name);
4012 //return (strncmp_result <= 0);
4013 return (strncmp_result == 0);
4014 } else {
4015 return TRUE;
4018 #endif
4020 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4021 static gboolean
4022 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4024 int strncmp_result;
4025 static char *limit = NULL;
4027 if (limit == NULL) {
4028 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4029 if (limit_string != NULL) {
4030 limit = limit_string;
4031 } else {
4032 limit = (char *) "";
4036 if (limit [0] != '\0') {
4037 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4039 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4040 g_free (caller_method_name);
4042 //return (strncmp_result <= 0);
4043 return (strncmp_result == 0);
4044 } else {
4045 return TRUE;
4048 #endif
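/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current IL location. On success the generated
 * bblocks are merged into the current chain and a nonzero cost is returned; on
 * failure the newly created bblocks are discarded and 0 is returned.
 */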
4050 static int
4051 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4052 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4054 MonoInst *ins, *rvar = NULL;
4055 MonoMethodHeader *cheader;
4056 MonoBasicBlock *ebblock, *sbblock;
4057 int i, costs;
4058 MonoMethod *prev_inlined_method;
4059 MonoInst **prev_locals, **prev_args;
4060 MonoType **prev_arg_types;
4061 guint prev_real_offset;
4062 GHashTable *prev_cbb_hash;
4063 MonoBasicBlock **prev_cil_offset_to_bb;
4064 MonoBasicBlock *prev_cbb;
4065 unsigned char* prev_cil_start;
4066 guint32 prev_cil_offset_to_bb_len;
4067 MonoMethod *prev_current_method;
4068 MonoGenericContext *prev_generic_context;
4069 gboolean ret_var_set, prev_ret_var_set;
4071 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4073 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4074 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4075 return 0;
4076 #endif
4077 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4078 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4079 return 0;
4080 #endif
4082 if (cfg->verbose_level > 2)
4083 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4085 if (!cmethod->inline_info) {
4086 mono_jit_stats.inlineable_methods++;
4087 cmethod->inline_info = 1;
4089 /* allocate space to store the return value */
4090 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4091 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4094 /* allocate local variables */
4095 cheader = mono_method_get_header (cmethod);
4096 prev_locals = cfg->locals;
4097 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4098 for (i = 0; i < cheader->num_locals; ++i)
4099 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4101 /* allocate start and end blocks */
4102 /* This is needed so that, if the inline is aborted, we can clean up */
4103 NEW_BBLOCK (cfg, sbblock);
4104 sbblock->real_offset = real_offset;
4106 NEW_BBLOCK (cfg, ebblock);
4107 ebblock->block_num = cfg->num_bblocks++;
4108 ebblock->real_offset = real_offset;
4110 prev_args = cfg->args;
4111 prev_arg_types = cfg->arg_types;
4112 prev_inlined_method = cfg->inlined_method;
4113 cfg->inlined_method = cmethod;
4114 cfg->ret_var_set = FALSE;
4115 prev_real_offset = cfg->real_offset;
4116 prev_cbb_hash = cfg->cbb_hash;
4117 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4118 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4119 prev_cil_start = cfg->cil_start;
4120 prev_cbb = cfg->cbb;
4121 prev_current_method = cfg->current_method;
4122 prev_generic_context = cfg->generic_context;
4123 prev_ret_var_set = cfg->ret_var_set;
4125 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4127 ret_var_set = cfg->ret_var_set;
4129 cfg->inlined_method = prev_inlined_method;
4130 cfg->real_offset = prev_real_offset;
4131 cfg->cbb_hash = prev_cbb_hash;
4132 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4133 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4134 cfg->cil_start = prev_cil_start;
4135 cfg->locals = prev_locals;
4136 cfg->args = prev_args;
4137 cfg->arg_types = prev_arg_types;
4138 cfg->current_method = prev_current_method;
4139 cfg->generic_context = prev_generic_context;
4140 cfg->ret_var_set = prev_ret_var_set;
4142 if ((costs >= 0 && costs < 60) || inline_allways) {
4143 if (cfg->verbose_level > 2)
4144 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4146 mono_jit_stats.inlined_methods++;
4148 /* always add some code to avoid block split failures */
4149 MONO_INST_NEW (cfg, ins, OP_NOP);
4150 MONO_ADD_INS (prev_cbb, ins);
4152 prev_cbb->next_bb = sbblock;
4153 link_bblock (cfg, prev_cbb, sbblock);
4156 /* Get rid of the begin and end bblocks if possible to aid local
4157 * optimizations. */
4159 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4161 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4162 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4164 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4165 MonoBasicBlock *prev = ebblock->in_bb [0];
4166 mono_merge_basic_blocks (cfg, prev, ebblock);
4167 cfg->cbb = prev;
4168 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4169 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4170 cfg->cbb = prev_cbb;
4172 } else {
4173 cfg->cbb = ebblock;
4176 if (rvar) {
4178 /* If the inlined method contains only a throw, then the ret var is not
4179 * set, so set it to a dummy value. */
4181 if (!ret_var_set) {
4182 static double r8_0 = 0.0;
4184 switch (rvar->type) {
4185 case STACK_I4:
4186 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4187 break;
4188 case STACK_I8:
4189 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4190 break;
4191 case STACK_PTR:
4192 case STACK_MP:
4193 case STACK_OBJ:
4194 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4195 break;
4196 case STACK_R8:
4197 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4198 ins->type = STACK_R8;
4199 ins->inst_p0 = (void*)&r8_0;
4200 ins->dreg = rvar->dreg;
4201 MONO_ADD_INS (cfg->cbb, ins);
4202 break;
4203 case STACK_VTYPE:
4204 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4205 break;
4206 default:
4207 g_assert_not_reached ();
4211 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4212 *sp++ = ins;
4214 return costs + 1;
4215 } else {
4216 if (cfg->verbose_level > 2)
4217 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4218 cfg->exception_type = MONO_EXCEPTION_NONE;
4219 mono_loader_clear_error ();
4221 /* This gets rid of the newly added bblocks */
4222 cfg->cbb = prev_cbb;
4224 return 0;
4228 /* Some of these comments may well be out-of-date.
4229 * Design decisions: we do a single pass over the IL code (and we do bblock
4230 * splitting/merging in the few cases when it's required: a back jump to an IL
4231 * address that was not already seen as a bblock starting point).
4232 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4233 * Complex operations are decomposed into simpler ones right away. We need to let the
4234 * arch-specific code peek and poke inside this process somehow (except when the
4235 * optimizations can take advantage of the full semantic info of coarse opcodes).
4236 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4237 * MonoInst->opcode initially is the IL opcode or some simplification of that
4238 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4239 * opcode with a value bigger than OP_LAST.
4240 * At this point the IR can be handed over to an interpreter, a dumb code generator
4241 * or to the optimizing code generator that will translate it to SSA form.
4243 * Profile-directed optimizations:
4244 * we may compile by default with few or no optimizations and instrument the code,
4245 * or the user may indicate which methods to optimize the most, either in a config file
4246 * or through repeated runs where the compiler applies the optimizations offline to
4247 * each method and then decides whether it was worth it. */
4250 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4251 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4252 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4253 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4254 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4255 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4256 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4257 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4259 /* offset from br.s -> br like opcodes */
4260 #define BIG_BRANCH_OFFSET 13
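/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to the basic block BB, i.e.
 * no other bblock starts at that offset.
 */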
4262 static gboolean
4263 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4265 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4267 return b == NULL || b == bb;
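/*
 * get_basic_blocks:
 *
 *   Do a first pass over the IL in [START, END) creating a basic block at every
 * branch target, so the main conversion loop knows where bblocks begin; bblocks
 * containing a throw are additionally flagged as out of line. Returns 0 on
 * success; on invalid IL, sets *POS to the offending address and returns 1.
 */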
4270 static int
4271 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4273 unsigned char *ip = start;
4274 unsigned char *target;
4275 int i;
4276 guint cli_addr;
4277 MonoBasicBlock *bblock;
4278 const MonoOpcode *opcode;
4280 while (ip < end) {
4281 cli_addr = ip - start;
4282 i = mono_opcode_value ((const guint8 **)&ip, end);
4283 if (i < 0)
4284 UNVERIFIED;
4285 opcode = &mono_opcodes [i];
4286 switch (opcode->argument) {
4287 case MonoInlineNone:
4288 ip++;
4289 break;
4290 case MonoInlineString:
4291 case MonoInlineType:
4292 case MonoInlineField:
4293 case MonoInlineMethod:
4294 case MonoInlineTok:
4295 case MonoInlineSig:
4296 case MonoShortInlineR:
4297 case MonoInlineI:
4298 ip += 5;
4299 break;
4300 case MonoInlineVar:
4301 ip += 3;
4302 break;
4303 case MonoShortInlineVar:
4304 case MonoShortInlineI:
4305 ip += 2;
4306 break;
4307 case MonoShortInlineBrTarget:
4308 target = start + cli_addr + 2 + (signed char)ip [1];
4309 GET_BBLOCK (cfg, bblock, target);
4310 ip += 2;
4311 if (ip < end)
4312 GET_BBLOCK (cfg, bblock, ip);
4313 break;
4314 case MonoInlineBrTarget:
4315 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4316 GET_BBLOCK (cfg, bblock, target);
4317 ip += 5;
4318 if (ip < end)
4319 GET_BBLOCK (cfg, bblock, ip);
4320 break;
4321 case MonoInlineSwitch: {
4322 guint32 n = read32 (ip + 1);
4323 guint32 j;
4324 ip += 5;
4325 cli_addr += 5 + 4 * n;
4326 target = start + cli_addr;
4327 GET_BBLOCK (cfg, bblock, target);
4329 for (j = 0; j < n; ++j) {
4330 target = start + cli_addr + (gint32)read32 (ip);
4331 GET_BBLOCK (cfg, bblock, target);
4332 ip += 4;
4334 break;
4336 case MonoInlineR:
4337 case MonoInlineI8:
4338 ip += 9;
4339 break;
4340 default:
4341 g_assert_not_reached ();
4344 if (i == CEE_THROW) {
4345 unsigned char *bb_start = ip - 1;
4347 /* Find the start of the bblock containing the throw */
4348 bblock = NULL;
4349 while ((bb_start >= start) && !bblock) {
4350 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4351 bb_start --;
4353 if (bblock)
4354 bblock->out_of_line = 1;
4357 return 0;
4358 unverified:
4359 *pos = ip;
4360 return 1;
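/*
 * mini_get_method_allow_open:
 *
 *   Load the method referenced by TOKEN: for wrapper methods the token indexes
 * the wrapper data, otherwise it is resolved through the method's image.
 */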
4363 static inline MonoMethod *
4364 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4366 MonoMethod *method;
4368 if (m->wrapper_type != MONO_WRAPPER_NONE)
4369 return mono_method_get_wrapper_data (m, token);
4371 method = mono_get_method_full (m->klass->image, token, klass, context);
4373 return method;
4376 static inline MonoMethod *
4377 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4379 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4381 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4382 return NULL;
4384 return method;
4387 static inline MonoClass*
4388 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4390 MonoClass *klass;
4392 if (method->wrapper_type != MONO_WRAPPER_NONE)
4393 klass = mono_method_get_wrapper_data (method, token);
4394 else
4395 klass = mono_class_get_full (method->klass->image, token, context);
4396 if (klass)
4397 mono_class_init (klass);
4398 return klass;
4402 /* Returns TRUE if the JIT should abort inlining because "callee"
4403 * is influenced by security attributes. */
4405 static
4406 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4408 guint32 result;
4410 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4411 return TRUE;
4414 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4415 if (result == MONO_JIT_SECURITY_OK)
4416 return FALSE;
4418 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4419 /* Generate code to throw a SecurityException before the actual call/link */
4420 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4421 MonoInst *args [2];
4423 NEW_ICONST (cfg, args [0], 4);
4424 NEW_METHODCONST (cfg, args [1], caller);
4425 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4426 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4427 /* don't hide previous results */
4428 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4429 cfg->exception_data = result;
4430 return TRUE;
4433 return FALSE;
4436 static MonoMethod*
4437 method_access_exception (void)
4439 static MonoMethod *method = NULL;
4441 if (!method) {
4442 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4443 method = mono_class_get_method_from_name (secman->securitymanager,
4444 "MethodAccessException", 2);
4446 g_assert (method);
4447 return method;
4450 static void
4451 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4452 MonoBasicBlock *bblock, unsigned char *ip)
4454 MonoMethod *thrower = method_access_exception ();
4455 MonoInst *args [2];
4457 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4458 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4459 mono_emit_method_call (cfg, thrower, args, NULL);
4462 static MonoMethod*
4463 field_access_exception (void)
4465 static MonoMethod *method = NULL;
4467 if (!method) {
4468 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4469 method = mono_class_get_method_from_name (secman->securitymanager,
4470 "FieldAccessException", 2);
4472 g_assert (method);
4473 return method;
4476 static void
4477 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4478 MonoBasicBlock *bblock, unsigned char *ip)
4480 MonoMethod *thrower = field_access_exception ();
4481 MonoInst *args [2];
4483 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4484 EMIT_NEW_METHODCONST (cfg, args [1], field);
4485 mono_emit_method_call (cfg, thrower, args, NULL);
4489 /* Return the original method if a wrapper is specified. We can only access
4490 * the custom attributes from the original method. */
4492 static MonoMethod*
4493 get_original_method (MonoMethod *method)
4495 if (method->wrapper_type == MONO_WRAPPER_NONE)
4496 return method;
4498 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4499 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4500 return NULL;
4502 /* in other cases we need to find the original method */
4503 return mono_marshal_method_from_wrapper (method);
4506 static void
4507 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4508 MonoBasicBlock *bblock, unsigned char *ip)
4510 /* there's no restriction on accessing Transparent or SafeCritical fields, so we only check accesses to fields of Critical classes */
4511 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4512 return;
4514 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4515 caller = get_original_method (caller);
4516 if (!caller)
4517 return;
4519 /* the field is Critical! only SafeCritical and Critical callers can access it, so we throw if the caller is Transparent */
4520 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4521 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
4524 static void
4525 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4526 MonoBasicBlock *bblock, unsigned char *ip)
4528 /* there's no restriction on calling Transparent or SafeCritical code, so we only check calls to Critical methods */
4529 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4530 return;
4532 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4533 caller = get_original_method (caller);
4534 if (!caller)
4535 return;
4537 /* the callee is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4538 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4539 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4543 /* Check that the IL instructions at ip are the array initialization
4544 * sequence and return the pointer to the data and the size. */
4546 static const char*
4547 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4550 /* newarr[System.Int32]
4551 * dup
4552 * ldtoken field valuetype ...
4553 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle) */
4555 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4556 guint32 token = read32 (ip + 7);
4557 guint32 field_token = read32 (ip + 2);
4558 guint32 field_index = field_token & 0xffffff;
4559 guint32 rva;
4560 const char *data_ptr;
4561 int size = 0;
4562 MonoMethod *cmethod;
4563 MonoClass *dummy_class;
4564 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4565 int dummy_align;
4567 if (!field)
4568 return NULL;
4570 *out_field_token = field_token;
4572 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4573 if (!cmethod)
4574 return NULL;
4575 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4576 return NULL;
4577 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4578 case MONO_TYPE_BOOLEAN:
4579 case MONO_TYPE_I1:
4580 case MONO_TYPE_U1:
4581 size = 1; break;
4582 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4583 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4584 case MONO_TYPE_CHAR:
4585 case MONO_TYPE_I2:
4586 case MONO_TYPE_U2:
4587 size = 2; break;
4588 case MONO_TYPE_I4:
4589 case MONO_TYPE_U4:
4590 case MONO_TYPE_R4:
4591 size = 4; break;
4592 case MONO_TYPE_R8:
4593 #ifdef ARM_FPU_FPA
4594 return NULL; /* stupid ARM FP swapped format */
4595 #endif
4596 case MONO_TYPE_I8:
4597 case MONO_TYPE_U8:
4598 size = 8; break;
4599 #endif
4600 default:
4601 return NULL;
4603 size *= len;
4604 if (size > mono_type_size (field->type, &dummy_align))
4605 return NULL;
4606 *out_size = size;
4607 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4608 if (!method->klass->image->dynamic) {
4609 field_index = read32 (ip + 2) & 0xffffff;
4610 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4611 data_ptr = mono_image_rva_map (method->klass->image, rva);
4612 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4613 /* for aot code we do the lookup on load */
4614 if (aot && data_ptr)
4615 return GUINT_TO_POINTER (rva);
4616 } else {
4617 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4618 g_assert (!aot);
4619 data_ptr = mono_field_get_data (field);
4621 return data_ptr;
4623 return NULL;
4626 static void
4627 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4629 char *method_fname = mono_method_full_name (method, TRUE);
4630 char *method_code;
4632 if (mono_method_get_header (method)->code_size == 0)
4633 method_code = g_strdup ("method body is empty.");
4634 else
4635 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4636 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4637 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4638 g_free (method_fname);
4639 g_free (method_code);
4642 static void
4643 set_exception_object (MonoCompile *cfg, MonoException *exception)
4645 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4646 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4647 cfg->exception_ptr = exception;
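/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type variables
 * through the generic sharing context when one is active.
 */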
4650 static gboolean
4651 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4653 MonoType *type;
4655 if (cfg->generic_sharing_context)
4656 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4657 else
4658 type = &klass->byval_arg;
4659 return MONO_TYPE_IS_REFERENCE (type);
4663 /* mono_decompose_array_access_opts:
4665 * Decompose array access opcodes.
4666 * This should be in decompose.c, but it emits calls so it has to stay here until
4667 * the old JIT is gone. */
4669 void
4670 mono_decompose_array_access_opts (MonoCompile *cfg)
4672 MonoBasicBlock *bb, *first_bb;
4675 /* Unlike decompose_long_opts, this pass does not alter the CFG of the method, so it
4676 * can be executed anytime. It should be run before decompose_long */
4680 /* Create a dummy bblock and emit code into it so we can use the normal
4681 * code generation macros. */
4683 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4684 first_bb = cfg->cbb;
4686 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4687 MonoInst *ins;
4688 MonoInst *prev = NULL;
4689 MonoInst *dest;
4690 MonoInst *iargs [3];
4691 gboolean restart;
4693 if (!bb->has_array_access)
4694 continue;
4696 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4698 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4699 restart = TRUE;
4701 while (restart) {
4702 restart = FALSE;
4704 for (ins = bb->code; ins; ins = ins->next) {
4705 switch (ins->opcode) {
4706 case OP_LDLEN:
4707 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4708 G_STRUCT_OFFSET (MonoArray, max_length));
4709 MONO_ADD_INS (cfg->cbb, dest);
4710 break;
4711 case OP_BOUNDS_CHECK:
4712 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
4713 break;
4714 case OP_NEWARR:
4715 if (cfg->opt & MONO_OPT_SHARED) {
4716 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4717 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4718 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4719 iargs [2]->dreg = ins->sreg1;
4721 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4722 dest->dreg = ins->dreg;
4723 } else {
4724 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4726 g_assert (vtable);
4727 NEW_VTABLECONST (cfg, iargs [0], vtable);
4728 MONO_ADD_INS (cfg->cbb, iargs [0]);
4729 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4730 iargs [1]->dreg = ins->sreg1;
4732 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4733 dest->dreg = ins->dreg;
4735 break;
4736 case OP_STRLEN:
4737 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4738 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4739 MONO_ADD_INS (cfg->cbb, dest);
4740 break;
4741 default:
4742 break;
4745 g_assert (cfg->cbb == first_bb);
4747 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4748 /* Replace the original instruction with the new code sequence */
4750 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4751 first_bb->code = first_bb->last_ins = NULL;
4752 first_bb->in_count = first_bb->out_count = 0;
4753 cfg->cbb = first_bb;
4755 else
4756 prev = ins;
4760 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4764 typedef union {
4765 guint32 vali [2];
4766 gint64 vall;
4767 double vald;
4768 } DVal;
4770 #ifdef MONO_ARCH_SOFT_FLOAT
4773 /* mono_decompose_soft_float:
4775 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4776 * similar to long support on 32 bit platforms. 32 bit float values require special
4777 * handling when used as locals, arguments, and in calls.
4778 * One big problem with soft-float is that there are few r4 test cases in our test suite. */
4780 void
4781 mono_decompose_soft_float (MonoCompile *cfg)
4783 MonoBasicBlock *bb, *first_bb;
4786 /* This pass creates long opcodes, so it should be run before decompose_long_opts (). */
4790 /* Create a dummy bblock and emit code into it so we can use the normal
4791 * code generation macros. */
4793 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4794 first_bb = cfg->cbb;
4796 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4797 MonoInst *ins;
4798 MonoInst *prev = NULL;
4799 gboolean restart;
4801 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4803 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4804 restart = TRUE;
4806 while (restart) {
4807 restart = FALSE;
4809 for (ins = bb->code; ins; ins = ins->next) {
4810 const char *spec = INS_INFO (ins->opcode);
4812 /* Most fp operations are handled automatically by opcode emulation */
4814 switch (ins->opcode) {
4815 case OP_R8CONST: {
4816 DVal d;
4817 d.vald = *(double*)ins->inst_p0;
4818 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4819 break;
4821 case OP_R4CONST: {
4822 DVal d;
4823 /* We load the r8 value */
4824 d.vald = *(float*)ins->inst_p0;
4825 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4826 break;
4828 case OP_FMOVE:
4829 ins->opcode = OP_LMOVE;
4830 break;
4831 case OP_FGETLOW32:
4832 ins->opcode = OP_MOVE;
4833 ins->sreg1 = ins->sreg1 + 1;
4834 break;
4835 case OP_FGETHIGH32:
4836 ins->opcode = OP_MOVE;
4837 ins->sreg1 = ins->sreg1 + 2;
4838 break;
4839 case OP_SETFRET: {
4840 int reg = ins->sreg1;
4842 ins->opcode = OP_SETLRET;
4843 ins->dreg = -1;
4844 ins->sreg1 = reg + 1;
4845 ins->sreg2 = reg + 2;
4846 break;
4848 case OP_LOADR8_MEMBASE:
4849 ins->opcode = OP_LOADI8_MEMBASE;
4850 break;
4851 case OP_STORER8_MEMBASE_REG:
4852 ins->opcode = OP_STOREI8_MEMBASE_REG;
4853 break;
4854 case OP_STORER4_MEMBASE_REG: {
4855 MonoInst *iargs [2];
4856 int addr_reg;
4858 /* Arg 1 is the double value */
4859 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4860 iargs [0]->dreg = ins->sreg1;
4862 /* Arg 2 is the address to store to */
4863 addr_reg = mono_alloc_preg (cfg);
4864 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4865 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4866 restart = TRUE;
4867 break;
4869 case OP_LOADR4_MEMBASE: {
4870 MonoInst *iargs [1];
4871 MonoInst *conv;
4872 int addr_reg;
4874 addr_reg = mono_alloc_preg (cfg);
4875 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4876 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4877 conv->dreg = ins->dreg;
4878 break;
4880 case OP_FCALL:
4881 case OP_FCALL_REG:
4882 case OP_FCALL_MEMBASE: {
4883 MonoCallInst *call = (MonoCallInst*)ins;
4884 if (call->signature->ret->type == MONO_TYPE_R4) {
4885 MonoCallInst *call2;
4886 MonoInst *iargs [1];
4887 MonoInst *conv;
4889 /* Convert the call into a call returning an int */
4890 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4891 memcpy (call2, call, sizeof (MonoCallInst));
4892 switch (ins->opcode) {
4893 case OP_FCALL:
4894 call2->inst.opcode = OP_CALL;
4895 break;
4896 case OP_FCALL_REG:
4897 call2->inst.opcode = OP_CALL_REG;
4898 break;
4899 case OP_FCALL_MEMBASE:
4900 call2->inst.opcode = OP_CALL_MEMBASE;
4901 break;
4902 default:
4903 g_assert_not_reached ();
4905 call2->inst.dreg = mono_alloc_ireg (cfg);
4906 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4908 /* FIXME: Optimize this */
4910 /* Emit an r4->r8 conversion */
4911 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4912 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4913 conv->dreg = ins->dreg;
4914 } else {
4915 switch (ins->opcode) {
4916 case OP_FCALL:
4917 ins->opcode = OP_LCALL;
4918 break;
4919 case OP_FCALL_REG:
4920 ins->opcode = OP_LCALL_REG;
4921 break;
4922 case OP_FCALL_MEMBASE:
4923 ins->opcode = OP_LCALL_MEMBASE;
4924 break;
4925 default:
4926 g_assert_not_reached ();
4929 break;
4931 case OP_FCOMPARE: {
4932 MonoJitICallInfo *info;
4933 MonoInst *iargs [2];
4934 MonoInst *call, *cmp, *br;
4936 /* Convert fcompare+fbcc to icall+icompare+beq */
4938 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4939 g_assert (info);
4941 /* Create dummy MonoInst's for the arguments */
4942 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4943 iargs [0]->dreg = ins->sreg1;
4944 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4945 iargs [1]->dreg = ins->sreg2;
4947 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4949 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4950 cmp->sreg1 = call->dreg;
4951 cmp->inst_imm = 0;
4952 MONO_ADD_INS (cfg->cbb, cmp);
4954 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4955 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4956 br->inst_true_bb = ins->next->inst_true_bb;
4957 br->inst_false_bb = ins->next->inst_false_bb;
4958 MONO_ADD_INS (cfg->cbb, br);
4960 /* The call sequence might include fp ins */
4961 restart = TRUE;
4963 /* Skip fbcc or fccc */
4964 NULLIFY_INS (ins->next);
4965 break;
4967 case OP_FCEQ:
4968 case OP_FCGT:
4969 case OP_FCGT_UN:
4970 case OP_FCLT:
4971 case OP_FCLT_UN: {
4972 MonoJitICallInfo *info;
4973 MonoInst *iargs [2];
4974 MonoInst *call;
4976 /* Convert fccc to icall+icompare+iceq */
4978 info = mono_find_jit_opcode_emulation (ins->opcode);
4979 g_assert (info);
4981 /* Create dummy MonoInst's for the arguments */
4982 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4983 iargs [0]->dreg = ins->sreg1;
4984 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4985 iargs [1]->dreg = ins->sreg2;
4987 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4989 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
4990 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
4992 /* The call sequence might include fp ins */
4993 restart = TRUE;
4994 break;
4996 case OP_CKFINITE: {
4997 MonoInst *iargs [2];
4998 MonoInst *call, *cmp;
5000 /* Convert to icall+icompare+cond_exc+move */
5002 /* Create dummy MonoInst's for the arguments */
5003 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5004 iargs [0]->dreg = ins->sreg1;
5006 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5008 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5009 cmp->sreg1 = call->dreg;
5010 cmp->inst_imm = 1;
5011 MONO_ADD_INS (cfg->cbb, cmp);
5013 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5015 /* Do the assignment if the value is finite */
5016 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
5018 restart = TRUE;
5019 break;
5021 default:
5022 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5023 mono_print_ins (ins);
5024 g_assert_not_reached ();
5026 break;
5029 g_assert (cfg->cbb == first_bb);
5031 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5032 /* Replace the original instruction with the new code sequence */
5034 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5035 first_bb->code = first_bb->last_ins = NULL;
5036 first_bb->in_count = first_bb->out_count = 0;
5037 cfg->cbb = first_bb;
5039 else
5040 prev = ins;
5044 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
5047 mono_decompose_long_opts (cfg);
5050 #endif
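/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N, folding the store away when the value is
 * a constant load that is still the last instruction of the current bblock, by
 * retargeting the constant's destination register to the local.
 */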
5052 static void
5053 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5055 MonoInst *ins;
5056 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5057 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5058 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5059 /* Optimize reg-reg moves away */
5061 /* Can't optimize other opcodes, since sp[0] might point to
5062 * the last ins of a decomposed opcode. */
5064 sp [0]->dreg = (cfg)->locals [n]->dreg;
5065 } else {
5066 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5071 /* ldloca inhibits many optimizations, so try to get rid of it in common
5072 * cases. */
5074 static inline unsigned char *
5075 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5077 int local, token;
5078 MonoClass *klass;
5080 if (size == 1) {
5081 local = ip [1];
5082 ip += 2;
5083 } else {
5084 local = read16 (ip + 2);
5085 ip += 4;
5088 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5089 gboolean skip = FALSE;
5091 /* From the INITOBJ case */
5092 token = read32 (ip + 2);
5093 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5094 CHECK_TYPELOAD (klass);
5095 if (generic_class_is_reference_type (cfg, klass)) {
5096 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5097 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5098 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5099 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5100 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5101 } else {
5102 skip = TRUE;
5105 if (!skip)
5106 return ip + 6;
5108 load_error:
5109 return NULL;
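/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or one of its subclasses.
 */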
5112 static gboolean
5113 is_exception_class (MonoClass *class)
5115 while (class) {
5116 if (class == mono_defaults.exception_class)
5117 return TRUE;
5118 class = class->parent;
5120 return FALSE;
5124 /* mono_method_to_ir:
5126 * Translate the .NET IL into linear IR. */
5129 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5130 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5131 guint inline_offset, gboolean is_virtual_call)
5133 MonoInst *ins, **sp, **stack_start;
5134 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5135 MonoMethod *cmethod, *method_definition;
5136 MonoInst **arg_array;
5137 MonoMethodHeader *header;
5138 MonoImage *image;
5139 guint32 token, ins_flag;
5140 MonoClass *klass;
5141 MonoClass *constrained_call = NULL;
5142 unsigned char *ip, *end, *target, *err_pos;
5143 static double r8_0 = 0.0;
5144 MonoMethodSignature *sig;
5145 MonoGenericContext *generic_context = NULL;
5146 MonoGenericContainer *generic_container = NULL;
5147 MonoType **param_types;
5148 int i, n, start_new_bblock, dreg;
5149 int num_calls = 0, inline_costs = 0;
5150 int breakpoint_id = 0;
5151 guint num_args;
5152 MonoBoolean security, pinvoke;
5153 MonoSecurityManager* secman = NULL;
5154 MonoDeclSecurityActions actions;
5155 GSList *class_inits = NULL;
5156 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5157 int context_used;
5159 /* serialization and xdomain stuff may need access to private fields and methods */
5160 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5161 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5162 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5163 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5164 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5165 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5167 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5169 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5170 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5171 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5172 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5174 image = method->klass->image;
5175 header = mono_method_get_header (method);
5176 generic_container = mono_method_get_generic_container (method);
5177 sig = mono_method_signature (method);
5178 num_args = sig->hasthis + sig->param_count;
5179 ip = (unsigned char*)header->code;
5180 cfg->cil_start = ip;
5181 end = ip + header->code_size;
5182 mono_jit_stats.cil_code_size += header->code_size;
5184 method_definition = method;
5185 while (method_definition->is_inflated) {
5186 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5187 method_definition = imethod->declaring;
5190 /* SkipVerification is not allowed if core-clr is enabled */
5191 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5192 dont_verify = TRUE;
5193 dont_verify_stloc = TRUE;
5196 if (!dont_verify && mini_method_verify (cfg, method_definition))
5197 goto exception_exit;
5199 if (mono_debug_using_mono_debugger ())
5200 cfg->keep_cil_nops = TRUE;
5202 if (sig->is_inflated)
5203 generic_context = mono_method_get_context (method);
5204 else if (generic_container)
5205 generic_context = &generic_container->context;
5206 cfg->generic_context = generic_context;
5208 if (!cfg->generic_sharing_context)
5209 g_assert (!sig->has_type_parameters);
5211 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5212 g_assert (method->is_inflated);
5213 g_assert (mono_method_get_context (method)->method_inst);
5215 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5216 g_assert (sig->generic_param_count);
5218 if (cfg->method == method) {
5219 cfg->real_offset = 0;
5220 } else {
5221 cfg->real_offset = inline_offset;
5224 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5225 cfg->cil_offset_to_bb_len = header->code_size;
5227 cfg->current_method = method;
5229 if (cfg->verbose_level > 2)
5230 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5232 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5233 if (sig->hasthis)
5234 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5235 for (n = 0; n < sig->param_count; ++n)
5236 param_types [n + sig->hasthis] = sig->params [n];
5237 cfg->arg_types = param_types;
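/*
 * Resulting layout for a hypothetical instance method
 * `int Foo (int a, int b)' (illustration only):
 *
 *     param_types [0] = Foo's declaring class (its this_arg type for valuetypes)
 *     param_types [1] = int32  (a)
 *     param_types [2] = int32  (b)
 *
 * i.e. every explicit parameter is shifted up by sig->hasthis.
 */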
5239 dont_inline = g_list_prepend (dont_inline, method);
5240 if (cfg->method == method) {
5242 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5243 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5245 /* ENTRY BLOCK */
5246 NEW_BBLOCK (cfg, start_bblock);
5247 cfg->bb_entry = start_bblock;
5248 start_bblock->cil_code = NULL;
5249 start_bblock->cil_length = 0;
5251 /* EXIT BLOCK */
5252 NEW_BBLOCK (cfg, end_bblock);
5253 cfg->bb_exit = end_bblock;
5254 end_bblock->cil_code = NULL;
5255 end_bblock->cil_length = 0;
5256 g_assert (cfg->num_bblocks == 2);
5258 arg_array = cfg->args;
5260 if (header->num_clauses) {
5261 cfg->spvars = g_hash_table_new (NULL, NULL);
5262 cfg->exvars = g_hash_table_new (NULL, NULL);
5264 /* handle exception clauses */
5265 for (i = 0; i < header->num_clauses; ++i) {
5266 MonoBasicBlock *try_bb;
5267 MonoExceptionClause *clause = &header->clauses [i];
5268 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5269 try_bb->real_offset = clause->try_offset;
5270 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5271 tblock->real_offset = clause->handler_offset;
5272 tblock->flags |= BB_EXCEPTION_HANDLER;
5274 link_bblock (cfg, try_bb, tblock);
5276 if (*(ip + clause->handler_offset) == CEE_POP)
5277 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5279 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5280 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5281 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5282 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5283 MONO_ADD_INS (tblock, ins);
5285 /* todo: is a fault block unsafe to optimize? */
5286 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5287 tblock->flags |= BB_EXCEPTION_UNSAFE;
5291 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5292 while (p < end) {
5293 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5294 }*/
5295 /* catch and filter blocks get the exception object on the stack */
5296 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5297 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5298 MonoInst *dummy_use;
5300 /* mostly like handle_stack_args (), but just sets the input args */
5301 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5302 tblock->in_scount = 1;
5303 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5304 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5307 * Add a dummy use for the exvar so its liveness info will be
5308 * correct.
5310 cfg->cbb = tblock;
5311 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5313 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5314 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5315 tblock->flags |= BB_EXCEPTION_HANDLER;
5316 tblock->real_offset = clause->data.filter_offset;
5317 tblock->in_scount = 1;
5318 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5319 /* The filter block shares the exvar with the handler block */
5320 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5321 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5322 MONO_ADD_INS (tblock, ins);
5326 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5327 clause->data.catch_class &&
5328 cfg->generic_sharing_context &&
5329 mono_class_check_context_used (clause->data.catch_class)) {
5331 * In shared generic code with catch
5332 * clauses containing type variables
5333 * the exception handling code has to
5334 * be able to get to the rgctx.
5335 * Therefore we have to make sure that
5336 * the vtable/mrgctx argument (for
5337 * static or generic methods) or the
5338 * "this" argument (for non-static
5339 * methods) are live.
5341 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5342 mini_method_get_context (method)->method_inst ||
5343 method->klass->valuetype) {
5344 mono_get_vtable_var (cfg);
5345 } else {
5346 MonoInst *dummy_use;
5348 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5352 } else {
5353 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5354 cfg->cbb = start_bblock;
5355 cfg->args = arg_array;
5356 mono_save_args (cfg, sig, inline_args);
5359 /* FIRST CODE BLOCK */
5360 NEW_BBLOCK (cfg, bblock);
5361 bblock->cil_code = ip;
5362 cfg->cbb = bblock;
5363 cfg->ip = ip;
5365 ADD_BBLOCK (cfg, bblock);
5367 if (cfg->method == method) {
5368 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5369 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5370 MONO_INST_NEW (cfg, ins, OP_BREAK);
5371 MONO_ADD_INS (bblock, ins);
5375 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5376 secman = mono_security_manager_get_methods ();
5378 security = (secman && mono_method_has_declsec (method));
5379 /* at this point having security doesn't mean we have any code to generate */
5380 if (security && (cfg->method == method)) {
5381 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5382 * And we do not want to enter the next section (with allocation) if we
5383 * have nothing to generate */
5384 security = mono_declsec_get_demands (method, &actions);
5387 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5388 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5389 if (pinvoke) {
5390 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5391 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5392 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5394 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5395 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5396 pinvoke = FALSE;
5398 if (custom)
5399 mono_custom_attrs_free (custom);
5401 if (pinvoke) {
5402 custom = mono_custom_attrs_from_class (wrapped->klass);
5403 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5404 pinvoke = FALSE;
5406 if (custom)
5407 mono_custom_attrs_free (custom);
5409 } else {
5410 /* not a P/Invoke after all */
5411 pinvoke = FALSE;
5415 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5416 /* we use a separate basic block for the initialization code */
5417 NEW_BBLOCK (cfg, init_localsbb);
5418 cfg->bb_init = init_localsbb;
5419 init_localsbb->real_offset = cfg->real_offset;
5420 start_bblock->next_bb = init_localsbb;
5421 init_localsbb->next_bb = bblock;
5422 link_bblock (cfg, start_bblock, init_localsbb);
5423 link_bblock (cfg, init_localsbb, bblock);
5425 cfg->cbb = init_localsbb;
5426 } else {
5427 start_bblock->next_bb = bblock;
5428 link_bblock (cfg, start_bblock, bblock);
5431 /* at this point we know that, if security is TRUE, some code needs to be generated */
5432 if (security && (cfg->method == method)) {
5433 MonoInst *args [2];
5435 mono_jit_stats.cas_demand_generation++;
5437 if (actions.demand.blob) {
5438 /* Add code for SecurityAction.Demand */
5439 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5440 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5441 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5442 mono_emit_method_call (cfg, secman->demand, args, NULL);
5444 if (actions.noncasdemand.blob) {
5445 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5446 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5447 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5448 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5449 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5450 mono_emit_method_call (cfg, secman->demand, args, NULL);
5452 if (actions.demandchoice.blob) {
6453 /* New in 2.0: the demand succeeds if at least one of the permissions is granted (not necessarily all of them) */
5454 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5455 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5456 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5457 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6461 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5462 if (pinvoke) {
5463 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5466 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5467 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5468 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5469 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5470 if (!(method->klass && method->klass->image &&
5471 mono_security_core_clr_is_platform_image (method->klass->image))) {
5472 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5478 if (header->code_size == 0)
5479 UNVERIFIED;
5481 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5482 ip = err_pos;
5483 UNVERIFIED;
5486 if (cfg->method == method)
5487 mono_debug_init_method (cfg, bblock, breakpoint_id);
5489 for (n = 0; n < header->num_locals; ++n) {
5490 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5491 UNVERIFIED;
5493 class_inits = NULL;
5495 /* We force the vtable variable here for all shared methods
5496 for the possibility that they might show up in a stack
5497 trace where their exact instantiation is needed. */
5498 if (cfg->generic_sharing_context && method == cfg->method) {
5499 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5500 mini_method_get_context (method)->method_inst ||
5501 method->klass->valuetype) {
5502 mono_get_vtable_var (cfg);
5503 } else {
5504 /* FIXME: Is there a better way to do this?
5505 We need the variable live for the duration
5506 of the whole method. */
5507 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5511 /* add a check for this != NULL to inlined methods */
5512 if (is_virtual_call) {
5513 MonoInst *arg_ins;
5515 NEW_ARGLOAD (cfg, arg_ins, 0);
5516 MONO_ADD_INS (cfg->cbb, arg_ins);
5517 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5518 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5519 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5522 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5523 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5525 ins_flag = 0;
5526 start_new_bblock = 0;
5527 cfg->cbb = bblock;
5528 while (ip < end) {
5530 if (cfg->method == method)
5531 cfg->real_offset = ip - header->code;
5532 else
5533 cfg->real_offset = inline_offset;
5534 cfg->ip = ip;
5536 context_used = 0;
5538 if (start_new_bblock) {
5539 bblock->cil_length = ip - bblock->cil_code;
5540 if (start_new_bblock == 2) {
5541 g_assert (ip == tblock->cil_code);
5542 } else {
5543 GET_BBLOCK (cfg, tblock, ip);
5545 bblock->next_bb = tblock;
5546 bblock = tblock;
5547 cfg->cbb = bblock;
5548 start_new_bblock = 0;
5549 for (i = 0; i < bblock->in_scount; ++i) {
5550 if (cfg->verbose_level > 3)
5551 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5552 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5553 *sp++ = ins;
5555 if (class_inits)
5556 g_slist_free (class_inits);
5557 class_inits = NULL;
5558 } else {
5559 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5560 link_bblock (cfg, bblock, tblock);
5561 if (sp != stack_start) {
5562 handle_stack_args (cfg, stack_start, sp - stack_start);
5563 sp = stack_start;
5564 CHECK_UNVERIFIABLE (cfg);
5566 bblock->next_bb = tblock;
5567 bblock = tblock;
5568 cfg->cbb = bblock;
5569 for (i = 0; i < bblock->in_scount; ++i) {
5570 if (cfg->verbose_level > 3)
5571 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5572 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5573 *sp++ = ins;
5575 g_slist_free (class_inits);
5576 class_inits = NULL;
5580 bblock->real_offset = cfg->real_offset;
5582 if ((cfg->method == method) && cfg->coverage_info) {
5583 guint32 cil_offset = ip - header->code;
5584 cfg->coverage_info->data [cil_offset].cil_code = ip;
5586 /* TODO: Use an increment here */
5587 #if defined(__i386__)
5588 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5589 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5590 ins->inst_imm = 1;
5591 MONO_ADD_INS (cfg->cbb, ins);
5592 #else
5593 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5594 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5595 #endif
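/*
 * Either arm above amounts to the C statement
 *
 *     cfg->coverage_info->data [cil_offset].count = 1;
 *
 * executed whenever this IL offset is reached; the i386 arm simply
 * folds it into a single OP_STORE_MEM_IMM instead of a pconst plus
 * store-membase pair (sketch, not emitted literally).
 */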
5598 if (cfg->verbose_level > 3)
5599 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5601 switch (*ip) {
5602 case CEE_NOP:
5603 if (cfg->keep_cil_nops)
5604 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5605 else
5606 MONO_INST_NEW (cfg, ins, OP_NOP);
5607 ip++;
5608 MONO_ADD_INS (bblock, ins);
5609 break;
5610 case CEE_BREAK:
5611 MONO_INST_NEW (cfg, ins, OP_BREAK);
5612 ip++;
5613 MONO_ADD_INS (bblock, ins);
5614 break;
5615 case CEE_LDARG_0:
5616 case CEE_LDARG_1:
5617 case CEE_LDARG_2:
5618 case CEE_LDARG_3:
5619 CHECK_STACK_OVF (1);
5620 n = (*ip)-CEE_LDARG_0;
5621 CHECK_ARG (n);
5622 EMIT_NEW_ARGLOAD (cfg, ins, n);
5623 ip++;
5624 *sp++ = ins;
5625 break;
5626 case CEE_LDLOC_0:
5627 case CEE_LDLOC_1:
5628 case CEE_LDLOC_2:
5629 case CEE_LDLOC_3:
5630 CHECK_STACK_OVF (1);
5631 n = (*ip)-CEE_LDLOC_0;
5632 CHECK_LOCAL (n);
5633 EMIT_NEW_LOCLOAD (cfg, ins, n);
5634 ip++;
5635 *sp++ = ins;
5636 break;
5637 case CEE_STLOC_0:
5638 case CEE_STLOC_1:
5639 case CEE_STLOC_2:
5640 case CEE_STLOC_3: {
5641 CHECK_STACK (1);
5642 n = (*ip)-CEE_STLOC_0;
5643 CHECK_LOCAL (n);
5644 --sp;
5645 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5646 UNVERIFIED;
5647 emit_stloc_ir (cfg, sp, header, n);
5648 ++ip;
5649 inline_costs += 1;
5650 break;
5652 case CEE_LDARG_S:
5653 CHECK_OPSIZE (2);
5654 CHECK_STACK_OVF (1);
5655 n = ip [1];
5656 CHECK_ARG (n);
5657 EMIT_NEW_ARGLOAD (cfg, ins, n);
5658 *sp++ = ins;
5659 ip += 2;
5660 break;
5661 case CEE_LDARGA_S:
5662 CHECK_OPSIZE (2);
5663 CHECK_STACK_OVF (1);
5664 n = ip [1];
5665 CHECK_ARG (n);
5666 NEW_ARGLOADA (cfg, ins, n);
5667 MONO_ADD_INS (cfg->cbb, ins);
5668 *sp++ = ins;
5669 ip += 2;
5670 break;
5671 case CEE_STARG_S:
5672 CHECK_OPSIZE (2);
5673 CHECK_STACK (1);
5674 --sp;
5675 n = ip [1];
5676 CHECK_ARG (n);
5677 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5678 UNVERIFIED;
5679 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5680 ip += 2;
5681 break;
5682 case CEE_LDLOC_S:
5683 CHECK_OPSIZE (2);
5684 CHECK_STACK_OVF (1);
5685 n = ip [1];
5686 CHECK_LOCAL (n);
5687 EMIT_NEW_LOCLOAD (cfg, ins, n);
5688 *sp++ = ins;
5689 ip += 2;
5690 break;
5691 case CEE_LDLOCA_S: {
5692 unsigned char *tmp_ip;
5693 CHECK_OPSIZE (2);
5694 CHECK_STACK_OVF (1);
5695 CHECK_LOCAL (ip [1]);
5697 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5698 ip = tmp_ip;
5699 inline_costs += 1;
5700 break;
5703 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5704 *sp++ = ins;
5705 ip += 2;
5706 break;
5708 case CEE_STLOC_S:
5709 CHECK_OPSIZE (2);
5710 CHECK_STACK (1);
5711 --sp;
5712 CHECK_LOCAL (ip [1]);
5713 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5714 UNVERIFIED;
5715 emit_stloc_ir (cfg, sp, header, ip [1]);
5716 ip += 2;
5717 inline_costs += 1;
5718 break;
5719 case CEE_LDNULL:
5720 CHECK_STACK_OVF (1);
5721 EMIT_NEW_PCONST (cfg, ins, NULL);
5722 ins->type = STACK_OBJ;
5723 ++ip;
5724 *sp++ = ins;
5725 break;
5726 case CEE_LDC_I4_M1:
5727 CHECK_STACK_OVF (1);
5728 EMIT_NEW_ICONST (cfg, ins, -1);
5729 ++ip;
5730 *sp++ = ins;
5731 break;
5732 case CEE_LDC_I4_0:
5733 case CEE_LDC_I4_1:
5734 case CEE_LDC_I4_2:
5735 case CEE_LDC_I4_3:
5736 case CEE_LDC_I4_4:
5737 case CEE_LDC_I4_5:
5738 case CEE_LDC_I4_6:
5739 case CEE_LDC_I4_7:
5740 case CEE_LDC_I4_8:
5741 CHECK_STACK_OVF (1);
5742 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5743 ++ip;
5744 *sp++ = ins;
5745 break;
5746 case CEE_LDC_I4_S:
5747 CHECK_OPSIZE (2);
5748 CHECK_STACK_OVF (1);
5749 ++ip;
5750 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5751 ++ip;
5752 *sp++ = ins;
5753 break;
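/*
 * Note the `signed char' cast above: the one-byte operand of
 * CEE_LDC_I4_S is sign-extended, so e.g. an operand byte of 0xff
 * produces the 32-bit constant -1 (illustrative example).
 */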
5754 case CEE_LDC_I4:
5755 CHECK_OPSIZE (5);
5756 CHECK_STACK_OVF (1);
5757 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5758 ip += 5;
5759 *sp++ = ins;
5760 break;
5761 case CEE_LDC_I8:
5762 CHECK_OPSIZE (9);
5763 CHECK_STACK_OVF (1);
5764 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5765 ins->type = STACK_I8;
5766 ins->dreg = alloc_dreg (cfg, STACK_I8);
5767 ++ip;
5768 ins->inst_l = (gint64)read64 (ip);
5769 MONO_ADD_INS (bblock, ins);
5770 ip += 8;
5771 *sp++ = ins;
5772 break;
5773 case CEE_LDC_R4: {
5774 float *f;
5775 /* FIXME: we should really allocate this only late in the compilation process */
5776 f = mono_domain_alloc (cfg->domain, sizeof (float));
5777 CHECK_OPSIZE (5);
5778 CHECK_STACK_OVF (1);
5779 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5780 ins->type = STACK_R8;
5781 ins->dreg = alloc_dreg (cfg, STACK_R8);
5782 ++ip;
5783 readr4 (ip, f);
5784 ins->inst_p0 = f;
5785 MONO_ADD_INS (bblock, ins);
5787 ip += 4;
5788 *sp++ = ins;
5789 break;
5791 case CEE_LDC_R8: {
5792 double *d;
5793 /* FIXME: we should really allocate this only late in the compilation process */
5794 d = mono_domain_alloc (cfg->domain, sizeof (double));
5795 CHECK_OPSIZE (9);
5796 CHECK_STACK_OVF (1);
5797 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5798 ins->type = STACK_R8;
5799 ins->dreg = alloc_dreg (cfg, STACK_R8);
5800 ++ip;
5801 readr8 (ip, d);
5802 ins->inst_p0 = d;
5803 MONO_ADD_INS (bblock, ins);
5805 ip += 8;
5806 *sp++ = ins;
5807 break;
5809 case CEE_DUP: {
5810 MonoInst *temp, *store;
5811 CHECK_STACK (1);
5812 CHECK_STACK_OVF (1);
5813 sp--;
5814 ins = *sp;
5816 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5817 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5819 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5820 *sp++ = ins;
5822 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5823 *sp++ = ins;
5825 ++ip;
5826 inline_costs += 2;
5827 break;
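/*
 * i.e. `dup' is lowered into one temporary plus two loads, roughly:
 *
 *     temp = pop ();     // EMIT_NEW_TEMPSTORE
 *     push (temp);       // EMIT_NEW_TEMPLOAD
 *     push (temp);       // EMIT_NEW_TEMPLOAD
 *
 * (pseudocode sketch of the sequence emitted above)
 */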
5829 case CEE_POP:
5830 CHECK_STACK (1);
5831 ip++;
5832 --sp;
5834 #ifdef __i386__
5835 if (sp [0]->type == STACK_R8)
5836 /* we need to pop the value from the x86 FP stack */
5837 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5838 #endif
5839 break;
5840 case CEE_JMP: {
5841 MonoCallInst *call;
5843 INLINE_FAILURE;
5845 CHECK_OPSIZE (5);
5846 if (stack_start != sp)
5847 UNVERIFIED;
5848 token = read32 (ip + 1);
5849 /* FIXME: check the signature matches */
5850 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5852 if (!cmethod)
5853 goto load_error;
5855 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5856 GENERIC_SHARING_FAILURE (CEE_JMP);
5858 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5859 CHECK_CFG_EXCEPTION;
5861 #ifdef __x86_64__
5863 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5864 int i, n;
5866 /* Handle tail calls similarly to calls */
5867 n = fsig->param_count + fsig->hasthis;
5869 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5870 call->method = cmethod;
5871 call->tail_call = TRUE;
5872 call->signature = mono_method_signature (cmethod);
5873 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5874 call->inst.inst_p0 = cmethod;
5875 for (i = 0; i < n; ++i)
5876 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5878 mono_arch_emit_call (cfg, call);
5879 MONO_ADD_INS (bblock, (MonoInst*)call);
5881 #else
5882 for (i = 0; i < num_args; ++i)
5883 /* Prevent arguments from being optimized away */
5884 arg_array [i]->flags |= MONO_INST_VOLATILE;
5886 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5887 ins = (MonoInst*)call;
5888 ins->inst_p0 = cmethod;
5889 MONO_ADD_INS (bblock, ins);
5890 #endif
5892 ip += 5;
5893 start_new_bblock = 1;
5894 break;
5896 case CEE_CALLI:
5897 case CEE_CALL:
5898 case CEE_CALLVIRT: {
5899 MonoInst *addr = NULL;
5900 MonoMethodSignature *fsig = NULL;
5901 int array_rank = 0;
5902 int virtual = *ip == CEE_CALLVIRT;
5903 int calli = *ip == CEE_CALLI;
5904 gboolean pass_imt_from_rgctx = FALSE;
5905 MonoInst *imt_arg = NULL;
5906 gboolean pass_vtable = FALSE;
5907 gboolean pass_mrgctx = FALSE;
5908 MonoInst *vtable_arg = NULL;
5909 gboolean check_this = FALSE;
5911 CHECK_OPSIZE (5);
5912 token = read32 (ip + 1);
5914 if (calli) {
5915 cmethod = NULL;
5916 CHECK_STACK (1);
5917 --sp;
5918 addr = *sp;
5919 if (method->wrapper_type != MONO_WRAPPER_NONE)
5920 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5921 else
5922 fsig = mono_metadata_parse_signature (image, token);
5924 n = fsig->param_count + fsig->hasthis;
5925 } else {
5926 MonoMethod *cil_method;
5928 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5929 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5930 cil_method = cmethod;
5931 } else if (constrained_call) {
5932 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
5934 * This is needed since get_method_constrained can't find
5935 * the method in klass representing a type var.
5936 * The type var is guaranteed to be a reference type in this
5937 * case.
5939 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5940 cil_method = cmethod;
5941 g_assert (!cmethod->klass->valuetype);
5942 } else {
5943 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5945 } else {
5946 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5947 cil_method = cmethod;
5950 if (!cmethod)
5951 goto load_error;
5952 if (!dont_verify && !cfg->skip_visibility) {
5953 MonoMethod *target_method = cil_method;
5954 if (method->is_inflated) {
5955 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5957 if (!mono_method_can_access_method (method_definition, target_method) &&
5958 !mono_method_can_access_method (method, cil_method))
5959 METHOD_ACCESS_FAILURE;
5962 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5963 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5965 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5966 /* MS.NET seems to silently convert this to a callvirt */
5967 virtual = 1;
5969 if (!cmethod->klass->inited)
5970 if (!mono_class_init (cmethod->klass))
5971 goto load_error;
5973 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5974 mini_class_is_system_array (cmethod->klass)) {
5975 array_rank = cmethod->klass->rank;
5976 fsig = mono_method_signature (cmethod);
5977 } else {
5978 if (mono_method_signature (cmethod)->pinvoke) {
5979 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5980 check_for_pending_exc, FALSE);
5981 fsig = mono_method_signature (wrapper);
5982 } else if (constrained_call) {
5983 fsig = mono_method_signature (cmethod);
5984 } else {
5985 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5989 mono_save_token_info (cfg, image, token, cil_method);
5991 n = fsig->param_count + fsig->hasthis;
5993 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5994 if (check_linkdemand (cfg, method, cmethod))
5995 INLINE_FAILURE;
5996 CHECK_CFG_EXCEPTION;
5999 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6000 g_assert_not_reached ();
6003 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6004 UNVERIFIED;
6006 if (!cfg->generic_sharing_context && cmethod)
6007 g_assert (!mono_method_check_context_used (cmethod));
6009 CHECK_STACK (n);
6011 //g_assert (!virtual || fsig->hasthis);
6013 sp -= n;
6015 if (constrained_call) {
6017 * We have the `constrained.' prefix opcode.
6019 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6020 int dreg;
6023 * The type parameter is instantiated as a valuetype,
6024 * but that type doesn't override the method we're
6025 * calling, so we need to box `this'.
6027 dreg = alloc_dreg (cfg, STACK_VTYPE);
6028 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6029 ins->klass = constrained_call;
6030 sp [0] = handle_box (cfg, ins, constrained_call);
6031 } else if (!constrained_call->valuetype) {
6032 int dreg = alloc_preg (cfg);
6035 * The type parameter is instantiated as a reference
6036 * type. We have a managed pointer on the stack, so
6037 * we need to dereference it here.
6039 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6040 ins->type = STACK_OBJ;
6041 sp [0] = ins;
6042 } else if (cmethod->klass->valuetype)
6043 virtual = 0;
6044 constrained_call = NULL;
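/*
 * Summary of the `constrained.' cases handled above (sketch):
 *
 *     valuetype that does not override the callee -> load the value
 *                                                    and box `this'
 *     reference type                              -> dereference the
 *                                                    managed pointer
 *     valuetype that implements the callee        -> keep the address,
 *                                                    call non-virtually
 */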
6047 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6048 UNVERIFIED;
6051 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6052 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6053 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6054 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6055 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6058 * Pass vtable iff target method might
6059 * be shared, which means that sharing
6060 * is enabled for its class and its
6061 * context is sharable (and it's not a
6062 * generic method).
6064 if (sharing_enabled && context_sharable &&
6065 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6066 pass_vtable = TRUE;
6069 if (cmethod && mini_method_get_context (cmethod) &&
6070 mini_method_get_context (cmethod)->method_inst) {
6071 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6072 MonoGenericContext *context = mini_method_get_context (cmethod);
6073 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6075 g_assert (!pass_vtable);
6077 if (sharing_enabled && context_sharable)
6078 pass_mrgctx = TRUE;
6081 if (cfg->generic_sharing_context && cmethod) {
6082 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6084 context_used = mono_method_check_context_used (cmethod);
6086 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6087 /* Generic method interface
6088 calls are resolved via a
6089 helper function and don't
6090 need an imt. */
6091 if (!cmethod_context || !cmethod_context->method_inst)
6092 pass_imt_from_rgctx = TRUE;
6096 * If a shared method calls another
6097 * shared method then the caller must
6098 * have a generic sharing context
6099 * because the magic trampoline
6100 * requires it. FIXME: We shouldn't
6101 * have to force the vtable/mrgctx
6102 * variable here. Instead there
6103 * should be a flag in the cfg to
6104 * request a generic sharing context.
6106 if (context_used &&
6107 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6108 mono_get_vtable_var (cfg);
6111 if (pass_vtable) {
6112 if (context_used) {
6113 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6114 } else {
6115 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6117 CHECK_TYPELOAD (cmethod->klass);
6118 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6122 if (pass_mrgctx) {
6123 g_assert (!vtable_arg);
6125 if (context_used) {
6126 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6127 } else {
6128 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6131 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6132 MONO_METHOD_IS_FINAL (cmethod)) {
6133 if (virtual)
6134 check_this = TRUE;
6135 virtual = 0;
6139 if (pass_imt_from_rgctx) {
6140 g_assert (!pass_vtable);
6141 g_assert (cmethod);
6143 imt_arg = emit_get_rgctx_method (cfg, context_used,
6144 cmethod, MONO_RGCTX_INFO_METHOD);
6147 if (check_this) {
6148 MonoInst *check;
6150 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6151 check->sreg1 = sp [0]->dreg;
6152 MONO_ADD_INS (cfg->cbb, check);
6155 /* Calling virtual generic methods */
6156 if (cmethod && virtual &&
6157 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6158 !(MONO_METHOD_IS_FINAL (cmethod) &&
6159 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6160 mono_method_signature (cmethod)->generic_param_count) {
6161 MonoInst *this_temp, *this_arg_temp, *store;
6162 MonoInst *iargs [4];
6164 g_assert (mono_method_signature (cmethod)->is_inflated);
6166 /* Prevent inlining of methods that contain indirect calls */
6167 INLINE_FAILURE;
6169 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6170 if (cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6171 g_assert (!imt_arg);
6172 if (context_used) {
6173 imt_arg = emit_get_rgctx_method (cfg, context_used,
6174 cmethod, MONO_RGCTX_INFO_METHOD);
6176 } else {
6177 g_assert (cmethod->is_inflated);
6178 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6180 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6181 } else
6182 #endif
6184 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6185 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6186 MONO_ADD_INS (bblock, store);
6188 /* FIXME: This should be a managed pointer */
6189 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6191 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6192 if (context_used) {
6193 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6194 cmethod, MONO_RGCTX_INFO_METHOD);
6195 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6196 addr = mono_emit_jit_icall (cfg,
6197 mono_helper_compile_generic_method, iargs);
6198 } else {
6199 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6200 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6201 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6204 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6206 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6209 if (!MONO_TYPE_IS_VOID (fsig->ret))
6210 *sp++ = ins;
6212 ip += 5;
6213 ins_flag = 0;
6214 break;
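/*
 * Without generalized IMT thunks the sequence above is, roughly:
 *
 *     addr = mono_helper_compile_generic_method (this, cmethod, &new_this);
 *     calli fsig (new_this, args...);
 *
 * i.e. a JIT icall resolves the concrete method (filling in the
 * possibly adjusted `this' through the third argument) and the
 * result is reached via an indirect call (pseudocode sketch of the
 * icall + mono_emit_calli pair emitted above).
 */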
6217 /* Tail prefix */
6218 /* FIXME: runtime generic context pointer for jumps? */
6219 /* FIXME: handle this for generic sharing eventually */
6220 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6221 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
6222 MonoCallInst *call;
6224 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6225 INLINE_FAILURE;
6227 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6228 call->tail_call = TRUE;
6229 call->method = cmethod;
6230 call->signature = mono_method_signature (cmethod);
6232 #ifdef __x86_64__
6233 /* Handle tail calls similarly to calls */
6234 call->inst.opcode = OP_TAILCALL;
6235 call->args = sp;
6236 mono_arch_emit_call (cfg, call);
6237 #else
6239 * We implement tail calls by storing the actual arguments into the
6240 * argument variables, then emitting a CEE_JMP.
6242 for (i = 0; i < n; ++i) {
6243 /* Prevent argument from being register allocated */
6244 arg_array [i]->flags |= MONO_INST_VOLATILE;
6245 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6247 #endif
6249 ins = (MonoInst*)call;
6250 ins->inst_p0 = cmethod;
6251 ins->inst_p1 = arg_array [0];
6252 MONO_ADD_INS (bblock, ins);
6253 link_bblock (cfg, bblock, end_bblock);
6254 start_new_bblock = 1;
6255 /* skip CEE_RET as well */
6256 ip += 6;
6257 ins_flag = 0;
6258 break;
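/*
 * On the non-x86_64 path the tail call above is, in effect:
 *
 *     for (i = 0; i < n; ++i)
 *         arg [i] = sp [i];   // EMIT_NEW_ARGSTORE, args made volatile
 *     goto cmethod;           // OP_JMP, reusing the current frame
 *
 * (sketch of the copy-back + jump; x86_64 instead builds a real
 * OP_TAILCALL like an ordinary call)
 */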
6261 /* Conversion to a JIT intrinsic */
6262 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6263 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6264 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6265 *sp = ins;
6266 sp++;
6269 ip += 5;
6270 ins_flag = 0;
6271 break;
6274 /* Inlining */
6275 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6276 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6277 mono_method_check_inlining (cfg, cmethod) &&
6278 !g_list_find (dont_inline, cmethod)) {
6279 int costs;
6280 gboolean allways = FALSE;
6282 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6283 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6284 /* Prevent inlining of methods that call wrappers */
6285 INLINE_FAILURE;
6286 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6287 allways = TRUE;
6290 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6291 ip += 5;
6292 cfg->real_offset += 5;
6293 bblock = cfg->cbb;
6295 if (!MONO_TYPE_IS_VOID (fsig->ret))
6296 /* *sp is already set by inline_method */
6297 sp++;
6299 inline_costs += costs;
6300 ins_flag = 0;
6301 break;
6305 inline_costs += 10 * num_calls++;
6307 /* Tail recursion elimination */
6308 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6309 gboolean has_vtargs = FALSE;
6310 int i;
6312 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6313 INLINE_FAILURE;
6315 /* keep it simple */
6316 for (i = fsig->param_count - 1; i >= 0; i--) {
6317 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6318 has_vtargs = TRUE;
6321 if (!has_vtargs) {
6322 for (i = 0; i < n; ++i)
6323 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6324 MONO_INST_NEW (cfg, ins, OP_BR);
6325 MONO_ADD_INS (bblock, ins);
6326 tblock = start_bblock->out_bb [0];
6327 link_bblock (cfg, bblock, tblock);
6328 ins->inst_target_bb = tblock;
6329 start_new_bblock = 1;
6331 /* skip the CEE_RET, too */
6332 if (ip_in_bb (cfg, bblock, ip + 5))
6333 ip += 6;
6334 else
6335 ip += 5;
6337 ins_flag = 0;
6338 break;
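/*
 * Net effect of the rewrite above: a self tail call followed by ret
 * becomes, roughly,
 *
 *     arg [i] = sp [i];   for every argument
 *     goto start;         // OP_BR back to start_bblock's successor
 *
 * turning tail recursion into a plain loop (sketch; skipped when any
 * parameter is a struct, see has_vtargs).
 */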
6342 /* Generic sharing */
6343 /* FIXME: only do this for generic methods if
6344 they are not shared! */
6345 if (context_used && !imt_arg && !array_rank &&
6346 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6347 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6348 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6349 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6350 INLINE_FAILURE;
6352 g_assert (cfg->generic_sharing_context && cmethod);
6353 g_assert (!addr);
6356 * We are compiling a call to a
6357 * generic method from shared code,
6358 * which means that we have to look up
6359 * the method in the rgctx and do an
6360 * indirect call.
6362 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6365 /* Indirect calls */
6366 if (addr) {
6367 g_assert (!imt_arg);
6369 if (*ip == CEE_CALL)
6370 g_assert (context_used);
6371 else if (*ip == CEE_CALLI)
6372 g_assert (!vtable_arg);
6373 else
6374 /* FIXME: what the hell is this??? */
6375 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6376 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6378 /* Prevent inlining of methods with indirect calls */
6379 INLINE_FAILURE;
6381 if (vtable_arg) {
6382 #ifdef MONO_ARCH_RGCTX_REG
6383 MonoCallInst *call;
6384 int rgctx_reg = mono_alloc_preg (cfg);
6386 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6387 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6388 call = (MonoCallInst*)ins;
6389 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6390 cfg->uses_rgctx_reg = TRUE;
6391 #else
6392 NOT_IMPLEMENTED;
6393 #endif
6394 } else {
6395 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6397 * Instead of emitting an indirect call, emit a direct call
6398 * with the contents of the aotconst as the patch info.
6400 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6401 NULLIFY_INS (addr);
6402 } else {
6403 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6406 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6407 if (fsig->pinvoke && !fsig->ret->byref) {
6408 int widen_op = -1;
6411 * Native code might return non-register-sized integers
6412 * without initializing the upper bits.
6414 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6415 case OP_LOADI1_MEMBASE:
6416 widen_op = OP_ICONV_TO_I1;
6417 break;
6418 case OP_LOADU1_MEMBASE:
6419 widen_op = OP_ICONV_TO_U1;
6420 break;
6421 case OP_LOADI2_MEMBASE:
6422 widen_op = OP_ICONV_TO_I2;
6423 break;
6424 case OP_LOADU2_MEMBASE:
6425 widen_op = OP_ICONV_TO_U2;
6426 break;
6427 default:
6428 break;
6431 if (widen_op != -1) {
6432 int dreg = alloc_preg (cfg);
6433 MonoInst *widen;
6435 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6436 widen->type = ins->type;
6437 ins = widen;
6441 *sp++ = ins;
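/*
 * In C terms the widening above is e.g.
 *
 *     int ret = (gint8) native_call ();   // OP_ICONV_TO_I1
 *
 * for a hypothetical native function declared to return a signed
 * byte: the upper bits coming back from unmanaged code are not
 * trusted, so the value is explicitly re-extended (sketch only).
 */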
6444 ip += 5;
6445 ins_flag = 0;
6446 break;
6449 /* Array methods */
6450 if (array_rank) {
6451 MonoInst *addr;
6453 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6454 if (sp [fsig->param_count]->type == STACK_OBJ) {
6455 MonoInst *iargs [2];
6457 iargs [0] = sp [0];
6458 iargs [1] = sp [fsig->param_count];
6460 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6463 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6464 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6465 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6466 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6468 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6470 *sp++ = ins;
6471 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6472 if (!cmethod->klass->element_class->valuetype && !readonly)
6473 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6475 readonly = FALSE;
6476 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6477 *sp++ = addr;
6478 } else {
6479 g_assert_not_reached ();
6482 ip += 5;
6483 ins_flag = 0;
6484 break;
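/*
 * Mapping of the System.Array intrinsics handled above (sketch):
 *
 *     Set (i..., v)  -> element-type check for object arrays, then
 *                       ldelema + typed store
 *     Get (i...)     -> ldelema + typed load
 *     Address (i...) -> array type check (unless prefixed by
 *                       `readonly.') + ldelema
 */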
6487 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6488 if (ins) {
6489 if (!MONO_TYPE_IS_VOID (fsig->ret))
6490 *sp++ = ins;
6492 ip += 5;
6493 ins_flag = 0;
6494 break;
6497 /* Common call */
6498 INLINE_FAILURE;
6499 if (vtable_arg) {
6500 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6501 NULL, vtable_arg);
6502 } else if (imt_arg) {
6503 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6504 } else {
6505 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6508 if (!MONO_TYPE_IS_VOID (fsig->ret))
6509 *sp++ = ins;
6511 ip += 5;
6512 ins_flag = 0;
6513 break;
6515 case CEE_RET:
6516 if (cfg->method != method) {
6517 /* return from inlined method */
6519 * If in_count == 0, that means the ret is unreachable due to
6520 * being preceded by a throw. In that case, inline_method () will
6521 * handle setting the return value
6522 * (test case: test_0_inline_throw ()).
6524 if (return_var && cfg->cbb->in_count) {
6525 MonoInst *store;
6526 CHECK_STACK (1);
6527 --sp;
6528 //g_assert (returnvar != -1);
6529 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6530 cfg->ret_var_set = TRUE;
6532 } else {
6533 if (cfg->ret) {
6534 MonoType *ret_type = mono_method_signature (method)->ret;
6536 g_assert (!return_var);
6537 CHECK_STACK (1);
6538 --sp;
6539 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6540 MonoInst *ret_addr;
6542 if (!cfg->vret_addr) {
6543 MonoInst *ins;
6545 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6546 } else {
6547 EMIT_NEW_RETLOADA (cfg, ret_addr);
6549 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6550 ins->klass = mono_class_from_mono_type (ret_type);
6552 } else {
6553 #ifdef MONO_ARCH_SOFT_FLOAT
6554 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6555 MonoInst *iargs [1];
6556 MonoInst *conv;
6558 iargs [0] = *sp;
6559 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6560 mono_arch_emit_setret (cfg, method, conv);
6561 } else {
6562 mono_arch_emit_setret (cfg, method, *sp);
6564 #else
6565 mono_arch_emit_setret (cfg, method, *sp);
6566 #endif
6570 if (sp != stack_start)
6571 UNVERIFIED;
6572 MONO_INST_NEW (cfg, ins, OP_BR);
6573 ip++;
6574 ins->inst_target_bb = end_bblock;
6575 MONO_ADD_INS (bblock, ins);
6576 link_bblock (cfg, bblock, end_bblock);
6577 start_new_bblock = 1;
6578 break;
6579 case CEE_BR_S:
6580 CHECK_OPSIZE (2);
6581 MONO_INST_NEW (cfg, ins, OP_BR);
6582 ip++;
6583 target = ip + 1 + (signed char)(*ip);
6584 ++ip;
6585 GET_BBLOCK (cfg, tblock, target);
6586 link_bblock (cfg, bblock, tblock);
6587 ins->inst_target_bb = tblock;
6588 if (sp != stack_start) {
6589 handle_stack_args (cfg, stack_start, sp - stack_start);
6590 sp = stack_start;
6591 CHECK_UNVERIFIABLE (cfg);
6593 MONO_ADD_INS (bblock, ins);
6594 start_new_bblock = 1;
6595 inline_costs += BRANCH_COST;
6596 break;
6597 case CEE_BEQ_S:
6598 case CEE_BGE_S:
6599 case CEE_BGT_S:
6600 case CEE_BLE_S:
6601 case CEE_BLT_S:
6602 case CEE_BNE_UN_S:
6603 case CEE_BGE_UN_S:
6604 case CEE_BGT_UN_S:
6605 case CEE_BLE_UN_S:
6606 case CEE_BLT_UN_S:
6607 CHECK_OPSIZE (2);
6608 CHECK_STACK (2);
6609 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6610 ip++;
6611 target = ip + 1 + *(signed char*)ip;
6612 ip++;
6614 ADD_BINCOND (NULL);
6616 sp = stack_start;
6617 inline_costs += BRANCH_COST;
6618 break;
6619 case CEE_BR:
6620 CHECK_OPSIZE (5);
6621 MONO_INST_NEW (cfg, ins, OP_BR);
6622 ip++;
6624 target = ip + 4 + (gint32)read32(ip);
6625 ip += 4;
6626 GET_BBLOCK (cfg, tblock, target);
6627 link_bblock (cfg, bblock, tblock);
6628 ins->inst_target_bb = tblock;
6629 if (sp != stack_start) {
6630 handle_stack_args (cfg, stack_start, sp - stack_start);
6631 sp = stack_start;
6632 CHECK_UNVERIFIABLE (cfg);
6635 MONO_ADD_INS (bblock, ins);
6637 start_new_bblock = 1;
6638 inline_costs += BRANCH_COST;
6639 break;
6640 case CEE_BRFALSE_S:
6641 case CEE_BRTRUE_S:
6642 case CEE_BRFALSE:
6643 case CEE_BRTRUE: {
6644 MonoInst *cmp;
6645 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6646 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6647 guint32 opsize = is_short ? 1 : 4;
6649 CHECK_OPSIZE (opsize);
6650 CHECK_STACK (1);
6651 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6652 UNVERIFIED;
6653 ip ++;
6654 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6655 ip += opsize;
6657 sp--;
6659 GET_BBLOCK (cfg, tblock, target);
6660 link_bblock (cfg, bblock, tblock);
6661 GET_BBLOCK (cfg, tblock, ip);
6662 link_bblock (cfg, bblock, tblock);
6664 if (sp != stack_start) {
6665 handle_stack_args (cfg, stack_start, sp - stack_start);
6666 CHECK_UNVERIFIABLE (cfg);
6669 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6670 cmp->sreg1 = sp [0]->dreg;
6671 type_from_op (cmp, sp [0], NULL);
6672 CHECK_TYPE (cmp);
6674 #if SIZEOF_REGISTER == 4
6675 if (cmp->opcode == OP_LCOMPARE_IMM) {
6676 /* Convert it to OP_LCOMPARE */
6677 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6678 ins->type = STACK_I8;
6679 ins->dreg = alloc_dreg (cfg, STACK_I8);
6680 ins->inst_l = 0;
6681 MONO_ADD_INS (bblock, ins);
6682 cmp->opcode = OP_LCOMPARE;
6683 cmp->sreg2 = ins->dreg;
6685 #endif
6686 MONO_ADD_INS (bblock, cmp);
6688 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6689 type_from_op (ins, sp [0], NULL);
6690 MONO_ADD_INS (bblock, ins);
6691 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6692 GET_BBLOCK (cfg, tblock, target);
6693 ins->inst_true_bb = tblock;
6694 GET_BBLOCK (cfg, tblock, ip);
6695 ins->inst_false_bb = tblock;
6696 start_new_bblock = 2;
6698 sp = stack_start;
6699 inline_costs += BRANCH_COST;
6700 break;
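/*
 * brtrue/brfalse are lowered into an explicit compare against zero
 * plus a conditional branch, roughly:
 *
 *     OP_ICOMPARE_IMM sreg, 0
 *     CEE_BNE_UN (for brtrue) or CEE_BEQ (for brfalse) -> target
 *
 * with the 32-bit-register case above widening the compare to a full
 * OP_LCOMPARE against a zero I8 constant (sketch of the pair emitted
 * above).
 */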
6702 case CEE_BEQ:
6703 case CEE_BGE:
6704 case CEE_BGT:
6705 case CEE_BLE:
6706 case CEE_BLT:
6707 case CEE_BNE_UN:
6708 case CEE_BGE_UN:
6709 case CEE_BGT_UN:
6710 case CEE_BLE_UN:
6711 case CEE_BLT_UN:
6712 CHECK_OPSIZE (5);
6713 CHECK_STACK (2);
6714 MONO_INST_NEW (cfg, ins, *ip);
6715 ip++;
6716 target = ip + 4 + (gint32)read32(ip);
6717 ip += 4;
6719 ADD_BINCOND (NULL);
6721 sp = stack_start;
6722 inline_costs += BRANCH_COST;
6723 break;
6724 case CEE_SWITCH: {
6725 MonoInst *src1;
6726 MonoBasicBlock **targets;
6727 MonoBasicBlock *default_bblock;
6728 MonoJumpInfoBBTable *table;
6729 int offset_reg = alloc_preg (cfg);
6730 int target_reg = alloc_preg (cfg);
6731 int table_reg = alloc_preg (cfg);
6732 int sum_reg = alloc_preg (cfg);
6733 gboolean use_op_switch;
6735 CHECK_OPSIZE (5);
6736 CHECK_STACK (1);
6737 n = read32 (ip + 1);
6738 --sp;
6739 src1 = sp [0];
6740 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6741 UNVERIFIED;
6743 ip += 5;
6744 CHECK_OPSIZE (n * sizeof (guint32));
6745 target = ip + n * sizeof (guint32);
6747 GET_BBLOCK (cfg, default_bblock, target);
6749 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6750 for (i = 0; i < n; ++i) {
6751 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6752 targets [i] = tblock;
6753 ip += 4;
6756 if (sp != stack_start) {
6758 * Link the current bb with the targets as well, so handle_stack_args
6759 * will set their in_stack correctly.
6761 link_bblock (cfg, bblock, default_bblock);
6762 for (i = 0; i < n; ++i)
6763 link_bblock (cfg, bblock, targets [i]);
6765 handle_stack_args (cfg, stack_start, sp - stack_start);
6766 sp = stack_start;
6767 CHECK_UNVERIFIABLE (cfg);
6770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6772 bblock = cfg->cbb;
6774 for (i = 0; i < n; ++i)
6775 link_bblock (cfg, bblock, targets [i]);
6777 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6778 table->table = targets;
6779 table->table_size = n;
6781 use_op_switch = FALSE;
6782 #ifdef __arm__
6783 /* ARM implements SWITCH statements differently */
6784 /* FIXME: Make it use the generic implementation */
6785 if (!cfg->compile_aot)
6786 use_op_switch = TRUE;
6787 #endif
6789 if (use_op_switch) {
6790 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6791 ins->sreg1 = src1->dreg;
6792 ins->inst_p0 = table;
6793 ins->inst_many_bb = targets;
6794 ins->klass = GUINT_TO_POINTER (n);
6795 MONO_ADD_INS (cfg->cbb, ins);
6796 } else {
6797 if (sizeof (gpointer) == 8)
6798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6799 else
6800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6802 #if SIZEOF_REGISTER == 8
6803 /* The upper word might not be zero, and we add it to a 64 bit address later */
6804 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6805 #endif
6807 if (cfg->compile_aot) {
6808 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6809 } else {
6810 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6811 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6812 ins->inst_p0 = table;
6813 ins->dreg = table_reg;
6814 MONO_ADD_INS (cfg->cbb, ins);
6817 /* FIXME: Use load_memindex */
6818 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6820 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6822 start_new_bblock = 1;
6823 inline_costs += (BRANCH_COST * 2);
6824 break;
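/*
 * The non-OP_SWITCH path above is the classic jump-table sequence;
 * in C it is roughly
 *
 *     if ((guint32) idx >= n)
 *         goto default_case;
 *     goto *table [idx];    // table_reg + idx * sizeof (gpointer)
 *
 * where the bounds check is the OP_ICOMPARE_IMM/OP_IBGE_UN pair and
 * the computed goto is the final OP_BR_REG (sketch using GCC
 * computed-goto notation, illustration only).
 */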
6826 case CEE_LDIND_I1:
6827 case CEE_LDIND_U1:
6828 case CEE_LDIND_I2:
6829 case CEE_LDIND_U2:
6830 case CEE_LDIND_I4:
6831 case CEE_LDIND_U4:
6832 case CEE_LDIND_I8:
6833 case CEE_LDIND_I:
6834 case CEE_LDIND_R4:
6835 case CEE_LDIND_R8:
6836 case CEE_LDIND_REF:
6837 CHECK_STACK (1);
6838 --sp;
6840 switch (*ip) {
6841 case CEE_LDIND_R4:
6842 case CEE_LDIND_R8:
6843 dreg = alloc_freg (cfg);
6844 break;
6845 case CEE_LDIND_I8:
6846 dreg = alloc_lreg (cfg);
6847 break;
6848 default:
6849 dreg = alloc_preg (cfg);
6852 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6853 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6854 ins->flags |= ins_flag;
6855 ins_flag = 0;
6856 MONO_ADD_INS (bblock, ins);
6857 *sp++ = ins;
6858 ++ip;
6859 break;
6860 case CEE_STIND_REF:
6861 case CEE_STIND_I1:
6862 case CEE_STIND_I2:
6863 case CEE_STIND_I4:
6864 case CEE_STIND_I8:
6865 case CEE_STIND_R4:
6866 case CEE_STIND_R8:
6867 case CEE_STIND_I:
6868 CHECK_STACK (2);
6869 sp -= 2;
6871 #if HAVE_WRITE_BARRIERS
6872 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6873 /* insert call to write barrier */
6874 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6875 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6876 ins_flag = 0;
6877 ip++;
6878 break;
6880 #endif
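/*
 * Sketch of the decision above: a stind.ref whose value is not a
 * compile-time NULL, and which is not inside the write-barrier
 * wrapper itself, becomes
 *
 *     write_barrier (addr, value);   // wrapper from mono_gc_get_write_barrier ()
 *
 * so the GC can track the reference store; everything else falls
 * through to the plain store below.
 */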
6882 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6883 ins->flags |= ins_flag;
6884 ins_flag = 0;
6885 MONO_ADD_INS (bblock, ins);
6886 inline_costs += 1;
6887 ++ip;
6888 break;
6890 case CEE_MUL:
6891 CHECK_STACK (2);
6893 MONO_INST_NEW (cfg, ins, (*ip));
6894 sp -= 2;
6895 ins->sreg1 = sp [0]->dreg;
6896 ins->sreg2 = sp [1]->dreg;
6897 type_from_op (ins, sp [0], sp [1]);
6898 CHECK_TYPE (ins);
6899 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6901 /* Use the immediate opcodes if possible */
6902 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6903 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6904 if (imm_opcode != -1) {
6905 ins->opcode = imm_opcode;
6906 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6907 ins->sreg2 = -1;
6909 sp [1]->opcode = OP_NOP;
6913 MONO_ADD_INS ((cfg)->cbb, (ins));
6914 *sp++ = ins;
6916 mono_decompose_opcode (cfg, ins);
6917 ip++;
6918 break;
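/*
 * Example of the folding above: a CEE_LDC_I4 pushing 8 followed by
 * CEE_MUL turns the OP_ICONST into an OP_NOP and rewrites the
 * multiply to the matching *_IMM opcode with the constant 8 as its
 * immediate operand (hypothetical operand value, sketch only).
 */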
6919 case CEE_ADD:
6920 case CEE_SUB:
6921 case CEE_DIV:
6922 case CEE_DIV_UN:
6923 case CEE_REM:
6924 case CEE_REM_UN:
6925 case CEE_AND:
6926 case CEE_OR:
6927 case CEE_XOR:
6928 case CEE_SHL:
6929 case CEE_SHR:
6930 case CEE_SHR_UN:
6931 CHECK_STACK (2);
6933 MONO_INST_NEW (cfg, ins, (*ip));
6934 sp -= 2;
6935 ins->sreg1 = sp [0]->dreg;
6936 ins->sreg2 = sp [1]->dreg;
6937 type_from_op (ins, sp [0], sp [1]);
6938 CHECK_TYPE (ins);
6939 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6940 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6942 /* FIXME: Pass opcode to is_inst_imm */
6944 /* Use the immediate opcodes if possible */
6945 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6946 int imm_opcode;
6948 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6949 if (imm_opcode != -1) {
6950 ins->opcode = imm_opcode;
6951 if (sp [1]->opcode == OP_I8CONST) {
6952 #if SIZEOF_REGISTER == 8
6953 ins->inst_imm = sp [1]->inst_l;
6954 #else
6955 ins->inst_ls_word = sp [1]->inst_ls_word;
6956 ins->inst_ms_word = sp [1]->inst_ms_word;
6957 #endif
6959 else
6960 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6961 ins->sreg2 = -1;
6963 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6964 if (sp [1]->next == NULL)
6965 sp [1]->opcode = OP_NOP;
6968 MONO_ADD_INS ((cfg)->cbb, (ins));
6969 *sp++ = ins;
6971 mono_decompose_opcode (cfg, ins);
6972 ip++;
6973 break;
6974 case CEE_NEG:
6975 case CEE_NOT:
6976 case CEE_CONV_I1:
6977 case CEE_CONV_I2:
6978 case CEE_CONV_I4:
6979 case CEE_CONV_R4:
6980 case CEE_CONV_R8:
6981 case CEE_CONV_U4:
6982 case CEE_CONV_I8:
6983 case CEE_CONV_U8:
6984 case CEE_CONV_OVF_I8:
6985 case CEE_CONV_OVF_U8:
6986 case CEE_CONV_R_UN:
6987 CHECK_STACK (1);
6989 /* Special case this earlier so we have long constants in the IR */
6990 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6991 int data = sp [-1]->inst_c0;
6992 sp [-1]->opcode = OP_I8CONST;
6993 sp [-1]->type = STACK_I8;
6994 #if SIZEOF_REGISTER == 8
6995 if ((*ip) == CEE_CONV_U8)
6996 sp [-1]->inst_c0 = (guint32)data;
6997 else
6998 sp [-1]->inst_c0 = data;
6999 #else
7000 sp [-1]->inst_ls_word = data;
7001 if ((*ip) == CEE_CONV_U8)
7002 sp [-1]->inst_ms_word = 0;
7003 else
7004 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7005 #endif
7006 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7008 else {
7009 ADD_UNOP (*ip);
7011 ip++;
7012 break;
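/*
 * E.g. an OP_ICONST of -1 followed by CEE_CONV_I8 becomes an
 * OP_I8CONST of -1 (the upper word sign-filled on 32-bit registers),
 * while CEE_CONV_U8 zero-fills the upper word instead, so the same
 * bits become 4294967295 (illustrative constants).
 */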
7013 case CEE_CONV_OVF_I4:
7014 case CEE_CONV_OVF_I1:
7015 case CEE_CONV_OVF_I2:
7016 case CEE_CONV_OVF_I:
7017 case CEE_CONV_OVF_U:
7018 CHECK_STACK (1);
7020 if (sp [-1]->type == STACK_R8) {
7021 ADD_UNOP (CEE_CONV_OVF_I8);
7022 ADD_UNOP (*ip);
7023 } else {
7024 ADD_UNOP (*ip);
7026 ip++;
7027 break;
7028 case CEE_CONV_OVF_U1:
7029 case CEE_CONV_OVF_U2:
7030 case CEE_CONV_OVF_U4:
7031 CHECK_STACK (1);
7033 if (sp [-1]->type == STACK_R8) {
7034 ADD_UNOP (CEE_CONV_OVF_U8);
7035 ADD_UNOP (*ip);
7036 } else {
7037 ADD_UNOP (*ip);
7039 ip++;
7040 break;
7041 case CEE_CONV_OVF_I1_UN:
7042 case CEE_CONV_OVF_I2_UN:
7043 case CEE_CONV_OVF_I4_UN:
7044 case CEE_CONV_OVF_I8_UN:
7045 case CEE_CONV_OVF_U1_UN:
7046 case CEE_CONV_OVF_U2_UN:
7047 case CEE_CONV_OVF_U4_UN:
7048 case CEE_CONV_OVF_U8_UN:
7049 case CEE_CONV_OVF_I_UN:
7050 case CEE_CONV_OVF_U_UN:
7051 case CEE_CONV_U2:
7052 case CEE_CONV_U1:
7053 case CEE_CONV_I:
7054 case CEE_CONV_U:
7055 CHECK_STACK (1);
7056 ADD_UNOP (*ip);
7057 ip++;
7058 break;
7059 case CEE_ADD_OVF:
7060 case CEE_ADD_OVF_UN:
7061 case CEE_MUL_OVF:
7062 case CEE_MUL_OVF_UN:
7063 case CEE_SUB_OVF:
7064 case CEE_SUB_OVF_UN:
7065 CHECK_STACK (2);
7066 ADD_BINOP (*ip);
7067 ip++;
7068 break;
7069 case CEE_CPOBJ:
7070 CHECK_OPSIZE (5);
7071 CHECK_STACK (2);
7072 token = read32 (ip + 1);
7073 klass = mini_get_class (method, token, generic_context);
7074 CHECK_TYPELOAD (klass);
7075 sp -= 2;
7076 if (generic_class_is_reference_type (cfg, klass)) {
7077 MonoInst *store, *load;
7078 int dreg = alloc_preg (cfg);
7080 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7081 load->flags |= ins_flag;
7082 MONO_ADD_INS (cfg->cbb, load);
7084 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7085 store->flags |= ins_flag;
7086 MONO_ADD_INS (cfg->cbb, store);
7087 } else {
7088 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7090 ins_flag = 0;
7091 ip += 5;
7092 break;
7093 case CEE_LDOBJ: {
7094 int loc_index = -1;
7095 int stloc_len = 0;
7097 CHECK_OPSIZE (5);
7098 CHECK_STACK (1);
7099 --sp;
7100 token = read32 (ip + 1);
7101 klass = mini_get_class (method, token, generic_context);
7102 CHECK_TYPELOAD (klass);
7104 /* Optimize the common ldobj+stloc combination */
7105 switch (ip [5]) {
7106 case CEE_STLOC_S:
7107 loc_index = ip [6];
7108 stloc_len = 2;
7109 break;
7110 case CEE_STLOC_0:
7111 case CEE_STLOC_1:
7112 case CEE_STLOC_2:
7113 case CEE_STLOC_3:
7114 loc_index = ip [5] - CEE_STLOC_0;
7115 stloc_len = 1;
7116 break;
7117 default:
7118 break;
7121 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7122 CHECK_LOCAL (loc_index);
7124 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7125 ins->dreg = cfg->locals [loc_index]->dreg;
7126 ip += 5;
7127 ip += stloc_len;
7128 break;
7131 /* Optimize the ldobj+stobj combination */
7132 /* The reference case ends up being a load+store anyway */
7133 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7134 CHECK_STACK (1);
7136 sp --;
7138 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7140 ip += 5 + 5;
7141 ins_flag = 0;
7142 break;
7145 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7146 *sp++ = ins;
7148 ip += 5;
7149 ins_flag = 0;
7150 inline_costs += 1;
7151 break;
7153 case CEE_LDSTR:
7154 CHECK_STACK_OVF (1);
7155 CHECK_OPSIZE (5);
7156 n = read32 (ip + 1);
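/* For dynamic method wrappers the string object is stored directly in the wrapper data, so it can be pushed as an object constant. */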
7158 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7159 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7160 ins->type = STACK_OBJ;
7161 *sp = ins;
7163 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7164 MonoInst *iargs [1];
7166 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7167 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7168 } else {
7169 if (cfg->opt & MONO_OPT_SHARED) {
7170 MonoInst *iargs [3];
7172 if (cfg->compile_aot) {
7173 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7175 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7176 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7177 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7178 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7179 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7180 } else {
7181 if (bblock->out_of_line) {
7182 MonoInst *iargs [2];
7184 if (image == mono_defaults.corlib) {
7186 * Avoid relocations in AOT and save some space by using a
7187 * version of helper_ldstr specialized to mscorlib.
7189 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7190 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7191 } else {
7192 /* Avoid creating the string object */
7193 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7194 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7195 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7198 else
7199 if (cfg->compile_aot) {
7200 NEW_LDSTRCONST (cfg, ins, image, n);
7201 *sp = ins;
7202 MONO_ADD_INS (bblock, ins);
7204 else {
7205 NEW_PCONST (cfg, ins, NULL);
7206 ins->type = STACK_OBJ;
7207 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7208 *sp = ins;
7209 MONO_ADD_INS (bblock, ins);
7214 sp++;
7215 ip += 5;
7216 break;
7217 case CEE_NEWOBJ: {
7218 MonoInst *iargs [2];
7219 MonoMethodSignature *fsig;
7220 MonoInst this_ins;
7221 MonoInst *alloc;
7222 MonoInst *vtable_arg = NULL;
7224 CHECK_OPSIZE (5);
7225 token = read32 (ip + 1);
7226 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7227 if (!cmethod)
7228 goto load_error;
7229 fsig = mono_method_get_signature (cmethod, image, token);
7231 mono_save_token_info (cfg, image, token, cmethod);
7233 if (!mono_class_init (cmethod->klass))
7234 goto load_error;
7236 if (cfg->generic_sharing_context)
7237 context_used = mono_method_check_context_used (cmethod);
7239 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7240 if (check_linkdemand (cfg, method, cmethod))
7241 INLINE_FAILURE;
7242 CHECK_CFG_EXCEPTION;
7243 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7244 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7247 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7248 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7249 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7250 if (context_used) {
7251 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7252 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7253 } else {
7254 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7256 } else {
7257 if (context_used) {
7258 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7259 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7260 } else {
7261 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7263 CHECK_TYPELOAD (cmethod->klass);
7264 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7269 n = fsig->param_count;
7270 CHECK_STACK (n);
7273 * Generate smaller code for the common newobj <exception> instruction in
7274 * argument checking code.
7276 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7277 is_exception_class (cmethod->klass) && n <= 2 &&
7278 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7279 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7280 MonoInst *iargs [3];
7282 g_assert (!vtable_arg);
7284 sp -= n;
7286 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7287 switch (n) {
7288 case 0:
7289 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7290 break;
7291 case 1:
7292 iargs [1] = sp [0];
7293 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7294 break;
7295 case 2:
7296 iargs [1] = sp [0];
7297 iargs [2] = sp [1];
7298 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7299 break;
7300 default:
7301 g_assert_not_reached ();
7304 ip += 5;
7305 inline_costs += 5;
7306 break;
7309 /* move the args to allow room for 'this' in the first position */
7310 while (n--) {
7311 --sp;
7312 sp [1] = sp [0];
7315 /* check_call_signature () requires sp[0] to be set */
7316 this_ins.type = STACK_OBJ;
7317 sp [0] = &this_ins;
7318 if (check_call_signature (cfg, fsig, sp))
7319 UNVERIFIED;
7321 iargs [0] = NULL;
7323 if (mini_class_is_system_array (cmethod->klass)) {
7324 g_assert (!vtable_arg);
7326 if (context_used) {
7327 *sp = emit_get_rgctx_method (cfg, context_used,
7328 cmethod, MONO_RGCTX_INFO_METHOD);
7329 } else {
7330 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7333 /* Avoid varargs in the common case */
7334 if (fsig->param_count == 1)
7335 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7336 else if (fsig->param_count == 2)
7337 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7338 else
7339 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7340 } else if (cmethod->string_ctor) {
7341 g_assert (!context_used);
7342 g_assert (!vtable_arg);
7343 /* we simply pass a null pointer */
7344 EMIT_NEW_PCONST (cfg, *sp, NULL);
7345 /* now call the string ctor */
7346 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7347 } else {
7348 MonoInst* callvirt_this_arg = NULL;
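/* Value types are not heap allocated: create a zeroed local and pass its address as 'this' to the constructor. */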
7350 if (cmethod->klass->valuetype) {
7351 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7352 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7353 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7355 alloc = NULL;
7358 * The code generated by mini_emit_virtual_call () expects
7359 * iargs [0] to be a boxed instance, but luckily the vcall
7360 * will be transformed into a normal call there.
7362 } else if (context_used) {
7363 MonoInst *data;
7364 int rgctx_info;
7366 if (cfg->opt & MONO_OPT_SHARED)
7367 rgctx_info = MONO_RGCTX_INFO_KLASS;
7368 else
7369 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7370 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7372 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7373 *sp = alloc;
7374 } else {
7375 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7377 CHECK_TYPELOAD (cmethod->klass);
7380 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7381 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7382 * As a workaround, we call class cctors before allocating objects.
7384 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7385 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7386 if (cfg->verbose_level > 2)
7387 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7388 class_inits = g_slist_prepend (class_inits, vtable);
7391 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7392 *sp = alloc;
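/* alloc is NULL only in the value type case above, where the new object lives in a local instead of on the heap. */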
7395 if (alloc)
7396 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7398 /* Now call the actual ctor */
7399 /* Avoid virtual calls to ctors if possible */
7400 if (cmethod->klass->marshalbyref)
7401 callvirt_this_arg = sp [0];
7403 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7404 mono_method_check_inlining (cfg, cmethod) &&
7405 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7406 !g_list_find (dont_inline, cmethod)) {
7407 int costs;
7409 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7410 cfg->real_offset += 5;
7411 bblock = cfg->cbb;
7413 inline_costs += costs - 5;
7414 } else {
7415 INLINE_FAILURE;
7416 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7418 } else if (context_used &&
7419 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7420 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7421 MonoInst *cmethod_addr;
7423 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7424 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7426 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7427 } else {
7428 INLINE_FAILURE;
7429 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7430 callvirt_this_arg, NULL, vtable_arg);
7431 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7432 GENERIC_SHARING_FAILURE (*ip);
7436 if (alloc == NULL) {
7437 /* Valuetype */
7438 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7439 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7440 *sp++= ins;
7442 else
7443 *sp++ = alloc;
7445 ip += 5;
7446 inline_costs += 5;
7447 break;
7449 case CEE_CASTCLASS:
7450 CHECK_STACK (1);
7451 --sp;
7452 CHECK_OPSIZE (5);
7453 token = read32 (ip + 1);
7454 klass = mini_get_class (method, token, generic_context);
7455 CHECK_TYPELOAD (klass);
7456 if (sp [0]->type != STACK_OBJ)
7457 UNVERIFIED;
7459 if (cfg->generic_sharing_context)
7460 context_used = mono_class_check_context_used (klass);
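/* In shared generic code the class is only known at run time, so the cast goes through the mono_object_castclass icall with the class loaded from the rgctx. */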
7462 if (context_used) {
7463 MonoInst *args [2];
7465 /* obj */
7466 args [0] = *sp;
7468 /* klass */
7469 args [1] = emit_get_rgctx_klass (cfg, context_used,
7470 klass, MONO_RGCTX_INFO_KLASS);
7472 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7473 *sp ++ = ins;
7474 ip += 5;
7475 inline_costs += 2;
7476 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7477 MonoMethod *mono_castclass;
7478 MonoInst *iargs [1];
7479 int costs;
7481 mono_castclass = mono_marshal_get_castclass (klass);
7482 iargs [0] = sp [0];
7484 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7485 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7486 g_assert (costs > 0);
7488 ip += 5;
7489 cfg->real_offset += 5;
7490 bblock = cfg->cbb;
7492 *sp++ = iargs [0];
7494 inline_costs += costs;
7496 else {
7497 ins = handle_castclass (cfg, klass, *sp);
7498 bblock = cfg->cbb;
7499 *sp ++ = ins;
7500 ip += 5;
7502 break;
7503 case CEE_ISINST: {
7504 CHECK_STACK (1);
7505 --sp;
7506 CHECK_OPSIZE (5);
7507 token = read32 (ip + 1);
7508 klass = mini_get_class (method, token, generic_context);
7509 CHECK_TYPELOAD (klass);
7510 if (sp [0]->type != STACK_OBJ)
7511 UNVERIFIED;
7513 if (cfg->generic_sharing_context)
7514 context_used = mono_class_check_context_used (klass);
7516 if (context_used) {
7517 MonoInst *args [2];
7519 /* obj */
7520 args [0] = *sp;
7522 /* klass */
7523 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7525 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7526 sp++;
7527 ip += 5;
7528 inline_costs += 2;
7529 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7530 MonoMethod *mono_isinst;
7531 MonoInst *iargs [1];
7532 int costs;
7534 mono_isinst = mono_marshal_get_isinst (klass);
7535 iargs [0] = sp [0];
7537 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7538 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7539 g_assert (costs > 0);
7541 ip += 5;
7542 cfg->real_offset += 5;
7543 bblock = cfg->cbb;
7545 *sp++= iargs [0];
7547 inline_costs += costs;
7549 else {
7550 ins = handle_isinst (cfg, klass, *sp);
7551 bblock = cfg->cbb;
7552 *sp ++ = ins;
7553 ip += 5;
7555 break;
7557 case CEE_UNBOX_ANY: {
7558 CHECK_STACK (1);
7559 --sp;
7560 CHECK_OPSIZE (5);
7561 token = read32 (ip + 1);
7562 klass = mini_get_class (method, token, generic_context);
7563 CHECK_TYPELOAD (klass);
7565 mono_save_token_info (cfg, image, token, klass);
7567 if (cfg->generic_sharing_context)
7568 context_used = mono_class_check_context_used (klass);
7570 if (generic_class_is_reference_type (cfg, klass)) {
7571 /* CASTCLASS */
7572 if (context_used) {
7573 MonoInst *iargs [2];
7575 /* obj */
7576 iargs [0] = *sp;
7577 /* klass */
7578 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7579 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7580 *sp ++ = ins;
7581 ip += 5;
7582 inline_costs += 2;
7583 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7584 MonoMethod *mono_castclass;
7585 MonoInst *iargs [1];
7586 int costs;
7588 mono_castclass = mono_marshal_get_castclass (klass);
7589 iargs [0] = sp [0];
7591 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7592 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7594 g_assert (costs > 0);
7596 ip += 5;
7597 cfg->real_offset += 5;
7598 bblock = cfg->cbb;
7600 *sp++ = iargs [0];
7601 inline_costs += costs;
7602 } else {
7603 ins = handle_castclass (cfg, klass, *sp);
7604 bblock = cfg->cbb;
7605 *sp ++ = ins;
7606 ip += 5;
7608 break;
7611 if (mono_class_is_nullable (klass)) {
7612 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7613 *sp++= ins;
7614 ip += 5;
7615 break;
7618 /* UNBOX */
7619 ins = handle_unbox (cfg, klass, sp, context_used);
7620 *sp = ins;
7622 ip += 5;
7624 /* LDOBJ */
7625 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7626 *sp++ = ins;
7628 inline_costs += 2;
7629 break;
7631 case CEE_BOX: {
7632 MonoInst *val;
7634 CHECK_STACK (1);
7635 --sp;
7636 val = *sp;
7637 CHECK_OPSIZE (5);
7638 token = read32 (ip + 1);
7639 klass = mini_get_class (method, token, generic_context);
7640 CHECK_TYPELOAD (klass);
7642 mono_save_token_info (cfg, image, token, klass);
7644 if (cfg->generic_sharing_context)
7645 context_used = mono_class_check_context_used (klass);
7647 if (generic_class_is_reference_type (cfg, klass)) {
7648 *sp++ = val;
7649 ip += 5;
7650 break;
7653 if (klass == mono_defaults.void_class)
7654 UNVERIFIED;
7655 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7656 UNVERIFIED;
7657 /* frequent check in generic code: box (struct), brtrue */
7658 if (!mono_class_is_nullable (klass) &&
7659 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7660 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7661 ip += 5;
7662 MONO_INST_NEW (cfg, ins, OP_BR);
7663 if (*ip == CEE_BRTRUE_S) {
7664 CHECK_OPSIZE (2);
7665 ip++;
7666 target = ip + 1 + (signed char)(*ip);
7667 ip++;
7668 } else {
7669 CHECK_OPSIZE (5);
7670 ip++;
7671 target = ip + 4 + (gint)(read32 (ip));
7672 ip += 4;
7674 GET_BBLOCK (cfg, tblock, target);
7675 link_bblock (cfg, bblock, tblock);
7676 ins->inst_target_bb = tblock;
7677 GET_BBLOCK (cfg, tblock, ip);
7679 * This leads to some inconsistency, since the two bblocks are
7680 * not really connected, but it is needed for handling stack
7681 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7682 * FIXME: This should only be needed if sp != stack_start, but that
7683 * doesn't work for some reason (test failure in mcs/tests on x86).
7685 link_bblock (cfg, bblock, tblock);
7686 if (sp != stack_start) {
7687 handle_stack_args (cfg, stack_start, sp - stack_start);
7688 sp = stack_start;
7689 CHECK_UNVERIFIABLE (cfg);
7691 MONO_ADD_INS (bblock, ins);
7692 start_new_bblock = 1;
7693 break;
7696 if (context_used) {
7697 MonoInst *data;
7698 int rgctx_info;
7700 if (cfg->opt & MONO_OPT_SHARED)
7701 rgctx_info = MONO_RGCTX_INFO_KLASS;
7702 else
7703 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7704 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7705 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7706 } else {
7707 *sp++ = handle_box (cfg, val, klass);
7710 ip += 5;
7711 inline_costs += 1;
7712 break;
7714 case CEE_UNBOX: {
7715 CHECK_STACK (1);
7716 --sp;
7717 CHECK_OPSIZE (5);
7718 token = read32 (ip + 1);
7719 klass = mini_get_class (method, token, generic_context);
7720 CHECK_TYPELOAD (klass);
7722 mono_save_token_info (cfg, image, token, klass);
7724 if (cfg->generic_sharing_context)
7725 context_used = mono_class_check_context_used (klass);
7727 if (mono_class_is_nullable (klass)) {
7728 MonoInst *val;
7730 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7731 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7733 *sp++= ins;
7734 } else {
7735 ins = handle_unbox (cfg, klass, sp, context_used);
7736 *sp++ = ins;
7738 ip += 5;
7739 inline_costs += 2;
7740 break;
7742 case CEE_LDFLD:
7743 case CEE_LDFLDA:
7744 case CEE_STFLD: {
7745 MonoClassField *field;
7746 int costs;
7747 guint foffset;
7749 if (*ip == CEE_STFLD) {
7750 CHECK_STACK (2);
7751 sp -= 2;
7752 } else {
7753 CHECK_STACK (1);
7754 --sp;
7756 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7757 UNVERIFIED;
7758 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7759 UNVERIFIED;
7760 CHECK_OPSIZE (5);
7761 token = read32 (ip + 1);
7762 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7763 field = mono_method_get_wrapper_data (method, token);
7764 klass = field->parent;
7766 else {
7767 field = mono_field_from_token (image, token, &klass, generic_context);
7769 if (!field)
7770 goto load_error;
7771 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7772 FIELD_ACCESS_FAILURE;
7773 mono_class_init (klass);
7775 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7776 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7777 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7778 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7781 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
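/* For value types field->offset is relative to a boxed instance, so the MonoObject header size is subtracted to get the offset into the unboxed data. */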
7782 if (*ip == CEE_STFLD) {
7783 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7784 UNVERIFIED;
7785 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7786 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7787 MonoInst *iargs [5];
7789 iargs [0] = sp [0];
7790 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7791 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7792 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7793 field->offset);
7794 iargs [4] = sp [1];
7796 if (cfg->opt & MONO_OPT_INLINE) {
7797 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7798 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7799 g_assert (costs > 0);
7801 cfg->real_offset += 5;
7802 bblock = cfg->cbb;
7804 inline_costs += costs;
7805 } else {
7806 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7808 } else {
7809 MonoInst *store;
7811 #if HAVE_WRITE_BARRIERS
7812 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7813 /* insert call to write barrier */
7814 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7815 MonoInst *iargs [2];
7816 int dreg;
7818 dreg = alloc_preg (cfg);
7819 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7820 iargs [1] = sp [1];
7821 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7823 #endif
7825 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7827 store->flags |= ins_flag;
7829 ins_flag = 0;
7830 ip += 5;
7831 break;
7834 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7835 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7836 MonoInst *iargs [4];
7838 iargs [0] = sp [0];
7839 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7840 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7841 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7842 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7843 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7844 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7845 bblock = cfg->cbb;
7846 g_assert (costs > 0);
7848 cfg->real_offset += 5;
7850 *sp++ = iargs [0];
7852 inline_costs += costs;
7853 } else {
7854 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7855 *sp++ = ins;
7857 } else {
7858 if (sp [0]->type == STACK_VTYPE) {
7859 MonoInst *var;
7861 /* Have to compute the address of the variable */
7863 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7864 if (!var)
7865 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7866 else
7867 g_assert (var->klass == klass);
7869 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7870 sp [0] = ins;
7873 if (*ip == CEE_LDFLDA) {
7874 dreg = alloc_preg (cfg);
7876 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7877 ins->klass = mono_class_from_mono_type (field->type);
7878 ins->type = STACK_MP;
7879 *sp++ = ins;
7880 } else {
7881 MonoInst *load;
7883 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7884 load->flags |= ins_flag;
7885 *sp++ = load;
7888 ins_flag = 0;
7889 ip += 5;
7890 break;
7892 case CEE_LDSFLD:
7893 case CEE_LDSFLDA:
7894 case CEE_STSFLD: {
7895 MonoClassField *field;
7896 gpointer addr = NULL;
7897 gboolean is_special_static;
7899 CHECK_OPSIZE (5);
7900 token = read32 (ip + 1);
7902 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7903 field = mono_method_get_wrapper_data (method, token);
7904 klass = field->parent;
7906 else
7907 field = mono_field_from_token (image, token, &klass, generic_context);
7908 if (!field)
7909 goto load_error;
7910 mono_class_init (klass);
7911 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7912 FIELD_ACCESS_FAILURE;
7914 /* if the class is Critical then transparent code cannot access its fields */
7915 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7916 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7919 * We can only support shared generic static
7920 * field access on architectures where the
7921 * trampoline code has been extended to handle
7922 * the generic class init.
7924 #ifndef MONO_ARCH_VTABLE_REG
7925 GENERIC_SHARING_FAILURE (*ip);
7926 #endif
7928 if (cfg->generic_sharing_context)
7929 context_used = mono_class_check_context_used (klass);
7931 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7933 /* The special_static_fields field is initialized in mono_class_vtable (), so that
7934 * function needs to be called here.
7936 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7937 mono_class_vtable (cfg->domain, klass);
7938 CHECK_TYPELOAD (klass);
7940 mono_domain_lock (cfg->domain);
7941 if (cfg->domain->special_static_fields)
7942 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7943 mono_domain_unlock (cfg->domain);
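/* A hit in special_static_fields means the field is a special (thread/context) static; 'addr' then holds an encoded offset which is resolved at run time by mono_get_special_static_data () below. */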
7945 is_special_static = mono_class_field_is_special_static (field);
7947 /* Generate IR to compute the field address */
7949 if ((cfg->opt & MONO_OPT_SHARED) ||
7950 (cfg->compile_aot && is_special_static) ||
7951 (context_used && is_special_static)) {
7952 MonoInst *iargs [2];
7954 g_assert (field->parent);
7955 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7956 if (context_used) {
7957 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7958 field, MONO_RGCTX_INFO_CLASS_FIELD);
7959 } else {
7960 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7962 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7963 } else if (context_used) {
7964 MonoInst *static_data;
7967 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7968 method->klass->name_space, method->klass->name, method->name,
7969 depth, field->offset);
7972 if (mono_class_needs_cctor_run (klass, method)) {
7973 MonoCallInst *call;
7974 MonoInst *vtable;
7976 vtable = emit_get_rgctx_klass (cfg, context_used,
7977 klass, MONO_RGCTX_INFO_VTABLE);
7979 // FIXME: This doesn't work since it tries to pass the argument
7980 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7982 * The vtable pointer is always passed in a register regardless of
7983 * the calling convention, so assign it manually, and make a call
7984 * using a signature without parameters.
7986 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7987 #ifdef MONO_ARCH_VTABLE_REG
7988 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7989 cfg->uses_vtable_reg = TRUE;
7990 #else
7991 NOT_IMPLEMENTED;
7992 #endif
7996 * The pointer we're computing here is
7998 * super_info.static_data + field->offset
8000 static_data = emit_get_rgctx_klass (cfg, context_used,
8001 klass, MONO_RGCTX_INFO_STATIC_DATA);
8003 if (field->offset == 0) {
8004 ins = static_data;
8005 } else {
8006 int addr_reg = mono_alloc_preg (cfg);
8007 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8009 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8010 MonoInst *iargs [2];
8012 g_assert (field->parent);
8013 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8014 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8015 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8016 } else {
8017 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8019 CHECK_TYPELOAD (klass);
8020 if (!addr) {
8021 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8022 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8023 if (cfg->verbose_level > 2)
8024 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8025 class_inits = g_slist_prepend (class_inits, vtable);
8026 } else {
8027 if (cfg->run_cctors) {
8028 MonoException *ex;
8029 /* This makes it so that inlining cannot trigger */
8030 /* .cctors: too many apps depend on them */
8031 /* running in a specific order... */
8032 if (! vtable->initialized)
8033 INLINE_FAILURE;
8034 ex = mono_runtime_class_init_full (vtable, FALSE);
8035 if (ex) {
8036 set_exception_object (cfg, ex);
8037 goto exception_exit;
8041 addr = (char*)vtable->data + field->offset;
8043 if (cfg->compile_aot)
8044 EMIT_NEW_SFLDACONST (cfg, ins, field);
8045 else
8046 EMIT_NEW_PCONST (cfg, ins, addr);
8047 } else {
8049 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8050 * This could be later optimized to do just a couple of
8051 * memory dereferences with constant offsets.
8053 MonoInst *iargs [1];
8054 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8055 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8059 /* Generate IR to do the actual load/store operation */
8061 if (*ip == CEE_LDSFLDA) {
8062 ins->klass = mono_class_from_mono_type (field->type);
8063 ins->type = STACK_PTR;
8064 *sp++ = ins;
8065 } else if (*ip == CEE_STSFLD) {
8066 MonoInst *store;
8067 CHECK_STACK (1);
8068 sp--;
8070 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8071 store->flags |= ins_flag;
8072 } else {
8073 gboolean is_const = FALSE;
8074 MonoVTable *vtable = NULL;
8076 if (!context_used) {
8077 vtable = mono_class_vtable (cfg->domain, klass);
8078 CHECK_TYPELOAD (klass);
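/* Initonly static fields of classes whose cctor has already run can be read at JIT time and folded into constants (not possible for shared or AOT code). */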
8080 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8081 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8082 gpointer addr = (char*)vtable->data + field->offset;
8083 int ro_type = field->type->type;
8084 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8085 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8087 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8088 is_const = TRUE;
8089 switch (ro_type) {
8090 case MONO_TYPE_BOOLEAN:
8091 case MONO_TYPE_U1:
8092 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8093 sp++;
8094 break;
8095 case MONO_TYPE_I1:
8096 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8097 sp++;
8098 break;
8099 case MONO_TYPE_CHAR:
8100 case MONO_TYPE_U2:
8101 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8102 sp++;
8103 break;
8104 case MONO_TYPE_I2:
8105 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8106 sp++;
8107 break;
8109 case MONO_TYPE_I4:
8110 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8111 sp++;
8112 break;
8113 case MONO_TYPE_U4:
8114 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8115 sp++;
8116 break;
8117 #ifndef HAVE_MOVING_COLLECTOR
8118 case MONO_TYPE_I:
8119 case MONO_TYPE_U:
8120 case MONO_TYPE_STRING:
8121 case MONO_TYPE_OBJECT:
8122 case MONO_TYPE_CLASS:
8123 case MONO_TYPE_SZARRAY:
8124 case MONO_TYPE_PTR:
8125 case MONO_TYPE_FNPTR:
8126 case MONO_TYPE_ARRAY:
8127 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8128 type_to_eval_stack_type ((cfg), field->type, *sp);
8129 sp++;
8130 break;
8131 #endif
8132 case MONO_TYPE_I8:
8133 case MONO_TYPE_U8:
8134 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8135 sp++;
8136 break;
8137 case MONO_TYPE_R4:
8138 case MONO_TYPE_R8:
8139 case MONO_TYPE_VALUETYPE:
8140 default:
8141 is_const = FALSE;
8142 break;
8146 if (!is_const) {
8147 MonoInst *load;
8149 CHECK_STACK_OVF (1);
8151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8152 load->flags |= ins_flag;
8153 ins_flag = 0;
8154 *sp++ = load;
8157 ins_flag = 0;
8158 ip += 5;
8159 break;
8161 case CEE_STOBJ:
8162 CHECK_STACK (2);
8163 sp -= 2;
8164 CHECK_OPSIZE (5);
8165 token = read32 (ip + 1);
8166 klass = mini_get_class (method, token, generic_context);
8167 CHECK_TYPELOAD (klass);
8168 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8169 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8170 ins_flag = 0;
8171 ip += 5;
8172 inline_costs += 1;
8173 break;
8176 * Array opcodes
8178 case CEE_NEWARR: {
8179 MonoInst *len_ins;
8180 const char *data_ptr;
8181 int data_size = 0;
8182 guint32 field_token;
8184 CHECK_STACK (1);
8185 --sp;
8187 CHECK_OPSIZE (5);
8188 token = read32 (ip + 1);
8190 klass = mini_get_class (method, token, generic_context);
8191 CHECK_TYPELOAD (klass);
8193 if (cfg->generic_sharing_context)
8194 context_used = mono_class_check_context_used (klass);
8196 if (context_used) {
8197 MonoInst *args [2];
8199 /* FIXME: Decompose later to help abcrem */
8201 /* vtable */
8202 args [0] = emit_get_rgctx_klass (cfg, context_used,
8203 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8205 /* array len */
8206 args [1] = sp [0];
8208 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8209 } else {
8210 if (cfg->opt & MONO_OPT_SHARED) {
8211 /* Decompose now to avoid problems with references to the domainvar */
8212 MonoInst *iargs [3];
8214 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8215 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8216 iargs [2] = sp [0];
8218 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8219 } else {
8220 /* Decompose later since it is needed by abcrem */
8221 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8222 ins->dreg = alloc_preg (cfg);
8223 ins->sreg1 = sp [0]->dreg;
8224 ins->inst_newa_class = klass;
8225 ins->type = STACK_OBJ;
8226 ins->klass = klass;
8227 MONO_ADD_INS (cfg->cbb, ins);
8228 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8229 cfg->cbb->has_array_access = TRUE;
8231 /* Needed so mono_emit_load_get_addr () gets called */
8232 mono_get_got_var (cfg);
8236 len_ins = sp [0];
8237 ip += 5;
8238 *sp++ = ins;
8239 inline_costs += 1;
8242 * We inline/optimize the initialization sequence if possible.
8243 * We should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing.
8244 * For small sizes, open code the memcpy.
8245 * Ensure the rva field is big enough.
8247 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8248 MonoMethod *memcpy_method = get_memcpy_method ();
8249 MonoInst *iargs [3];
8250 int add_reg = alloc_preg (cfg);
8252 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8253 if (cfg->compile_aot) {
8254 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8255 } else {
8256 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8258 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8259 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8260 ip += 11;
8263 break;
8265 case CEE_LDLEN:
8266 CHECK_STACK (1);
8267 --sp;
8268 if (sp [0]->type != STACK_OBJ)
8269 UNVERIFIED;
8271 dreg = alloc_preg (cfg);
8272 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8273 ins->dreg = dreg;
8274 ins->sreg1 = sp [0]->dreg;
8275 ins->type = STACK_I4;
8276 MONO_ADD_INS (cfg->cbb, ins);
8277 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8278 cfg->cbb->has_array_access = TRUE;
8279 ip ++;
8280 *sp++ = ins;
8281 break;
8282 case CEE_LDELEMA:
8283 CHECK_STACK (2);
8284 sp -= 2;
8285 CHECK_OPSIZE (5);
8286 if (sp [0]->type != STACK_OBJ)
8287 UNVERIFIED;
8289 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8291 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8292 CHECK_TYPELOAD (klass);
8293 /* We need to make sure that this array is exactly the type it needs
8294 * to be for correctness. The wrappers are lax with their usage,
8295 * so we need to ignore them here.
8297 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8298 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8300 readonly = FALSE;
8301 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8302 *sp++ = ins;
8303 ip += 5;
8304 break;
8305 case CEE_LDELEM_ANY:
8306 case CEE_LDELEM_I1:
8307 case CEE_LDELEM_U1:
8308 case CEE_LDELEM_I2:
8309 case CEE_LDELEM_U2:
8310 case CEE_LDELEM_I4:
8311 case CEE_LDELEM_U4:
8312 case CEE_LDELEM_I8:
8313 case CEE_LDELEM_I:
8314 case CEE_LDELEM_R4:
8315 case CEE_LDELEM_R8:
8316 case CEE_LDELEM_REF: {
8317 MonoInst *addr;
8319 CHECK_STACK (2);
8320 sp -= 2;
8322 if (*ip == CEE_LDELEM_ANY) {
8323 CHECK_OPSIZE (5);
8324 token = read32 (ip + 1);
8325 klass = mini_get_class (method, token, generic_context);
8326 CHECK_TYPELOAD (klass);
8327 mono_class_init (klass);
8329 else
8330 klass = array_access_to_klass (*ip);
8332 if (sp [0]->type != STACK_OBJ)
8333 UNVERIFIED;
8335 cfg->flags |= MONO_CFG_HAS_LDELEMA;
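/* When the index is a constant the element offset is known at compile time, so emit an explicit bounds check and load the element directly instead of computing its address with ldelema. */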
8337 if (sp [1]->opcode == OP_ICONST) {
8338 int array_reg = sp [0]->dreg;
8339 int index_reg = sp [1]->dreg;
8340 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8342 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8344 } else {
8345 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8346 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8348 *sp++ = ins;
8349 if (*ip == CEE_LDELEM_ANY)
8350 ip += 5;
8351 else
8352 ++ip;
8353 break;
8355 case CEE_STELEM_I:
8356 case CEE_STELEM_I1:
8357 case CEE_STELEM_I2:
8358 case CEE_STELEM_I4:
8359 case CEE_STELEM_I8:
8360 case CEE_STELEM_R4:
8361 case CEE_STELEM_R8:
8362 case CEE_STELEM_REF:
8363 case CEE_STELEM_ANY: {
8364 MonoInst *addr;
8366 CHECK_STACK (3);
8367 sp -= 3;
8369 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8371 if (*ip == CEE_STELEM_ANY) {
8372 CHECK_OPSIZE (5);
8373 token = read32 (ip + 1);
8374 klass = mini_get_class (method, token, generic_context);
8375 CHECK_TYPELOAD (klass);
8376 mono_class_init (klass);
8378 else
8379 klass = array_access_to_klass (*ip);
8381 if (sp [0]->type != STACK_OBJ)
8382 UNVERIFIED;
8384 /* storing a NULL doesn't need any of the complex checks in stelemref */
8385 if (generic_class_is_reference_type (cfg, klass) &&
8386 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8387 MonoMethod* helper = mono_marshal_get_stelemref ();
8388 MonoInst *iargs [3];
8390 if (sp [0]->type != STACK_OBJ)
8391 UNVERIFIED;
8392 if (sp [2]->type != STACK_OBJ)
8393 UNVERIFIED;
8395 iargs [2] = sp [2];
8396 iargs [1] = sp [1];
8397 iargs [0] = sp [0];
8399 mono_emit_method_call (cfg, helper, iargs, NULL);
8400 } else {
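/* Same constant-index fast path as for ldelem: explicit bounds check plus a direct store at the known offset. */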
8401 if (sp [1]->opcode == OP_ICONST) {
8402 int array_reg = sp [0]->dreg;
8403 int index_reg = sp [1]->dreg;
8404 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8406 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8407 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8408 } else {
8409 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8410 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8414 if (*ip == CEE_STELEM_ANY)
8415 ip += 5;
8416 else
8417 ++ip;
8418 inline_costs += 1;
8419 break;
8421 case CEE_CKFINITE: {
8422 CHECK_STACK (1);
8423 --sp;
8425 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8426 ins->sreg1 = sp [0]->dreg;
8427 ins->dreg = alloc_freg (cfg);
8428 ins->type = STACK_R8;
8429 MONO_ADD_INS (bblock, ins);
8430 *sp++ = ins;
8432 mono_decompose_opcode (cfg, ins);
8434 ++ip;
8435 break;
8437 case CEE_REFANYVAL: {
8438 MonoInst *src_var, *src;
8440 int klass_reg = alloc_preg (cfg);
8441 int dreg = alloc_preg (cfg);
8443 CHECK_STACK (1);
8444 MONO_INST_NEW (cfg, ins, *ip);
8445 --sp;
8446 CHECK_OPSIZE (5);
8447 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8448 CHECK_TYPELOAD (klass);
8449 mono_class_init (klass);
8451 if (cfg->generic_sharing_context)
8452 context_used = mono_class_check_context_used (klass);
8454 // FIXME:
8455 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8456 if (!src_var)
8457 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8458 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8461 if (context_used) {
8462 MonoInst *klass_ins;
8464 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8465 klass, MONO_RGCTX_INFO_KLASS);
8467 // FIXME:
8468 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8469 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8470 } else {
8471 mini_emit_class_check (cfg, klass_reg, klass);
8473 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8474 ins->type = STACK_MP;
8475 *sp++ = ins;
8476 ip += 5;
8477 break;
8479 case CEE_MKREFANY: {
8480 MonoInst *loc, *addr;
8482 CHECK_STACK (1);
8483 MONO_INST_NEW (cfg, ins, *ip);
8484 --sp;
8485 CHECK_OPSIZE (5);
8486 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8487 CHECK_TYPELOAD (klass);
8488 mono_class_init (klass);
8490 if (cfg->generic_sharing_context)
8491 context_used = mono_class_check_context_used (klass);
8493 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8494 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8496 if (context_used) {
8497 MonoInst *const_ins;
8498 int type_reg = alloc_preg (cfg);
8500 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8501 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8503 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8504 } else if (cfg->compile_aot) {
8505 int const_reg = alloc_preg (cfg);
8506 int type_reg = alloc_preg (cfg);
8508 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8512 } else {
8513 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8514 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8516 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8518 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8519 ins->type = STACK_VTYPE;
8520 ins->klass = mono_defaults.typed_reference_class;
8521 *sp++ = ins;
8522 ip += 5;
8523 break;
8525 case CEE_LDTOKEN: {
8526 gpointer handle;
8527 MonoClass *handle_class;
8529 CHECK_STACK_OVF (1);
8531 CHECK_OPSIZE (5);
8532 n = read32 (ip + 1);
8534 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8535 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8536 handle = mono_method_get_wrapper_data (method, n);
8537 handle_class = mono_method_get_wrapper_data (method, n + 1);
8538 if (handle_class == mono_defaults.typehandle_class)
8539 handle = &((MonoClass*)handle)->byval_arg;
8541 else {
8542 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8544 if (!handle)
8545 goto load_error;
8546 mono_class_init (handle_class);
8547 if (cfg->generic_sharing_context) {
8548 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8549 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8550 /* This case handles ldtoken
8551 of an open type, like for
8552 typeof(Gen<>). */
8553 context_used = 0;
8554 } else if (handle_class == mono_defaults.typehandle_class) {
8555 /* If we get a MONO_TYPE_CLASS
8556 then we need to provide the
8557 open type, not an
8558 instantiation of it. */
8559 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8560 context_used = 0;
8561 else
8562 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8563 } else if (handle_class == mono_defaults.fieldhandle_class)
8564 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8565 else if (handle_class == mono_defaults.methodhandle_class)
8566 context_used = mono_method_check_context_used (handle);
8567 else
8568 g_assert_not_reached ();
8571 if ((cfg->opt & MONO_OPT_SHARED) &&
8572 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8573 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8574 MonoInst *addr, *vtvar, *iargs [3];
8575 int method_context_used;
8577 if (cfg->generic_sharing_context)
8578 method_context_used = mono_method_check_context_used (method);
8579 else
8580 method_context_used = 0;
8582 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8584 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8585 EMIT_NEW_ICONST (cfg, iargs [1], n);
8586 if (method_context_used) {
8587 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8588 method, MONO_RGCTX_INFO_METHOD);
8589 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8590 } else {
8591 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8592 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8594 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8596 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8598 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8599 } else {
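/* Optimize the common 'ldtoken <type>' followed by a call to Type::GetTypeFromHandle () into a direct load of the reflection type object. */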
8600 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8601 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8602 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8603 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8604 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8605 MonoClass *tclass = mono_class_from_mono_type (handle);
8607 mono_class_init (tclass);
8608 if (context_used) {
8609 ins = emit_get_rgctx_klass (cfg, context_used,
8610 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8611 } else if (cfg->compile_aot) {
8612 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8613 } else {
8614 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8616 ins->type = STACK_OBJ;
8617 ins->klass = cmethod->klass;
8618 ip += 5;
8619 } else {
8620 MonoInst *addr, *vtvar;
8622 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8624 if (context_used) {
8625 if (handle_class == mono_defaults.typehandle_class) {
8626 ins = emit_get_rgctx_klass (cfg, context_used,
8627 mono_class_from_mono_type (handle),
8628 MONO_RGCTX_INFO_TYPE);
8629 } else if (handle_class == mono_defaults.methodhandle_class) {
8630 ins = emit_get_rgctx_method (cfg, context_used,
8631 handle, MONO_RGCTX_INFO_METHOD);
8632 } else if (handle_class == mono_defaults.fieldhandle_class) {
8633 ins = emit_get_rgctx_field (cfg, context_used,
8634 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8635 } else {
8636 g_assert_not_reached ();
8638 } else if (cfg->compile_aot) {
8639 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8640 } else {
8641 EMIT_NEW_PCONST (cfg, ins, handle);
8643 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8645 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8649 *sp++ = ins;
8650 ip += 5;
8651 break;
8653 case CEE_THROW:
8654 CHECK_STACK (1);
8655 MONO_INST_NEW (cfg, ins, OP_THROW);
8656 --sp;
8657 ins->sreg1 = sp [0]->dreg;
8658 ip++;
8659 bblock->out_of_line = TRUE;
8660 MONO_ADD_INS (bblock, ins);
8661 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8662 MONO_ADD_INS (bblock, ins);
8663 sp = stack_start;
8665 link_bblock (cfg, bblock, end_bblock);
8666 start_new_bblock = 1;
8667 break;
8668 case CEE_ENDFINALLY:
8669 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8670 MONO_ADD_INS (bblock, ins);
8671 ip++;
8672 start_new_bblock = 1;
8675 * Control will leave the method, so empty the stack; otherwise
8676 * the next basic block will start with a nonempty stack.
8678 while (sp != stack_start) {
8679 sp--;
8681 break;
8682 case CEE_LEAVE:
8683 case CEE_LEAVE_S: {
8684 GList *handlers;
8686 if (*ip == CEE_LEAVE) {
8687 CHECK_OPSIZE (5);
8688 target = ip + 5 + (gint32)read32(ip + 1);
8689 } else {
8690 CHECK_OPSIZE (2);
8691 target = ip + 2 + (signed char)(ip [1]);
8694 /* empty the stack */
8695 while (sp != stack_start) {
8696 sp--;
8700 * If this leave statement is in a catch block, check for a
8701 * pending exception, and rethrow it if necessary.
8703 for (i = 0; i < header->num_clauses; ++i) {
8704 MonoExceptionClause *clause = &header->clauses [i];
8707 * Use <= in the final comparison to handle clauses with multiple
8708 * leave statements, like in bug #78024.
8709 * The ordering of the exception clauses guarantees that we find the
8710 * innermost clause.
8712 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8713 MonoInst *exc_ins;
8714 MonoBasicBlock *dont_throw;
8717 MonoInst *load;
8719 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8722 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8724 NEW_BBLOCK (cfg, dont_throw);
8727 * Currently, we always rethrow the abort exception, despite the
8728 * fact that this is not correct. See thread6.cs for an example.
8729 * But propagating the abort exception is more important than
8730 * getting the semantics right.
8732 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8734 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8736 MONO_START_BB (cfg, dont_throw);
8737 bblock = cfg->cbb;
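/* Before branching to the leave target, call every finally handler between this leave and its target. */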
8741 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8742 GList *tmp;
8743 for (tmp = handlers; tmp; tmp = tmp->next) {
8744 tblock = tmp->data;
8745 link_bblock (cfg, bblock, tblock);
8746 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8747 ins->inst_target_bb = tblock;
8748 MONO_ADD_INS (bblock, ins);
8750 g_list_free (handlers);
8753 MONO_INST_NEW (cfg, ins, OP_BR);
8754 MONO_ADD_INS (bblock, ins);
8755 GET_BBLOCK (cfg, tblock, target);
8756 link_bblock (cfg, bblock, tblock);
8757 ins->inst_target_bb = tblock;
8758 start_new_bblock = 1;
8760 if (*ip == CEE_LEAVE)
8761 ip += 5;
8762 else
8763 ip += 2;
8765 break;
8769 * Mono specific opcodes
8771 case MONO_CUSTOM_PREFIX: {
8773 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8775 CHECK_OPSIZE (2);
8776 switch (ip [1]) {
8777 case CEE_MONO_ICALL: {
8778 gpointer func;
8779 MonoJitICallInfo *info;
8781 token = read32 (ip + 2);
8782 func = mono_method_get_wrapper_data (method, token);
8783 info = mono_find_jit_icall_by_addr (func);
8784 g_assert (info);
8786 CHECK_STACK (info->sig->param_count);
8787 sp -= info->sig->param_count;
8789 ins = mono_emit_jit_icall (cfg, info->func, sp);
8790 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8791 *sp++ = ins;
8793 ip += 6;
8794 inline_costs += 10 * num_calls++;
8796 break;
8798 case CEE_MONO_LDPTR: {
8799 gpointer ptr;
8801 CHECK_STACK_OVF (1);
8802 CHECK_OPSIZE (6);
8803 token = read32 (ip + 2);
8805 ptr = mono_method_get_wrapper_data (method, token);
8806 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8807 MonoJitICallInfo *callinfo;
8808 const char *icall_name;
8810 icall_name = method->name + strlen ("__icall_wrapper_");
8811 g_assert (icall_name);
8812 callinfo = mono_find_jit_icall_by_name (icall_name);
8813 g_assert (callinfo);
8815 if (ptr == callinfo->func) {
8816 /* Will be transformed into an AOTCONST later */
8817 EMIT_NEW_PCONST (cfg, ins, ptr);
8818 *sp++ = ins;
8819 ip += 6;
8820 break;
8823 /* FIXME: Generalize this */
8824 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8825 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8826 *sp++ = ins;
8827 ip += 6;
8828 break;
8830 EMIT_NEW_PCONST (cfg, ins, ptr);
8831 *sp++ = ins;
8832 ip += 6;
8833 inline_costs += 10 * num_calls++;
8834 /* Can't embed random pointers into AOT code */
8835 cfg->disable_aot = 1;
8836 break;
8838 case CEE_MONO_ICALL_ADDR: {
8839 MonoMethod *cmethod;
8840 gpointer ptr;
8842 CHECK_STACK_OVF (1);
8843 CHECK_OPSIZE (6);
8844 token = read32 (ip + 2);
8846 cmethod = mono_method_get_wrapper_data (method, token);
8848 if (cfg->compile_aot) {
8849 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8850 } else {
8851 ptr = mono_lookup_internal_call (cmethod);
8852 g_assert (ptr);
8853 EMIT_NEW_PCONST (cfg, ins, ptr);
8855 *sp++ = ins;
8856 ip += 6;
8857 break;
8859 case CEE_MONO_VTADDR: {
8860 MonoInst *src_var, *src;
8862 CHECK_STACK (1);
8863 --sp;
8865 // FIXME:
8866 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8867 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8868 *sp++ = src;
8869 ip += 2;
8870 break;
8872 case CEE_MONO_NEWOBJ: {
8873 MonoInst *iargs [2];
8875 CHECK_STACK_OVF (1);
8876 CHECK_OPSIZE (6);
8877 token = read32 (ip + 2);
8878 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8879 mono_class_init (klass);
8880 NEW_DOMAINCONST (cfg, iargs [0]);
8881 MONO_ADD_INS (cfg->cbb, iargs [0]);
8882 NEW_CLASSCONST (cfg, iargs [1], klass);
8883 MONO_ADD_INS (cfg->cbb, iargs [1]);
8884 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8885 ip += 6;
8886 inline_costs += 10 * num_calls++;
8887 break;
8889 case CEE_MONO_OBJADDR:
8890 CHECK_STACK (1);
8891 --sp;
8892 MONO_INST_NEW (cfg, ins, OP_MOVE);
8893 ins->dreg = alloc_preg (cfg);
8894 ins->sreg1 = sp [0]->dreg;
8895 ins->type = STACK_MP;
8896 MONO_ADD_INS (cfg->cbb, ins);
8897 *sp++ = ins;
8898 ip += 2;
8899 break;
8900 case CEE_MONO_LDNATIVEOBJ:
8902 * Similar to LDOBJ, but instead load the unmanaged
8903 * representation of the vtype to the stack.
8905 CHECK_STACK (1);
8906 CHECK_OPSIZE (6);
8907 --sp;
8908 token = read32 (ip + 2);
8909 klass = mono_method_get_wrapper_data (method, token);
8910 g_assert (klass->valuetype);
8911 mono_class_init (klass);
8914 MonoInst *src, *dest, *temp;
8916 src = sp [0];
8917 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8918 temp->backend.is_pinvoke = 1;
8919 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8920 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8922 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8923 dest->type = STACK_VTYPE;
8924 dest->klass = klass;
8926 *sp ++ = dest;
8927 ip += 6;
8929 break;
8930 case CEE_MONO_RETOBJ: {
8932 * Same as RET, but return the native representation of a vtype
8933 * to the caller.
8935 g_assert (cfg->ret);
8936 g_assert (mono_method_signature (method)->pinvoke);
8937 CHECK_STACK (1);
8938 --sp;
8940 CHECK_OPSIZE (6);
8941 token = read32 (ip + 2);
8942 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8944 if (!cfg->vret_addr) {
8945 g_assert (cfg->ret_var_is_local);
8947 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8948 } else {
8949 EMIT_NEW_RETLOADA (cfg, ins);
8951 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8953 if (sp != stack_start)
8954 UNVERIFIED;
8956 MONO_INST_NEW (cfg, ins, OP_BR);
8957 ins->inst_target_bb = end_bblock;
8958 MONO_ADD_INS (bblock, ins);
8959 link_bblock (cfg, bblock, end_bblock);
8960 start_new_bblock = 1;
8961 ip += 6;
8962 break;
8964 case CEE_MONO_CISINST:
8965 case CEE_MONO_CCASTCLASS: {
8966 int token;
8967 CHECK_STACK (1);
8968 --sp;
8969 CHECK_OPSIZE (6);
8970 token = read32 (ip + 2);
8971 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8972 if (ip [1] == CEE_MONO_CISINST)
8973 ins = handle_cisinst (cfg, klass, sp [0]);
8974 else
8975 ins = handle_ccastclass (cfg, klass, sp [0]);
8976 bblock = cfg->cbb;
8977 *sp++ = ins;
8978 ip += 6;
8979 break;
8981 case CEE_MONO_SAVE_LMF:
8982 case CEE_MONO_RESTORE_LMF:
8983 #ifdef MONO_ARCH_HAVE_LMF_OPS
8984 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
8985 MONO_ADD_INS (bblock, ins);
8986 cfg->need_lmf_area = TRUE;
8987 #endif
8988 ip += 2;
8989 break;
8990 case CEE_MONO_CLASSCONST:
8991 CHECK_STACK_OVF (1);
8992 CHECK_OPSIZE (6);
8993 token = read32 (ip + 2);
8994 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
8995 *sp++ = ins;
8996 ip += 6;
8997 inline_costs += 10 * num_calls++;
8998 break;
8999 case CEE_MONO_NOT_TAKEN:
9000 bblock->out_of_line = TRUE;
9001 ip += 2;
9002 break;
9003 case CEE_MONO_TLS:
9004 CHECK_STACK_OVF (1);
9005 CHECK_OPSIZE (6);
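/* OP_TLS_GET loads a pointer-sized value from thread-local storage at the offset encoded in the IL stream. */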
9006 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9007 ins->dreg = alloc_preg (cfg);
9008 ins->inst_offset = (gint32)read32 (ip + 2);
9009 ins->type = STACK_PTR;
9010 MONO_ADD_INS (bblock, ins);
9011 *sp++ = ins;
9012 ip += 6;
9013 break;
9014 default:
9015 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9016 break;
9018 break;
9021 case CEE_PREFIX1: {
9022 CHECK_OPSIZE (2);
9023 switch (ip [1]) {
9024 case CEE_ARGLIST: {
9025 /* somewhat similar to LDTOKEN */
9026 MonoInst *addr, *vtvar;
9027 CHECK_STACK_OVF (1);
9028 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9030 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9031 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9033 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9034 ins->type = STACK_VTYPE;
9035 ins->klass = mono_defaults.argumenthandle_class;
9036 *sp++ = ins;
9037 ip += 2;
9038 break;
9040 case CEE_CEQ:
9041 case CEE_CGT:
9042 case CEE_CGT_UN:
9043 case CEE_CLT:
9044 case CEE_CLT_UN: {
9045 MonoInst *cmp;
9046 CHECK_STACK (2);
9047 /*
9048 * The following transforms:
9049 * CEE_CEQ into OP_CEQ
9050 * CEE_CGT into OP_CGT
9051 * CEE_CGT_UN into OP_CGT_UN
9052 * CEE_CLT into OP_CLT
9053 * CEE_CLT_UN into OP_CLT_UN
9054 */
9055 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9057 MONO_INST_NEW (cfg, ins, cmp->opcode);
9058 sp -= 2;
9059 cmp->sreg1 = sp [0]->dreg;
9060 cmp->sreg2 = sp [1]->dreg;
9061 type_from_op (cmp, sp [0], sp [1]);
9062 CHECK_TYPE (cmp);
9063 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9064 cmp->opcode = OP_LCOMPARE;
9065 else if (sp [0]->type == STACK_R8)
9066 cmp->opcode = OP_FCOMPARE;
9067 else
9068 cmp->opcode = OP_ICOMPARE;
9069 MONO_ADD_INS (bblock, cmp);
9070 ins->type = STACK_I4;
9071 ins->dreg = alloc_dreg (cfg, ins->type);
9072 type_from_op (ins, sp [0], sp [1]);
9074 if (cmp->opcode == OP_FCOMPARE) {
9075 /*
9076 * The backends expect the fceq opcodes to do the
9077 * comparison too.
9078 */
9079 cmp->opcode = OP_NOP;
9080 ins->sreg1 = cmp->sreg1;
9081 ins->sreg2 = cmp->sreg2;
9083 MONO_ADD_INS (bblock, ins);
9084 *sp++ = ins;
9085 ip += 2;
9086 break;
9088 case CEE_LDFTN: {
9089 MonoInst *argconst;
9090 MonoMethod *cil_method;
9091 gboolean needs_static_rgctx_invoke;
9093 CHECK_STACK_OVF (1);
9094 CHECK_OPSIZE (6);
9095 n = read32 (ip + 2);
9096 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9097 if (!cmethod)
9098 goto load_error;
9099 mono_class_init (cmethod->klass);
9101 mono_save_token_info (cfg, image, n, cmethod);
9103 if (cfg->generic_sharing_context)
9104 context_used = mono_method_check_context_used (cmethod);
9106 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9108 cil_method = cmethod;
9109 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9110 METHOD_ACCESS_FAILURE;
9112 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9113 if (check_linkdemand (cfg, method, cmethod))
9114 INLINE_FAILURE;
9115 CHECK_CFG_EXCEPTION;
9116 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9117 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9118 }
9120 /*
9121 * Optimize the common case of ldftn+delegate creation
9122 */
9123 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9124 /* FIXME: SGEN support */
9125 /* FIXME: handle shared static generic methods */
9126 /* FIXME: handle this in shared code */
9127 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9128 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9129 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9130 MonoInst *target_ins;
9131 MonoMethod *invoke;
9133 invoke = mono_get_delegate_invoke (ctor_method->klass);
9134 if (!invoke || !mono_method_signature (invoke))
9135 goto load_error;
9137 ip += 6;
9138 if (cfg->verbose_level > 3)
9139 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9140 target_ins = sp [-1];
9141 sp --;
9142 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9143 ip += 5;
9144 sp ++;
9145 break;
9148 #endif
9150 if (context_used) {
9151 if (needs_static_rgctx_invoke)
9152 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9154 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9155 } else if (needs_static_rgctx_invoke) {
9156 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9157 } else {
9158 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9160 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9161 *sp++ = ins;
9163 ip += 6;
9164 inline_costs += 10 * num_calls++;
9165 break;
9167 case CEE_LDVIRTFTN: {
9168 MonoInst *args [2];
9170 CHECK_STACK (1);
9171 CHECK_OPSIZE (6);
9172 n = read32 (ip + 2);
9173 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9174 if (!cmethod)
9175 goto load_error;
9176 mono_class_init (cmethod->klass);
9178 if (cfg->generic_sharing_context)
9179 context_used = mono_method_check_context_used (cmethod);
9181 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9182 if (check_linkdemand (cfg, method, cmethod))
9183 INLINE_FAILURE;
9184 CHECK_CFG_EXCEPTION;
9185 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9186 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9189 --sp;
9190 args [0] = *sp;
9192 if (context_used) {
9193 args [1] = emit_get_rgctx_method (cfg, context_used,
9194 cmethod, MONO_RGCTX_INFO_METHOD);
9195 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9196 } else {
9197 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9198 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9201 ip += 6;
9202 inline_costs += 10 * num_calls++;
9203 break;
9205 case CEE_LDARG:
9206 CHECK_STACK_OVF (1);
9207 CHECK_OPSIZE (4);
9208 n = read16 (ip + 2);
9209 CHECK_ARG (n);
9210 EMIT_NEW_ARGLOAD (cfg, ins, n);
9211 *sp++ = ins;
9212 ip += 4;
9213 break;
9214 case CEE_LDARGA:
9215 CHECK_STACK_OVF (1);
9216 CHECK_OPSIZE (4);
9217 n = read16 (ip + 2);
9218 CHECK_ARG (n);
9219 NEW_ARGLOADA (cfg, ins, n);
9220 MONO_ADD_INS (cfg->cbb, ins);
9221 *sp++ = ins;
9222 ip += 4;
9223 break;
9224 case CEE_STARG:
9225 CHECK_STACK (1);
9226 --sp;
9227 CHECK_OPSIZE (4);
9228 n = read16 (ip + 2);
9229 CHECK_ARG (n);
9230 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9231 UNVERIFIED;
9232 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9233 ip += 4;
9234 break;
9235 case CEE_LDLOC:
9236 CHECK_STACK_OVF (1);
9237 CHECK_OPSIZE (4);
9238 n = read16 (ip + 2);
9239 CHECK_LOCAL (n);
9240 EMIT_NEW_LOCLOAD (cfg, ins, n);
9241 *sp++ = ins;
9242 ip += 4;
9243 break;
9244 case CEE_LDLOCA: {
9245 unsigned char *tmp_ip;
9246 CHECK_STACK_OVF (1);
9247 CHECK_OPSIZE (4);
9248 n = read16 (ip + 2);
9249 CHECK_LOCAL (n);
9251 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9252 ip = tmp_ip;
9253 inline_costs += 1;
9254 break;
9257 EMIT_NEW_LOCLOADA (cfg, ins, n);
9258 *sp++ = ins;
9259 ip += 4;
9260 break;
9262 case CEE_STLOC:
9263 CHECK_STACK (1);
9264 --sp;
9265 CHECK_OPSIZE (4);
9266 n = read16 (ip + 2);
9267 CHECK_LOCAL (n);
9268 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9269 UNVERIFIED;
9270 emit_stloc_ir (cfg, sp, header, n);
9271 ip += 4;
9272 inline_costs += 1;
9273 break;
9274 case CEE_LOCALLOC:
9275 CHECK_STACK (1);
9276 --sp;
9277 if (sp != stack_start)
9278 UNVERIFIED;
9279 if (cfg->method != method)
9280 /*
9281 * Inlining this into a loop in a parent could lead to
9282 * stack overflows, which is different behavior than the
9283 * non-inlined case, thus disable inlining in this case.
9284 */
9285 goto inline_failure;
9287 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9288 ins->dreg = alloc_preg (cfg);
9289 ins->sreg1 = sp [0]->dreg;
9290 ins->type = STACK_PTR;
9291 MONO_ADD_INS (cfg->cbb, ins);
9293 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9294 if (header->init_locals)
9295 ins->flags |= MONO_INST_INIT;
9297 *sp++ = ins;
9298 ip += 2;
9299 break;
9300 case CEE_ENDFILTER: {
9301 MonoExceptionClause *clause, *nearest;
9302 int cc, nearest_num;
9304 CHECK_STACK (1);
9305 --sp;
9306 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9307 UNVERIFIED;
9308 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9309 ins->sreg1 = (*sp)->dreg;
9310 MONO_ADD_INS (bblock, ins);
9311 start_new_bblock = 1;
9312 ip += 2;
9314 nearest = NULL;
9315 nearest_num = 0;
9316 for (cc = 0; cc < header->num_clauses; ++cc) {
9317 clause = &header->clauses [cc];
9318 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9319 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9320 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9321 nearest = clause;
9322 nearest_num = cc;
9325 g_assert (nearest);
9326 if ((ip - header->code) != nearest->handler_offset)
9327 UNVERIFIED;
9329 break;
9331 case CEE_UNALIGNED_:
9332 ins_flag |= MONO_INST_UNALIGNED;
9333 /* FIXME: record alignment? we can assume 1 for now */
9334 CHECK_OPSIZE (3);
9335 ip += 3;
9336 break;
9337 case CEE_VOLATILE_:
9338 ins_flag |= MONO_INST_VOLATILE;
9339 ip += 2;
9340 break;
9341 case CEE_TAIL_:
9342 ins_flag |= MONO_INST_TAILCALL;
9343 cfg->flags |= MONO_CFG_HAS_TAIL;
9344 /* Can't inline tail calls at this time */
9345 inline_costs += 100000;
9346 ip += 2;
9347 break;
9348 case CEE_INITOBJ:
9349 CHECK_STACK (1);
9350 --sp;
9351 CHECK_OPSIZE (6);
9352 token = read32 (ip + 2);
9353 klass = mini_get_class (method, token, generic_context);
9354 CHECK_TYPELOAD (klass);
9355 if (generic_class_is_reference_type (cfg, klass))
9356 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9357 else
9358 mini_emit_initobj (cfg, *sp, NULL, klass);
9359 ip += 6;
9360 inline_costs += 1;
9361 break;
9362 case CEE_CONSTRAINED_:
9363 CHECK_OPSIZE (6);
9364 token = read32 (ip + 2);
9365 constrained_call = mono_class_get_full (image, token, generic_context);
9366 CHECK_TYPELOAD (constrained_call);
9367 ip += 6;
9368 break;
9369 case CEE_CPBLK:
9370 case CEE_INITBLK: {
9371 MonoInst *iargs [3];
9372 CHECK_STACK (3);
9373 sp -= 3;
9375 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9376 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9377 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9378 /* emit_memset only works when val == 0 */
9379 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9380 } else {
9381 iargs [0] = sp [0];
9382 iargs [1] = sp [1];
9383 iargs [2] = sp [2];
9384 if (ip [1] == CEE_CPBLK) {
9385 MonoMethod *memcpy_method = get_memcpy_method ();
9386 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9387 } else {
9388 MonoMethod *memset_method = get_memset_method ();
9389 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9392 ip += 2;
9393 inline_costs += 1;
9394 break;
9396 case CEE_NO_:
9397 CHECK_OPSIZE (3);
9398 if (ip [2] & 0x1)
9399 ins_flag |= MONO_INST_NOTYPECHECK;
9400 if (ip [2] & 0x2)
9401 ins_flag |= MONO_INST_NORANGECHECK;
9402 /* we ignore the no-nullcheck for now since we
9403 * really do it explicitly only when doing callvirt->call
9404 */
9405 ip += 3;
9406 break;
9407 case CEE_RETHROW: {
9408 MonoInst *load;
9409 int handler_offset = -1;
9411 for (i = 0; i < header->num_clauses; ++i) {
9412 MonoExceptionClause *clause = &header->clauses [i];
9413 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9414 handler_offset = clause->handler_offset;
9415 break;
9419 bblock->flags |= BB_EXCEPTION_UNSAFE;
9421 g_assert (handler_offset != -1);
9423 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9424 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9425 ins->sreg1 = load->dreg;
9426 MONO_ADD_INS (bblock, ins);
9427 sp = stack_start;
9428 link_bblock (cfg, bblock, end_bblock);
9429 start_new_bblock = 1;
9430 ip += 2;
9431 break;
9433 case CEE_SIZEOF: {
9434 guint32 align;
9435 int ialign;
9437 CHECK_STACK_OVF (1);
9438 CHECK_OPSIZE (6);
9439 token = read32 (ip + 2);
9440 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9441 MonoType *type = mono_type_create_from_typespec (image, token);
9442 token = mono_type_size (type, &ialign);
9443 } else {
9444 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9445 CHECK_TYPELOAD (klass);
9446 mono_class_init (klass);
9447 token = mono_class_value_size (klass, &align);
9449 EMIT_NEW_ICONST (cfg, ins, token);
9450 *sp++= ins;
9451 ip += 6;
9452 break;
9454 case CEE_REFANYTYPE: {
9455 MonoInst *src_var, *src;
9457 CHECK_STACK (1);
9458 --sp;
9460 // FIXME:
9461 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9462 if (!src_var)
9463 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9464 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9465 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9466 *sp++ = ins;
9467 ip += 2;
9468 break;
9470 case CEE_READONLY_:
9471 readonly = TRUE;
9472 ip += 2;
9473 break;
9474 default:
9475 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9477 break;
9479 default:
9480 g_error ("opcode 0x%02x not handled", *ip);
9483 if (start_new_bblock != 1)
9484 UNVERIFIED;
9486 bblock->cil_length = ip - bblock->cil_code;
9487 bblock->next_bb = end_bblock;
9489 if (cfg->method == method && cfg->domainvar) {
9490 MonoInst *store;
9491 MonoInst *get_domain;
9493 cfg->cbb = init_localsbb;
9495 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9496 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9497 }
9498 else {
9499 get_domain->dreg = alloc_preg (cfg);
9500 MONO_ADD_INS (cfg->cbb, get_domain);
9502 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9503 MONO_ADD_INS (cfg->cbb, store);
9506 if (cfg->method == method && cfg->got_var)
9507 mono_emit_load_got_addr (cfg);
9509 if (header->init_locals) {
9510 MonoInst *store;
9512 cfg->cbb = init_localsbb;
9513 cfg->ip = NULL;
9514 for (i = 0; i < header->num_locals; ++i) {
9515 MonoType *ptype = header->locals [i];
9516 int t = ptype->type;
9517 dreg = cfg->locals [i]->dreg;
9519 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9520 t = mono_class_enum_basetype (ptype->data.klass)->type;
9521 if (ptype->byref) {
9522 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9523 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9524 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9525 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9526 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9527 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9528 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9529 ins->type = STACK_R8;
9530 ins->inst_p0 = (void*)&r8_0;
9531 ins->dreg = alloc_dreg (cfg, STACK_R8);
9532 MONO_ADD_INS (init_localsbb, ins);
9533 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9534 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9535 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9536 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9537 } else {
9538 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9543 cfg->ip = NULL;
9545 if (cfg->method == method) {
9546 MonoBasicBlock *bb;
9547 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9548 bb->region = mono_find_block_region (cfg, bb->real_offset);
9549 if (cfg->spvars)
9550 mono_create_spvar_for_region (cfg, bb->region);
9551 if (cfg->verbose_level > 2)
9552 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9556 g_slist_free (class_inits);
9557 dont_inline = g_list_remove (dont_inline, method);
9559 if (inline_costs < 0) {
9560 char *mname;
9562 /* Method is too large */
9563 mname = mono_method_full_name (method, TRUE);
9564 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9565 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9566 g_free (mname);
9567 return -1;
9570 if ((cfg->verbose_level > 2) && (cfg->method == method))
9571 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9573 return inline_costs;
9575 exception_exit:
9576 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9577 g_slist_free (class_inits);
9578 dont_inline = g_list_remove (dont_inline, method);
9579 return -1;
9581 inline_failure:
9582 g_slist_free (class_inits);
9583 dont_inline = g_list_remove (dont_inline, method);
9584 return -1;
9586 load_error:
9587 g_slist_free (class_inits);
9588 dont_inline = g_list_remove (dont_inline, method);
9589 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9590 return -1;
9592 unverified:
9593 g_slist_free (class_inits);
9594 dont_inline = g_list_remove (dont_inline, method);
9595 set_exception_type_from_invalid_il (cfg, method, ip);
9596 return -1;
9599 static int
9600 store_membase_reg_to_store_membase_imm (int opcode)
9602 switch (opcode) {
9603 case OP_STORE_MEMBASE_REG:
9604 return OP_STORE_MEMBASE_IMM;
9605 case OP_STOREI1_MEMBASE_REG:
9606 return OP_STOREI1_MEMBASE_IMM;
9607 case OP_STOREI2_MEMBASE_REG:
9608 return OP_STOREI2_MEMBASE_IMM;
9609 case OP_STOREI4_MEMBASE_REG:
9610 return OP_STOREI4_MEMBASE_IMM;
9611 case OP_STOREI8_MEMBASE_REG:
9612 return OP_STOREI8_MEMBASE_IMM;
9613 default:
9614 g_assert_not_reached ();
9617 return -1;
9620 #endif /* DISABLE_JIT */
9622 int
9623 mono_op_to_op_imm (int opcode)
9624 {
9625 switch (opcode) {
9626 case OP_IADD:
9627 return OP_IADD_IMM;
9628 case OP_ISUB:
9629 return OP_ISUB_IMM;
9630 case OP_IDIV:
9631 return OP_IDIV_IMM;
9632 case OP_IDIV_UN:
9633 return OP_IDIV_UN_IMM;
9634 case OP_IREM:
9635 return OP_IREM_IMM;
9636 case OP_IREM_UN:
9637 return OP_IREM_UN_IMM;
9638 case OP_IMUL:
9639 return OP_IMUL_IMM;
9640 case OP_IAND:
9641 return OP_IAND_IMM;
9642 case OP_IOR:
9643 return OP_IOR_IMM;
9644 case OP_IXOR:
9645 return OP_IXOR_IMM;
9646 case OP_ISHL:
9647 return OP_ISHL_IMM;
9648 case OP_ISHR:
9649 return OP_ISHR_IMM;
9650 case OP_ISHR_UN:
9651 return OP_ISHR_UN_IMM;
9653 case OP_LADD:
9654 return OP_LADD_IMM;
9655 case OP_LSUB:
9656 return OP_LSUB_IMM;
9657 case OP_LAND:
9658 return OP_LAND_IMM;
9659 case OP_LOR:
9660 return OP_LOR_IMM;
9661 case OP_LXOR:
9662 return OP_LXOR_IMM;
9663 case OP_LSHL:
9664 return OP_LSHL_IMM;
9665 case OP_LSHR:
9666 return OP_LSHR_IMM;
9667 case OP_LSHR_UN:
9668 return OP_LSHR_UN_IMM;
9670 case OP_COMPARE:
9671 return OP_COMPARE_IMM;
9672 case OP_ICOMPARE:
9673 return OP_ICOMPARE_IMM;
9674 case OP_LCOMPARE:
9675 return OP_LCOMPARE_IMM;
9677 case OP_STORE_MEMBASE_REG:
9678 return OP_STORE_MEMBASE_IMM;
9679 case OP_STOREI1_MEMBASE_REG:
9680 return OP_STOREI1_MEMBASE_IMM;
9681 case OP_STOREI2_MEMBASE_REG:
9682 return OP_STOREI2_MEMBASE_IMM;
9683 case OP_STOREI4_MEMBASE_REG:
9684 return OP_STOREI4_MEMBASE_IMM;
9686 #if defined(__i386__) || defined (__x86_64__)
9687 case OP_X86_PUSH:
9688 return OP_X86_PUSH_IMM;
9689 case OP_X86_COMPARE_MEMBASE_REG:
9690 return OP_X86_COMPARE_MEMBASE_IMM;
9691 #endif
9692 #if defined(__x86_64__)
9693 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9694 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9695 #endif
9696 case OP_VOIDCALL_REG:
9697 return OP_VOIDCALL;
9698 case OP_CALL_REG:
9699 return OP_CALL;
9700 case OP_LCALL_REG:
9701 return OP_LCALL;
9702 case OP_FCALL_REG:
9703 return OP_FCALL;
9704 case OP_LOCALLOC:
9705 return OP_LOCALLOC_IMM;
9706 }
9708 return -1;
9709 }
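/*
 * Illustrative sketch (not code from the original passes in this file): how a
 * local optimization could use mono_op_to_op_imm () to fold a known constant
 * operand into the consuming instruction. The helper name and the simplified
 * handling below are assumptions made for illustration only.
 */
static gboolean
try_fold_imm_sketch (MonoInst *ins, MonoInst *src2_def)
{
	int imm_opcode;

	/* Only fold when the second source is a known 32 bit constant. */
	if (!src2_def || src2_def->opcode != OP_ICONST)
		return FALSE;

	imm_opcode = mono_op_to_op_imm (ins->opcode);
	if (imm_opcode == -1)
		return FALSE;

	/* E.g. OP_IADD dreg <- sreg1, sreg2 becomes OP_IADD_IMM dreg <- sreg1, #imm. */
	ins->opcode = imm_opcode;
	ins->inst_imm = src2_def->inst_c0;
	ins->sreg2 = -1;
	return TRUE;
}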
9711 static int
9712 ldind_to_load_membase (int opcode)
9714 switch (opcode) {
9715 case CEE_LDIND_I1:
9716 return OP_LOADI1_MEMBASE;
9717 case CEE_LDIND_U1:
9718 return OP_LOADU1_MEMBASE;
9719 case CEE_LDIND_I2:
9720 return OP_LOADI2_MEMBASE;
9721 case CEE_LDIND_U2:
9722 return OP_LOADU2_MEMBASE;
9723 case CEE_LDIND_I4:
9724 return OP_LOADI4_MEMBASE;
9725 case CEE_LDIND_U4:
9726 return OP_LOADU4_MEMBASE;
9727 case CEE_LDIND_I:
9728 return OP_LOAD_MEMBASE;
9729 case CEE_LDIND_REF:
9730 return OP_LOAD_MEMBASE;
9731 case CEE_LDIND_I8:
9732 return OP_LOADI8_MEMBASE;
9733 case CEE_LDIND_R4:
9734 return OP_LOADR4_MEMBASE;
9735 case CEE_LDIND_R8:
9736 return OP_LOADR8_MEMBASE;
9737 default:
9738 g_assert_not_reached ();
9741 return -1;
9744 static int
9745 stind_to_store_membase (int opcode)
9747 switch (opcode) {
9748 case CEE_STIND_I1:
9749 return OP_STOREI1_MEMBASE_REG;
9750 case CEE_STIND_I2:
9751 return OP_STOREI2_MEMBASE_REG;
9752 case CEE_STIND_I4:
9753 return OP_STOREI4_MEMBASE_REG;
9754 case CEE_STIND_I:
9755 case CEE_STIND_REF:
9756 return OP_STORE_MEMBASE_REG;
9757 case CEE_STIND_I8:
9758 return OP_STOREI8_MEMBASE_REG;
9759 case CEE_STIND_R4:
9760 return OP_STORER4_MEMBASE_REG;
9761 case CEE_STIND_R8:
9762 return OP_STORER8_MEMBASE_REG;
9763 default:
9764 g_assert_not_reached ();
9765 }
9767 return -1;
9768 }
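/*
 * Illustrative sketch (an assumption, not the decomposition used by the IR
 * conversion above): lowering an indirect CIL store into an explicit
 * _MEMBASE store using stind_to_store_membase ().
 */
static void
lower_stind_sketch (MonoInst *store, int stind_opcode, int addr_reg, int value_reg)
{
	/* Map e.g. CEE_STIND_I4 to OP_STOREI4_MEMBASE_REG ... */
	store->opcode = stind_to_store_membase (stind_opcode);
	/* ... storing value_reg through the pointer in addr_reg, at offset 0. */
	store->inst_destbasereg = addr_reg;
	store->inst_offset = 0;
	store->sreg1 = value_reg;
}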
9770 int
9771 mono_load_membase_to_load_mem (int opcode)
9772 {
9773 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9774 #if defined(__i386__) || defined(__x86_64__)
9775 switch (opcode) {
9776 case OP_LOAD_MEMBASE:
9777 return OP_LOAD_MEM;
9778 case OP_LOADU1_MEMBASE:
9779 return OP_LOADU1_MEM;
9780 case OP_LOADU2_MEMBASE:
9781 return OP_LOADU2_MEM;
9782 case OP_LOADI4_MEMBASE:
9783 return OP_LOADI4_MEM;
9784 case OP_LOADU4_MEMBASE:
9785 return OP_LOADU4_MEM;
9786 #if SIZEOF_REGISTER == 8
9787 case OP_LOADI8_MEMBASE:
9788 return OP_LOADI8_MEM;
9789 #endif
9791 #endif
9793 return -1;
9796 static inline int
9797 op_to_op_dest_membase (int store_opcode, int opcode)
9799 #if defined(__i386__)
9800 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9801 return -1;
9803 switch (opcode) {
9804 case OP_IADD:
9805 return OP_X86_ADD_MEMBASE_REG;
9806 case OP_ISUB:
9807 return OP_X86_SUB_MEMBASE_REG;
9808 case OP_IAND:
9809 return OP_X86_AND_MEMBASE_REG;
9810 case OP_IOR:
9811 return OP_X86_OR_MEMBASE_REG;
9812 case OP_IXOR:
9813 return OP_X86_XOR_MEMBASE_REG;
9814 case OP_ADD_IMM:
9815 case OP_IADD_IMM:
9816 return OP_X86_ADD_MEMBASE_IMM;
9817 case OP_SUB_IMM:
9818 case OP_ISUB_IMM:
9819 return OP_X86_SUB_MEMBASE_IMM;
9820 case OP_AND_IMM:
9821 case OP_IAND_IMM:
9822 return OP_X86_AND_MEMBASE_IMM;
9823 case OP_OR_IMM:
9824 case OP_IOR_IMM:
9825 return OP_X86_OR_MEMBASE_IMM;
9826 case OP_XOR_IMM:
9827 case OP_IXOR_IMM:
9828 return OP_X86_XOR_MEMBASE_IMM;
9829 case OP_MOVE:
9830 return OP_NOP;
9832 #endif
9834 #if defined(__x86_64__)
9835 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9836 return -1;
9838 switch (opcode) {
9839 case OP_IADD:
9840 return OP_X86_ADD_MEMBASE_REG;
9841 case OP_ISUB:
9842 return OP_X86_SUB_MEMBASE_REG;
9843 case OP_IAND:
9844 return OP_X86_AND_MEMBASE_REG;
9845 case OP_IOR:
9846 return OP_X86_OR_MEMBASE_REG;
9847 case OP_IXOR:
9848 return OP_X86_XOR_MEMBASE_REG;
9849 case OP_IADD_IMM:
9850 return OP_X86_ADD_MEMBASE_IMM;
9851 case OP_ISUB_IMM:
9852 return OP_X86_SUB_MEMBASE_IMM;
9853 case OP_IAND_IMM:
9854 return OP_X86_AND_MEMBASE_IMM;
9855 case OP_IOR_IMM:
9856 return OP_X86_OR_MEMBASE_IMM;
9857 case OP_IXOR_IMM:
9858 return OP_X86_XOR_MEMBASE_IMM;
9859 case OP_LADD:
9860 return OP_AMD64_ADD_MEMBASE_REG;
9861 case OP_LSUB:
9862 return OP_AMD64_SUB_MEMBASE_REG;
9863 case OP_LAND:
9864 return OP_AMD64_AND_MEMBASE_REG;
9865 case OP_LOR:
9866 return OP_AMD64_OR_MEMBASE_REG;
9867 case OP_LXOR:
9868 return OP_AMD64_XOR_MEMBASE_REG;
9869 case OP_ADD_IMM:
9870 case OP_LADD_IMM:
9871 return OP_AMD64_ADD_MEMBASE_IMM;
9872 case OP_SUB_IMM:
9873 case OP_LSUB_IMM:
9874 return OP_AMD64_SUB_MEMBASE_IMM;
9875 case OP_AND_IMM:
9876 case OP_LAND_IMM:
9877 return OP_AMD64_AND_MEMBASE_IMM;
9878 case OP_OR_IMM:
9879 case OP_LOR_IMM:
9880 return OP_AMD64_OR_MEMBASE_IMM;
9881 case OP_XOR_IMM:
9882 case OP_LXOR_IMM:
9883 return OP_AMD64_XOR_MEMBASE_IMM;
9884 case OP_MOVE:
9885 return OP_NOP;
9887 #endif
9889 return -1;
9892 static inline int
9893 op_to_op_store_membase (int store_opcode, int opcode)
9895 #if defined(__i386__) || defined(__x86_64__)
9896 switch (opcode) {
9897 case OP_ICEQ:
9898 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9899 return OP_X86_SETEQ_MEMBASE;
9900 case OP_CNE:
9901 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9902 return OP_X86_SETNE_MEMBASE;
9904 #endif
9906 return -1;
9909 static inline int
9910 op_to_op_src1_membase (int load_opcode, int opcode)
9912 #ifdef __i386__
9913 /* FIXME: This has sign extension issues */
9915 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9916 return OP_X86_COMPARE_MEMBASE8_IMM;
9919 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9920 return -1;
9922 switch (opcode) {
9923 case OP_X86_PUSH:
9924 return OP_X86_PUSH_MEMBASE;
9925 case OP_COMPARE_IMM:
9926 case OP_ICOMPARE_IMM:
9927 return OP_X86_COMPARE_MEMBASE_IMM;
9928 case OP_COMPARE:
9929 case OP_ICOMPARE:
9930 return OP_X86_COMPARE_MEMBASE_REG;
9932 #endif
9934 #ifdef __x86_64__
9935 /* FIXME: This has sign extension issues */
9937 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9938 return OP_X86_COMPARE_MEMBASE8_IMM;
9941 switch (opcode) {
9942 case OP_X86_PUSH:
9943 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9944 return OP_X86_PUSH_MEMBASE;
9945 break;
9946 /* FIXME: This only works for 32 bit immediates
9947 case OP_COMPARE_IMM:
9948 case OP_LCOMPARE_IMM:
9949 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9950 return OP_AMD64_COMPARE_MEMBASE_IMM;
9951 */
9952 case OP_ICOMPARE_IMM:
9953 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9954 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9955 break;
9956 case OP_COMPARE:
9957 case OP_LCOMPARE:
9958 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9959 return OP_AMD64_COMPARE_MEMBASE_REG;
9960 break;
9961 case OP_ICOMPARE:
9962 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9963 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9964 break;
9966 #endif
9968 return -1;
9971 static inline int
9972 op_to_op_src2_membase (int load_opcode, int opcode)
9974 #ifdef __i386__
9975 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9976 return -1;
9978 switch (opcode) {
9979 case OP_COMPARE:
9980 case OP_ICOMPARE:
9981 return OP_X86_COMPARE_REG_MEMBASE;
9982 case OP_IADD:
9983 return OP_X86_ADD_REG_MEMBASE;
9984 case OP_ISUB:
9985 return OP_X86_SUB_REG_MEMBASE;
9986 case OP_IAND:
9987 return OP_X86_AND_REG_MEMBASE;
9988 case OP_IOR:
9989 return OP_X86_OR_REG_MEMBASE;
9990 case OP_IXOR:
9991 return OP_X86_XOR_REG_MEMBASE;
9993 #endif
9995 #ifdef __x86_64__
9996 switch (opcode) {
9997 case OP_ICOMPARE:
9998 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9999 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10000 break;
10001 case OP_COMPARE:
10002 case OP_LCOMPARE:
10003 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10004 return OP_AMD64_COMPARE_REG_MEMBASE;
10005 break;
10006 case OP_IADD:
10007 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10008 return OP_X86_ADD_REG_MEMBASE;
10009 case OP_ISUB:
10010 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10011 return OP_X86_SUB_REG_MEMBASE;
10012 case OP_IAND:
10013 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10014 return OP_X86_AND_REG_MEMBASE;
10015 case OP_IOR:
10016 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10017 return OP_X86_OR_REG_MEMBASE;
10018 case OP_IXOR:
10019 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10020 return OP_X86_XOR_REG_MEMBASE;
10021 case OP_LADD:
10022 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10023 return OP_AMD64_ADD_REG_MEMBASE;
10024 case OP_LSUB:
10025 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10026 return OP_AMD64_SUB_REG_MEMBASE;
10027 case OP_LAND:
10028 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10029 return OP_AMD64_AND_REG_MEMBASE;
10030 case OP_LOR:
10031 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10032 return OP_AMD64_OR_REG_MEMBASE;
10033 case OP_LXOR:
10034 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10035 return OP_AMD64_XOR_REG_MEMBASE;
10037 #endif
10039 return -1;
10040 }
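/*
 * Illustrative sketch (an assumption, not the exact code in
 * mono_spill_global_vars () below): fuse the load of a stack allocated
 * variable feeding sreg1 directly into the consuming instruction, when
 * op_to_op_src1_membase () knows a suitable _membase form for it.
 */
static gboolean
try_fuse_sreg1_load_sketch (MonoInst *ins, MonoInst *var, int load_opcode)
{
	int fused = op_to_op_src1_membase (load_opcode, ins->opcode);

	if ((fused == -1) || (var->opcode != OP_REGOFFSET))
		return FALSE;

	/* Read the operand straight from [inst_basereg + inst_offset]. */
	ins->opcode = fused;
	ins->inst_basereg = var->inst_basereg;
	ins->inst_offset = var->inst_offset;
	return TRUE;
}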
10042 int
10043 mono_op_to_op_imm_noemul (int opcode)
10044 {
10045 switch (opcode) {
10046 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10047 case OP_LSHR:
10048 case OP_LSHL:
10049 case OP_LSHR_UN:
10050 #endif
10051 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10052 case OP_IDIV:
10053 case OP_IDIV_UN:
10054 case OP_IREM:
10055 case OP_IREM_UN:
10056 #endif
10057 return -1;
10058 default:
10059 return mono_op_to_op_imm (opcode);
10063 #ifndef DISABLE_JIT
10065 /*
10066 * mono_handle_global_vregs:
10067 *
10068 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10069 * for them.
10070 */
10071 void
10072 mono_handle_global_vregs (MonoCompile *cfg)
10074 gint32 *vreg_to_bb;
10075 MonoBasicBlock *bb;
10076 int i, pos;
10078 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10080 #ifdef MONO_ARCH_SIMD_INTRINSICS
10081 if (cfg->uses_simd_intrinsics)
10082 mono_simd_simplify_indirection (cfg);
10083 #endif
10085 /* Find local vregs used in more than one bb */
10086 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10087 MonoInst *ins = bb->code;
10088 int block_num = bb->block_num;
10090 if (cfg->verbose_level > 2)
10091 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10093 cfg->cbb = bb;
10094 for (; ins; ins = ins->next) {
10095 const char *spec = INS_INFO (ins->opcode);
10096 int regtype, regindex;
10097 gint32 prev_bb;
10099 if (G_UNLIKELY (cfg->verbose_level > 2))
10100 mono_print_ins (ins);
10102 g_assert (ins->opcode >= MONO_CEE_LAST);
10104 for (regindex = 0; regindex < 3; regindex ++) {
10105 int vreg;
10107 if (regindex == 0) {
10108 regtype = spec [MONO_INST_DEST];
10109 if (regtype == ' ')
10110 continue;
10111 vreg = ins->dreg;
10112 } else if (regindex == 1) {
10113 regtype = spec [MONO_INST_SRC1];
10114 if (regtype == ' ')
10115 continue;
10116 vreg = ins->sreg1;
10117 } else {
10118 regtype = spec [MONO_INST_SRC2];
10119 if (regtype == ' ')
10120 continue;
10121 vreg = ins->sreg2;
10124 #if SIZEOF_REGISTER == 4
10125 if (regtype == 'l') {
10126 /*
10127 * Since some instructions reference the original long vreg,
10128 * and some reference the two component vregs, it is quite hard
10129 * to determine when it needs to be global. So be conservative.
10130 */
10131 if (!get_vreg_to_inst (cfg, vreg)) {
10132 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10134 if (cfg->verbose_level > 2)
10135 printf ("LONG VREG R%d made global.\n", vreg);
10136 }
10138 /*
10139 * Make the component vregs volatile since the optimizations can
10140 * get confused otherwise.
10141 */
10142 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10143 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10145 #endif
10147 g_assert (vreg != -1);
10149 prev_bb = vreg_to_bb [vreg];
10150 if (prev_bb == 0) {
10151 /* 0 is a valid block num */
10152 vreg_to_bb [vreg] = block_num + 1;
10153 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10154 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10155 continue;
10157 if (!get_vreg_to_inst (cfg, vreg)) {
10158 if (G_UNLIKELY (cfg->verbose_level > 2))
10159 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10161 switch (regtype) {
10162 case 'i':
10163 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10164 break;
10165 case 'f':
10166 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10167 break;
10168 case 'v':
10169 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10170 break;
10171 default:
10172 g_assert_not_reached ();
10176 /* Flag as having been used in more than one bb */
10177 vreg_to_bb [vreg] = -1;
10183 /* If a variable is used in only one bblock, convert it into a local vreg */
10184 for (i = 0; i < cfg->num_varinfo; i++) {
10185 MonoInst *var = cfg->varinfo [i];
10186 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10188 switch (var->type) {
10189 case STACK_I4:
10190 case STACK_OBJ:
10191 case STACK_PTR:
10192 case STACK_MP:
10193 case STACK_VTYPE:
10194 #if SIZEOF_REGISTER == 8
10195 case STACK_I8:
10196 #endif
10197 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10198 /* Enabling this screws up the fp stack on x86 */
10199 case STACK_R8:
10200 #endif
10201 /* Arguments are implicitly global */
10202 /* Putting R4 vars into registers doesn't work currently */
10203 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10204 /*
10205 * Make sure that the variable's liveness interval doesn't contain a call, since
10206 * that would cause the lvreg to be spilled, making the whole optimization
10207 * useless.
10208 */
10209 /* This is too slow for JIT compilation */
10210 #if 0
10211 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10212 MonoInst *ins;
10213 int def_index, call_index, ins_index;
10214 gboolean spilled = FALSE;
10216 def_index = -1;
10217 call_index = -1;
10218 ins_index = 0;
10219 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10220 const char *spec = INS_INFO (ins->opcode);
10222 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10223 def_index = ins_index;
10225 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10226 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10227 if (call_index > def_index) {
10228 spilled = TRUE;
10229 break;
10233 if (MONO_IS_CALL (ins))
10234 call_index = ins_index;
10236 ins_index ++;
10239 if (spilled)
10240 break;
10242 #endif
10244 if (G_UNLIKELY (cfg->verbose_level > 2))
10245 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10246 var->flags |= MONO_INST_IS_DEAD;
10247 cfg->vreg_to_inst [var->dreg] = NULL;
10249 break;
10253 /*
10254 * Compress the varinfo and vars tables so the liveness computation is faster and
10255 * takes up less space.
10256 */
10257 pos = 0;
10258 for (i = 0; i < cfg->num_varinfo; ++i) {
10259 MonoInst *var = cfg->varinfo [i];
10260 if (pos < i && cfg->locals_start == i)
10261 cfg->locals_start = pos;
10262 if (!(var->flags & MONO_INST_IS_DEAD)) {
10263 if (pos < i) {
10264 cfg->varinfo [pos] = cfg->varinfo [i];
10265 cfg->varinfo [pos]->inst_c0 = pos;
10266 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10267 cfg->vars [pos].idx = pos;
10268 #if SIZEOF_REGISTER == 4
10269 if (cfg->varinfo [pos]->type == STACK_I8) {
10270 /* Modify the two component vars too */
10271 MonoInst *var1;
10273 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10274 var1->inst_c0 = pos;
10275 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10276 var1->inst_c0 = pos;
10278 #endif
10280 pos ++;
10283 cfg->num_varinfo = pos;
10284 if (cfg->locals_start > cfg->num_varinfo)
10285 cfg->locals_start = cfg->num_varinfo;
10286 }
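/*
 * Minimal sketch of the rule applied by mono_handle_global_vregs () above
 * (the helper itself is an illustration, not part of the pass): a vreg first
 * seen in one bblock stays local; as soon as it is seen in a second bblock it
 * has to be made global, i.e. backed by a MonoMethodVar.
 */
static gboolean
vreg_needs_var_sketch (gint32 *vreg_to_bb, int vreg, int block_num)
{
	if (vreg_to_bb [vreg] == 0) {
		/* First sighting: remember the bblock (0 is a valid block num, so offset by 1). */
		vreg_to_bb [vreg] = block_num + 1;
		return FALSE;
	}
	/* Already flagged as global, or seen in a different bblock. */
	return (vreg_to_bb [vreg] == -1) || (vreg_to_bb [vreg] != block_num + 1);
}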
10288 /*
10289 * mono_spill_global_vars:
10290 *
10291 * Generate spill code for variables which are not allocated to registers,
10292 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10293 * code is generated which could be optimized by the local optimization passes.
10294 */
10295 void
10296 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10298 MonoBasicBlock *bb;
10299 char spec2 [16];
10300 int orig_next_vreg;
10301 guint32 *vreg_to_lvreg;
10302 guint32 *lvregs;
10303 guint32 i, lvregs_len;
10304 gboolean dest_has_lvreg = FALSE;
10305 guint32 stacktypes [128];
10306 MonoInst **live_range_start, **live_range_end;
10307 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10309 *need_local_opts = FALSE;
10311 memset (spec2, 0, sizeof (spec2));
10313 /* FIXME: Move this function to mini.c */
10314 stacktypes ['i'] = STACK_PTR;
10315 stacktypes ['l'] = STACK_I8;
10316 stacktypes ['f'] = STACK_R8;
10317 #ifdef MONO_ARCH_SIMD_INTRINSICS
10318 stacktypes ['x'] = STACK_VTYPE;
10319 #endif
10321 #if SIZEOF_REGISTER == 4
10322 /* Create MonoInsts for longs */
10323 for (i = 0; i < cfg->num_varinfo; i++) {
10324 MonoInst *ins = cfg->varinfo [i];
10326 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10327 switch (ins->type) {
10328 #ifdef MONO_ARCH_SOFT_FLOAT
10329 case STACK_R8:
10330 #endif
10331 case STACK_I8: {
10332 MonoInst *tree;
10334 g_assert (ins->opcode == OP_REGOFFSET);
10336 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10337 g_assert (tree);
10338 tree->opcode = OP_REGOFFSET;
10339 tree->inst_basereg = ins->inst_basereg;
10340 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10342 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10343 g_assert (tree);
10344 tree->opcode = OP_REGOFFSET;
10345 tree->inst_basereg = ins->inst_basereg;
10346 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10347 break;
10349 default:
10350 break;
10354 #endif
10356 /* FIXME: widening and truncation */
10358 /*
10359 * As an optimization, when a variable allocated to the stack is first loaded into
10360 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10361 * the variable again.
10362 */
10363 orig_next_vreg = cfg->next_vreg;
10364 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10365 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10366 lvregs_len = 0;
10368 /*
10369 * These arrays contain the first and last instructions accessing a given
10370 * variable.
10371 * Since we emit bblocks in the same order we process them here, and we
10372 * don't split live ranges, these will precisely describe the live range of
10373 * the variable, i.e. the instruction range where a valid value can be found
10374 * in the variable's location.
10375 */
10376 /* FIXME: Only do this if debugging info is requested */
10377 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10378 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10379 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10380 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10382 /* Add spill loads/stores */
10383 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10384 MonoInst *ins;
10386 if (cfg->verbose_level > 2)
10387 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10389 /* Clear vreg_to_lvreg array */
10390 for (i = 0; i < lvregs_len; i++)
10391 vreg_to_lvreg [lvregs [i]] = 0;
10392 lvregs_len = 0;
10394 cfg->cbb = bb;
10395 MONO_BB_FOR_EACH_INS (bb, ins) {
10396 const char *spec = INS_INFO (ins->opcode);
10397 int regtype, srcindex, sreg, tmp_reg, prev_dreg;
10398 gboolean store, no_lvreg;
10400 if (G_UNLIKELY (cfg->verbose_level > 2))
10401 mono_print_ins (ins);
10403 if (ins->opcode == OP_NOP)
10404 continue;
10406 /*
10407 * We handle LDADDR here as well, since it can only be decomposed
10408 * when variable addresses are known.
10409 */
10410 if (ins->opcode == OP_LDADDR) {
10411 MonoInst *var = ins->inst_p0;
10413 if (var->opcode == OP_VTARG_ADDR) {
10414 /* Happens on SPARC/S390 where vtypes are passed by reference */
10415 MonoInst *vtaddr = var->inst_left;
10416 if (vtaddr->opcode == OP_REGVAR) {
10417 ins->opcode = OP_MOVE;
10418 ins->sreg1 = vtaddr->dreg;
10420 else if (var->inst_left->opcode == OP_REGOFFSET) {
10421 ins->opcode = OP_LOAD_MEMBASE;
10422 ins->inst_basereg = vtaddr->inst_basereg;
10423 ins->inst_offset = vtaddr->inst_offset;
10424 } else
10425 NOT_IMPLEMENTED;
10426 } else {
10427 g_assert (var->opcode == OP_REGOFFSET);
10429 ins->opcode = OP_ADD_IMM;
10430 ins->sreg1 = var->inst_basereg;
10431 ins->inst_imm = var->inst_offset;
10434 *need_local_opts = TRUE;
10435 spec = INS_INFO (ins->opcode);
10438 if (ins->opcode < MONO_CEE_LAST) {
10439 mono_print_ins (ins);
10440 g_assert_not_reached ();
10441 }
10443 /*
10444 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10445 * src register.
10446 * FIXME:
10447 */
10448 if (MONO_IS_STORE_MEMBASE (ins)) {
10449 tmp_reg = ins->dreg;
10450 ins->dreg = ins->sreg2;
10451 ins->sreg2 = tmp_reg;
10452 store = TRUE;
10454 spec2 [MONO_INST_DEST] = ' ';
10455 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10456 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10457 spec = spec2;
10458 } else if (MONO_IS_STORE_MEMINDEX (ins))
10459 g_assert_not_reached ();
10460 else
10461 store = FALSE;
10462 no_lvreg = FALSE;
10464 if (G_UNLIKELY (cfg->verbose_level > 2))
10465 printf ("\t %.3s %d %d %d\n", spec, ins->dreg, ins->sreg1, ins->sreg2);
10467 /***************/
10468 /* DREG */
10469 /***************/
10470 regtype = spec [MONO_INST_DEST];
10471 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10472 prev_dreg = -1;
10474 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10475 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10476 MonoInst *store_ins;
10477 int store_opcode;
10478 MonoInst *def_ins = ins;
10479 int dreg = ins->dreg; /* The original vreg */
10481 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10483 if (var->opcode == OP_REGVAR) {
10484 ins->dreg = var->dreg;
10485 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10486 /*
10487 * Instead of emitting a load+store, use a _membase opcode.
10488 */
10489 g_assert (var->opcode == OP_REGOFFSET);
10490 if (ins->opcode == OP_MOVE) {
10491 NULLIFY_INS (ins);
10492 def_ins = NULL;
10493 } else {
10494 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10495 ins->inst_basereg = var->inst_basereg;
10496 ins->inst_offset = var->inst_offset;
10497 ins->dreg = -1;
10499 spec = INS_INFO (ins->opcode);
10500 } else {
10501 guint32 lvreg;
10503 g_assert (var->opcode == OP_REGOFFSET);
10505 prev_dreg = ins->dreg;
10507 /* Invalidate any previous lvreg for this vreg */
10508 vreg_to_lvreg [ins->dreg] = 0;
10510 lvreg = 0;
10512 #ifdef MONO_ARCH_SOFT_FLOAT
10513 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10514 regtype = 'l';
10515 store_opcode = OP_STOREI8_MEMBASE_REG;
10517 #endif
10519 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10521 if (regtype == 'l') {
10522 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10523 mono_bblock_insert_after_ins (bb, ins, store_ins);
10524 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10525 mono_bblock_insert_after_ins (bb, ins, store_ins);
10526 def_ins = store_ins;
10528 else {
10529 g_assert (store_opcode != OP_STOREV_MEMBASE);
10531 /* Try to fuse the store into the instruction itself */
10532 /* FIXME: Add more instructions */
10533 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10534 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10535 ins->inst_imm = ins->inst_c0;
10536 ins->inst_destbasereg = var->inst_basereg;
10537 ins->inst_offset = var->inst_offset;
10538 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10539 ins->opcode = store_opcode;
10540 ins->inst_destbasereg = var->inst_basereg;
10541 ins->inst_offset = var->inst_offset;
10543 no_lvreg = TRUE;
10545 tmp_reg = ins->dreg;
10546 ins->dreg = ins->sreg2;
10547 ins->sreg2 = tmp_reg;
10548 store = TRUE;
10550 spec2 [MONO_INST_DEST] = ' ';
10551 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10552 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10553 spec = spec2;
10554 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10555 // FIXME: The backends expect the base reg to be in inst_basereg
10556 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10557 ins->dreg = -1;
10558 ins->inst_basereg = var->inst_basereg;
10559 ins->inst_offset = var->inst_offset;
10560 spec = INS_INFO (ins->opcode);
10561 } else {
10562 /* printf ("INS: "); mono_print_ins (ins); */
10563 /* Create a store instruction */
10564 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10566 /* Insert it after the instruction */
10567 mono_bblock_insert_after_ins (bb, ins, store_ins);
10569 def_ins = store_ins;
10570 }
10571 /*
10572 * We can't assign ins->dreg to var->dreg here, since the
10573 * sregs could use it. So set a flag, and do it after
10574 * the sregs.
10575 */
10576 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10577 dest_has_lvreg = TRUE;
10582 if (def_ins && !live_range_start [dreg]) {
10583 live_range_start [dreg] = def_ins;
10584 live_range_start_bb [dreg] = bb;
10588 /************/
10589 /* SREGS */
10590 /************/
10591 for (srcindex = 0; srcindex < 2; ++srcindex) {
10592 regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
10593 sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
10595 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10596 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10597 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10598 MonoInst *use_ins = ins;
10599 MonoInst *load_ins;
10600 guint32 load_opcode;
10602 if (var->opcode == OP_REGVAR) {
10603 if (srcindex == 0)
10604 ins->sreg1 = var->dreg;
10605 else
10606 ins->sreg2 = var->dreg;
10607 live_range_end [var->dreg] = use_ins;
10608 live_range_end_bb [var->dreg] = bb;
10609 continue;
10612 g_assert (var->opcode == OP_REGOFFSET);
10614 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10616 g_assert (load_opcode != OP_LOADV_MEMBASE);
10618 if (vreg_to_lvreg [sreg]) {
10619 /* The variable is already loaded to an lvreg */
10620 if (G_UNLIKELY (cfg->verbose_level > 2))
10621 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10622 if (srcindex == 0)
10623 ins->sreg1 = vreg_to_lvreg [sreg];
10624 else
10625 ins->sreg2 = vreg_to_lvreg [sreg];
10626 continue;
10629 /* Try to fuse the load into the instruction */
10630 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10631 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10632 ins->inst_basereg = var->inst_basereg;
10633 ins->inst_offset = var->inst_offset;
10634 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10635 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10636 ins->sreg2 = var->inst_basereg;
10637 ins->inst_offset = var->inst_offset;
10638 } else {
10639 if (MONO_IS_REAL_MOVE (ins)) {
10640 ins->opcode = OP_NOP;
10641 sreg = ins->dreg;
10642 } else {
10643 //printf ("%d ", srcindex); mono_print_ins (ins);
10645 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10647 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10648 if (var->dreg == prev_dreg) {
10649 /*
10650 * sreg refers to the value loaded by the load
10651 * emitted below, but we need to use ins->dreg
10652 * since it refers to the store emitted earlier.
10653 */
10654 sreg = ins->dreg;
10656 vreg_to_lvreg [var->dreg] = sreg;
10657 g_assert (lvregs_len < 1024);
10658 lvregs [lvregs_len ++] = var->dreg;
10662 if (srcindex == 0)
10663 ins->sreg1 = sreg;
10664 else
10665 ins->sreg2 = sreg;
10667 if (regtype == 'l') {
10668 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10669 mono_bblock_insert_before_ins (bb, ins, load_ins);
10670 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10671 mono_bblock_insert_before_ins (bb, ins, load_ins);
10672 use_ins = load_ins;
10674 else {
10675 #if SIZEOF_REGISTER == 4
10676 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10677 #endif
10678 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10679 mono_bblock_insert_before_ins (bb, ins, load_ins);
10680 use_ins = load_ins;
10684 if (var->dreg < orig_next_vreg) {
10685 live_range_end [var->dreg] = use_ins;
10686 live_range_end_bb [var->dreg] = bb;
10691 if (dest_has_lvreg) {
10692 vreg_to_lvreg [prev_dreg] = ins->dreg;
10693 g_assert (lvregs_len < 1024);
10694 lvregs [lvregs_len ++] = prev_dreg;
10695 dest_has_lvreg = FALSE;
10698 if (store) {
10699 tmp_reg = ins->dreg;
10700 ins->dreg = ins->sreg2;
10701 ins->sreg2 = tmp_reg;
10704 if (MONO_IS_CALL (ins)) {
10705 /* Clear vreg_to_lvreg array */
10706 for (i = 0; i < lvregs_len; i++)
10707 vreg_to_lvreg [lvregs [i]] = 0;
10708 lvregs_len = 0;
10711 if (cfg->verbose_level > 2)
10712 mono_print_ins_index (1, ins);
10716 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10717 /*
10718 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10719 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10720 */
10721 for (i = 0; i < cfg->num_varinfo; ++i) {
10722 int vreg = MONO_VARINFO (cfg, i)->vreg;
10723 MonoInst *ins;
10725 if (live_range_start [vreg]) {
10726 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10727 ins->inst_c0 = i;
10728 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10730 if (live_range_end [vreg]) {
10731 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10732 ins->inst_c0 = i;
10733 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10736 #endif
10738 g_free (live_range_start);
10739 g_free (live_range_end);
10740 g_free (live_range_start_bb);
10741 g_free (live_range_end_bb);
10742 }
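/*
 * Minimal sketch (an assumption for illustration) of the basic spill-store
 * pattern used by mono_spill_global_vars () above: when an instruction writes
 * a variable kept on the stack, give it a fresh lvreg and store the result
 * back to the variable's stack slot. The real pass also picks the stack type
 * from the variable instead of hard-coding STACK_I4.
 */
static void
emit_spill_store_sketch (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst *var)
{
	MonoInst *store;
	int store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

	g_assert (var->opcode == OP_REGOFFSET);

	/* Write the result to a fresh lvreg ... */
	ins->dreg = alloc_dreg (cfg, STACK_I4);
	/* ... and store it back to [inst_basereg + inst_offset] right after the instruction. */
	NEW_STORE_MEMBASE (cfg, store, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
	mono_bblock_insert_after_ins (bb, ins, store);
}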
10744 /*
10745 * FIXME:
10746 * - use 'iadd' instead of 'int_add'
10747 * - handling ovf opcodes: decompose in method_to_ir.
10748 * - unify iregs/fregs
10749 * -> partly done, the missing parts are:
10750 * - a more complete unification would involve unifying the hregs as well, so
10751 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10752 * would no longer map to the machine hregs, so the code generators would need to
10753 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10754 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10755 * fp/non-fp branches speeds it up by about 15%.
10756 * - use sext/zext opcodes instead of shifts
10757 * - add OP_ICALL
10758 * - get rid of TEMPLOADs if possible and use vregs instead
10759 * - clean up usage of OP_P/OP_ opcodes
10760 * - cleanup usage of DUMMY_USE
10761 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10762 * stack
10763 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10764 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10765 * - make sure handle_stack_args () is called before the branch is emitted
10766 * - when the new IR is done, get rid of all unused stuff
10767 * - COMPARE/BEQ as separate instructions or unify them ?
10768 * - keeping them separate allows specialized compare instructions like
10769 * compare_imm, compare_membase
10770 * - most back ends unify fp compare+branch, fp compare+ceq
10771 * - integrate mono_save_args into inline_method
10772 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10773 * - handle long shift opts on 32 bit platforms somehow: they require
10774 * 3 sregs (2 for arg1 and 1 for arg2)
10775 * - make byref a 'normal' type.
10776 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10777 * variable if needed.
10778 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10779 * like inline_method.
10780 * - remove inlining restrictions
10781 * - fix LNEG and enable cfold of INEG
10782 * - generalize x86 optimizations like ldelema as a peephole optimization
10783 * - add store_mem_imm for amd64
10784 * - optimize the loading of the interruption flag in the managed->native wrappers
10785 * - avoid special handling of OP_NOP in passes
10786 * - move code inserting instructions into one function/macro.
10787 * - try a coalescing phase after liveness analysis
10788 * - add float -> vreg conversion + local optimizations on !x86
10789 * - figure out how to handle decomposed branches during optimizations, ie.
10790 * compare+branch, op_jump_table+op_br etc.
10791 * - promote RuntimeXHandles to vregs
10792 * - vtype cleanups:
10793 * - add a NEW_VARLOADA_VREG macro
10794 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10795 * accessing vtype fields.
10796 * - get rid of I8CONST on 64 bit platforms
10797 * - dealing with the increase in code size due to branches created during opcode
10798 * decomposition:
10799 * - use extended basic blocks
10800 * - all parts of the JIT
10801 * - handle_global_vregs () && local regalloc
10802 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10803 * - sources of increase in code size:
10804 * - vtypes
10805 * - long compares
10806 * - isinst and castclass
10807 * - lvregs not allocated to global registers even if used multiple times
10808 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10809 * meaningful.
10810 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10811 * - add all micro optimizations from the old JIT
10812 * - put tree optimizations into the deadce pass
10813 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10814 * specific function.
10815 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10816 * fcompare + branchCC.
10817 * - create a helper function for allocating a stack slot, taking into account
10818 * MONO_CFG_HAS_SPILLUP.
10819 * - merge r68207.
10820 * - merge the ia64 switch changes.
10821 * - optimize mono_regstate2_alloc_int/float.
10822 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10823 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10824 * parts of the tree could be separated by other instructions, killing the tree
10825 * arguments, or stores killing loads etc. Also, should we fold loads into other
10826 * instructions if the result of the load is used multiple times ?
10827 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10828 * - LAST MERGE: 108395.
10829 * - when returning vtypes in registers, generate IR and append it to the end of the
10830 * last bb instead of doing it in the epilog.
10831 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10832 */
10834 /*
10836 NOTES
10837 -----
10839 - When to decompose opcodes:
10840 - earlier: this makes some optimizations hard to implement, since the low level IR
10841 no longer contains the necessary information. But it is easier to do.
10842 - later: harder to implement, enables more optimizations.
10843 - Branches inside bblocks:
10844 - created when decomposing complex opcodes.
10845 - branches to another bblock: harmless, but not tracked by the branch
10846 optimizations, so need to branch to a label at the start of the bblock.
10847 - branches to inside the same bblock: very problematic, trips up the local
10848 reg allocator. Can be fixed by splitting the current bblock, but that is a
10849 complex operation, since some local vregs can become global vregs etc.
10850 - Local/global vregs:
10851 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10852 local register allocator.
10853 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10854 structure, created by mono_create_var (). Assigned to hregs or the stack by
10855 the global register allocator.
10856 - When to do optimizations like alu->alu_imm:
10857 - earlier -> saves work later on since the IR will be smaller/simpler
10858 - later -> can work on more instructions
10859 - Handling of valuetypes:
10860 - When a vtype is pushed on the stack, a new temporary is created, an
10861 instruction computing its address (LDADDR) is emitted and pushed on
10862 the stack. Need to optimize cases when the vtype is used immediately as in
10863 argument passing, stloc etc.
10864 - Instead of the to_end stuff in the old JIT, simply call the function handling
10865 the values on the stack before emitting the last instruction of the bb.
10866 */
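/*
 * Illustrative sketch of the valuetype handling described in the NOTES above
 * (an assumption for illustration, not a helper used by the JIT): pushing a
 * vtype allocates a temporary and pushes its address instead.
 */
static MonoInst*
push_vtype_sketch (MonoCompile *cfg, MonoClass *klass)
{
	MonoInst *temp, *addr;

	/* Allocate a local that holds the vtype value ... */
	temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
	/* ... and push an instruction computing its address (the LDADDR of the NOTES). */
	EMIT_NEW_TEMPLOADA (cfg, addr, temp->inst_c0);
	addr->klass = klass;
	return addr;
}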
10868 #endif /* DISABLE_JIT */