1 /*
2 * method-to-ir.c: Convert CIL to the JIT internal representation
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 */
11 #include <config.h>
12 #include <signal.h>
14 #ifdef HAVE_UNISTD_H
15 #include <unistd.h>
16 #endif
18 #include <math.h>
19 #include <string.h>
20 #include <ctype.h>
22 #ifdef HAVE_SYS_TIME_H
23 #include <sys/time.h>
24 #endif
26 #ifdef HAVE_ALLOCA_H
27 #include <alloca.h>
28 #endif
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
32 #endif
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
54 #include "mini.h"
55 #include "trace.h"
57 #include "ir-emit.h"
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
65 goto inline_failure;\
66 } while (0)
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
69 goto exception_exit;\
70 } while (0)
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
79 } while (0)
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
88 } while (0)
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
95 } \
96 } while (0)
98 /* Determine whether 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
121 #ifdef MINI_OP
122 #undef MINI_OP
123 #endif
124 #ifdef MINI_OP3
125 #undef MINI_OP3
126 #endif
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
129 #define NONE ' '
130 #define IREG 'i'
131 #define FREG 'f'
132 #define VREG 'v'
133 #define XREG 'x'
134 #if SIZEOF_REGISTER == 8
135 #define LREG IREG
136 #else
137 #define LREG 'l'
138 #endif
139 /* keep in sync with the enum in mini.h */
140 const char
141 ins_info[] = {
142 #include "mini-ops.h"
144 #undef MINI_OP
145 #undef MINI_OP3
147 #define MINI_OP(a,b,dest,src1,src2) (((src1) != NONE) + ((src2) != NONE)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) (((src1) != NONE) + ((src2) != NONE) + ((src3) != NONE)),
149 const gint8 ins_sreg_counts[] = {
150 #include "mini-ops.h"
152 #undef MINI_OP
153 #undef MINI_OP3
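/*
 * Illustrative only: a hedged sketch of how the two expansions above turn a
 * MINI_OP entry from mini-ops.h into instruction metadata.  The exact entry
 * shown is an assumption about the shape of mini-ops.h, not a quote from it.
 */
#if 0
/* Given an entry such as:
 *      MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
 * the ins_info [] expansion inserts
 *      'i', 'i', 'i', ' ',
 * (dest, src1, src2, pad) and the ins_sreg_counts [] expansion inserts
 *      2,
 * i.e. the number of source registers that are not NONE. */
#endif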
155 extern GHashTable *jit_icall_name_hash;
157 #define MONO_INIT_VARINFO(vi,id) do { \
158 (vi)->range.first_use.pos.bid = 0xffff; \
159 (vi)->reg = -1; \
160 (vi)->idx = (id); \
161 } while (0)
163 void
164 mono_inst_set_src_registers (MonoInst *ins, int *regs)
166 ins->sreg1 = regs [0];
167 ins->sreg2 = regs [1];
168 ins->sreg3 = regs [2];
171 guint32
172 mono_alloc_ireg (MonoCompile *cfg)
174 return alloc_ireg (cfg);
177 guint32
178 mono_alloc_freg (MonoCompile *cfg)
180 return alloc_freg (cfg);
183 guint32
184 mono_alloc_preg (MonoCompile *cfg)
186 return alloc_preg (cfg);
189 guint32
190 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
192 return alloc_dreg (cfg, stack_type);
195 guint
196 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
198 if (type->byref)
199 return OP_MOVE;
201 handle_enum:
202 switch (type->type) {
203 case MONO_TYPE_I1:
204 case MONO_TYPE_U1:
205 case MONO_TYPE_BOOLEAN:
206 return OP_MOVE;
207 case MONO_TYPE_I2:
208 case MONO_TYPE_U2:
209 case MONO_TYPE_CHAR:
210 return OP_MOVE;
211 case MONO_TYPE_I4:
212 case MONO_TYPE_U4:
213 return OP_MOVE;
214 case MONO_TYPE_I:
215 case MONO_TYPE_U:
216 case MONO_TYPE_PTR:
217 case MONO_TYPE_FNPTR:
218 return OP_MOVE;
219 case MONO_TYPE_CLASS:
220 case MONO_TYPE_STRING:
221 case MONO_TYPE_OBJECT:
222 case MONO_TYPE_SZARRAY:
223 case MONO_TYPE_ARRAY:
224 return OP_MOVE;
225 case MONO_TYPE_I8:
226 case MONO_TYPE_U8:
227 #if SIZEOF_REGISTER == 8
228 return OP_MOVE;
229 #else
230 return OP_LMOVE;
231 #endif
232 case MONO_TYPE_R4:
233 return OP_FMOVE;
234 case MONO_TYPE_R8:
235 return OP_FMOVE;
236 case MONO_TYPE_VALUETYPE:
237 if (type->data.klass->enumtype) {
238 type = mono_class_enum_basetype (type->data.klass);
239 goto handle_enum;
241 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
242 return OP_XMOVE;
243 return OP_VMOVE;
244 case MONO_TYPE_TYPEDBYREF:
245 return OP_VMOVE;
246 case MONO_TYPE_GENERICINST:
247 type = &type->data.generic_class->container_class->byval_arg;
248 goto handle_enum;
249 case MONO_TYPE_VAR:
250 case MONO_TYPE_MVAR:
251 g_assert (cfg->generic_sharing_context);
252 return OP_MOVE;
253 default:
254 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
256 return -1;
259 void
260 mono_print_bb (MonoBasicBlock *bb, const char *msg)
262 int i;
263 MonoInst *tree;
265 printf ("\n%s %d: [IN: ", msg, bb->block_num);
266 for (i = 0; i < bb->in_count; ++i)
267 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
268 printf (", OUT: ");
269 for (i = 0; i < bb->out_count; ++i)
270 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
271 printf (" ]\n");
272 for (tree = bb->code; tree; tree = tree->next)
273 mono_print_ins_index (-1, tree);
277 * Can't put this at the beginning, since other files reference stuff from this
278 * file.
280 #ifndef DISABLE_JIT
282 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
284 #define GET_BBLOCK(cfg,tblock,ip) do { \
285 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
286 if (!(tblock)) { \
287 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
288 NEW_BBLOCK (cfg, (tblock)); \
289 (tblock)->cil_code = (ip); \
290 ADD_BBLOCK (cfg, (tblock)); \
292 } while (0)
294 #if defined(__i386__) || defined(__x86_64__)
295 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
296 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
297 (dest)->dreg = alloc_preg ((cfg)); \
298 (dest)->sreg1 = (sr1); \
299 (dest)->sreg2 = (sr2); \
300 (dest)->inst_imm = (imm); \
301 (dest)->backend.shift_amount = (shift); \
302 MONO_ADD_INS ((cfg)->cbb, (dest)); \
303 } while (0)
304 #endif
306 #if SIZEOF_REGISTER == 8
307 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
308 /* FIXME: Need to add many more cases */ \
309 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
310 MonoInst *widen; \
311 int dr = alloc_preg (cfg); \
312 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
313 (ins)->sreg2 = widen->dreg; \
315 } while (0)
316 #else
317 #define ADD_WIDEN_OP(ins, arg1, arg2)
318 #endif
320 #define ADD_BINOP(op) do { \
321 MONO_INST_NEW (cfg, ins, (op)); \
322 sp -= 2; \
323 ins->sreg1 = sp [0]->dreg; \
324 ins->sreg2 = sp [1]->dreg; \
325 type_from_op (ins, sp [0], sp [1]); \
326 CHECK_TYPE (ins); \
327 /* Have to insert a widening op */ \
328 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
329 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
330 MONO_ADD_INS ((cfg)->cbb, (ins)); \
331 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
332 } while (0)
334 #define ADD_UNOP(op) do { \
335 MONO_INST_NEW (cfg, ins, (op)); \
336 sp--; \
337 ins->sreg1 = sp [0]->dreg; \
338 type_from_op (ins, sp [0], NULL); \
339 CHECK_TYPE (ins); \
340 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
341 MONO_ADD_INS ((cfg)->cbb, (ins)); \
342 *sp++ = mono_decompose_opcode (cfg, ins); \
343 } while (0)
345 #define ADD_BINCOND(next_block) do { \
346 MonoInst *cmp; \
347 sp -= 2; \
348 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
349 cmp->sreg1 = sp [0]->dreg; \
350 cmp->sreg2 = sp [1]->dreg; \
351 type_from_op (cmp, sp [0], sp [1]); \
352 CHECK_TYPE (cmp); \
353 type_from_op (ins, sp [0], sp [1]); \
354 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
355 GET_BBLOCK (cfg, tblock, target); \
356 link_bblock (cfg, bblock, tblock); \
357 ins->inst_true_bb = tblock; \
358 if ((next_block)) { \
359 link_bblock (cfg, bblock, (next_block)); \
360 ins->inst_false_bb = (next_block); \
361 start_new_bblock = 1; \
362 } else { \
363 GET_BBLOCK (cfg, tblock, ip); \
364 link_bblock (cfg, bblock, tblock); \
365 ins->inst_false_bb = tblock; \
366 start_new_bblock = 2; \
368 if (sp != stack_start) { \
369 handle_stack_args (cfg, stack_start, sp - stack_start); \
370 CHECK_UNVERIFIABLE (cfg); \
372 MONO_ADD_INS (bblock, cmp); \
373 MONO_ADD_INS (bblock, ins); \
374 } while (0)
376 /**
377 * link_bblock: Links two basic blocks
379 * Links two basic blocks in the control flow graph: the 'from'
380 * argument is the starting block and the 'to' argument is the block
381 * that control flow reaches after 'from'.
383 static void
384 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
386 MonoBasicBlock **newa;
387 int i, found;
389 #if 0
390 if (from->cil_code) {
391 if (to->cil_code)
392 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
393 else
394 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
395 } else {
396 if (to->cil_code)
397 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
398 else
399 printf ("edge from entry to exit\n");
401 #endif
403 found = FALSE;
404 for (i = 0; i < from->out_count; ++i) {
405 if (to == from->out_bb [i]) {
406 found = TRUE;
407 break;
410 if (!found) {
411 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
412 for (i = 0; i < from->out_count; ++i) {
413 newa [i] = from->out_bb [i];
415 newa [i] = to;
416 from->out_count++;
417 from->out_bb = newa;
420 found = FALSE;
421 for (i = 0; i < to->in_count; ++i) {
422 if (from == to->in_bb [i]) {
423 found = TRUE;
424 break;
427 if (!found) {
428 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
429 for (i = 0; i < to->in_count; ++i) {
430 newa [i] = to->in_bb [i];
432 newa [i] = from;
433 to->in_count++;
434 to->in_bb = newa;
438 void
439 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
441 link_bblock (cfg, from, to);
445 * mono_find_block_region:
447 * We mark each basic block with a region ID. We use that to avoid BB
448 * optimizations when blocks are in different regions.
450 * Returns:
451 * A region token that encodes where this region is, and information
452 * about the clause owner for this block.
454 * The region encodes the try/catch/filter clause that owns this block
455 * as well as the type. -1 is a special value that represents a block
456 * that is in none of try/catch/filter.
458 static int
459 mono_find_block_region (MonoCompile *cfg, int offset)
461 MonoMethod *method = cfg->method;
462 MonoMethodHeader *header = mono_method_get_header (method);
463 MonoExceptionClause *clause;
464 int i;
466 for (i = 0; i < header->num_clauses; ++i) {
467 clause = &header->clauses [i];
468 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
469 (offset < (clause->handler_offset)))
470 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
472 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
473 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
474 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
475 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
476 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
477 else
478 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
481 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
482 return ((i + 1) << 8) | clause->flags;
485 return -1;
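/*
 * Illustrative only: a minimal sketch (not part of the JIT) of how a region
 * token returned by mono_find_block_region () can be taken apart.  The clause
 * index is stored biased by one in the upper bits; the low byte carries the
 * MONO_REGION_* kind together with the clause flags.
 */
#if 0
static void
example_print_region (int region)
{
	if (region == -1) {
		printf ("block is outside any try/catch/filter clause\n");
		return;
	}
	printf ("clause index %d, kind/flag bits 0x%x\n", (region >> 8) - 1, region & 0xff);
}
#endif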
488 static GList*
489 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
491 MonoMethod *method = cfg->method;
492 MonoMethodHeader *header = mono_method_get_header (method);
493 MonoExceptionClause *clause;
494 MonoBasicBlock *handler;
495 int i;
496 GList *res = NULL;
498 for (i = 0; i < header->num_clauses; ++i) {
499 clause = &header->clauses [i];
500 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
501 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
502 if (clause->flags == type) {
503 handler = cfg->cil_offset_to_bb [clause->handler_offset];
504 g_assert (handler);
505 res = g_list_append (res, handler);
509 return res;
512 static void
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
515 MonoInst *var;
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
518 if (var)
519 return;
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
528 static MonoInst *
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
534 static MonoInst*
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
537 MonoInst *var;
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
540 if (var)
541 return var;
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
549 return var;
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
556 void
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
559 MonoClass *klass;
561 inst->klass = klass = mono_class_from_mono_type (type);
562 if (type->byref) {
563 inst->type = STACK_MP;
564 return;
567 handle_enum:
568 switch (type->type) {
569 case MONO_TYPE_VOID:
570 inst->type = STACK_INV;
571 return;
572 case MONO_TYPE_I1:
573 case MONO_TYPE_U1:
574 case MONO_TYPE_BOOLEAN:
575 case MONO_TYPE_I2:
576 case MONO_TYPE_U2:
577 case MONO_TYPE_CHAR:
578 case MONO_TYPE_I4:
579 case MONO_TYPE_U4:
580 inst->type = STACK_I4;
581 return;
582 case MONO_TYPE_I:
583 case MONO_TYPE_U:
584 case MONO_TYPE_PTR:
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
587 return;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
594 return;
595 case MONO_TYPE_I8:
596 case MONO_TYPE_U8:
597 inst->type = STACK_I8;
598 return;
599 case MONO_TYPE_R4:
600 case MONO_TYPE_R8:
601 inst->type = STACK_R8;
602 return;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
606 goto handle_enum;
607 } else {
608 inst->klass = klass;
609 inst->type = STACK_VTYPE;
610 return;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
615 return;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
618 goto handle_enum;
619 case MONO_TYPE_VAR :
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
623 * really a reference
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
627 return;
628 default:
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
636 static const char
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
648 static const char
649 neg_table [] = {
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
654 static const char
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
666 static const char
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
669 {0},
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
680 static const char
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
697 static const guint16
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
703 static const guint16
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
709 static const guint16
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
715 static const guint16
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
721 static const guint16
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
727 static const guint16
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
733 static const guint16
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
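/*
 * Illustrative only: how the offset tables above are used.  Each entry stores
 * the distance from a generic CIL opcode to its type-specific IR opcode, so a
 * single addition in type_from_op () selects the right variant.
 */
#if 0
/* e.g. for an add of two 32-bit ints:
 *      ins->opcode = CEE_ADD;
 *      ins->type   = STACK_I4;
 *      ins->opcode += binops_op_map [ins->type];   // -> OP_IADD
 * while for two longs (STACK_I8) the same addition yields OP_LADD. */
#endif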
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
746 static void
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
750 /* binops */
751 case CEE_ADD:
752 case CEE_SUB:
753 case CEE_MUL:
754 case CEE_DIV:
755 case CEE_REM:
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
759 break;
760 case CEE_DIV_UN:
761 case CEE_REM_UN:
762 case CEE_AND:
763 case CEE_OR:
764 case CEE_XOR:
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
767 break;
768 case CEE_SHL:
769 case CEE_SHR:
770 case CEE_SHR_UN:
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
773 break;
774 case OP_COMPARE:
775 case OP_LCOMPARE:
776 case OP_ICOMPARE:
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
782 else
783 ins->opcode = OP_ICOMPARE;
784 break;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
789 break;
790 case CEE_BEQ:
791 case CEE_BGE:
792 case CEE_BGT:
793 case CEE_BLE:
794 case CEE_BLT:
795 case CEE_BNE_UN:
796 case CEE_BGE_UN:
797 case CEE_BGT_UN:
798 case CEE_BLE_UN:
799 case CEE_BLT_UN:
800 ins->opcode += beqops_op_map [src1->type];
801 break;
802 case OP_CEQ:
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
805 break;
806 case OP_CGT:
807 case OP_CGT_UN:
808 case OP_CLT:
809 case OP_CLT_UN:
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
812 break;
813 /* unops */
814 case CEE_NEG:
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
817 break;
818 case CEE_NOT:
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
821 else
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
824 break;
825 case CEE_CONV_I1:
826 case CEE_CONV_I2:
827 case CEE_CONV_I4:
828 case CEE_CONV_U4:
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
831 break;
832 case CEE_CONV_R_UN:
833 ins->type = STACK_R8;
834 switch (src1->type) {
835 case STACK_I4:
836 case STACK_PTR:
837 ins->opcode = OP_ICONV_TO_R_UN;
838 break;
839 case STACK_I8:
840 ins->opcode = OP_LCONV_TO_R_UN;
841 break;
843 break;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
852 break;
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
857 break;
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
866 break;
867 case CEE_CONV_U:
868 ins->type = STACK_PTR;
869 switch (src1->type) {
870 case STACK_I4:
871 ins->opcode = OP_ICONV_TO_U;
872 break;
873 case STACK_PTR:
874 case STACK_MP:
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
877 #else
878 ins->opcode = OP_MOVE;
879 #endif
880 break;
881 case STACK_I8:
882 ins->opcode = OP_LCONV_TO_U;
883 break;
884 case STACK_R8:
885 ins->opcode = OP_FCONV_TO_U;
886 break;
888 break;
889 case CEE_CONV_I8:
890 case CEE_CONV_U8:
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
893 break;
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
898 break;
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
903 break;
904 case CEE_CONV_R4:
905 case CEE_CONV_R8:
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
908 break;
909 case OP_CKFINITE:
910 ins->type = STACK_R8;
911 break;
912 case CEE_CONV_U2:
913 case CEE_CONV_U1:
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
916 break;
917 case CEE_CONV_I:
918 case CEE_CONV_OVF_I:
919 case CEE_CONV_OVF_U:
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
922 break;
923 case CEE_ADD_OVF:
924 case CEE_ADD_OVF_UN:
925 case CEE_MUL_OVF:
926 case CEE_MUL_OVF_UN:
927 case CEE_SUB_OVF:
928 case CEE_SUB_OVF_UN:
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
933 break;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
936 break;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
944 break;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
947 break;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
951 break;
952 default:
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
954 break;
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
961 static const char
962 ldind_type [] = {
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
966 #if 0
968 static const char
969 param_table [STACK_MAX] [STACK_MAX] = {
970 {0},
973 static int
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
975 int i;
977 if (sig->hasthis) {
978 switch (args->type) {
979 case STACK_I4:
980 case STACK_I8:
981 case STACK_R8:
982 case STACK_VTYPE:
983 case STACK_INV:
984 return 0;
986 args++;
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
990 case STACK_INV:
991 return 0;
992 case STACK_MP:
993 if (!sig->params [i]->byref)
994 return 0;
995 continue;
996 case STACK_OBJ:
997 if (sig->params [i]->byref)
998 return 0;
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
1005 break;
1006 default:
1007 return 0;
1009 continue;
1010 case STACK_R8:
1011 if (sig->params [i]->byref)
1012 return 0;
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1014 return 0;
1015 continue;
1016 case STACK_PTR:
1017 case STACK_I4:
1018 case STACK_I8:
1019 case STACK_VTYPE:
1020 break;
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1023 return 0;*/
1025 return 1;
1027 #endif
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
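/*
 * Illustrative only (hypothetical call site, not taken from this file): a
 * caller needing the current MonoDomain* loads the cached variable instead of
 * emitting a call to mono_domain_get () at every use.
 */
#if 0
static void
example_use_domainvar (MonoCompile *cfg)
{
	MonoInst *domain_ins;

	EMIT_NEW_TEMPLOAD (cfg, domain_ins, mono_get_domainvar (cfg)->inst_c0);
	/* domain_ins->dreg now holds the MonoDomain* of the running method */
}
#endif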
1043 * The got_var contains the address of the Global Offset Table when AOT
1044 * compiling.
1046 inline static MonoInst *
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1051 return NULL;
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
1056 #else
1057 return NULL;
1058 #endif
1061 static MonoInst *
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
1075 static MonoType*
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1082 case STACK_MP:
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1086 default:
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
1089 return NULL;
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 switch (mono_type_get_underlying_type (t)->type) {
1096 case MONO_TYPE_I1:
1097 case MONO_TYPE_U1:
1098 case MONO_TYPE_BOOLEAN:
1099 case MONO_TYPE_I2:
1100 case MONO_TYPE_U2:
1101 case MONO_TYPE_CHAR:
1102 case MONO_TYPE_I4:
1103 case MONO_TYPE_U4:
1104 return STACK_I4;
1105 case MONO_TYPE_I:
1106 case MONO_TYPE_U:
1107 case MONO_TYPE_PTR:
1108 case MONO_TYPE_FNPTR:
1109 return STACK_PTR;
1110 case MONO_TYPE_CLASS:
1111 case MONO_TYPE_STRING:
1112 case MONO_TYPE_OBJECT:
1113 case MONO_TYPE_SZARRAY:
1114 case MONO_TYPE_ARRAY:
1115 return STACK_OBJ;
1116 case MONO_TYPE_I8:
1117 case MONO_TYPE_U8:
1118 return STACK_I8;
1119 case MONO_TYPE_R4:
1120 case MONO_TYPE_R8:
1121 return STACK_R8;
1122 case MONO_TYPE_VALUETYPE:
1123 case MONO_TYPE_TYPEDBYREF:
1124 return STACK_VTYPE;
1125 case MONO_TYPE_GENERICINST:
1126 if (mono_type_generic_inst_is_valuetype (t))
1127 return STACK_VTYPE;
1128 else
1129 return STACK_OBJ;
1130 break;
1131 default:
1132 g_assert_not_reached ();
1135 return -1;
1138 static MonoClass*
1139 array_access_to_klass (int opcode)
1141 switch (opcode) {
1142 case CEE_LDELEM_U1:
1143 return mono_defaults.byte_class;
1144 case CEE_LDELEM_U2:
1145 return mono_defaults.uint16_class;
1146 case CEE_LDELEM_I:
1147 case CEE_STELEM_I:
1148 return mono_defaults.int_class;
1149 case CEE_LDELEM_I1:
1150 case CEE_STELEM_I1:
1151 return mono_defaults.sbyte_class;
1152 case CEE_LDELEM_I2:
1153 case CEE_STELEM_I2:
1154 return mono_defaults.int16_class;
1155 case CEE_LDELEM_I4:
1156 case CEE_STELEM_I4:
1157 return mono_defaults.int32_class;
1158 case CEE_LDELEM_U4:
1159 return mono_defaults.uint32_class;
1160 case CEE_LDELEM_I8:
1161 case CEE_STELEM_I8:
1162 return mono_defaults.int64_class;
1163 case CEE_LDELEM_R4:
1164 case CEE_STELEM_R4:
1165 return mono_defaults.single_class;
1166 case CEE_LDELEM_R8:
1167 case CEE_STELEM_R8:
1168 return mono_defaults.double_class;
1169 case CEE_LDELEM_REF:
1170 case CEE_STELEM_REF:
1171 return mono_defaults.object_class;
1172 default:
1173 g_assert_not_reached ();
1175 return NULL;
1179 * We try to share variables when possible
1181 static MonoInst *
1182 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1184 MonoInst *res;
1185 int pos, vnum;
1187 /* inlining can result in deeper stacks */
1188 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1189 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1191 pos = ins->type - 1 + slot * STACK_MAX;
1193 switch (ins->type) {
1194 case STACK_I4:
1195 case STACK_I8:
1196 case STACK_R8:
1197 case STACK_PTR:
1198 case STACK_MP:
1199 case STACK_OBJ:
1200 if ((vnum = cfg->intvars [pos]))
1201 return cfg->varinfo [vnum];
1202 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1203 cfg->intvars [pos] = res->inst_c0;
1204 break;
1205 default:
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1208 return res;
1211 static void
1212 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1215 * Don't use this if a generic_context is set, since that means AOT can't
1216 * look up the method using just the image+token.
1217 * table == 0 means this is a reference made from a wrapper.
1219 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1220 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1221 jump_info_token->image = image;
1222 jump_info_token->token = token;
1223 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1228 * This function is called to handle items that are left on the evaluation stack
1229 * at basic block boundaries. What happens is that we save the values to local variables
1230 * and we reload them later when first entering the target basic block (with the
1231 * handle_loaded_temps () function).
1232 * A single join point will use the same variables (stored in the array bb->out_stack or
1233 * bb->in_stack, if the basic block is before or after the join point).
1235 * This function needs to be called _before_ emitting the last instruction of
1236 * the bb (i.e. before emitting a branch).
1237 * If the stack merge fails at a join point, cfg->unverifiable is set.
1239 static void
1240 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1242 int i, bindex;
1243 MonoBasicBlock *bb = cfg->cbb;
1244 MonoBasicBlock *outb;
1245 MonoInst *inst, **locals;
1246 gboolean found;
1248 if (!count)
1249 return;
1250 if (cfg->verbose_level > 3)
1251 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1252 if (!bb->out_scount) {
1253 bb->out_scount = count;
1254 //printf ("bblock %d has out:", bb->block_num);
1255 found = FALSE;
1256 for (i = 0; i < bb->out_count; ++i) {
1257 outb = bb->out_bb [i];
1258 /* exception handlers are linked, but they should not be considered for stack args */
1259 if (outb->flags & BB_EXCEPTION_HANDLER)
1260 continue;
1261 //printf (" %d", outb->block_num);
1262 if (outb->in_stack) {
1263 found = TRUE;
1264 bb->out_stack = outb->in_stack;
1265 break;
1268 //printf ("\n");
1269 if (!found) {
1270 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1271 for (i = 0; i < count; ++i) {
1273 * try to reuse temps already allocated for this purpose, if they occupy the same
1274 * stack slot and if they are of the same type.
1275 * This won't cause conflicts since if 'local' is used to
1276 * store one of the values in the in_stack of a bblock, then
1277 * the same variable will be used for the same outgoing stack
1278 * slot as well.
1279 * This doesn't work when inlining methods, since the bblocks
1280 * in the inlined methods do not inherit their in_stack from
1281 * the bblock they are inlined to. See bug #58863 for an
1282 * example.
1284 if (cfg->inlined_method)
1285 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1286 else
1287 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1292 for (i = 0; i < bb->out_count; ++i) {
1293 outb = bb->out_bb [i];
1294 /* exception handlers are linked, but they should not be considered for stack args */
1295 if (outb->flags & BB_EXCEPTION_HANDLER)
1296 continue;
1297 if (outb->in_scount) {
1298 if (outb->in_scount != bb->out_scount) {
1299 cfg->unverifiable = TRUE;
1300 return;
1302 continue; /* check they are the same locals */
1304 outb->in_scount = count;
1305 outb->in_stack = bb->out_stack;
1308 locals = bb->out_stack;
1309 cfg->cbb = bb;
1310 for (i = 0; i < count; ++i) {
1311 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1312 inst->cil_code = sp [i]->cil_code;
1313 sp [i] = locals [i];
1314 if (cfg->verbose_level > 3)
1315 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1319 * It is possible that the out bblocks already have in_stack assigned, and
1320 * the in_stacks differ. In this case, we will store to all the different
1321 * in_stacks.
1324 found = TRUE;
1325 bindex = 0;
1326 while (found) {
1327 /* Find a bblock which has a different in_stack */
1328 found = FALSE;
1329 while (bindex < bb->out_count) {
1330 outb = bb->out_bb [bindex];
1331 /* exception handlers are linked, but they should not be considered for stack args */
1332 if (outb->flags & BB_EXCEPTION_HANDLER) {
1333 bindex++;
1334 continue;
1336 if (outb->in_stack != locals) {
1337 for (i = 0; i < count; ++i) {
1338 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1339 inst->cil_code = sp [i]->cil_code;
1340 sp [i] = locals [i];
1341 if (cfg->verbose_level > 3)
1342 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1344 locals = outb->in_stack;
1345 found = TRUE;
1346 break;
1348 bindex ++;
1353 /* Emit code which loads interface_offsets [klass->interface_id].
1354 * The array is stored in memory just before the vtable.
1356 static void
1357 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1359 if (cfg->compile_aot) {
1360 int ioffset_reg = alloc_preg (cfg);
1361 int iid_reg = alloc_preg (cfg);
1363 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1364 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1367 else {
1368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1373 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1374 * stored in "klass_reg" implements the interface "klass".
1376 static void
1377 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1379 int ibitmap_reg = alloc_preg (cfg);
1380 int ibitmap_byte_reg = alloc_preg (cfg);
1382 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1384 if (cfg->compile_aot) {
1385 int iid_reg = alloc_preg (cfg);
1386 int shifted_iid_reg = alloc_preg (cfg);
1387 int ibitmap_byte_address_reg = alloc_preg (cfg);
1388 int masked_iid_reg = alloc_preg (cfg);
1389 int iid_one_bit_reg = alloc_preg (cfg);
1390 int iid_bit_reg = alloc_preg (cfg);
1391 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1393 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1394 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1396 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1398 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1399 } else {
1400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
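/*
 * Illustrative only: the plain C equivalent of the bitmap test whose IR the
 * function above emits.  A nonzero result means the class owning the passed
 * interface_bitmap implements the interface with id 'iid'.
 */
#if 0
static int
example_interface_bitmap_test (const guint8 *bitmap, int iid)
{
	return bitmap [iid >> 3] & (1 << (iid & 7));
}
#endif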
1406 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1407 * stored in "vtable_reg" implements the interface "klass".
1409 static void
1410 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1412 int ibitmap_reg = alloc_preg (cfg);
1413 int ibitmap_byte_reg = alloc_preg (cfg);
1415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1417 if (cfg->compile_aot) {
1418 int iid_reg = alloc_preg (cfg);
1419 int shifted_iid_reg = alloc_preg (cfg);
1420 int ibitmap_byte_address_reg = alloc_preg (cfg);
1421 int masked_iid_reg = alloc_preg (cfg);
1422 int iid_one_bit_reg = alloc_preg (cfg);
1423 int iid_bit_reg = alloc_preg (cfg);
1424 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1426 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1427 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1429 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1432 } else {
1433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1439 * Emit code which checks whether the interface id of @klass is smaller
1440 * than the value given by max_iid_reg.
1442 static void
1443 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 if (cfg->compile_aot) {
1447 int iid_reg = alloc_preg (cfg);
1448 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1449 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1451 else
1452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1453 if (false_target)
1454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1455 else
1456 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1459 /* Same as above, but obtains max_iid from a vtable */
1460 static void
1461 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1462 MonoBasicBlock *false_target)
1464 int max_iid_reg = alloc_preg (cfg);
1466 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1467 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1470 /* Same as above, but obtains max_iid from a klass */
1471 static void
1472 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1473 MonoBasicBlock *false_target)
1475 int max_iid_reg = alloc_preg (cfg);
1477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1478 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1481 static void
1482 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1484 int idepth_reg = alloc_preg (cfg);
1485 int stypes_reg = alloc_preg (cfg);
1486 int stype = alloc_preg (cfg);
1488 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1499 } else {
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
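/*
 * Illustrative only: the C equivalent of the supertypes check emitted above
 * (and again in mini_emit_castclass () below).  'sub' stands for the runtime
 * class of the object, 'super' for the non-interface class tested against.
 */
#if 0
static gboolean
example_is_subclass_of (MonoClass *sub, MonoClass *super)
{
	return sub->idepth >= super->idepth &&
		sub->supertypes [super->idepth - 1] == super;
}
#endif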
1505 static void
1506 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 int intf_reg = alloc_preg (cfg);
1510 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1511 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1513 if (true_target)
1514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1515 else
1516 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1520 * Variant of the above that takes a register to the class, not the vtable.
1522 static void
1523 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1525 int intf_bit_reg = alloc_preg (cfg);
1527 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1528 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1530 if (true_target)
1531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1532 else
1533 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1536 static inline void
1537 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1539 if (cfg->compile_aot) {
1540 int const_reg = alloc_preg (cfg);
1541 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1542 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1543 } else {
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1546 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1549 static inline void
1550 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1552 if (cfg->compile_aot) {
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1556 } else {
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1562 static void
1563 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1565 if (klass->rank) {
1566 int rank_reg = alloc_preg (cfg);
1567 int eclass_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1571 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1572 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1574 if (klass->cast_class == mono_defaults.object_class) {
1575 int parent_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1577 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1578 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1579 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1580 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1581 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1582 } else if (klass->cast_class == mono_defaults.enum_class) {
1583 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1584 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1585 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1586 } else {
1587 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1588 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1591 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1592 /* Check that the object is a vector too */
1593 int bounds_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1596 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1598 } else {
1599 int idepth_reg = alloc_preg (cfg);
1600 int stypes_reg = alloc_preg (cfg);
1601 int stype = alloc_preg (cfg);
1603 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1604 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1610 mini_emit_class_check (cfg, stype, klass);
1614 static void
1615 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1617 int val_reg;
1619 g_assert (val == 0);
1621 if (align == 0)
1622 align = 4;
1624 if ((size <= 4) && (size <= align)) {
1625 switch (size) {
1626 case 1:
1627 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1628 return;
1629 case 2:
1630 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1631 return;
1632 case 4:
1633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1634 return;
1635 #if SIZEOF_REGISTER == 8
1636 case 8:
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1638 return;
1639 #endif
1643 val_reg = alloc_preg (cfg);
1645 if (SIZEOF_REGISTER == 8)
1646 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1647 else
1648 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1650 if (align < 4) {
1651 /* This could be optimized further if necessary */
1652 while (size >= 1) {
1653 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1654 offset += 1;
1655 size -= 1;
1657 return;
1660 #if !NO_UNALIGNED_ACCESS
1661 if (SIZEOF_REGISTER == 8) {
1662 if (offset % 8) {
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1664 offset += 4;
1665 size -= 4;
1667 while (size >= 8) {
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1669 offset += 8;
1670 size -= 8;
1673 #endif
1675 while (size >= 4) {
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1677 offset += 4;
1678 size -= 4;
1680 while (size >= 2) {
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1682 offset += 2;
1683 size -= 2;
1685 while (size >= 1) {
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1687 offset += 1;
1688 size -= 1;
1692 #endif /* DISABLE_JIT */
1694 void
1695 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1697 int cur_reg;
1699 if (align == 0)
1700 align = 4;
1702 if (align < 4) {
1703 /* This could be optimized further if necessary */
1704 while (size >= 1) {
1705 cur_reg = alloc_preg (cfg);
1706 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1708 doffset += 1;
1709 soffset += 1;
1710 size -= 1;
1714 #if !NO_UNALIGNED_ACCESS
1715 if (SIZEOF_REGISTER == 8) {
1716 while (size >= 8) {
1717 cur_reg = alloc_preg (cfg);
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1720 doffset += 8;
1721 soffset += 8;
1722 size -= 8;
1725 #endif
1727 while (size >= 4) {
1728 cur_reg = alloc_preg (cfg);
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1731 doffset += 4;
1732 soffset += 4;
1733 size -= 4;
1735 while (size >= 2) {
1736 cur_reg = alloc_preg (cfg);
1737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1738 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1739 doffset += 2;
1740 soffset += 2;
1741 size -= 2;
1743 while (size >= 1) {
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1747 doffset += 1;
1748 soffset += 1;
1749 size -= 1;
1753 #ifndef DISABLE_JIT
1755 static int
1756 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1758 if (type->byref)
1759 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1761 handle_enum:
1762 type = mini_get_basic_type_from_generic (gsctx, type);
1763 switch (type->type) {
1764 case MONO_TYPE_VOID:
1765 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1766 case MONO_TYPE_I1:
1767 case MONO_TYPE_U1:
1768 case MONO_TYPE_BOOLEAN:
1769 case MONO_TYPE_I2:
1770 case MONO_TYPE_U2:
1771 case MONO_TYPE_CHAR:
1772 case MONO_TYPE_I4:
1773 case MONO_TYPE_U4:
1774 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1775 case MONO_TYPE_I:
1776 case MONO_TYPE_U:
1777 case MONO_TYPE_PTR:
1778 case MONO_TYPE_FNPTR:
1779 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1780 case MONO_TYPE_CLASS:
1781 case MONO_TYPE_STRING:
1782 case MONO_TYPE_OBJECT:
1783 case MONO_TYPE_SZARRAY:
1784 case MONO_TYPE_ARRAY:
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 case MONO_TYPE_I8:
1787 case MONO_TYPE_U8:
1788 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1789 case MONO_TYPE_R4:
1790 case MONO_TYPE_R8:
1791 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1792 case MONO_TYPE_VALUETYPE:
1793 if (type->data.klass->enumtype) {
1794 type = mono_class_enum_basetype (type->data.klass);
1795 goto handle_enum;
1796 } else
1797 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1798 case MONO_TYPE_TYPEDBYREF:
1799 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1800 case MONO_TYPE_GENERICINST:
1801 type = &type->data.generic_class->container_class->byval_arg;
1802 goto handle_enum;
1803 default:
1804 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1806 return -1;
1810 * target_type_is_incompatible:
1811 * @cfg: MonoCompile context
1813 * Check that the item @arg on the evaluation stack can be stored
1814  * in the target type (which can be a local, a field, etc.).
1815 * The cfg arg can be used to check if we need verification or just
1816 * validity checks.
1818  * Returns: a non-zero value if @arg can't be stored in @target.
1820 static int
1821 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1823 MonoType *simple_type;
1824 MonoClass *klass;
1826 if (target->byref) {
1827 /* FIXME: check that the pointed to types match */
1828 if (arg->type == STACK_MP)
1829 return arg->klass != mono_class_from_mono_type (target);
1830 if (arg->type == STACK_PTR)
1831 return 0;
1832 return 1;
1835 simple_type = mono_type_get_underlying_type (target);
1836 switch (simple_type->type) {
1837 case MONO_TYPE_VOID:
1838 return 1;
1839 case MONO_TYPE_I1:
1840 case MONO_TYPE_U1:
1841 case MONO_TYPE_BOOLEAN:
1842 case MONO_TYPE_I2:
1843 case MONO_TYPE_U2:
1844 case MONO_TYPE_CHAR:
1845 case MONO_TYPE_I4:
1846 case MONO_TYPE_U4:
1847 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1848 return 1;
1849 return 0;
1850 case MONO_TYPE_PTR:
1851 /* STACK_MP is needed when setting pinned locals */
1852 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1853 return 1;
1854 return 0;
1855 case MONO_TYPE_I:
1856 case MONO_TYPE_U:
1857 case MONO_TYPE_FNPTR:
1858 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1859 return 1;
1860 return 0;
1861 case MONO_TYPE_CLASS:
1862 case MONO_TYPE_STRING:
1863 case MONO_TYPE_OBJECT:
1864 case MONO_TYPE_SZARRAY:
1865 case MONO_TYPE_ARRAY:
1866 if (arg->type != STACK_OBJ)
1867 return 1;
1868 /* FIXME: check type compatibility */
1869 return 0;
1870 case MONO_TYPE_I8:
1871 case MONO_TYPE_U8:
1872 if (arg->type != STACK_I8)
1873 return 1;
1874 return 0;
1875 case MONO_TYPE_R4:
1876 case MONO_TYPE_R8:
1877 if (arg->type != STACK_R8)
1878 return 1;
1879 return 0;
1880 case MONO_TYPE_VALUETYPE:
1881 if (arg->type != STACK_VTYPE)
1882 return 1;
1883 klass = mono_class_from_mono_type (simple_type);
1884 if (klass != arg->klass)
1885 return 1;
1886 return 0;
1887 case MONO_TYPE_TYPEDBYREF:
1888 if (arg->type != STACK_VTYPE)
1889 return 1;
1890 klass = mono_class_from_mono_type (simple_type);
1891 if (klass != arg->klass)
1892 return 1;
1893 return 0;
1894 case MONO_TYPE_GENERICINST:
1895 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1896 if (arg->type != STACK_VTYPE)
1897 return 1;
1898 klass = mono_class_from_mono_type (simple_type);
1899 if (klass != arg->klass)
1900 return 1;
1901 return 0;
1902 } else {
1903 if (arg->type != STACK_OBJ)
1904 return 1;
1905 /* FIXME: check type compatibility */
1906 return 0;
1908 case MONO_TYPE_VAR:
1909 case MONO_TYPE_MVAR:
1910 /* FIXME: all the arguments must be references for now,
1911 * later look inside cfg and see if the arg num is
1912 * really a reference
1914 g_assert (cfg->generic_sharing_context);
1915 if (arg->type != STACK_OBJ)
1916 return 1;
1917 return 0;
1918 default:
1919 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1921 return 1;
1925 * Prepare arguments for passing to a function call.
1926 * Return a non-zero value if the arguments can't be passed to the given
1927 * signature.
1928 * The type checks are not yet complete and some conversions may need
1929 * casts on 32 or 64 bit architectures.
1931 * FIXME: implement this using target_type_is_incompatible ()
1933 static int
1934 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1936 MonoType *simple_type;
1937 int i;
1939 if (sig->hasthis) {
1940 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1941 return 1;
1942 args++;
1944 for (i = 0; i < sig->param_count; ++i) {
1945 if (sig->params [i]->byref) {
1946 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1947 return 1;
1948 continue;
1950 simple_type = sig->params [i];
1951 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1952 handle_enum:
1953 switch (simple_type->type) {
1954 case MONO_TYPE_VOID:
1955 return 1;
1956 continue;
1957 case MONO_TYPE_I1:
1958 case MONO_TYPE_U1:
1959 case MONO_TYPE_BOOLEAN:
1960 case MONO_TYPE_I2:
1961 case MONO_TYPE_U2:
1962 case MONO_TYPE_CHAR:
1963 case MONO_TYPE_I4:
1964 case MONO_TYPE_U4:
1965 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1966 return 1;
1967 continue;
1968 case MONO_TYPE_I:
1969 case MONO_TYPE_U:
1970 case MONO_TYPE_PTR:
1971 case MONO_TYPE_FNPTR:
1972 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1973 return 1;
1974 continue;
1975 case MONO_TYPE_CLASS:
1976 case MONO_TYPE_STRING:
1977 case MONO_TYPE_OBJECT:
1978 case MONO_TYPE_SZARRAY:
1979 case MONO_TYPE_ARRAY:
1980 if (args [i]->type != STACK_OBJ)
1981 return 1;
1982 continue;
1983 case MONO_TYPE_I8:
1984 case MONO_TYPE_U8:
1985 if (args [i]->type != STACK_I8)
1986 return 1;
1987 continue;
1988 case MONO_TYPE_R4:
1989 case MONO_TYPE_R8:
1990 if (args [i]->type != STACK_R8)
1991 return 1;
1992 continue;
1993 case MONO_TYPE_VALUETYPE:
1994 if (simple_type->data.klass->enumtype) {
1995 simple_type = mono_class_enum_basetype (simple_type->data.klass);
1996 goto handle_enum;
1998 if (args [i]->type != STACK_VTYPE)
1999 return 1;
2000 continue;
2001 case MONO_TYPE_TYPEDBYREF:
2002 if (args [i]->type != STACK_VTYPE)
2003 return 1;
2004 continue;
2005 case MONO_TYPE_GENERICINST:
2006 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2007 goto handle_enum;
2009 default:
2010 g_error ("unknown type 0x%02x in check_call_signature",
2011 simple_type->type);
2014 return 0;
2017 static int
2018 callvirt_to_call (int opcode)
2020 switch (opcode) {
2021 case OP_CALLVIRT:
2022 return OP_CALL;
2023 case OP_VOIDCALLVIRT:
2024 return OP_VOIDCALL;
2025 case OP_FCALLVIRT:
2026 return OP_FCALL;
2027 case OP_VCALLVIRT:
2028 return OP_VCALL;
2029 case OP_LCALLVIRT:
2030 return OP_LCALL;
2031 default:
2032 g_assert_not_reached ();
2035 return -1;
2038 static int
2039 callvirt_to_call_membase (int opcode)
2041 switch (opcode) {
2042 case OP_CALLVIRT:
2043 return OP_CALL_MEMBASE;
2044 case OP_VOIDCALLVIRT:
2045 return OP_VOIDCALL_MEMBASE;
2046 case OP_FCALLVIRT:
2047 return OP_FCALL_MEMBASE;
2048 case OP_LCALLVIRT:
2049 return OP_LCALL_MEMBASE;
2050 case OP_VCALLVIRT:
2051 return OP_VCALL_MEMBASE;
2052 default:
2053 g_assert_not_reached ();
2056 return -1;
2059 #ifdef MONO_ARCH_HAVE_IMT
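/*
 * emit_imt_argument:
 * Pass the interface method (or IMT_ARG when given) to the callee: in the
 * dedicated IMT register when the architecture defines one, otherwise through
 * the arch-specific mono_arch_emit_imt_argument () hook.
 */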
2060 static void
2061 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2063 #ifdef MONO_ARCH_IMT_REG
2064 int method_reg = alloc_preg (cfg);
2066 if (imt_arg) {
2067 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2068 } else if (cfg->compile_aot) {
2069 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2070 } else {
2071 MonoInst *ins;
2072 MONO_INST_NEW (cfg, ins, OP_PCONST);
2073 ins->inst_p0 = call->method;
2074 ins->dreg = method_reg;
2075 MONO_ADD_INS (cfg->cbb, ins);
2078 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2079 #else
2080 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2081 #endif
2083 #endif
2085 static MonoJumpInfo *
2086 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2088 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2090 ji->ip.i = ip;
2091 ji->type = type;
2092 ji->data.target = target;
2094 return ji;
2097 inline static MonoInst*
2098 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2100 inline static MonoCallInst *
2101 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2102 MonoInst **args, int calli, int virtual)
2104 MonoCallInst *call;
2105 #ifdef MONO_ARCH_SOFT_FLOAT
2106 int i;
2107 #endif
2109 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2111 call->args = args;
2112 call->signature = sig;
2114 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2116 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2117 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2118 MonoInst *loada;
2120 temp->backend.is_pinvoke = sig->pinvoke;
2123 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2124 	 * address of the return value to increase optimization opportunities.
2125 * Before vtype decomposition, the dreg of the call ins itself represents the
2126 * fact the call modifies the return value. After decomposition, the call will
2127 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2128 * will be transformed into an LDADDR.
2130 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2131 loada->dreg = alloc_preg (cfg);
2132 loada->inst_p0 = temp;
2133 /* We reference the call too since call->dreg could change during optimization */
2134 loada->inst_p1 = call;
2135 MONO_ADD_INS (cfg->cbb, loada);
2137 call->inst.dreg = temp->dreg;
2139 call->vret_var = loada;
2140 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2141 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2143 #ifdef MONO_ARCH_SOFT_FLOAT
2145 * If the call has a float argument, we would need to do an r8->r4 conversion using
2146 * an icall, but that cannot be done during the call sequence since it would clobber
2147 * the call registers + the stack. So we do it before emitting the call.
2149 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2150 MonoType *t;
2151 MonoInst *in = call->args [i];
2153 if (i >= sig->hasthis)
2154 t = sig->params [i - sig->hasthis];
2155 else
2156 t = &mono_defaults.int_class->byval_arg;
2157 t = mono_type_get_underlying_type (t);
2159 if (!t->byref && t->type == MONO_TYPE_R4) {
2160 MonoInst *iargs [1];
2161 MonoInst *conv;
2163 iargs [0] = in;
2164 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2166 /* The result will be in an int vreg */
2167 call->args [i] = conv;
2170 #endif
2172 mono_arch_emit_call (cfg, call);
2174 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2175 cfg->flags |= MONO_CFG_HAS_CALLS;
2177 return call;
2180 inline static MonoInst*
2181 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2183 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2185 call->inst.sreg1 = addr->dreg;
2187 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2189 return (MonoInst*)call;
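/* Same as mono_emit_calli (), but also passes RGCTX_ARG in the architecture's RGCTX register */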
2192 inline static MonoInst*
2193 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2195 #ifdef MONO_ARCH_RGCTX_REG
2196 MonoCallInst *call;
2197 int rgctx_reg = -1;
2199 if (rgctx_arg) {
2200 rgctx_reg = mono_alloc_preg (cfg);
2201 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2203 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2204 if (rgctx_arg) {
2205 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2206 cfg->uses_rgctx_reg = TRUE;
2207 call->rgctx_reg = TRUE;
2209 return (MonoInst*)call;
2210 #else
2211 g_assert_not_reached ();
2212 return NULL;
2213 #endif
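/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) call to METHOD. Non-virtual and final methods are
 * turned into direct calls guarded by a null check on THIS, delegate Invoke
 * calls go through delegate->invoke_impl, interface calls use the IMT or the
 * interface slot in the vtable, and everything else loads the target from the
 * method's vtable slot.
 */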
2216 static MonoInst*
2217 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2218 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2220 gboolean virtual = this != NULL;
2221 gboolean enable_for_aot = TRUE;
2222 MonoCallInst *call;
2224 if (method->string_ctor) {
2225 /* Create the real signature */
2226 /* FIXME: Cache these */
2227 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2228 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2230 sig = ctor_sig;
2233 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2235 if (this && sig->hasthis &&
2236 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2237 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2238 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2239 } else {
2240 call->method = method;
2242 call->inst.flags |= MONO_INST_HAS_METHOD;
2243 call->inst.inst_left = this;
2245 if (virtual) {
2246 int vtable_reg, slot_reg, this_reg;
2248 this_reg = this->dreg;
2250 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2251 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2252 /* Make a call to delegate->invoke_impl */
2253 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2254 call->inst.inst_basereg = this_reg;
2255 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2256 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2258 return (MonoInst*)call;
2260 #endif
2262 if ((!cfg->compile_aot || enable_for_aot) &&
2263 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2264 (MONO_METHOD_IS_FINAL (method) &&
2265 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2267 * the method is not virtual, we just need to ensure this is not null
2268 * and then we can call the method directly.
2270 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2271 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2274 if (!method->string_ctor) {
2275 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2276 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2277 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2280 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2282 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2284 return (MonoInst*)call;
2287 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2289 * the method is virtual, but we can statically dispatch since either
2290 	 * its class or the method itself is sealed.
2291 * But first we need to ensure it's not a null reference.
2293 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2294 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2295 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2297 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2298 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2300 return (MonoInst*)call;
2303 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2305 vtable_reg = alloc_preg (cfg);
2306 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2307 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2308 slot_reg = -1;
2309 #ifdef MONO_ARCH_HAVE_IMT
2310 if (mono_use_imt) {
2311 guint32 imt_slot = mono_method_get_imt_slot (method);
2312 emit_imt_argument (cfg, call, imt_arg);
2313 slot_reg = vtable_reg;
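/* IMT slots are stored at negative offsets just before the vtable, hence the (imt_slot - MONO_IMT_SIZE) scaling */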
2314 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2316 #endif
2317 if (slot_reg == -1) {
2318 slot_reg = alloc_preg (cfg);
2319 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2320 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2322 } else {
2323 slot_reg = vtable_reg;
2324 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2325 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2326 #ifdef MONO_ARCH_HAVE_IMT
2327 if (imt_arg) {
2328 g_assert (mono_method_signature (method)->generic_param_count);
2329 emit_imt_argument (cfg, call, imt_arg);
2331 #endif
2334 call->inst.sreg1 = slot_reg;
2335 call->virtual = TRUE;
2338 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2340 return (MonoInst*)call;
2343 static MonoInst*
2344 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2345 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2347 int rgctx_reg;
2348 MonoInst *ins;
2349 MonoCallInst *call;
2351 if (vtable_arg) {
2352 #ifdef MONO_ARCH_RGCTX_REG
2353 rgctx_reg = mono_alloc_preg (cfg);
2354 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2355 #else
2356 NOT_IMPLEMENTED;
2357 #endif
2359 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2361 call = (MonoCallInst*)ins;
2362 if (vtable_arg) {
2363 #ifdef MONO_ARCH_RGCTX_REG
2364 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2365 cfg->uses_rgctx_reg = TRUE;
2366 call->rgctx_reg = TRUE;
2367 #else
2368 NOT_IMPLEMENTED;
2369 #endif
2372 return ins;
2375 static inline MonoInst*
2376 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2378 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2381 MonoInst*
2382 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2383 MonoInst **args)
2385 MonoCallInst *call;
2387 g_assert (sig);
2389 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2390 call->fptr = func;
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
2397 inline static MonoInst*
2398 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2400 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2402 g_assert (info);
2404 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2408 * mono_emit_abs_call:
2410 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2412 inline static MonoInst*
2413 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2414 MonoMethodSignature *sig, MonoInst **args)
2416 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2417 MonoInst *ins;
2420 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2421 * handle it.
2423 if (cfg->abs_patches == NULL)
2424 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2425 g_hash_table_insert (cfg->abs_patches, ji, ji);
2426 ins = mono_emit_native_call (cfg, ji, sig, args);
2427 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2428 return ins;
2431 static MonoInst*
2432 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2434 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2435 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2436 int widen_op = -1;
2439 			 * Native code might return non-register-sized integers
2440 * without initializing the upper bits.
2442 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2443 case OP_LOADI1_MEMBASE:
2444 widen_op = OP_ICONV_TO_I1;
2445 break;
2446 case OP_LOADU1_MEMBASE:
2447 widen_op = OP_ICONV_TO_U1;
2448 break;
2449 case OP_LOADI2_MEMBASE:
2450 widen_op = OP_ICONV_TO_I2;
2451 break;
2452 case OP_LOADU2_MEMBASE:
2453 widen_op = OP_ICONV_TO_U2;
2454 break;
2455 default:
2456 break;
2459 if (widen_op != -1) {
2460 int dreg = alloc_preg (cfg);
2461 MonoInst *widen;
2463 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2464 widen->type = ins->type;
2465 ins = widen;
2470 return ins;
2473 static MonoMethod*
2474 get_memcpy_method (void)
2476 static MonoMethod *memcpy_method = NULL;
2477 if (!memcpy_method) {
2478 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2479 if (!memcpy_method)
2480 g_error ("Old corlib found. Install a new one");
2482 return memcpy_method;
2486 * Emit code to copy a valuetype of type @klass whose address is stored in
2487 * @src->dreg to memory whose address is stored at @dest->dreg.
2489 void
2490 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2492 MonoInst *iargs [3];
2493 int n;
2494 guint32 align = 0;
2495 MonoMethod *memcpy_method;
2497 g_assert (klass);
2499 * This check breaks with spilled vars... need to handle it during verification anyway.
2500 * g_assert (klass && klass == src->klass && klass == dest->klass);
2503 if (native)
2504 n = mono_class_native_size (klass, &align);
2505 else
2506 n = mono_class_value_size (klass, &align);
2508 #if HAVE_WRITE_BARRIERS
2509 /* if native is true there should be no references in the struct */
2510 if (klass->has_references && !native) {
2511 /* Avoid barriers when storing to the stack */
2512 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2513 (dest->opcode == OP_LDADDR))) {
2514 iargs [0] = dest;
2515 iargs [1] = src;
2516 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2518 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2521 #endif
2523 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2524 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2525 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2526 } else {
2527 iargs [0] = dest;
2528 iargs [1] = src;
2529 EMIT_NEW_ICONST (cfg, iargs [2], n);
2531 memcpy_method = get_memcpy_method ();
2532 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2536 static MonoMethod*
2537 get_memset_method (void)
2539 static MonoMethod *memset_method = NULL;
2540 if (!memset_method) {
2541 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2542 if (!memset_method)
2543 g_error ("Old corlib found. Install a new one");
2545 return memset_method;
2548 void
2549 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2551 MonoInst *iargs [3];
2552 int n;
2553 guint32 align;
2554 MonoMethod *memset_method;
2556 /* FIXME: Optimize this for the case when dest is an LDADDR */
2558 mono_class_init (klass);
2559 n = mono_class_value_size (klass, &align);
2561 if (n <= sizeof (gpointer) * 5) {
2562 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2564 else {
2565 memset_method = get_memset_method ();
2566 iargs [0] = dest;
2567 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2568 EMIT_NEW_ICONST (cfg, iargs [2], n);
2569 mono_emit_method_call (cfg, memset_method, iargs, NULL);
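/*
 * emit_get_rgctx:
 * Load the runtime generic context for the currently compiled shared method:
 * the MRGCTX variable when the method itself is generic, the vtable variable
 * for static or valuetype methods, and otherwise the vtable loaded from 'this'.
 */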
2573 static MonoInst*
2574 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2576 MonoInst *this = NULL;
2578 g_assert (cfg->generic_sharing_context);
2580 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2581 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2582 !method->klass->valuetype)
2583 EMIT_NEW_ARGLOAD (cfg, this, 0);
2585 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2586 MonoInst *mrgctx_loc, *mrgctx_var;
2588 g_assert (!this);
2589 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2591 mrgctx_loc = mono_get_vtable_var (cfg);
2592 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2594 return mrgctx_var;
2595 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2596 MonoInst *vtable_loc, *vtable_var;
2598 g_assert (!this);
2600 vtable_loc = mono_get_vtable_var (cfg);
2601 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2603 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2604 MonoInst *mrgctx_var = vtable_var;
2605 int vtable_reg;
2607 vtable_reg = alloc_preg (cfg);
2608 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2609 vtable_var->type = STACK_PTR;
2612 return vtable_var;
2613 } else {
2614 MonoInst *ins;
2615 int vtable_reg, res_reg;
2617 vtable_reg = alloc_preg (cfg);
2618 res_reg = alloc_preg (cfg);
2619 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2620 return ins;
2624 static MonoJumpInfoRgctxEntry *
2625 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2627 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2628 res->method = method;
2629 res->in_mrgctx = in_mrgctx;
2630 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2631 res->data->type = patch_type;
2632 res->data->data.target = patch_data;
2633 res->info_type = info_type;
2635 return res;
2638 static inline MonoInst*
2639 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2641 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2644 static MonoInst*
2645 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2646 MonoClass *klass, int rgctx_type)
2648 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2649 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2651 return emit_rgctx_fetch (cfg, rgctx, entry);
2654 static MonoInst*
2655 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2656 MonoMethod *cmethod, int rgctx_type)
2658 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2659 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2661 return emit_rgctx_fetch (cfg, rgctx, entry);
2664 static MonoInst*
2665 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2666 MonoClassField *field, int rgctx_type)
2668 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2669 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2671 return emit_rgctx_fetch (cfg, rgctx, entry);
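/*
 * mini_emit_check_array_type:
 * Check that OBJ is an instance of ARRAY_CLASS by comparing either the class
 * (under MONO_OPT_SHARED), an RGCTX-fetched vtable (shared generic code) or
 * the vtable constant, throwing ArrayTypeMismatchException on mismatch.
 */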
2674 static void
2675 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2677 int vtable_reg = alloc_preg (cfg);
2678 int context_used = 0;
2680 if (cfg->generic_sharing_context)
2681 context_used = mono_class_check_context_used (array_class);
2683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2685 if (cfg->opt & MONO_OPT_SHARED) {
2686 int class_reg = alloc_preg (cfg);
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2688 if (cfg->compile_aot) {
2689 int klass_reg = alloc_preg (cfg);
2690 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2692 } else {
2693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2695 } else if (context_used) {
2696 MonoInst *vtable_ins;
2698 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2699 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2700 } else {
2701 if (cfg->compile_aot) {
2702 int vt_reg = alloc_preg (cfg);
2703 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2705 } else {
2706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2713 static void
2714 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2716 if (mini_get_debug_options ()->better_cast_details) {
2717 int to_klass_reg = alloc_preg (cfg);
2718 int vtable_reg = alloc_preg (cfg);
2719 int klass_reg = alloc_preg (cfg);
2720 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2722 if (!tls_get) {
2723 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2724 exit (1);
2727 MONO_ADD_INS (cfg->cbb, tls_get);
2728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2732 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2737 static void
2738 reset_cast_details (MonoCompile *cfg)
2740 /* Reset the variables holding the cast details */
2741 if (mini_get_debug_options ()->better_cast_details) {
2742 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2744 MONO_ADD_INS (cfg->cbb, tls_get);
2745 /* It is enough to reset the from field */
2746 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2751 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2752 * generic code is generated.
2754 static MonoInst*
2755 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2757 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2759 if (context_used) {
2760 MonoInst *rgctx, *addr;
2762 /* FIXME: What if the class is shared? We might not
2763 have to get the address of the method from the
2764 RGCTX. */
2765 addr = emit_get_rgctx_method (cfg, context_used, method,
2766 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2768 rgctx = emit_get_rgctx (cfg, method, context_used);
2770 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2771 } else {
2772 return mono_emit_method_call (cfg, method, &val, NULL);
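/*
 * handle_unbox:
 * Unbox a boxed valuetype: verify that the vtable's element class matches
 * KLASS, then return the address of the data just past the MonoObject header.
 */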
2776 static MonoInst*
2777 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2779 MonoInst *add;
2780 int obj_reg;
2781 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2782 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2783 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2784 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2786 obj_reg = sp [0]->dreg;
2787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2790 /* FIXME: generics */
2791 g_assert (klass->rank == 0);
2793 // Check rank == 0
2794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2795 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2800 if (context_used) {
2801 MonoInst *element_class;
2803 /* This assertion is from the unboxcast insn */
2804 g_assert (klass->rank == 0);
2806 element_class = emit_get_rgctx_klass (cfg, context_used,
2807 klass->element_class, MONO_RGCTX_INFO_KLASS);
2809 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2810 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2811 } else {
2812 save_cast_details (cfg, klass->element_class, obj_reg);
2813 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2814 reset_cast_details (cfg);
2817 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2818 MONO_ADD_INS (cfg->cbb, add);
2819 add->type = STACK_MP;
2820 add->klass = klass;
2822 return add;
2825 static MonoInst*
2826 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2828 MonoInst *iargs [2];
2829 void *alloc_ftn;
2831 if (cfg->opt & MONO_OPT_SHARED) {
2832 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2833 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2835 alloc_ftn = mono_object_new;
2836 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2837 		/* This happens often in argument checking code, e.g. throw new FooException... */
2838 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2839 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2840 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2841 } else {
2842 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2843 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2844 gboolean pass_lw;
2846 if (managed_alloc) {
2847 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2848 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2850 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2851 if (pass_lw) {
2852 guint32 lw = vtable->klass->instance_size;
2853 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2854 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2855 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2857 else {
2858 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2862 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
2865 static MonoInst*
2866 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2867 gboolean for_box)
2869 MonoInst *iargs [2];
2870 MonoMethod *managed_alloc = NULL;
2871 void *alloc_ftn;
2874 FIXME: we cannot get managed_alloc here because we can't get
2875 the class's vtable (because it's not a closed class)
2877 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2878 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2881 if (cfg->opt & MONO_OPT_SHARED) {
2882 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2883 iargs [1] = data_inst;
2884 alloc_ftn = mono_object_new;
2885 } else {
2886 if (managed_alloc) {
2887 iargs [0] = data_inst;
2888 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2891 iargs [0] = data_inst;
2892 alloc_ftn = mono_object_new_specific;
2895 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
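/*
 * handle_box:
 * Box VAL of type KLASS: allocate the object and store the value just past
 * the MonoObject header. Nullable types are boxed by calling the Box method
 * of the nullable class instead.
 */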
2898 static MonoInst*
2899 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2901 MonoInst *alloc, *ins;
2903 if (mono_class_is_nullable (klass)) {
2904 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2905 return mono_emit_method_call (cfg, method, &val, NULL);
2908 alloc = handle_alloc (cfg, klass, TRUE);
2910 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2912 return alloc;
2915 static MonoInst *
2916 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2918 MonoInst *alloc, *ins;
2920 if (mono_class_is_nullable (klass)) {
2921 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2922 /* FIXME: What if the class is shared? We might not
2923 have to get the method address from the RGCTX. */
2924 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2925 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2926 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2928 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2929 } else {
2930 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2932 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2934 return alloc;
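/*
 * handle_castclass:
 * Implement castclass: a null reference passes through unchanged; otherwise
 * the object's vtable/class is checked against KLASS (mini_emit_iface_cast ()
 * for interfaces, a direct class compare for sealed classes, and
 * mini_emit_castclass () otherwise), throwing InvalidCastException on failure.
 */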
2938 static MonoInst*
2939 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2941 MonoBasicBlock *is_null_bb;
2942 int obj_reg = src->dreg;
2943 int vtable_reg = alloc_preg (cfg);
2945 NEW_BBLOCK (cfg, is_null_bb);
2947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2948 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2950 save_cast_details (cfg, klass, obj_reg);
2952 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2954 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2955 } else {
2956 int klass_reg = alloc_preg (cfg);
2958 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2960 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2961 /* the remoting code is broken, access the class for now */
2962 if (0) {
2963 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2964 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2965 } else {
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2970 } else {
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2972 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2976 MONO_START_BB (cfg, is_null_bb);
2978 reset_cast_details (cfg);
2980 return src;
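/*
 * handle_isinst:
 * Like handle_castclass, but produces the object reference on success and
 * NULL on failure instead of throwing.
 */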
2983 static MonoInst*
2984 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2986 MonoInst *ins;
2987 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2988 int obj_reg = src->dreg;
2989 int vtable_reg = alloc_preg (cfg);
2990 int res_reg = alloc_preg (cfg);
2992 NEW_BBLOCK (cfg, is_null_bb);
2993 NEW_BBLOCK (cfg, false_bb);
2994 NEW_BBLOCK (cfg, end_bb);
2996 /* Do the assignment at the beginning, so the other assignment can be if converted */
2997 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2998 ins->type = STACK_OBJ;
2999 ins->klass = klass;
3001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3004 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3006 /* the is_null_bb target simply copies the input register to the output */
3007 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3008 } else {
3009 int klass_reg = alloc_preg (cfg);
3011 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3013 if (klass->rank) {
3014 int rank_reg = alloc_preg (cfg);
3015 int eclass_reg = alloc_preg (cfg);
3017 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3019 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3020 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3022 if (klass->cast_class == mono_defaults.object_class) {
3023 int parent_reg = alloc_preg (cfg);
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3025 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3026 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3027 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3028 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3029 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3030 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3031 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3032 } else if (klass->cast_class == mono_defaults.enum_class) {
3033 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3035 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3036 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3037 } else {
3038 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3039 /* Check that the object is a vector too */
3040 int bounds_reg = alloc_preg (cfg);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3043 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3046 /* the is_null_bb target simply copies the input register to the output */
3047 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3049 } else if (mono_class_is_nullable (klass)) {
3050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3051 /* the is_null_bb target simply copies the input register to the output */
3052 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3053 } else {
3054 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3055 /* the remoting code is broken, access the class for now */
3056 if (0) {
3057 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3058 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3059 } else {
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3063 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3064 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3065 } else {
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3067 /* the is_null_bb target simply copies the input register to the output */
3068 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3073 MONO_START_BB (cfg, false_bb);
3075 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3076 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3078 MONO_START_BB (cfg, is_null_bb);
3080 MONO_START_BB (cfg, end_bb);
3082 return ins;
3085 static MonoInst*
3086 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3088 /* This opcode takes as input an object reference and a class, and returns:
3089 0) if the object is an instance of the class,
3090 	   1) if the object is not an instance of the class,
3091 2) if the object is a proxy whose type cannot be determined */
3093 MonoInst *ins;
3094 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3095 int obj_reg = src->dreg;
3096 int dreg = alloc_ireg (cfg);
3097 int tmp_reg;
3098 int klass_reg = alloc_preg (cfg);
3100 NEW_BBLOCK (cfg, true_bb);
3101 NEW_BBLOCK (cfg, false_bb);
3102 NEW_BBLOCK (cfg, false2_bb);
3103 NEW_BBLOCK (cfg, end_bb);
3104 NEW_BBLOCK (cfg, no_proxy_bb);
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3109 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3110 NEW_BBLOCK (cfg, interface_fail_bb);
3112 tmp_reg = alloc_preg (cfg);
3113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3114 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3115 MONO_START_BB (cfg, interface_fail_bb);
3116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3118 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3120 tmp_reg = alloc_preg (cfg);
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3124 } else {
3125 tmp_reg = alloc_preg (cfg);
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3129 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3130 tmp_reg = alloc_preg (cfg);
3131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3134 tmp_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3139 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3142 MONO_START_BB (cfg, no_proxy_bb);
3144 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3147 MONO_START_BB (cfg, false_bb);
3149 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3152 MONO_START_BB (cfg, false2_bb);
3154 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3157 MONO_START_BB (cfg, true_bb);
3159 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3161 MONO_START_BB (cfg, end_bb);
3163 /* FIXME: */
3164 MONO_INST_NEW (cfg, ins, OP_ICONST);
3165 ins->dreg = dreg;
3166 ins->type = STACK_I4;
3168 return ins;
3171 static MonoInst*
3172 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3174 /* This opcode takes as input an object reference and a class, and returns:
3175 0) if the object is an instance of the class,
3176 1) if the object is a proxy whose type cannot be determined
3177 	   an InvalidCastException is thrown otherwise */
3179 MonoInst *ins;
3180 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3181 int obj_reg = src->dreg;
3182 int dreg = alloc_ireg (cfg);
3183 int tmp_reg = alloc_preg (cfg);
3184 int klass_reg = alloc_preg (cfg);
3186 NEW_BBLOCK (cfg, end_bb);
3187 NEW_BBLOCK (cfg, ok_result_bb);
3189 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3192 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3193 NEW_BBLOCK (cfg, interface_fail_bb);
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3196 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3197 MONO_START_BB (cfg, interface_fail_bb);
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3200 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3202 tmp_reg = alloc_preg (cfg);
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3205 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3207 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3208 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3210 } else {
3211 NEW_BBLOCK (cfg, no_proxy_bb);
3213 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3214 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3215 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3217 tmp_reg = alloc_preg (cfg);
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3221 tmp_reg = alloc_preg (cfg);
3222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3226 NEW_BBLOCK (cfg, fail_1_bb);
3228 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3230 MONO_START_BB (cfg, fail_1_bb);
3232 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3235 MONO_START_BB (cfg, no_proxy_bb);
3237 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3240 MONO_START_BB (cfg, ok_result_bb);
3242 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3244 MONO_START_BB (cfg, end_bb);
3246 /* FIXME: */
3247 MONO_INST_NEW (cfg, ins, OP_ICONST);
3248 ins->dreg = dreg;
3249 ins->type = STACK_I4;
3251 return ins;
3254 static G_GNUC_UNUSED MonoInst*
3255 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3257 gpointer *trampoline;
3258 MonoInst *obj, *method_ins, *tramp_ins;
3259 MonoDomain *domain;
3260 guint8 **code_slot;
3262 obj = handle_alloc (cfg, klass, FALSE);
3264 /* Inline the contents of mono_delegate_ctor */
3266 /* Set target field */
3267 /* Optimize away setting of NULL target */
3268 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3271 /* Set method field */
3272 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3276 * To avoid looking up the compiled code belonging to the target method
3277 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3278 * store it, and we fill it after the method has been compiled.
3280 if (!cfg->compile_aot && !method->dynamic) {
3281 MonoInst *code_slot_ins;
3283 domain = mono_domain_get ();
3284 mono_domain_lock (domain);
3285 if (!domain_jit_info (domain)->method_code_hash)
3286 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3287 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3288 if (!code_slot) {
3289 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3290 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3292 mono_domain_unlock (domain);
3294 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3298 /* Set invoke_impl field */
3299 if (cfg->compile_aot) {
3300 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3301 } else {
3302 trampoline = mono_create_delegate_trampoline (klass);
3303 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3307 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3309 return obj;
3312 static MonoInst*
3313 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3315 MonoJitICallInfo *info;
3317 /* Need to register the icall so it gets an icall wrapper */
3318 info = mono_get_array_new_va_icall (rank);
3320 cfg->flags |= MONO_CFG_HAS_VARARGS;
3322 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3323 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3326 static void
3327 mono_emit_load_got_addr (MonoCompile *cfg)
3329 MonoInst *getaddr, *dummy_use;
3331 if (!cfg->got_var || cfg->got_var_allocated)
3332 return;
3334 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3335 getaddr->dreg = cfg->got_var->dreg;
3337 /* Add it to the start of the first bblock */
3338 if (cfg->bb_entry->code) {
3339 getaddr->next = cfg->bb_entry->code;
3340 cfg->bb_entry->code = getaddr;
3342 else
3343 MONO_ADD_INS (cfg->bb_entry, getaddr);
3345 cfg->got_var_allocated = TRUE;
3348 * Add a dummy use to keep the got_var alive, since real uses might
3349 * only be generated by the back ends.
3350 * Add it to end_bblock, so the variable's lifetime covers the whole
3351 * method.
3352 * It would be better to make the usage of the got var explicit in all
3353 * cases when the backend needs it (i.e. calls, throw etc.), so this
3354 * wouldn't be needed.
3356 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3357 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3360 static int inline_limit;
3361 static gboolean inline_limit_inited;
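/*
 * mono_method_check_inlining:
 * Decide whether METHOD is a candidate for inlining: reject runtime,
 * internal-call, noinlining, synchronized and pinvoke methods, methods of
 * MarshalByRef classes, methods with exception clauses or bodies larger than
 * the inline limit (MONO_INLINELIMIT), and methods whose class still needs
 * its cctor to run.
 */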
3363 static gboolean
3364 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3366 MonoMethodHeader *header;
3367 MonoVTable *vtable;
3368 #ifdef MONO_ARCH_SOFT_FLOAT
3369 MonoMethodSignature *sig = mono_method_signature (method);
3370 int i;
3371 #endif
3373 if (cfg->generic_sharing_context)
3374 return FALSE;
3376 #ifdef MONO_ARCH_HAVE_LMF_OPS
3377 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3378 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3379 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3380 return TRUE;
3381 #endif
3383 if (method->is_inflated)
3384 /* Avoid inflating the header */
3385 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3386 else
3387 header = mono_method_get_header (method);
3389 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3390 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3391 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3392 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3393 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3394 (method->klass->marshalbyref) ||
3395 !header || header->num_clauses)
3396 return FALSE;
3398 /* also consider num_locals? */
3399 /* Do the size check early to avoid creating vtables */
3400 if (!inline_limit_inited) {
3401 if (getenv ("MONO_INLINELIMIT"))
3402 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3403 else
3404 inline_limit = INLINE_LENGTH_LIMIT;
3405 inline_limit_inited = TRUE;
3407 if (header->code_size >= inline_limit)
3408 return FALSE;
3411 	 * if we can initialize the class of the method right away, we do;
3412 	 * otherwise we don't allow inlining when the class needs initialization,
3413 * since it would mean inserting a call to mono_runtime_class_init()
3414 * inside the inlined code
3416 if (!(cfg->opt & MONO_OPT_SHARED)) {
3417 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3418 if (cfg->run_cctors && method->klass->has_cctor) {
3419 if (!method->klass->runtime_info)
3420 /* No vtable created yet */
3421 return FALSE;
3422 vtable = mono_class_vtable (cfg->domain, method->klass);
3423 if (!vtable)
3424 return FALSE;
3425 				/* This makes it so that inlining cannot trigger */
3426 				/* .cctors: too many apps depend on them */
3427 				/* running in a specific order... */
3428 if (! vtable->initialized)
3429 return FALSE;
3430 mono_runtime_class_init (vtable);
3432 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3433 if (!method->klass->runtime_info)
3434 /* No vtable created yet */
3435 return FALSE;
3436 vtable = mono_class_vtable (cfg->domain, method->klass);
3437 if (!vtable)
3438 return FALSE;
3439 if (!vtable->initialized)
3440 return FALSE;
3442 } else {
3444 		 * If we're compiling for shared code,
3445 		 * the cctor will need to be run at AOT method load time, for example,
3446 * or at the end of the compilation of the inlining method.
3448 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3449 return FALSE;
3453 * CAS - do not inline methods with declarative security
3454 * Note: this has to be before any possible return TRUE;
3456 if (mono_method_has_declsec (method))
3457 return FALSE;
3459 #ifdef MONO_ARCH_SOFT_FLOAT
3460 /* FIXME: */
3461 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3462 return FALSE;
3463 for (i = 0; i < sig->param_count; ++i)
3464 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3465 return FALSE;
3466 #endif
3468 return TRUE;
3471 static gboolean
3472 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3474 if (vtable->initialized && !cfg->compile_aot)
3475 return FALSE;
3477 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3478 return FALSE;
3480 if (!mono_class_needs_cctor_run (vtable->klass, method))
3481 return FALSE;
3483 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3484 /* The initialization is already done before the method is called */
3485 return FALSE;
3487 return TRUE;
3490 static MonoInst*
3491 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3493 MonoInst *ins;
3494 guint32 size;
3495 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3497 mono_class_init (klass);
3498 size = mono_class_array_element_size (klass);
3500 mult_reg = alloc_preg (cfg);
3501 array_reg = arr->dreg;
3502 index_reg = index->dreg;
3504 #if SIZEOF_REGISTER == 8
3505 /* The array reg is 64 bits but the index reg is only 32 */
3506 index2_reg = alloc_preg (cfg);
3507 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3508 #else
3509 if (index->type == STACK_I8) {
3510 index2_reg = alloc_preg (cfg);
3511 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3512 } else {
3513 index2_reg = index_reg;
3515 #endif
3517 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3519 #if defined(__i386__) || defined(__x86_64__)
3520 if (size == 1 || size == 2 || size == 4 || size == 8) {
3521 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3523 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3524 ins->type = STACK_PTR;
3526 return ins;
3528 #endif
3530 add_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3533 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3534 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3535 ins->type = STACK_PTR;
3536 MONO_ADD_INS (cfg->cbb, ins);
3538 return ins;
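/*
 * In other words, the code above computes the usual one-dimensional element address
 *
 *   &arr->vector [index]  ==  arr + G_STRUCT_OFFSET (MonoArray, vector) + index * element_size
 *
 * after sign-extending the index to pointer width where needed and bounds-checking it
 * against max_length; on x86/amd64 a single LEA replaces the explicit multiply/add when
 * the element size is 1, 2, 4 or 8.
 */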
3541 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3542 static MonoInst*
3543 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3545 int bounds_reg = alloc_preg (cfg);
3546 int add_reg = alloc_preg (cfg);
3547 int mult_reg = alloc_preg (cfg);
3548 int mult2_reg = alloc_preg (cfg);
3549 int low1_reg = alloc_preg (cfg);
3550 int low2_reg = alloc_preg (cfg);
3551 int high1_reg = alloc_preg (cfg);
3552 int high2_reg = alloc_preg (cfg);
3553 int realidx1_reg = alloc_preg (cfg);
3554 int realidx2_reg = alloc_preg (cfg);
3555 int sum_reg = alloc_preg (cfg);
3556 int index1, index2;
3557 MonoInst *ins;
3558 guint32 size;
3560 mono_class_init (klass);
3561 size = mono_class_array_element_size (klass);
3563 index1 = index_ins1->dreg;
3564 index2 = index_ins2->dreg;
3566 /* range checking */
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3568 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3570 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3571 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3572 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3574 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3575 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3576 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3579 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3580 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3582 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3583 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3584 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3586 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3587 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3590 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3592 ins->type = STACK_MP;
3593 ins->klass = klass;
3594 MONO_ADD_INS (cfg->cbb, ins);
3596 return ins;
3598 #endif
3600 static MonoInst*
3601 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3603 int rank;
3604 MonoInst *addr;
3605 MonoMethod *addr_method;
3606 int element_size;
3608 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3610 if (rank == 1)
3611 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3613 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3614 /* emit_ldelema_2 depends on OP_LMUL */
3615 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3616 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3618 #endif
3620 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3621 addr_method = mono_marshal_get_array_address (rank, element_size);
3622 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3624 return addr;
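/*
 * To summarize the dispatch above: rank 1 arrays get the inline address computation,
 * rank 2 arrays can use the ldelema_2 intrinsic when intrinsics are enabled and the
 * architecture does not emulate multiplication, and every other case calls the generic
 * helper returned by mono_marshal_get_array_address (), which handles arbitrary rank.
 */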
3627 static MonoInst*
3628 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3630 MonoInst *ins = NULL;
3632 static MonoClass *runtime_helpers_class = NULL;
3633 if (! runtime_helpers_class)
3634 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3635 "System.Runtime.CompilerServices", "RuntimeHelpers");
3637 if (cmethod->klass == mono_defaults.string_class) {
3638 if (strcmp (cmethod->name, "get_Chars") == 0) {
3639 int dreg = alloc_ireg (cfg);
3640 int index_reg = alloc_preg (cfg);
3641 int mult_reg = alloc_preg (cfg);
3642 int add_reg = alloc_preg (cfg);
3644 #if SIZEOF_REGISTER == 8
3645 /* The array reg is 64 bits but the index reg is only 32 */
3646 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3647 #else
3648 index_reg = args [1]->dreg;
3649 #endif
3650 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3652 #if defined(__i386__) || defined(__x86_64__)
3653 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3654 add_reg = ins->dreg;
3655 /* Avoid a warning */
3656 mult_reg = 0;
3657 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3658 add_reg, 0);
3659 #else
3660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3661 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3662 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3663 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3664 #endif
3665 type_from_op (ins, NULL, NULL);
3666 return ins;
3667 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3668 int dreg = alloc_ireg (cfg);
3669 /* Decompose later to allow more optimizations */
3670 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3671 ins->type = STACK_I4;
3672 cfg->cbb->has_array_access = TRUE;
3673 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3675 return ins;
3676 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3677 int mult_reg = alloc_preg (cfg);
3678 int add_reg = alloc_preg (cfg);
3680 /* The corlib functions check for oob already. */
3681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3682 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3684 } else
3685 return NULL;
3686 } else if (cmethod->klass == mono_defaults.object_class) {
3688 if (strcmp (cmethod->name, "GetType") == 0) {
3689 int dreg = alloc_preg (cfg);
3690 int vt_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3692 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3693 type_from_op (ins, NULL, NULL);
3695 return ins;
3696 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3697 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3698 int dreg = alloc_ireg (cfg);
3699 int t1 = alloc_ireg (cfg);
3701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3702 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3703 ins->type = STACK_I4;
3705 return ins;
3706 #endif
3707 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3708 MONO_INST_NEW (cfg, ins, OP_NOP);
3709 MONO_ADD_INS (cfg->cbb, ins);
3710 return ins;
3711 } else
3712 return NULL;
3713 } else if (cmethod->klass == mono_defaults.array_class) {
3714 if (cmethod->name [0] != 'g')
3715 return NULL;
3717 if (strcmp (cmethod->name, "get_Rank") == 0) {
3718 int dreg = alloc_ireg (cfg);
3719 int vtable_reg = alloc_preg (cfg);
3720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3721 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3722 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3723 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3724 type_from_op (ins, NULL, NULL);
3726 return ins;
3727 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3728 int dreg = alloc_ireg (cfg);
3730 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3731 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3732 type_from_op (ins, NULL, NULL);
3734 return ins;
3735 } else
3736 return NULL;
3737 } else if (cmethod->klass == runtime_helpers_class) {
3739 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3740 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3741 return ins;
3742 } else
3743 return NULL;
3744 } else if (cmethod->klass == mono_defaults.thread_class) {
3745 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3746 ins->dreg = alloc_preg (cfg);
3747 ins->type = STACK_OBJ;
3748 MONO_ADD_INS (cfg->cbb, ins);
3749 return ins;
3750 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3751 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3752 MONO_ADD_INS (cfg->cbb, ins);
3753 return ins;
3754 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3755 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3756 MONO_ADD_INS (cfg->cbb, ins);
3757 return ins;
3759 } else if (cmethod->klass == mono_defaults.monitor_class) {
3760 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3761 if (strcmp (cmethod->name, "Enter") == 0) {
3762 MonoCallInst *call;
3764 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3765 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3766 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3767 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3769 return (MonoInst*)call;
3770 } else if (strcmp (cmethod->name, "Exit") == 0) {
3771 MonoCallInst *call;
3773 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3774 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3775 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3776 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3778 return (MonoInst*)call;
3780 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3781 MonoMethod *fast_method = NULL;
3783 /* Avoid infinite recursion */
3784 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3785 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3786 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3787 return NULL;
3789 if (strcmp (cmethod->name, "Enter") == 0 ||
3790 strcmp (cmethod->name, "Exit") == 0)
3791 fast_method = mono_monitor_get_fast_path (cmethod);
3792 if (!fast_method)
3793 return NULL;
3795 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3796 #endif
3797 } else if (mini_class_is_system_array (cmethod->klass) &&
3798 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3799 MonoInst *addr, *store, *load;
3800 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3802 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3803 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3804 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3805 return store;
3806 } else if (cmethod->klass->image == mono_defaults.corlib &&
3807 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3808 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3809 ins = NULL;
3811 #if SIZEOF_REGISTER == 8
3812 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3813 /* 64 bit reads are already atomic */
3814 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3815 ins->dreg = mono_alloc_preg (cfg);
3816 ins->inst_basereg = args [0]->dreg;
3817 ins->inst_offset = 0;
3818 MONO_ADD_INS (cfg->cbb, ins);
3820 #endif
3822 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3823 if (strcmp (cmethod->name, "Increment") == 0) {
3824 MonoInst *ins_iconst;
3825 guint32 opcode = 0;
3827 if (fsig->params [0]->type == MONO_TYPE_I4)
3828 opcode = OP_ATOMIC_ADD_NEW_I4;
3829 #if SIZEOF_REGISTER == 8
3830 else if (fsig->params [0]->type == MONO_TYPE_I8)
3831 opcode = OP_ATOMIC_ADD_NEW_I8;
3832 #endif
3833 if (opcode) {
3834 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3835 ins_iconst->inst_c0 = 1;
3836 ins_iconst->dreg = mono_alloc_ireg (cfg);
3837 MONO_ADD_INS (cfg->cbb, ins_iconst);
3839 MONO_INST_NEW (cfg, ins, opcode);
3840 ins->dreg = mono_alloc_ireg (cfg);
3841 ins->inst_basereg = args [0]->dreg;
3842 ins->inst_offset = 0;
3843 ins->sreg2 = ins_iconst->dreg;
3844 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3845 MONO_ADD_INS (cfg->cbb, ins);
3847 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3848 MonoInst *ins_iconst;
3849 guint32 opcode = 0;
3851 if (fsig->params [0]->type == MONO_TYPE_I4)
3852 opcode = OP_ATOMIC_ADD_NEW_I4;
3853 #if SIZEOF_REGISTER == 8
3854 else if (fsig->params [0]->type == MONO_TYPE_I8)
3855 opcode = OP_ATOMIC_ADD_NEW_I8;
3856 #endif
3857 if (opcode) {
3858 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3859 ins_iconst->inst_c0 = -1;
3860 ins_iconst->dreg = mono_alloc_ireg (cfg);
3861 MONO_ADD_INS (cfg->cbb, ins_iconst);
3863 MONO_INST_NEW (cfg, ins, opcode);
3864 ins->dreg = mono_alloc_ireg (cfg);
3865 ins->inst_basereg = args [0]->dreg;
3866 ins->inst_offset = 0;
3867 ins->sreg2 = ins_iconst->dreg;
3868 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3869 MONO_ADD_INS (cfg->cbb, ins);
3871 } else if (strcmp (cmethod->name, "Add") == 0) {
3872 guint32 opcode = 0;
3874 if (fsig->params [0]->type == MONO_TYPE_I4)
3875 opcode = OP_ATOMIC_ADD_NEW_I4;
3876 #if SIZEOF_REGISTER == 8
3877 else if (fsig->params [0]->type == MONO_TYPE_I8)
3878 opcode = OP_ATOMIC_ADD_NEW_I8;
3879 #endif
3881 if (opcode) {
3882 MONO_INST_NEW (cfg, ins, opcode);
3883 ins->dreg = mono_alloc_ireg (cfg);
3884 ins->inst_basereg = args [0]->dreg;
3885 ins->inst_offset = 0;
3886 ins->sreg2 = args [1]->dreg;
3887 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3888 MONO_ADD_INS (cfg->cbb, ins);
3891 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3893 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3894 if (strcmp (cmethod->name, "Exchange") == 0) {
3895 guint32 opcode;
3897 if (fsig->params [0]->type == MONO_TYPE_I4)
3898 opcode = OP_ATOMIC_EXCHANGE_I4;
3899 #if SIZEOF_REGISTER == 8
3900 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3901 (fsig->params [0]->type == MONO_TYPE_I) ||
3902 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3903 opcode = OP_ATOMIC_EXCHANGE_I8;
3904 #else
3905 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3906 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3907 opcode = OP_ATOMIC_EXCHANGE_I4;
3908 #endif
3909 else
3910 return NULL;
3912 MONO_INST_NEW (cfg, ins, opcode);
3913 ins->dreg = mono_alloc_ireg (cfg);
3914 ins->inst_basereg = args [0]->dreg;
3915 ins->inst_offset = 0;
3916 ins->sreg2 = args [1]->dreg;
3917 MONO_ADD_INS (cfg->cbb, ins);
3919 switch (fsig->params [0]->type) {
3920 case MONO_TYPE_I4:
3921 ins->type = STACK_I4;
3922 break;
3923 case MONO_TYPE_I8:
3924 case MONO_TYPE_I:
3925 ins->type = STACK_I8;
3926 break;
3927 case MONO_TYPE_OBJECT:
3928 ins->type = STACK_OBJ;
3929 break;
3930 default:
3931 g_assert_not_reached ();
3934 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3936 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3937 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3938 int size = 0;
3939 if (fsig->params [1]->type == MONO_TYPE_I4)
3940 size = 4;
3941 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3942 size = sizeof (gpointer);
3943 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
3944 size = 8;
3945 if (size == 4) {
3946 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3947 ins->dreg = alloc_ireg (cfg);
3948 ins->sreg1 = args [0]->dreg;
3949 ins->sreg2 = args [1]->dreg;
3950 ins->sreg3 = args [2]->dreg;
3951 ins->type = STACK_I4;
3952 MONO_ADD_INS (cfg->cbb, ins);
3953 } else if (size == 8) {
3954 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3955 ins->dreg = alloc_ireg (cfg);
3956 ins->sreg1 = args [0]->dreg;
3957 ins->sreg2 = args [1]->dreg;
3958 ins->sreg3 = args [2]->dreg;
3959 ins->type = STACK_I8;
3960 MONO_ADD_INS (cfg->cbb, ins);
3961 } else {
3962 /* g_assert_not_reached (); */
3965 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3967 if (ins)
3968 return ins;
3969 } else if (cmethod->klass->image == mono_defaults.corlib) {
3970 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3971 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3972 MONO_INST_NEW (cfg, ins, OP_BREAK);
3973 MONO_ADD_INS (cfg->cbb, ins);
3974 return ins;
3976 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3977 && strcmp (cmethod->klass->name, "Environment") == 0) {
3978 #ifdef PLATFORM_WIN32
3979 EMIT_NEW_ICONST (cfg, ins, 1);
3980 #else
3981 EMIT_NEW_ICONST (cfg, ins, 0);
3982 #endif
3983 return ins;
3985 } else if (cmethod->klass == mono_defaults.math_class) {
3987 * There is generic branch-free code for Min/Max, but it does not work for
3988 * all inputs:
3989 * http://everything2.com/?node_id=1051618
3993 #ifdef MONO_ARCH_SIMD_INTRINSICS
3994 if (cfg->opt & MONO_OPT_SIMD) {
3995 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3996 if (ins)
3997 return ins;
3999 #endif
4001 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
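/*
 * As a concrete example of the intrinsics above: on architectures that define
 * MONO_ARCH_HAVE_ATOMIC_ADD, a call such as Interlocked.Increment (ref counter) on an
 * int is turned into a single OP_ATOMIC_ADD_NEW_I4 with a constant 1 operand instead
 * of an outgoing call; when no intrinsic matches, the code falls through to the SIMD
 * check and mono_arch_emit_inst_for_method () above.
 */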
4005 * This entry point could be used later for arbitrary method
4006 * redirection.
4008 inline static MonoInst*
4009 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4010 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4012 if (method->klass == mono_defaults.string_class) {
4013 /* managed string allocation support */
4014 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4015 MonoInst *iargs [2];
4016 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4017 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4018 if (!managed_alloc)
4019 return NULL;
4020 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4021 iargs [1] = args [0];
4022 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4025 return NULL;
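/*
 * At the moment the only redirection performed here is for string allocation: when the
 * GC provides a managed allocator for the string vtable, InternalAllocateStr is
 * replaced by a direct call to that managed allocator, passing the vtable constant and
 * the requested length. Other redirections could be hooked in at this point in the
 * same way.
 */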
4028 static void
4029 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4031 MonoInst *store, *temp;
4032 int i;
4034 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4035 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4038 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4039 * would be different than the MonoInst's used to represent arguments, and
4040 * the ldelema implementation can't deal with that.
4041 * Solution: When ldelema is used on an inline argument, create a var for
4042 * it, emit ldelema on that var, and emit the saving code below in
4043 * inline_method () if needed.
4045 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4046 cfg->args [i] = temp;
4047 /* This uses cfg->args [i] which is set by the preceding line */
4048 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4049 store->cil_code = sp [0]->cil_code;
4050 sp++;
4054 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4055 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4057 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4058 static gboolean
4059 check_inline_called_method_name_limit (MonoMethod *called_method)
4061 int strncmp_result;
4062 static char *limit = NULL;
4064 if (limit == NULL) {
4065 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4067 if (limit_string != NULL)
4068 limit = limit_string;
4069 else
4070 limit = (char *) "";
4073 if (limit [0] != '\0') {
4074 char *called_method_name = mono_method_full_name (called_method, TRUE);
4076 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4077 g_free (called_method_name);
4079 //return (strncmp_result <= 0);
4080 return (strncmp_result == 0);
4081 } else {
4082 return TRUE;
4085 #endif
4087 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4088 static gboolean
4089 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4091 int strncmp_result;
4092 static char *limit = NULL;
4094 if (limit == NULL) {
4095 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4096 if (limit_string != NULL) {
4097 limit = limit_string;
4098 } else {
4099 limit = (char *) "";
4103 if (limit [0] != '\0') {
4104 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4106 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4107 g_free (caller_method_name);
4109 //return (strncmp_result <= 0);
4110 return (strncmp_result == 0);
4111 } else {
4112 return TRUE;
4115 #endif
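/*
 * For illustration: both helpers above act as debugging filters. When the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT (resp. MONO_INLINE_CALLER_METHOD_NAME_LIMIT)
 * environment variable is set, only methods whose full name starts with that prefix
 * are inlined (resp. inlined into). For example, something like
 *
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT="System.String" mono program.exe
 *
 * restricts inlining to System.String methods; the value shown is only an example.
 * When the variables are unset the checks always succeed.
 */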
4117 static int
4118 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4119 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4121 MonoInst *ins, *rvar = NULL;
4122 MonoMethodHeader *cheader;
4123 MonoBasicBlock *ebblock, *sbblock;
4124 int i, costs;
4125 MonoMethod *prev_inlined_method;
4126 MonoInst **prev_locals, **prev_args;
4127 MonoType **prev_arg_types;
4128 guint prev_real_offset;
4129 GHashTable *prev_cbb_hash;
4130 MonoBasicBlock **prev_cil_offset_to_bb;
4131 MonoBasicBlock *prev_cbb;
4132 unsigned char* prev_cil_start;
4133 guint32 prev_cil_offset_to_bb_len;
4134 MonoMethod *prev_current_method;
4135 MonoGenericContext *prev_generic_context;
4136 gboolean ret_var_set, prev_ret_var_set;
4138 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4140 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4141 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4142 return 0;
4143 #endif
4144 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4145 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4146 return 0;
4147 #endif
4149 if (cfg->verbose_level > 2)
4150 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4152 if (!cmethod->inline_info) {
4153 mono_jit_stats.inlineable_methods++;
4154 cmethod->inline_info = 1;
4156 /* allocate space to store the return value */
4157 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4158 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4161 /* allocate local variables */
4162 cheader = mono_method_get_header (cmethod);
4163 prev_locals = cfg->locals;
4164 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4165 for (i = 0; i < cheader->num_locals; ++i)
4166 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4168 /* allocate start and end blocks */
4169 /* This is needed so if the inline is aborted, we can clean up */
4170 NEW_BBLOCK (cfg, sbblock);
4171 sbblock->real_offset = real_offset;
4173 NEW_BBLOCK (cfg, ebblock);
4174 ebblock->block_num = cfg->num_bblocks++;
4175 ebblock->real_offset = real_offset;
4177 prev_args = cfg->args;
4178 prev_arg_types = cfg->arg_types;
4179 prev_inlined_method = cfg->inlined_method;
4180 cfg->inlined_method = cmethod;
4181 cfg->ret_var_set = FALSE;
4182 prev_real_offset = cfg->real_offset;
4183 prev_cbb_hash = cfg->cbb_hash;
4184 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4185 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4186 prev_cil_start = cfg->cil_start;
4187 prev_cbb = cfg->cbb;
4188 prev_current_method = cfg->current_method;
4189 prev_generic_context = cfg->generic_context;
4190 prev_ret_var_set = cfg->ret_var_set;
4192 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4194 ret_var_set = cfg->ret_var_set;
4196 cfg->inlined_method = prev_inlined_method;
4197 cfg->real_offset = prev_real_offset;
4198 cfg->cbb_hash = prev_cbb_hash;
4199 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4200 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4201 cfg->cil_start = prev_cil_start;
4202 cfg->locals = prev_locals;
4203 cfg->args = prev_args;
4204 cfg->arg_types = prev_arg_types;
4205 cfg->current_method = prev_current_method;
4206 cfg->generic_context = prev_generic_context;
4207 cfg->ret_var_set = prev_ret_var_set;
4209 if ((costs >= 0 && costs < 60) || inline_allways) {
4210 if (cfg->verbose_level > 2)
4211 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4213 mono_jit_stats.inlined_methods++;
4215 /* always add some code to avoid block split failures */
4216 MONO_INST_NEW (cfg, ins, OP_NOP);
4217 MONO_ADD_INS (prev_cbb, ins);
4219 prev_cbb->next_bb = sbblock;
4220 link_bblock (cfg, prev_cbb, sbblock);
4223 * Get rid of the begin and end bblocks if possible to aid local
4224 * optimizations.
4226 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4228 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4229 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4231 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4232 MonoBasicBlock *prev = ebblock->in_bb [0];
4233 mono_merge_basic_blocks (cfg, prev, ebblock);
4234 cfg->cbb = prev;
4235 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4236 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4237 cfg->cbb = prev_cbb;
4239 } else {
4240 cfg->cbb = ebblock;
4243 if (rvar) {
4245 * If the inlined method contains only a throw, then the ret var is not
4246 * set, so set it to a dummy value.
4248 if (!ret_var_set) {
4249 static double r8_0 = 0.0;
4251 switch (rvar->type) {
4252 case STACK_I4:
4253 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4254 break;
4255 case STACK_I8:
4256 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4257 break;
4258 case STACK_PTR:
4259 case STACK_MP:
4260 case STACK_OBJ:
4261 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4262 break;
4263 case STACK_R8:
4264 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4265 ins->type = STACK_R8;
4266 ins->inst_p0 = (void*)&r8_0;
4267 ins->dreg = rvar->dreg;
4268 MONO_ADD_INS (cfg->cbb, ins);
4269 break;
4270 case STACK_VTYPE:
4271 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4272 break;
4273 default:
4274 g_assert_not_reached ();
4278 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4279 *sp++ = ins;
4281 return costs + 1;
4282 } else {
4283 if (cfg->verbose_level > 2)
4284 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4285 cfg->exception_type = MONO_EXCEPTION_NONE;
4286 mono_loader_clear_error ();
4288 /* This gets rid of the newly added bblocks */
4289 cfg->cbb = prev_cbb;
4291 return 0;
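/*
 * Note that two different limits are involved in inlining: the IL-size check earlier
 * in this file gates on inline_limit / INLINE_LENGTH_LIMIT, while the code above
 * additionally requires the IR cost reported by mono_method_to_ir () to stay below 60
 * unless inlining is forced (inline_allways).
 */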
4295 * Some of these comments may well be out-of-date.
4296 * Design decisions: we do a single pass over the IL code (and we do bblock
4297 * splitting/merging in the few cases when it's required: a back jump to an IL
4298 * address that was not already seen as a bblock starting point).
4299 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4300 * Complex operations are decomposed into simpler ones right away. We need to let the
4301 * arch-specific code peek and poke inside this process somehow (except when the
4302 * optimizations can take advantage of the full semantic info of coarse opcodes).
4303 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4304 * MonoInst->opcode initially is the IL opcode or some simplification of that
4305 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4306 * opcode with value bigger than OP_LAST.
4307 * At this point the IR can be handed over to an interpreter, a dumb code generator
4308 * or to the optimizing code generator that will translate it to SSA form.
4310 * Profiling directed optimizations.
4311 * We may compile by default with few or no optimizations and instrument the code
4312 * or the user may indicate what methods to optimize the most either in a config file
4313 * or through repeated runs where the compiler applies offline the optimizations to
4314 * each method and then decides if it was worth it.
4317 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4318 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4319 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4320 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4321 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4322 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4323 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4324 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4326 /* offset from br.s -> br like opcodes */
4327 #define BIG_BRANCH_OFFSET 13
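/*
 * For reference: the CIL short branch opcodes sit exactly 13 values below their 4-byte
 * counterparts, e.g. br.s (0x2B) + 13 = br (0x38) and beq.s (0x2E) + 13 = beq (0x3B),
 * which is why adding BIG_BRANCH_OFFSET to a short-form opcode yields the long form.
 */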
4329 static gboolean
4330 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4332 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4334 return b == NULL || b == bb;
4337 static int
4338 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4340 unsigned char *ip = start;
4341 unsigned char *target;
4342 int i;
4343 guint cli_addr;
4344 MonoBasicBlock *bblock;
4345 const MonoOpcode *opcode;
4347 while (ip < end) {
4348 cli_addr = ip - start;
4349 i = mono_opcode_value ((const guint8 **)&ip, end);
4350 if (i < 0)
4351 UNVERIFIED;
4352 opcode = &mono_opcodes [i];
4353 switch (opcode->argument) {
4354 case MonoInlineNone:
4355 ip++;
4356 break;
4357 case MonoInlineString:
4358 case MonoInlineType:
4359 case MonoInlineField:
4360 case MonoInlineMethod:
4361 case MonoInlineTok:
4362 case MonoInlineSig:
4363 case MonoShortInlineR:
4364 case MonoInlineI:
4365 ip += 5;
4366 break;
4367 case MonoInlineVar:
4368 ip += 3;
4369 break;
4370 case MonoShortInlineVar:
4371 case MonoShortInlineI:
4372 ip += 2;
4373 break;
4374 case MonoShortInlineBrTarget:
4375 target = start + cli_addr + 2 + (signed char)ip [1];
4376 GET_BBLOCK (cfg, bblock, target);
4377 ip += 2;
4378 if (ip < end)
4379 GET_BBLOCK (cfg, bblock, ip);
4380 break;
4381 case MonoInlineBrTarget:
4382 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4383 GET_BBLOCK (cfg, bblock, target);
4384 ip += 5;
4385 if (ip < end)
4386 GET_BBLOCK (cfg, bblock, ip);
4387 break;
4388 case MonoInlineSwitch: {
4389 guint32 n = read32 (ip + 1);
4390 guint32 j;
4391 ip += 5;
4392 cli_addr += 5 + 4 * n;
4393 target = start + cli_addr;
4394 GET_BBLOCK (cfg, bblock, target);
4396 for (j = 0; j < n; ++j) {
4397 target = start + cli_addr + (gint32)read32 (ip);
4398 GET_BBLOCK (cfg, bblock, target);
4399 ip += 4;
4401 break;
4403 case MonoInlineR:
4404 case MonoInlineI8:
4405 ip += 9;
4406 break;
4407 default:
4408 g_assert_not_reached ();
4411 if (i == CEE_THROW) {
4412 unsigned char *bb_start = ip - 1;
4414 /* Find the start of the bblock containing the throw */
4415 bblock = NULL;
4416 while ((bb_start >= start) && !bblock) {
4417 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4418 bb_start --;
4420 if (bblock)
4421 bblock->out_of_line = 1;
4424 return 0;
4425 unverified:
4426 *pos = ip;
4427 return 1;
4430 static inline MonoMethod *
4431 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4433 MonoMethod *method;
4435 if (m->wrapper_type != MONO_WRAPPER_NONE)
4436 return mono_method_get_wrapper_data (m, token);
4438 method = mono_get_method_full (m->klass->image, token, klass, context);
4440 return method;
4443 static inline MonoMethod *
4444 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4446 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4448 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4449 return NULL;
4451 return method;
4454 static inline MonoClass*
4455 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4457 MonoClass *klass;
4459 if (method->wrapper_type != MONO_WRAPPER_NONE)
4460 klass = mono_method_get_wrapper_data (method, token);
4461 else
4462 klass = mono_class_get_full (method->klass->image, token, context);
4463 if (klass)
4464 mono_class_init (klass);
4465 return klass;
4469 * Returns TRUE if the JIT should abort inlining because "callee"
4470 * is influenced by security attributes.
4472 static
4473 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4475 guint32 result;
4477 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4478 return TRUE;
4481 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4482 if (result == MONO_JIT_SECURITY_OK)
4483 return FALSE;
4485 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4486 /* Generate code to throw a SecurityException before the actual call/link */
4487 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4488 MonoInst *args [2];
4490 NEW_ICONST (cfg, args [0], 4);
4491 NEW_METHODCONST (cfg, args [1], caller);
4492 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4493 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4494 /* don't hide previous results */
4495 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4496 cfg->exception_data = result;
4497 return TRUE;
4500 return FALSE;
4503 static MonoMethod*
4504 method_access_exception (void)
4506 static MonoMethod *method = NULL;
4508 if (!method) {
4509 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4510 method = mono_class_get_method_from_name (secman->securitymanager,
4511 "MethodAccessException", 2);
4513 g_assert (method);
4514 return method;
4517 static void
4518 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4519 MonoBasicBlock *bblock, unsigned char *ip)
4521 MonoMethod *thrower = method_access_exception ();
4522 MonoInst *args [2];
4524 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4525 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4526 mono_emit_method_call (cfg, thrower, args, NULL);
4529 static MonoMethod*
4530 field_access_exception (void)
4532 static MonoMethod *method = NULL;
4534 if (!method) {
4535 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4536 method = mono_class_get_method_from_name (secman->securitymanager,
4537 "FieldAccessException", 2);
4539 g_assert (method);
4540 return method;
4543 static void
4544 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4545 MonoBasicBlock *bblock, unsigned char *ip)
4547 MonoMethod *thrower = field_access_exception ();
4548 MonoInst *args [2];
4550 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4551 EMIT_NEW_METHODCONST (cfg, args [1], field);
4552 mono_emit_method_call (cfg, thrower, args, NULL);
4556 * Return the original method if a wrapper is specified. We can only access
4557 * the custom attributes from the original method.
4559 static MonoMethod*
4560 get_original_method (MonoMethod *method)
4562 if (method->wrapper_type == MONO_WRAPPER_NONE)
4563 return method;
4565 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4566 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4567 return NULL;
4569 /* in other cases we need to find the original method */
4570 return mono_marshal_method_from_wrapper (method);
4573 static void
4574 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4575 MonoBasicBlock *bblock, unsigned char *ip)
4577 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4578 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4579 return;
4581 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4582 caller = get_original_method (caller);
4583 if (!caller)
4584 return;
4586 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4587 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4588 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
4591 static void
4592 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4593 MonoBasicBlock *bblock, unsigned char *ip)
4595 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4596 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4597 return;
4599 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4600 caller = get_original_method (caller);
4601 if (!caller)
4602 return;
4604 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4605 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4606 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4610 * Check that the IL instructions at ip are the array initialization
4611 * sequence and return the pointer to the data and the size.
4613 static const char*
4614 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4617 * newarr[System.Int32]
4618 * dup
4619 * ldtoken field valuetype ...
4620 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4622 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4623 guint32 token = read32 (ip + 7);
4624 guint32 field_token = read32 (ip + 2);
4625 guint32 field_index = field_token & 0xffffff;
4626 guint32 rva;
4627 const char *data_ptr;
4628 int size = 0;
4629 MonoMethod *cmethod;
4630 MonoClass *dummy_class;
4631 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4632 int dummy_align;
4634 if (!field)
4635 return NULL;
4637 *out_field_token = field_token;
4639 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4640 if (!cmethod)
4641 return NULL;
4642 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4643 return NULL;
4644 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4645 case MONO_TYPE_BOOLEAN:
4646 case MONO_TYPE_I1:
4647 case MONO_TYPE_U1:
4648 size = 1; break;
4649 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4650 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4651 case MONO_TYPE_CHAR:
4652 case MONO_TYPE_I2:
4653 case MONO_TYPE_U2:
4654 size = 2; break;
4655 case MONO_TYPE_I4:
4656 case MONO_TYPE_U4:
4657 case MONO_TYPE_R4:
4658 size = 4; break;
4659 case MONO_TYPE_R8:
4660 #ifdef ARM_FPU_FPA
4661 return NULL; /* stupid ARM FP swapped format */
4662 #endif
4663 case MONO_TYPE_I8:
4664 case MONO_TYPE_U8:
4665 size = 8; break;
4666 #endif
4667 default:
4668 return NULL;
4670 size *= len;
4671 if (size > mono_type_size (field->type, &dummy_align))
4672 return NULL;
4673 *out_size = size;
4674 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4675 if (!method->klass->image->dynamic) {
4676 field_index = read32 (ip + 2) & 0xffffff;
4677 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4678 data_ptr = mono_image_rva_map (method->klass->image, rva);
4679 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4680 /* for aot code we do the lookup on load */
4681 if (aot && data_ptr)
4682 return GUINT_TO_POINTER (rva);
4683 } else {
4684 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4685 g_assert (!aot);
4686 data_ptr = mono_field_get_data (field);
4688 return data_ptr;
4690 return NULL;
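/*
 * For context, the newarr/dup/ldtoken/call pattern recognized above is what C#
 * compilers typically emit for an array initializer such as
 *
 *   int[] a = new int[] { 1, 2, 3, 4 };
 *
 * with the initial values stored in an RVA-backed helper field, the field whose token
 * is read from the ldtoken instruction. The C# snippet is only an illustration.
 */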
4693 static void
4694 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4696 char *method_fname = mono_method_full_name (method, TRUE);
4697 char *method_code;
4699 if (mono_method_get_header (method)->code_size == 0)
4700 method_code = g_strdup ("method body is empty.");
4701 else
4702 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4703 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4704 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4705 g_free (method_fname);
4706 g_free (method_code);
4709 static void
4710 set_exception_object (MonoCompile *cfg, MonoException *exception)
4712 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4713 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4714 cfg->exception_ptr = exception;
4717 static gboolean
4718 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4720 MonoType *type;
4722 if (cfg->generic_sharing_context)
4723 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4724 else
4725 type = &klass->byval_arg;
4726 return MONO_TYPE_IS_REFERENCE (type);
4730 * mono_decompose_array_access_opts:
4732 * Decompose array access opcodes.
4733 * This should be in decompose.c, but it emits calls so it has to stay here until
4734 * the old JIT is gone.
4736 void
4737 mono_decompose_array_access_opts (MonoCompile *cfg)
4739 MonoBasicBlock *bb, *first_bb;
4742 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4743 * can be executed anytime. It should be run before decompose_long_opts ().
4747 * Create a dummy bblock and emit code into it so we can use the normal
4748 * code generation macros.
4750 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4751 first_bb = cfg->cbb;
4753 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4754 MonoInst *ins;
4755 MonoInst *prev = NULL;
4756 MonoInst *dest;
4757 MonoInst *iargs [3];
4758 gboolean restart;
4760 if (!bb->has_array_access)
4761 continue;
4763 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4765 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4766 restart = TRUE;
4768 while (restart) {
4769 restart = FALSE;
4771 for (ins = bb->code; ins; ins = ins->next) {
4772 switch (ins->opcode) {
4773 case OP_LDLEN:
4774 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4775 G_STRUCT_OFFSET (MonoArray, max_length));
4776 MONO_ADD_INS (cfg->cbb, dest);
4777 break;
4778 case OP_BOUNDS_CHECK:
4779 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
4780 break;
4781 case OP_NEWARR:
4782 if (cfg->opt & MONO_OPT_SHARED) {
4783 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4784 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4785 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4786 iargs [2]->dreg = ins->sreg1;
4788 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4789 dest->dreg = ins->dreg;
4790 } else {
4791 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4793 g_assert (vtable);
4794 NEW_VTABLECONST (cfg, iargs [0], vtable);
4795 MONO_ADD_INS (cfg->cbb, iargs [0]);
4796 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4797 iargs [1]->dreg = ins->sreg1;
4799 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4800 dest->dreg = ins->dreg;
4802 break;
4803 case OP_STRLEN:
4804 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4805 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4806 MONO_ADD_INS (cfg->cbb, dest);
4807 break;
4808 default:
4809 break;
4812 g_assert (cfg->cbb == first_bb);
4814 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4815 /* Replace the original instruction with the new code sequence */
4817 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4818 first_bb->code = first_bb->last_ins = NULL;
4819 first_bb->in_count = first_bb->out_count = 0;
4820 cfg->cbb = first_bb;
4822 else
4823 prev = ins;
4827 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4831 typedef union {
4832 guint32 vali [2];
4833 gint64 vall;
4834 double vald;
4835 } DVal;
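/*
 * DVal is used by the soft-float pass below to reinterpret the bits of a double as a
 * 64 bit integer, so that e.g. an OP_R8CONST loading 1.5 can be lowered to an
 * OP_I8CONST of 0x3FF8000000000000 (the IEEE 754 bit pattern of 1.5) held in the
 * integer vreg pair that represents the fp value.
 */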
4837 #ifdef MONO_ARCH_SOFT_FLOAT
4840 * mono_decompose_soft_float:
4842 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4843 * similar to long support on 32 bit platforms. 32 bit float values require special
4844 * handling when used as locals, arguments, and in calls.
4845 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4847 void
4848 mono_decompose_soft_float (MonoCompile *cfg)
4850 MonoBasicBlock *bb, *first_bb;
4853 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4857 * Create a dummy bblock and emit code into it so we can use the normal
4858 * code generation macros.
4860 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4861 first_bb = cfg->cbb;
4863 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4864 MonoInst *ins;
4865 MonoInst *prev = NULL;
4866 gboolean restart;
4868 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4870 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4871 restart = TRUE;
4873 while (restart) {
4874 restart = FALSE;
4876 for (ins = bb->code; ins; ins = ins->next) {
4877 const char *spec = INS_INFO (ins->opcode);
4879 /* Most fp operations are handled automatically by opcode emulation */
4881 switch (ins->opcode) {
4882 case OP_R8CONST: {
4883 DVal d;
4884 d.vald = *(double*)ins->inst_p0;
4885 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4886 break;
4888 case OP_R4CONST: {
4889 DVal d;
4890 /* We load the r8 value */
4891 d.vald = *(float*)ins->inst_p0;
4892 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4893 break;
4895 case OP_FMOVE:
4896 ins->opcode = OP_LMOVE;
4897 break;
4898 case OP_FGETLOW32:
4899 ins->opcode = OP_MOVE;
4900 ins->sreg1 = ins->sreg1 + 1;
4901 break;
4902 case OP_FGETHIGH32:
4903 ins->opcode = OP_MOVE;
4904 ins->sreg1 = ins->sreg1 + 2;
4905 break;
4906 case OP_SETFRET: {
4907 int reg = ins->sreg1;
4909 ins->opcode = OP_SETLRET;
4910 ins->dreg = -1;
4911 ins->sreg1 = reg + 1;
4912 ins->sreg2 = reg + 2;
4913 break;
4915 case OP_LOADR8_MEMBASE:
4916 ins->opcode = OP_LOADI8_MEMBASE;
4917 break;
4918 case OP_STORER8_MEMBASE_REG:
4919 ins->opcode = OP_STOREI8_MEMBASE_REG;
4920 break;
4921 case OP_STORER4_MEMBASE_REG: {
4922 MonoInst *iargs [2];
4923 int addr_reg;
4925 /* Arg 1 is the double value */
4926 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4927 iargs [0]->dreg = ins->sreg1;
4929 /* Arg 2 is the address to store to */
4930 addr_reg = mono_alloc_preg (cfg);
4931 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4932 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4933 restart = TRUE;
4934 break;
4936 case OP_LOADR4_MEMBASE: {
4937 MonoInst *iargs [1];
4938 MonoInst *conv;
4939 int addr_reg;
4941 addr_reg = mono_alloc_preg (cfg);
4942 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4943 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4944 conv->dreg = ins->dreg;
4945 break;
4947 case OP_FCALL:
4948 case OP_FCALL_REG:
4949 case OP_FCALL_MEMBASE: {
4950 MonoCallInst *call = (MonoCallInst*)ins;
4951 if (call->signature->ret->type == MONO_TYPE_R4) {
4952 MonoCallInst *call2;
4953 MonoInst *iargs [1];
4954 MonoInst *conv;
4956 /* Convert the call into a call returning an int */
4957 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4958 memcpy (call2, call, sizeof (MonoCallInst));
4959 switch (ins->opcode) {
4960 case OP_FCALL:
4961 call2->inst.opcode = OP_CALL;
4962 break;
4963 case OP_FCALL_REG:
4964 call2->inst.opcode = OP_CALL_REG;
4965 break;
4966 case OP_FCALL_MEMBASE:
4967 call2->inst.opcode = OP_CALL_MEMBASE;
4968 break;
4969 default:
4970 g_assert_not_reached ();
4972 call2->inst.dreg = mono_alloc_ireg (cfg);
4973 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4975 /* FIXME: Optimize this */
4977 /* Emit an r4->r8 conversion */
4978 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4979 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4980 conv->dreg = ins->dreg;
4981 } else {
4982 switch (ins->opcode) {
4983 case OP_FCALL:
4984 ins->opcode = OP_LCALL;
4985 break;
4986 case OP_FCALL_REG:
4987 ins->opcode = OP_LCALL_REG;
4988 break;
4989 case OP_FCALL_MEMBASE:
4990 ins->opcode = OP_LCALL_MEMBASE;
4991 break;
4992 default:
4993 g_assert_not_reached ();
4996 break;
4998 case OP_FCOMPARE: {
4999 MonoJitICallInfo *info;
5000 MonoInst *iargs [2];
5001 MonoInst *call, *cmp, *br;
5003 /* Convert fcompare+fbcc to icall+icompare+beq */
5005 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5006 g_assert (info);
5008 /* Create dummy MonoInst's for the arguments */
5009 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5010 iargs [0]->dreg = ins->sreg1;
5011 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5012 iargs [1]->dreg = ins->sreg2;
5014 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5016 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5017 cmp->sreg1 = call->dreg;
5018 cmp->inst_imm = 0;
5019 MONO_ADD_INS (cfg->cbb, cmp);
5021 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5022 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5023 br->inst_true_bb = ins->next->inst_true_bb;
5024 br->inst_false_bb = ins->next->inst_false_bb;
5025 MONO_ADD_INS (cfg->cbb, br);
5027 /* The call sequence might include fp ins */
5028 restart = TRUE;
5030 /* Skip fbcc or fccc */
5031 NULLIFY_INS (ins->next);
5032 break;
5034 case OP_FCEQ:
5035 case OP_FCGT:
5036 case OP_FCGT_UN:
5037 case OP_FCLT:
5038 case OP_FCLT_UN: {
5039 MonoJitICallInfo *info;
5040 MonoInst *iargs [2];
5041 MonoInst *call;
5043 /* Convert fccc to icall+icompare+iceq */
5045 info = mono_find_jit_opcode_emulation (ins->opcode);
5046 g_assert (info);
5048 /* Create dummy MonoInst's for the arguments */
5049 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5050 iargs [0]->dreg = ins->sreg1;
5051 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5052 iargs [1]->dreg = ins->sreg2;
5054 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5057 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5059 /* The call sequence might include fp ins */
5060 restart = TRUE;
5061 break;
5063 case OP_CKFINITE: {
5064 MonoInst *iargs [2];
5065 MonoInst *call, *cmp;
5067 /* Convert to icall+icompare+cond_exc+move */
5069 /* Create dummy MonoInst's for the arguments */
5070 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5071 iargs [0]->dreg = ins->sreg1;
5073 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5075 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5076 cmp->sreg1 = call->dreg;
5077 cmp->inst_imm = 1;
5078 MONO_ADD_INS (cfg->cbb, cmp);
5080 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5082 /* Do the assignment if the value is finite */
5083 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
5085 restart = TRUE;
5086 break;
5088 default:
5089 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5090 mono_print_ins (ins);
5091 g_assert_not_reached ();
5093 break;
5096 g_assert (cfg->cbb == first_bb);
5098 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5099 /* Replace the original instruction with the new code sequence */
5101 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5102 first_bb->code = first_bb->last_ins = NULL;
5103 first_bb->in_count = first_bb->out_count = 0;
5104 cfg->cbb = first_bb;
5106 else
5107 prev = ins;
5111 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
5114 mono_decompose_long_opts (cfg);
5117 #endif
5119 static void
5120 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5122 MonoInst *ins;
5123 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5124 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5125 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5126 /* Optimize reg-reg moves away */
5128 * Can't optimize other opcodes, since sp[0] might point to
5129 * the last ins of a decomposed opcode.
5131 sp [0]->dreg = (cfg)->locals [n]->dreg;
5132 } else {
5133 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5138 * ldloca inhibits many optimizations so try to get rid of it in common
5139 * cases.
5141 static inline unsigned char *
5142 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5144 int local, token;
5145 MonoClass *klass;
5147 if (size == 1) {
5148 local = ip [1];
5149 ip += 2;
5150 } else {
5151 local = read16 (ip + 2);
5152 ip += 4;
5155 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5156 gboolean skip = FALSE;
5158 /* From the INITOBJ case */
5159 token = read32 (ip + 2);
5160 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5161 CHECK_TYPELOAD (klass);
5162 if (generic_class_is_reference_type (cfg, klass)) {
5163 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5164 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5165 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5166 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5167 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5168 } else {
5169 skip = TRUE;
5172 if (!skip)
5173 return ip + 6;
5175 load_error:
5176 return NULL;
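/*
 * For illustration: the pattern handled above is the common
 *
 *   ldloca.s  V
 *   initobj   SomeValueType
 *
 * pair, which is rewritten into a direct OP_VZERO (or a NULL store for reference
 * types) on the local itself, so the address of the local never has to be taken and
 * the caller can skip past the initobj.
 */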
5179 static gboolean
5180 is_exception_class (MonoClass *class)
5182 while (class) {
5183 if (class == mono_defaults.exception_class)
5184 return TRUE;
5185 class = class->parent;
5187 return FALSE;
5191 * mono_method_to_ir:
5193 * Translate the .NET IL into linear IR.
5194 */
5195 int
5196 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5197 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5198 guint inline_offset, gboolean is_virtual_call)
5200 MonoInst *ins, **sp, **stack_start;
5201 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5202 MonoMethod *cmethod, *method_definition;
5203 MonoInst **arg_array;
5204 MonoMethodHeader *header;
5205 MonoImage *image;
5206 guint32 token, ins_flag;
5207 MonoClass *klass;
5208 MonoClass *constrained_call = NULL;
5209 unsigned char *ip, *end, *target, *err_pos;
5210 static double r8_0 = 0.0;
5211 MonoMethodSignature *sig;
5212 MonoGenericContext *generic_context = NULL;
5213 MonoGenericContainer *generic_container = NULL;
5214 MonoType **param_types;
5215 int i, n, start_new_bblock, dreg;
5216 int num_calls = 0, inline_costs = 0;
5217 int breakpoint_id = 0;
5218 guint num_args;
5219 MonoBoolean security, pinvoke;
5220 MonoSecurityManager* secman = NULL;
5221 MonoDeclSecurityActions actions;
5222 GSList *class_inits = NULL;
5223 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5224 int context_used;
5226 /* serialization and xdomain stuff may need access to private fields and methods */
5227 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5228 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5229 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5230 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5231 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5232 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5234 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5236 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5237 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5238 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5239 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5241 image = method->klass->image;
5242 header = mono_method_get_header (method);
5243 generic_container = mono_method_get_generic_container (method);
5244 sig = mono_method_signature (method);
5245 num_args = sig->hasthis + sig->param_count;
5246 ip = (unsigned char*)header->code;
5247 cfg->cil_start = ip;
5248 end = ip + header->code_size;
5249 mono_jit_stats.cil_code_size += header->code_size;
5251 method_definition = method;
5252 while (method_definition->is_inflated) {
5253 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5254 method_definition = imethod->declaring;
5257 /* SkipVerification is not allowed if core-clr is enabled */
5258 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5259 dont_verify = TRUE;
5260 dont_verify_stloc = TRUE;
5263 if (!dont_verify && mini_method_verify (cfg, method_definition))
5264 goto exception_exit;
5266 if (mono_debug_using_mono_debugger ())
5267 cfg->keep_cil_nops = TRUE;
5269 if (sig->is_inflated)
5270 generic_context = mono_method_get_context (method);
5271 else if (generic_container)
5272 generic_context = &generic_container->context;
5273 cfg->generic_context = generic_context;
5275 if (!cfg->generic_sharing_context)
5276 g_assert (!sig->has_type_parameters);
5278 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5279 g_assert (method->is_inflated);
5280 g_assert (mono_method_get_context (method)->method_inst);
5282 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5283 g_assert (sig->generic_param_count);
5285 if (cfg->method == method) {
5286 cfg->real_offset = 0;
5287 } else {
5288 cfg->real_offset = inline_offset;
5291 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5292 cfg->cil_offset_to_bb_len = header->code_size;
5294 cfg->current_method = method;
5296 if (cfg->verbose_level > 2)
5297 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5299 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5300 if (sig->hasthis)
5301 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5302 for (n = 0; n < sig->param_count; ++n)
5303 param_types [n + sig->hasthis] = sig->params [n];
5304 cfg->arg_types = param_types;
5306 dont_inline = g_list_prepend (dont_inline, method);
5307 if (cfg->method == method) {
5309 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5310 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5312 /* ENTRY BLOCK */
5313 NEW_BBLOCK (cfg, start_bblock);
5314 cfg->bb_entry = start_bblock;
5315 start_bblock->cil_code = NULL;
5316 start_bblock->cil_length = 0;
5318 /* EXIT BLOCK */
5319 NEW_BBLOCK (cfg, end_bblock);
5320 cfg->bb_exit = end_bblock;
5321 end_bblock->cil_code = NULL;
5322 end_bblock->cil_length = 0;
5323 g_assert (cfg->num_bblocks == 2);
5325 arg_array = cfg->args;
5327 if (header->num_clauses) {
5328 cfg->spvars = g_hash_table_new (NULL, NULL);
5329 cfg->exvars = g_hash_table_new (NULL, NULL);
5331 /* handle exception clauses */
5332 for (i = 0; i < header->num_clauses; ++i) {
5333 MonoBasicBlock *try_bb;
5334 MonoExceptionClause *clause = &header->clauses [i];
5335 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5336 try_bb->real_offset = clause->try_offset;
5337 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5338 tblock->real_offset = clause->handler_offset;
5339 tblock->flags |= BB_EXCEPTION_HANDLER;
5341 link_bblock (cfg, try_bb, tblock);
5343 if (*(ip + clause->handler_offset) == CEE_POP)
5344 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5346 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5347 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5348 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5349 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5350 MONO_ADD_INS (tblock, ins);
5352 /* todo: is a fault block unsafe to optimize? */
5353 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5354 tblock->flags |= BB_EXCEPTION_UNSAFE;
5358 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5359 while (p < end) {
5360 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5361 }*/
5362 /* catch and filter blocks get the exception object on the stack */
5363 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5364 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5365 MonoInst *dummy_use;
5367 /* mostly like handle_stack_args (), but just sets the input args */
5368 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5369 tblock->in_scount = 1;
5370 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5371 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5373 /*
5374 * Add a dummy use for the exvar so its liveness info will be
5375 * correct.
5376 */
5377 cfg->cbb = tblock;
5378 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5380 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5381 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5382 tblock->flags |= BB_EXCEPTION_HANDLER;
5383 tblock->real_offset = clause->data.filter_offset;
5384 tblock->in_scount = 1;
5385 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5386 /* The filter block shares the exvar with the handler block */
5387 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5388 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5389 MONO_ADD_INS (tblock, ins);
5393 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5394 clause->data.catch_class &&
5395 cfg->generic_sharing_context &&
5396 mono_class_check_context_used (clause->data.catch_class)) {
5397 /*
5398 * In shared generic code with catch
5399 * clauses containing type variables
5400 * the exception handling code has to
5401 * be able to get to the rgctx.
5402 * Therefore we have to make sure that
5403 * the vtable/mrgctx argument (for
5404 * static or generic methods) or the
5405 * "this" argument (for non-static
5406 * methods) is live.
5407 */
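/*
 * Illustrative annotation (not part of the original source): in a shared
 * instantiation of something like
 *
 *     void Foo<T> () { try { ... } catch (MyException<T>) { ... } }
 *
 * the handler must resolve MyException<T> at run time, which requires
 * reaching the rgctx through the vtable/mrgctx argument or "this".
 */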
5408 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5409 mini_method_get_context (method)->method_inst ||
5410 method->klass->valuetype) {
5411 mono_get_vtable_var (cfg);
5412 } else {
5413 MonoInst *dummy_use;
5415 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5419 } else {
5420 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5421 cfg->cbb = start_bblock;
5422 cfg->args = arg_array;
5423 mono_save_args (cfg, sig, inline_args);
5426 /* FIRST CODE BLOCK */
5427 NEW_BBLOCK (cfg, bblock);
5428 bblock->cil_code = ip;
5429 cfg->cbb = bblock;
5430 cfg->ip = ip;
5432 ADD_BBLOCK (cfg, bblock);
5434 if (cfg->method == method) {
5435 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5436 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5437 MONO_INST_NEW (cfg, ins, OP_BREAK);
5438 MONO_ADD_INS (bblock, ins);
5442 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5443 secman = mono_security_manager_get_methods ();
5445 security = (secman && mono_method_has_declsec (method));
5446 /* at this point having security doesn't mean we have any code to generate */
5447 if (security && (cfg->method == method)) {
5448 /* Only Demand, NonCasDemand and DemandChoice require code generation.
5449 * And we do not want to enter the next section (with allocation) if we
5450 * have nothing to generate */
5451 security = mono_declsec_get_demands (method, &actions);
5454 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5455 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5456 if (pinvoke) {
5457 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5458 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5459 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5461 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5462 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5463 pinvoke = FALSE;
5465 if (custom)
5466 mono_custom_attrs_free (custom);
5468 if (pinvoke) {
5469 custom = mono_custom_attrs_from_class (wrapped->klass);
5470 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5471 pinvoke = FALSE;
5473 if (custom)
5474 mono_custom_attrs_free (custom);
5476 } else {
5477 /* not a P/Invoke after all */
5478 pinvoke = FALSE;
5482 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5483 /* we use a separate basic block for the initialization code */
5484 NEW_BBLOCK (cfg, init_localsbb);
5485 cfg->bb_init = init_localsbb;
5486 init_localsbb->real_offset = cfg->real_offset;
5487 start_bblock->next_bb = init_localsbb;
5488 init_localsbb->next_bb = bblock;
5489 link_bblock (cfg, start_bblock, init_localsbb);
5490 link_bblock (cfg, init_localsbb, bblock);
5492 cfg->cbb = init_localsbb;
5493 } else {
5494 start_bblock->next_bb = bblock;
5495 link_bblock (cfg, start_bblock, bblock);
5498 /* at this point we know, if security is TRUE, that some code needs to be generated */
5499 if (security && (cfg->method == method)) {
5500 MonoInst *args [2];
5502 mono_jit_stats.cas_demand_generation++;
5504 if (actions.demand.blob) {
5505 /* Add code for SecurityAction.Demand */
5506 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5507 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5508 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5509 mono_emit_method_call (cfg, secman->demand, args, NULL);
5511 if (actions.noncasdemand.blob) {
5512 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5513 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5514 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5515 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5516 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5517 mono_emit_method_call (cfg, secman->demand, args, NULL);
5519 if (actions.demandchoice.blob) {
5520 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5521 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5522 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5523 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5524 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5528 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5529 if (pinvoke) {
5530 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5533 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5534 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5535 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5536 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5537 if (!(method->klass && method->klass->image &&
5538 mono_security_core_clr_is_platform_image (method->klass->image))) {
5539 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5545 if (header->code_size == 0)
5546 UNVERIFIED;
5548 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5549 ip = err_pos;
5550 UNVERIFIED;
5553 if (cfg->method == method)
5554 mono_debug_init_method (cfg, bblock, breakpoint_id);
5556 for (n = 0; n < header->num_locals; ++n) {
5557 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5558 UNVERIFIED;
5560 class_inits = NULL;
5562 /* We force the vtable variable here for all shared methods
5563 for the possibility that they might show up in a stack
5564 trace where their exact instantiation is needed. */
5565 if (cfg->generic_sharing_context && method == cfg->method) {
5566 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5567 mini_method_get_context (method)->method_inst ||
5568 method->klass->valuetype) {
5569 mono_get_vtable_var (cfg);
5570 } else {
5571 /* FIXME: Is there a better way to do this?
5572 We need the variable live for the duration
5573 of the whole method. */
5574 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5578 /* add a check for this != NULL to inlined methods */
5579 if (is_virtual_call) {
5580 MonoInst *arg_ins;
5582 NEW_ARGLOAD (cfg, arg_ins, 0);
5583 MONO_ADD_INS (cfg->cbb, arg_ins);
5584 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5585 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5586 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5589 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5590 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5592 ins_flag = 0;
5593 start_new_bblock = 0;
5594 cfg->cbb = bblock;
5595 while (ip < end) {
5597 if (cfg->method == method)
5598 cfg->real_offset = ip - header->code;
5599 else
5600 cfg->real_offset = inline_offset;
5601 cfg->ip = ip;
5603 context_used = 0;
5605 if (start_new_bblock) {
5606 bblock->cil_length = ip - bblock->cil_code;
5607 if (start_new_bblock == 2) {
5608 g_assert (ip == tblock->cil_code);
5609 } else {
5610 GET_BBLOCK (cfg, tblock, ip);
5612 bblock->next_bb = tblock;
5613 bblock = tblock;
5614 cfg->cbb = bblock;
5615 start_new_bblock = 0;
5616 for (i = 0; i < bblock->in_scount; ++i) {
5617 if (cfg->verbose_level > 3)
5618 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5619 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5620 *sp++ = ins;
5622 if (class_inits)
5623 g_slist_free (class_inits);
5624 class_inits = NULL;
5625 } else {
5626 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5627 link_bblock (cfg, bblock, tblock);
5628 if (sp != stack_start) {
5629 handle_stack_args (cfg, stack_start, sp - stack_start);
5630 sp = stack_start;
5631 CHECK_UNVERIFIABLE (cfg);
5633 bblock->next_bb = tblock;
5634 bblock = tblock;
5635 cfg->cbb = bblock;
5636 for (i = 0; i < bblock->in_scount; ++i) {
5637 if (cfg->verbose_level > 3)
5638 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5639 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5640 *sp++ = ins;
5642 g_slist_free (class_inits);
5643 class_inits = NULL;
5647 bblock->real_offset = cfg->real_offset;
5649 if ((cfg->method == method) && cfg->coverage_info) {
5650 guint32 cil_offset = ip - header->code;
5651 cfg->coverage_info->data [cil_offset].cil_code = ip;
5653 /* TODO: Use an increment here */
5654 #if defined(__i386__)
5655 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5656 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5657 ins->inst_imm = 1;
5658 MONO_ADD_INS (cfg->cbb, ins);
5659 #else
5660 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5662 #endif
5665 if (cfg->verbose_level > 3)
5666 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5668 switch (*ip) {
5669 case CEE_NOP:
5670 if (cfg->keep_cil_nops)
5671 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5672 else
5673 MONO_INST_NEW (cfg, ins, OP_NOP);
5674 ip++;
5675 MONO_ADD_INS (bblock, ins);
5676 break;
5677 case CEE_BREAK:
5678 MONO_INST_NEW (cfg, ins, OP_BREAK);
5679 ip++;
5680 MONO_ADD_INS (bblock, ins);
5681 break;
5682 case CEE_LDARG_0:
5683 case CEE_LDARG_1:
5684 case CEE_LDARG_2:
5685 case CEE_LDARG_3:
5686 CHECK_STACK_OVF (1);
5687 n = (*ip)-CEE_LDARG_0;
5688 CHECK_ARG (n);
5689 EMIT_NEW_ARGLOAD (cfg, ins, n);
5690 ip++;
5691 *sp++ = ins;
5692 break;
5693 case CEE_LDLOC_0:
5694 case CEE_LDLOC_1:
5695 case CEE_LDLOC_2:
5696 case CEE_LDLOC_3:
5697 CHECK_STACK_OVF (1);
5698 n = (*ip)-CEE_LDLOC_0;
5699 CHECK_LOCAL (n);
5700 EMIT_NEW_LOCLOAD (cfg, ins, n);
5701 ip++;
5702 *sp++ = ins;
5703 break;
5704 case CEE_STLOC_0:
5705 case CEE_STLOC_1:
5706 case CEE_STLOC_2:
5707 case CEE_STLOC_3: {
5708 CHECK_STACK (1);
5709 n = (*ip)-CEE_STLOC_0;
5710 CHECK_LOCAL (n);
5711 --sp;
5712 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5713 UNVERIFIED;
5714 emit_stloc_ir (cfg, sp, header, n);
5715 ++ip;
5716 inline_costs += 1;
5717 break;
5719 case CEE_LDARG_S:
5720 CHECK_OPSIZE (2);
5721 CHECK_STACK_OVF (1);
5722 n = ip [1];
5723 CHECK_ARG (n);
5724 EMIT_NEW_ARGLOAD (cfg, ins, n);
5725 *sp++ = ins;
5726 ip += 2;
5727 break;
5728 case CEE_LDARGA_S:
5729 CHECK_OPSIZE (2);
5730 CHECK_STACK_OVF (1);
5731 n = ip [1];
5732 CHECK_ARG (n);
5733 NEW_ARGLOADA (cfg, ins, n);
5734 MONO_ADD_INS (cfg->cbb, ins);
5735 *sp++ = ins;
5736 ip += 2;
5737 break;
5738 case CEE_STARG_S:
5739 CHECK_OPSIZE (2);
5740 CHECK_STACK (1);
5741 --sp;
5742 n = ip [1];
5743 CHECK_ARG (n);
5744 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5745 UNVERIFIED;
5746 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5747 ip += 2;
5748 break;
5749 case CEE_LDLOC_S:
5750 CHECK_OPSIZE (2);
5751 CHECK_STACK_OVF (1);
5752 n = ip [1];
5753 CHECK_LOCAL (n);
5754 EMIT_NEW_LOCLOAD (cfg, ins, n);
5755 *sp++ = ins;
5756 ip += 2;
5757 break;
5758 case CEE_LDLOCA_S: {
5759 unsigned char *tmp_ip;
5760 CHECK_OPSIZE (2);
5761 CHECK_STACK_OVF (1);
5762 CHECK_LOCAL (ip [1]);
5764 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5765 ip = tmp_ip;
5766 inline_costs += 1;
5767 break;
5770 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5771 *sp++ = ins;
5772 ip += 2;
5773 break;
5775 case CEE_STLOC_S:
5776 CHECK_OPSIZE (2);
5777 CHECK_STACK (1);
5778 --sp;
5779 CHECK_LOCAL (ip [1]);
5780 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5781 UNVERIFIED;
5782 emit_stloc_ir (cfg, sp, header, ip [1]);
5783 ip += 2;
5784 inline_costs += 1;
5785 break;
5786 case CEE_LDNULL:
5787 CHECK_STACK_OVF (1);
5788 EMIT_NEW_PCONST (cfg, ins, NULL);
5789 ins->type = STACK_OBJ;
5790 ++ip;
5791 *sp++ = ins;
5792 break;
5793 case CEE_LDC_I4_M1:
5794 CHECK_STACK_OVF (1);
5795 EMIT_NEW_ICONST (cfg, ins, -1);
5796 ++ip;
5797 *sp++ = ins;
5798 break;
5799 case CEE_LDC_I4_0:
5800 case CEE_LDC_I4_1:
5801 case CEE_LDC_I4_2:
5802 case CEE_LDC_I4_3:
5803 case CEE_LDC_I4_4:
5804 case CEE_LDC_I4_5:
5805 case CEE_LDC_I4_6:
5806 case CEE_LDC_I4_7:
5807 case CEE_LDC_I4_8:
5808 CHECK_STACK_OVF (1);
5809 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5810 ++ip;
5811 *sp++ = ins;
5812 break;
5813 case CEE_LDC_I4_S:
5814 CHECK_OPSIZE (2);
5815 CHECK_STACK_OVF (1);
5816 ++ip;
5817 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5818 ++ip;
5819 *sp++ = ins;
5820 break;
5821 case CEE_LDC_I4:
5822 CHECK_OPSIZE (5);
5823 CHECK_STACK_OVF (1);
5824 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5825 ip += 5;
5826 *sp++ = ins;
5827 break;
5828 case CEE_LDC_I8:
5829 CHECK_OPSIZE (9);
5830 CHECK_STACK_OVF (1);
5831 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5832 ins->type = STACK_I8;
5833 ins->dreg = alloc_dreg (cfg, STACK_I8);
5834 ++ip;
5835 ins->inst_l = (gint64)read64 (ip);
5836 MONO_ADD_INS (bblock, ins);
5837 ip += 8;
5838 *sp++ = ins;
5839 break;
5840 case CEE_LDC_R4: {
5841 float *f;
5842 /* FIXME: we should really allocate this only late in the compilation process */
5843 f = mono_domain_alloc (cfg->domain, sizeof (float));
5844 CHECK_OPSIZE (5);
5845 CHECK_STACK_OVF (1);
5846 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5847 ins->type = STACK_R8;
5848 ins->dreg = alloc_dreg (cfg, STACK_R8);
5849 ++ip;
5850 readr4 (ip, f);
5851 ins->inst_p0 = f;
5852 MONO_ADD_INS (bblock, ins);
5854 ip += 4;
5855 *sp++ = ins;
5856 break;
5858 case CEE_LDC_R8: {
5859 double *d;
5860 /* FIXME: we should really allocate this only late in the compilation process */
5861 d = mono_domain_alloc (cfg->domain, sizeof (double));
5862 CHECK_OPSIZE (9);
5863 CHECK_STACK_OVF (1);
5864 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5865 ins->type = STACK_R8;
5866 ins->dreg = alloc_dreg (cfg, STACK_R8);
5867 ++ip;
5868 readr8 (ip, d);
5869 ins->inst_p0 = d;
5870 MONO_ADD_INS (bblock, ins);
5872 ip += 8;
5873 *sp++ = ins;
5874 break;
5876 case CEE_DUP: {
5877 MonoInst *temp, *store;
5878 CHECK_STACK (1);
5879 CHECK_STACK_OVF (1);
5880 sp--;
5881 ins = *sp;
5883 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5884 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5886 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5887 *sp++ = ins;
5889 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5890 *sp++ = ins;
5892 ++ip;
5893 inline_costs += 2;
5894 break;
5896 case CEE_POP:
5897 CHECK_STACK (1);
5898 ip++;
5899 --sp;
5901 #ifdef __i386__
5902 if (sp [0]->type == STACK_R8)
5903 /* we need to pop the value from the x86 FP stack */
5904 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5905 #endif
5906 break;
5907 case CEE_JMP: {
5908 MonoCallInst *call;
5910 INLINE_FAILURE;
5912 CHECK_OPSIZE (5);
5913 if (stack_start != sp)
5914 UNVERIFIED;
5915 token = read32 (ip + 1);
5916 /* FIXME: check the signature matches */
5917 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5919 if (!cmethod)
5920 goto load_error;
5922 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5923 GENERIC_SHARING_FAILURE (CEE_JMP);
5925 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5926 CHECK_CFG_EXCEPTION;
5928 #ifdef __x86_64__
5930 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5931 int i, n;
5933 /* Handle tail calls similarly to calls */
5934 n = fsig->param_count + fsig->hasthis;
5936 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5937 call->method = cmethod;
5938 call->tail_call = TRUE;
5939 call->signature = mono_method_signature (cmethod);
5940 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5941 call->inst.inst_p0 = cmethod;
5942 for (i = 0; i < n; ++i)
5943 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5945 mono_arch_emit_call (cfg, call);
5946 MONO_ADD_INS (bblock, (MonoInst*)call);
5948 #else
5949 for (i = 0; i < num_args; ++i)
5950 /* Prevent arguments from being optimized away */
5951 arg_array [i]->flags |= MONO_INST_VOLATILE;
5953 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5954 ins = (MonoInst*)call;
5955 ins->inst_p0 = cmethod;
5956 MONO_ADD_INS (bblock, ins);
5957 #endif
5959 ip += 5;
5960 start_new_bblock = 1;
5961 break;
5963 case CEE_CALLI:
5964 case CEE_CALL:
5965 case CEE_CALLVIRT: {
5966 MonoInst *addr = NULL;
5967 MonoMethodSignature *fsig = NULL;
5968 int array_rank = 0;
5969 int virtual = *ip == CEE_CALLVIRT;
5970 int calli = *ip == CEE_CALLI;
5971 gboolean pass_imt_from_rgctx = FALSE;
5972 MonoInst *imt_arg = NULL;
5973 gboolean pass_vtable = FALSE;
5974 gboolean pass_mrgctx = FALSE;
5975 MonoInst *vtable_arg = NULL;
5976 gboolean check_this = FALSE;
5978 CHECK_OPSIZE (5);
5979 token = read32 (ip + 1);
5981 if (calli) {
5982 cmethod = NULL;
5983 CHECK_STACK (1);
5984 --sp;
5985 addr = *sp;
5986 if (method->wrapper_type != MONO_WRAPPER_NONE)
5987 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5988 else
5989 fsig = mono_metadata_parse_signature (image, token);
5991 n = fsig->param_count + fsig->hasthis;
5992 } else {
5993 MonoMethod *cil_method;
5995 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5996 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5997 cil_method = cmethod;
5998 } else if (constrained_call) {
5999 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6000 /*
6001 * This is needed since get_method_constrained can't find
6002 * the method in klass representing a type var.
6003 * The type var is guaranteed to be a reference type in this
6004 * case.
6005 */
6006 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6007 cil_method = cmethod;
6008 g_assert (!cmethod->klass->valuetype);
6009 } else {
6010 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6012 } else {
6013 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6014 cil_method = cmethod;
6017 if (!cmethod)
6018 goto load_error;
6019 if (!dont_verify && !cfg->skip_visibility) {
6020 MonoMethod *target_method = cil_method;
6021 if (method->is_inflated) {
6022 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6024 if (!mono_method_can_access_method (method_definition, target_method) &&
6025 !mono_method_can_access_method (method, cil_method))
6026 METHOD_ACCESS_FAILURE;
6029 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6030 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6032 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6033 /* MS.NET seems to silently convert this to a callvirt */
6034 virtual = 1;
6036 if (!cmethod->klass->inited)
6037 if (!mono_class_init (cmethod->klass))
6038 goto load_error;
6040 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6041 mini_class_is_system_array (cmethod->klass)) {
6042 array_rank = cmethod->klass->rank;
6043 fsig = mono_method_signature (cmethod);
6044 } else {
6045 if (mono_method_signature (cmethod)->pinvoke) {
6046 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6047 check_for_pending_exc, FALSE);
6048 fsig = mono_method_signature (wrapper);
6049 } else if (constrained_call) {
6050 fsig = mono_method_signature (cmethod);
6051 } else {
6052 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6056 mono_save_token_info (cfg, image, token, cil_method);
6058 n = fsig->param_count + fsig->hasthis;
6060 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6061 if (check_linkdemand (cfg, method, cmethod))
6062 INLINE_FAILURE;
6063 CHECK_CFG_EXCEPTION;
6066 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6067 g_assert_not_reached ();
6070 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6071 UNVERIFIED;
6073 if (!cfg->generic_sharing_context && cmethod)
6074 g_assert (!mono_method_check_context_used (cmethod));
6076 CHECK_STACK (n);
6078 //g_assert (!virtual || fsig->hasthis);
6080 sp -= n;
6082 if (constrained_call) {
6083 /*
6084 * We have the `constrained.' prefix opcode.
6085 */
6086 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6087 int dreg;
6089 /*
6090 * The type parameter is instantiated as a valuetype,
6091 * but that type doesn't override the method we're
6092 * calling, so we need to box `this'.
6093 */
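/*
 * Illustrative annotation (not part of the original source):
 *
 *     constrained. SomeStruct
 *     callvirt     instance string object::ToString ()
 *
 * where SomeStruct does not override ToString (): the managed pointer on
 * the stack is dereferenced into a value and boxed, so the inherited
 * Object.ToString () can be invoked on the box.
 */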
6094 dreg = alloc_dreg (cfg, STACK_VTYPE);
6095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6096 ins->klass = constrained_call;
6097 sp [0] = handle_box (cfg, ins, constrained_call);
6098 } else if (!constrained_call->valuetype) {
6099 int dreg = alloc_preg (cfg);
6101 /*
6102 * The type parameter is instantiated as a reference
6103 * type. We have a managed pointer on the stack, so
6104 * we need to dereference it here.
6105 */
6106 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6107 ins->type = STACK_OBJ;
6108 sp [0] = ins;
6109 } else if (cmethod->klass->valuetype)
6110 virtual = 0;
6111 constrained_call = NULL;
6114 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6115 UNVERIFIED;
6118 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6119 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6120 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6121 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6122 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6124 /*
6125 * Pass vtable iff target method might
6126 * be shared, which means that sharing
6127 * is enabled for its class and its
6128 * context is sharable (and it's not a
6129 * generic method).
6130 */
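/*
 * Illustrative annotation (not part of the original source): a call to a
 * static method of a generic class, e.g. Cache<string>.Clear (), may end
 * up in the shared implementation of Cache<T>.Clear (), which cannot
 * recover T on its own; passing the Cache<string> vtable as a hidden
 * argument gives the callee its concrete instantiation.
 */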
6131 if (sharing_enabled && context_sharable &&
6132 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6133 pass_vtable = TRUE;
6136 if (cmethod && mini_method_get_context (cmethod) &&
6137 mini_method_get_context (cmethod)->method_inst) {
6138 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6139 MonoGenericContext *context = mini_method_get_context (cmethod);
6140 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6142 g_assert (!pass_vtable);
6144 if (sharing_enabled && context_sharable)
6145 pass_mrgctx = TRUE;
6148 if (cfg->generic_sharing_context && cmethod) {
6149 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6151 context_used = mono_method_check_context_used (cmethod);
6153 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6154 /* Generic method interface
6155 calls are resolved via a
6156 helper function and don't
6157 need an imt. */
6158 if (!cmethod_context || !cmethod_context->method_inst)
6159 pass_imt_from_rgctx = TRUE;
6162 /*
6163 * If a shared method calls another
6164 * shared method then the caller must
6165 * have a generic sharing context
6166 * because the magic trampoline
6167 * requires it. FIXME: We shouldn't
6168 * have to force the vtable/mrgctx
6169 * variable here. Instead there
6170 * should be a flag in the cfg to
6171 * request a generic sharing context.
6172 */
6173 if (context_used &&
6174 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6175 mono_get_vtable_var (cfg);
6178 if (pass_vtable) {
6179 if (context_used) {
6180 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6181 } else {
6182 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6184 CHECK_TYPELOAD (cmethod->klass);
6185 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6189 if (pass_mrgctx) {
6190 g_assert (!vtable_arg);
6192 if (context_used) {
6193 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6194 } else {
6195 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6198 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6199 MONO_METHOD_IS_FINAL (cmethod)) {
6200 if (virtual)
6201 check_this = TRUE;
6202 virtual = 0;
6206 if (pass_imt_from_rgctx) {
6207 g_assert (!pass_vtable);
6208 g_assert (cmethod);
6210 imt_arg = emit_get_rgctx_method (cfg, context_used,
6211 cmethod, MONO_RGCTX_INFO_METHOD);
6214 if (check_this) {
6215 MonoInst *check;
6217 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6218 check->sreg1 = sp [0]->dreg;
6219 MONO_ADD_INS (cfg->cbb, check);
6222 /* Calling virtual generic methods */
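/*
 * Illustrative annotation (not part of the original source): a call such
 * as
 *
 *     callvirt instance void C::Accept<int32> (...)
 *
 * cannot use an ordinary vtable slot, since a slot would be needed per
 * instantiation of Accept<T>. It is dispatched either through a
 * generalized IMT thunk (passing the inflated method as the IMT
 * argument) or by asking mono_helper_compile_generic_method for the code
 * address and doing an indirect call.
 */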
6223 if (cmethod && virtual &&
6224 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6225 !(MONO_METHOD_IS_FINAL (cmethod) &&
6226 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6227 mono_method_signature (cmethod)->generic_param_count) {
6228 MonoInst *this_temp, *this_arg_temp, *store;
6229 MonoInst *iargs [4];
6231 g_assert (mono_method_signature (cmethod)->is_inflated);
6233 /* Prevent inlining of methods that contain indirect calls */
6234 INLINE_FAILURE;
6236 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6237 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6238 g_assert (!imt_arg);
6239 if (context_used) {
6240 imt_arg = emit_get_rgctx_method (cfg, context_used,
6241 cmethod, MONO_RGCTX_INFO_METHOD);
6243 } else {
6244 g_assert (cmethod->is_inflated);
6245 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6247 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6248 } else
6249 #endif
6251 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6252 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6253 MONO_ADD_INS (bblock, store);
6255 /* FIXME: This should be a managed pointer */
6256 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6258 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6259 if (context_used) {
6260 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6261 cmethod, MONO_RGCTX_INFO_METHOD);
6262 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6263 addr = mono_emit_jit_icall (cfg,
6264 mono_helper_compile_generic_method, iargs);
6265 } else {
6266 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6267 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6268 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6271 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6273 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6276 if (!MONO_TYPE_IS_VOID (fsig->ret))
6277 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6279 ip += 5;
6280 ins_flag = 0;
6281 break;
6284 /* Tail prefix */
6285 /* FIXME: runtime generic context pointer for jumps? */
6286 /* FIXME: handle this for generic sharing eventually */
6287 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6288 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod))) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret)) {
6289 MonoCallInst *call;
6291 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6292 INLINE_FAILURE;
6294 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6295 call->tail_call = TRUE;
6296 call->method = cmethod;
6297 call->signature = mono_method_signature (cmethod);
6299 #ifdef __x86_64__
6300 /* Handle tail calls similarly to calls */
6301 call->inst.opcode = OP_TAILCALL;
6302 call->args = sp;
6303 mono_arch_emit_call (cfg, call);
6304 #else
6305 /*
6306 * We implement tail calls by storing the actual arguments into the
6307 * argument variables, then emitting a CEE_JMP.
6308 */
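/*
 * Illustrative annotation (not part of the original source): for
 *
 *     tail.
 *     call void Foo::Bar (int32)
 *     ret
 *
 * this path stores the outgoing values back into the argument variables
 * (marked MONO_INST_VOLATILE so they are not register allocated) and
 * then emits the call as OP_JMP, reusing the current frame.
 */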
6309 for (i = 0; i < n; ++i) {
6310 /* Prevent argument from being register allocated */
6311 arg_array [i]->flags |= MONO_INST_VOLATILE;
6312 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6314 #endif
6316 ins = (MonoInst*)call;
6317 ins->inst_p0 = cmethod;
6318 ins->inst_p1 = arg_array [0];
6319 MONO_ADD_INS (bblock, ins);
6320 link_bblock (cfg, bblock, end_bblock);
6321 start_new_bblock = 1;
6322 /* skip CEE_RET as well */
6323 ip += 6;
6324 ins_flag = 0;
6325 break;
6328 /* Conversion to a JIT intrinsic */
6329 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6330 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6331 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6332 *sp = ins;
6333 sp++;
6336 ip += 5;
6337 ins_flag = 0;
6338 break;
6341 /* Inlining */
6342 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6343 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6344 mono_method_check_inlining (cfg, cmethod) &&
6345 !g_list_find (dont_inline, cmethod)) {
6346 int costs;
6347 gboolean allways = FALSE;
6349 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6350 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6351 /* Prevent inlining of methods that call wrappers */
6352 INLINE_FAILURE;
6353 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6354 allways = TRUE;
6357 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6358 ip += 5;
6359 cfg->real_offset += 5;
6360 bblock = cfg->cbb;
6362 if (!MONO_TYPE_IS_VOID (fsig->ret))
6363 /* *sp is already set by inline_method */
6364 sp++;
6366 inline_costs += costs;
6367 ins_flag = 0;
6368 break;
6372 inline_costs += 10 * num_calls++;
6374 /* Tail recursion elimination */
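/*
 * Illustrative annotation (not part of the original source): a self-call
 * in tail position, i.e.
 *
 *     call int32 Foo::Sum (int32, int32)
 *     ret
 *
 * inside Foo::Sum itself, is rewritten into argument stores plus an
 * OP_BR back to the first basic block, turning the recursion into a
 * loop. The transformation is skipped if any parameter is a vtype.
 */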
6375 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6376 gboolean has_vtargs = FALSE;
6377 int i;
6379 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6380 INLINE_FAILURE;
6382 /* keep it simple */
6383 for (i = fsig->param_count - 1; i >= 0; i--) {
6384 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6385 has_vtargs = TRUE;
6388 if (!has_vtargs) {
6389 for (i = 0; i < n; ++i)
6390 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6391 MONO_INST_NEW (cfg, ins, OP_BR);
6392 MONO_ADD_INS (bblock, ins);
6393 tblock = start_bblock->out_bb [0];
6394 link_bblock (cfg, bblock, tblock);
6395 ins->inst_target_bb = tblock;
6396 start_new_bblock = 1;
6398 /* skip the CEE_RET, too */
6399 if (ip_in_bb (cfg, bblock, ip + 5))
6400 ip += 6;
6401 else
6402 ip += 5;
6404 ins_flag = 0;
6405 break;
6409 /* Generic sharing */
6410 /* FIXME: only do this for generic methods if
6411 they are not shared! */
6412 if (context_used && !imt_arg && !array_rank &&
6413 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6414 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6415 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6416 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6417 INLINE_FAILURE;
6419 g_assert (cfg->generic_sharing_context && cmethod);
6420 g_assert (!addr);
6422 /*
6423 * We are compiling a call to a
6424 * generic method from shared code,
6425 * which means that we have to look up
6426 * the method in the rgctx and do an
6427 * indirect call.
6428 */
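/*
 * Illustrative annotation (not part of the original source): when a
 * shared Foo<T> () calls Bar<T> (), the code address of the concrete
 * instantiation is not known at JIT time; it is looked up in the rgctx
 * (MONO_RGCTX_INFO_GENERIC_METHOD_CODE) and the call goes through the
 * indirect-call path below.
 */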
6429 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6432 /* Indirect calls */
6433 if (addr) {
6434 g_assert (!imt_arg);
6436 if (*ip == CEE_CALL)
6437 g_assert (context_used);
6438 else if (*ip == CEE_CALLI)
6439 g_assert (!vtable_arg);
6440 else
6441 /* FIXME: what the hell is this??? */
6442 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6443 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6445 /* Prevent inlining of methods with indirect calls */
6446 INLINE_FAILURE;
6448 if (vtable_arg) {
6449 #ifdef MONO_ARCH_RGCTX_REG
6450 MonoCallInst *call;
6451 int rgctx_reg = mono_alloc_preg (cfg);
6453 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6454 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6455 call = (MonoCallInst*)ins;
6456 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6457 cfg->uses_rgctx_reg = TRUE;
6458 call->rgctx_reg = TRUE;
6459 #else
6460 NOT_IMPLEMENTED;
6461 #endif
6462 } else {
6463 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6464 /*
6465 * Instead of emitting an indirect call, emit a direct call
6466 * with the contents of the aotconst as the patch info.
6467 */
6468 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6469 NULLIFY_INS (addr);
6470 } else {
6471 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6474 if (!MONO_TYPE_IS_VOID (fsig->ret))
6475 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6477 ip += 5;
6478 ins_flag = 0;
6479 break;
6482 /* Array methods */
6483 if (array_rank) {
6484 MonoInst *addr;
6486 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6487 if (sp [fsig->param_count]->type == STACK_OBJ) {
6488 MonoInst *iargs [2];
6490 iargs [0] = sp [0];
6491 iargs [1] = sp [fsig->param_count];
6493 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6496 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6497 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6498 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6499 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6501 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6503 *sp++ = ins;
6504 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6505 if (!cmethod->klass->element_class->valuetype && !readonly)
6506 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6508 readonly = FALSE;
6509 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6510 *sp++ = addr;
6511 } else {
6512 g_assert_not_reached ();
6515 ip += 5;
6516 ins_flag = 0;
6517 break;
6520 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6521 if (ins) {
6522 if (!MONO_TYPE_IS_VOID (fsig->ret))
6523 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6525 ip += 5;
6526 ins_flag = 0;
6527 break;
6530 /* Common call */
6531 INLINE_FAILURE;
6532 if (vtable_arg) {
6533 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6534 NULL, vtable_arg);
6535 } else if (imt_arg) {
6536 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6537 } else {
6538 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6541 if (!MONO_TYPE_IS_VOID (fsig->ret))
6542 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6544 ip += 5;
6545 ins_flag = 0;
6546 break;
6548 case CEE_RET:
6549 if (cfg->method != method) {
6550 /* return from inlined method */
6551 /*
6552 * If in_count == 0, that means the ret is unreachable due to
6553 * being preceded by a throw. In that case, inline_method () will
6554 * handle setting the return value
6555 * (test case: test_0_inline_throw ()).
6556 */
6557 if (return_var && cfg->cbb->in_count) {
6558 MonoInst *store;
6559 CHECK_STACK (1);
6560 --sp;
6561 //g_assert (returnvar != -1);
6562 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6563 cfg->ret_var_set = TRUE;
6565 } else {
6566 if (cfg->ret) {
6567 MonoType *ret_type = mono_method_signature (method)->ret;
6569 g_assert (!return_var);
6570 CHECK_STACK (1);
6571 --sp;
6572 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6573 MonoInst *ret_addr;
6575 if (!cfg->vret_addr) {
6576 MonoInst *ins;
6578 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6579 } else {
6580 EMIT_NEW_RETLOADA (cfg, ret_addr);
6582 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6583 ins->klass = mono_class_from_mono_type (ret_type);
6585 } else {
6586 #ifdef MONO_ARCH_SOFT_FLOAT
6587 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6588 MonoInst *iargs [1];
6589 MonoInst *conv;
6591 iargs [0] = *sp;
6592 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6593 mono_arch_emit_setret (cfg, method, conv);
6594 } else {
6595 mono_arch_emit_setret (cfg, method, *sp);
6597 #else
6598 mono_arch_emit_setret (cfg, method, *sp);
6599 #endif
6603 if (sp != stack_start)
6604 UNVERIFIED;
6605 MONO_INST_NEW (cfg, ins, OP_BR);
6606 ip++;
6607 ins->inst_target_bb = end_bblock;
6608 MONO_ADD_INS (bblock, ins);
6609 link_bblock (cfg, bblock, end_bblock);
6610 start_new_bblock = 1;
6611 break;
6612 case CEE_BR_S:
6613 CHECK_OPSIZE (2);
6614 MONO_INST_NEW (cfg, ins, OP_BR);
6615 ip++;
6616 target = ip + 1 + (signed char)(*ip);
6617 ++ip;
6618 GET_BBLOCK (cfg, tblock, target);
6619 link_bblock (cfg, bblock, tblock);
6620 ins->inst_target_bb = tblock;
6621 if (sp != stack_start) {
6622 handle_stack_args (cfg, stack_start, sp - stack_start);
6623 sp = stack_start;
6624 CHECK_UNVERIFIABLE (cfg);
6626 MONO_ADD_INS (bblock, ins);
6627 start_new_bblock = 1;
6628 inline_costs += BRANCH_COST;
6629 break;
6630 case CEE_BEQ_S:
6631 case CEE_BGE_S:
6632 case CEE_BGT_S:
6633 case CEE_BLE_S:
6634 case CEE_BLT_S:
6635 case CEE_BNE_UN_S:
6636 case CEE_BGE_UN_S:
6637 case CEE_BGT_UN_S:
6638 case CEE_BLE_UN_S:
6639 case CEE_BLT_UN_S:
6640 CHECK_OPSIZE (2);
6641 CHECK_STACK (2);
6642 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6643 ip++;
6644 target = ip + 1 + *(signed char*)ip;
6645 ip++;
6647 ADD_BINCOND (NULL);
6649 sp = stack_start;
6650 inline_costs += BRANCH_COST;
6651 break;
6652 case CEE_BR:
6653 CHECK_OPSIZE (5);
6654 MONO_INST_NEW (cfg, ins, OP_BR);
6655 ip++;
6657 target = ip + 4 + (gint32)read32(ip);
6658 ip += 4;
6659 GET_BBLOCK (cfg, tblock, target);
6660 link_bblock (cfg, bblock, tblock);
6661 ins->inst_target_bb = tblock;
6662 if (sp != stack_start) {
6663 handle_stack_args (cfg, stack_start, sp - stack_start);
6664 sp = stack_start;
6665 CHECK_UNVERIFIABLE (cfg);
6668 MONO_ADD_INS (bblock, ins);
6670 start_new_bblock = 1;
6671 inline_costs += BRANCH_COST;
6672 break;
6673 case CEE_BRFALSE_S:
6674 case CEE_BRTRUE_S:
6675 case CEE_BRFALSE:
6676 case CEE_BRTRUE: {
6677 MonoInst *cmp;
6678 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6679 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6680 guint32 opsize = is_short ? 1 : 4;
6682 CHECK_OPSIZE (opsize);
6683 CHECK_STACK (1);
6684 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6685 UNVERIFIED;
6686 ip ++;
6687 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6688 ip += opsize;
6690 sp--;
6692 GET_BBLOCK (cfg, tblock, target);
6693 link_bblock (cfg, bblock, tblock);
6694 GET_BBLOCK (cfg, tblock, ip);
6695 link_bblock (cfg, bblock, tblock);
6697 if (sp != stack_start) {
6698 handle_stack_args (cfg, stack_start, sp - stack_start);
6699 CHECK_UNVERIFIABLE (cfg);
6702 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6703 cmp->sreg1 = sp [0]->dreg;
6704 type_from_op (cmp, sp [0], NULL);
6705 CHECK_TYPE (cmp);
6707 #if SIZEOF_REGISTER == 4
6708 if (cmp->opcode == OP_LCOMPARE_IMM) {
6709 /* Convert it to OP_LCOMPARE */
6710 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6711 ins->type = STACK_I8;
6712 ins->dreg = alloc_dreg (cfg, STACK_I8);
6713 ins->inst_l = 0;
6714 MONO_ADD_INS (bblock, ins);
6715 cmp->opcode = OP_LCOMPARE;
6716 cmp->sreg2 = ins->dreg;
6718 #endif
6719 MONO_ADD_INS (bblock, cmp);
6721 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6722 type_from_op (ins, sp [0], NULL);
6723 MONO_ADD_INS (bblock, ins);
6724 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6725 GET_BBLOCK (cfg, tblock, target);
6726 ins->inst_true_bb = tblock;
6727 GET_BBLOCK (cfg, tblock, ip);
6728 ins->inst_false_bb = tblock;
6729 start_new_bblock = 2;
6731 sp = stack_start;
6732 inline_costs += BRANCH_COST;
6733 break;
6735 case CEE_BEQ:
6736 case CEE_BGE:
6737 case CEE_BGT:
6738 case CEE_BLE:
6739 case CEE_BLT:
6740 case CEE_BNE_UN:
6741 case CEE_BGE_UN:
6742 case CEE_BGT_UN:
6743 case CEE_BLE_UN:
6744 case CEE_BLT_UN:
6745 CHECK_OPSIZE (5);
6746 CHECK_STACK (2);
6747 MONO_INST_NEW (cfg, ins, *ip);
6748 ip++;
6749 target = ip + 4 + (gint32)read32(ip);
6750 ip += 4;
6752 ADD_BINCOND (NULL);
6754 sp = stack_start;
6755 inline_costs += BRANCH_COST;
6756 break;
6757 case CEE_SWITCH: {
6758 MonoInst *src1;
6759 MonoBasicBlock **targets;
6760 MonoBasicBlock *default_bblock;
6761 MonoJumpInfoBBTable *table;
6762 int offset_reg = alloc_preg (cfg);
6763 int target_reg = alloc_preg (cfg);
6764 int table_reg = alloc_preg (cfg);
6765 int sum_reg = alloc_preg (cfg);
6766 gboolean use_op_switch;
6768 CHECK_OPSIZE (5);
6769 CHECK_STACK (1);
6770 n = read32 (ip + 1);
6771 --sp;
6772 src1 = sp [0];
6773 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6774 UNVERIFIED;
6776 ip += 5;
6777 CHECK_OPSIZE (n * sizeof (guint32));
6778 target = ip + n * sizeof (guint32);
6780 GET_BBLOCK (cfg, default_bblock, target);
6782 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6783 for (i = 0; i < n; ++i) {
6784 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6785 targets [i] = tblock;
6786 ip += 4;
6789 if (sp != stack_start) {
6790 /*
6791 * Link the current bb with the targets as well, so handle_stack_args
6792 * will set their in_stack correctly.
6793 */
6794 link_bblock (cfg, bblock, default_bblock);
6795 for (i = 0; i < n; ++i)
6796 link_bblock (cfg, bblock, targets [i]);
6798 handle_stack_args (cfg, stack_start, sp - stack_start);
6799 sp = stack_start;
6800 CHECK_UNVERIFIABLE (cfg);
6803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6805 bblock = cfg->cbb;
6807 for (i = 0; i < n; ++i)
6808 link_bblock (cfg, bblock, targets [i]);
6810 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6811 table->table = targets;
6812 table->table_size = n;
6814 use_op_switch = FALSE;
6815 #ifdef __arm__
6816 /* ARM implements SWITCH statements differently */
6817 /* FIXME: Make it use the generic implementation */
6818 if (!cfg->compile_aot)
6819 use_op_switch = TRUE;
6820 #endif
6822 if (COMPILE_LLVM (cfg))
6823 use_op_switch = TRUE;
6825 if (use_op_switch) {
6826 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6827 ins->sreg1 = src1->dreg;
6828 ins->inst_p0 = table;
6829 ins->inst_many_bb = targets;
6830 ins->klass = GUINT_TO_POINTER (n);
6831 MONO_ADD_INS (cfg->cbb, ins);
6832 } else {
6833 if (sizeof (gpointer) == 8)
6834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6835 else
6836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6838 #if SIZEOF_REGISTER == 8
6839 /* The upper word might not be zero, and we add it to a 64 bit address later */
6840 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6841 #endif
6843 if (cfg->compile_aot) {
6844 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6845 } else {
6846 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6847 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6848 ins->inst_p0 = table;
6849 ins->dreg = table_reg;
6850 MONO_ADD_INS (cfg->cbb, ins);
6853 /* FIXME: Use load_memindex */
6854 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6856 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
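/*
 * Annotation (not part of the original source): the sequence above
 * computes, in effect,
 *
 *     target = *(gpointer*)((char*)table + index * sizeof (gpointer));
 *
 * offset_reg holds index * sizeof (gpointer) (hence the shift by 3 or
 * 2), sum_reg the address of the table slot, and target_reg the loaded
 * bblock address that OP_BR_REG branches to.
 */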
6858 start_new_bblock = 1;
6859 inline_costs += (BRANCH_COST * 2);
6860 break;
6862 case CEE_LDIND_I1:
6863 case CEE_LDIND_U1:
6864 case CEE_LDIND_I2:
6865 case CEE_LDIND_U2:
6866 case CEE_LDIND_I4:
6867 case CEE_LDIND_U4:
6868 case CEE_LDIND_I8:
6869 case CEE_LDIND_I:
6870 case CEE_LDIND_R4:
6871 case CEE_LDIND_R8:
6872 case CEE_LDIND_REF:
6873 CHECK_STACK (1);
6874 --sp;
6876 switch (*ip) {
6877 case CEE_LDIND_R4:
6878 case CEE_LDIND_R8:
6879 dreg = alloc_freg (cfg);
6880 break;
6881 case CEE_LDIND_I8:
6882 dreg = alloc_lreg (cfg);
6883 break;
6884 default:
6885 dreg = alloc_preg (cfg);
6888 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6889 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6890 ins->flags |= ins_flag;
6891 ins_flag = 0;
6892 MONO_ADD_INS (bblock, ins);
6893 *sp++ = ins;
6894 ++ip;
6895 break;
6896 case CEE_STIND_REF:
6897 case CEE_STIND_I1:
6898 case CEE_STIND_I2:
6899 case CEE_STIND_I4:
6900 case CEE_STIND_I8:
6901 case CEE_STIND_R4:
6902 case CEE_STIND_R8:
6903 case CEE_STIND_I:
6904 CHECK_STACK (2);
6905 sp -= 2;
6907 #if HAVE_WRITE_BARRIERS
6908 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6909 /* insert call to write barrier */
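/*
 * Annotation (not part of the original source): with a generational GC,
 * storing an object reference through a pointer has to be reported to
 * the collector so that e.g. old-to-young references get recorded, hence
 * the call to the write barrier wrapper instead of a plain store. Null
 * stores and the barrier wrapper itself are exempt, as the check above
 * shows.
 */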
6910 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6911 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6912 ins_flag = 0;
6913 ip++;
6914 break;
6916 #endif
6918 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6919 ins->flags |= ins_flag;
6920 ins_flag = 0;
6921 MONO_ADD_INS (bblock, ins);
6922 inline_costs += 1;
6923 ++ip;
6924 break;
6926 case CEE_MUL:
6927 CHECK_STACK (2);
6929 MONO_INST_NEW (cfg, ins, (*ip));
6930 sp -= 2;
6931 ins->sreg1 = sp [0]->dreg;
6932 ins->sreg2 = sp [1]->dreg;
6933 type_from_op (ins, sp [0], sp [1]);
6934 CHECK_TYPE (ins);
6935 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6937 /* Use the immediate opcodes if possible */
6938 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6939 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6940 if (imm_opcode != -1) {
6941 ins->opcode = imm_opcode;
6942 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6943 ins->sreg2 = -1;
6945 sp [1]->opcode = OP_NOP;
6949 MONO_ADD_INS ((cfg)->cbb, (ins));
6951 *sp++ = mono_decompose_opcode (cfg, ins);
6952 ip++;
6953 break;
6954 case CEE_ADD:
6955 case CEE_SUB:
6956 case CEE_DIV:
6957 case CEE_DIV_UN:
6958 case CEE_REM:
6959 case CEE_REM_UN:
6960 case CEE_AND:
6961 case CEE_OR:
6962 case CEE_XOR:
6963 case CEE_SHL:
6964 case CEE_SHR:
6965 case CEE_SHR_UN:
6966 CHECK_STACK (2);
6968 MONO_INST_NEW (cfg, ins, (*ip));
6969 sp -= 2;
6970 ins->sreg1 = sp [0]->dreg;
6971 ins->sreg2 = sp [1]->dreg;
6972 type_from_op (ins, sp [0], sp [1]);
6973 CHECK_TYPE (ins);
6974 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6975 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6977 /* FIXME: Pass opcode to is_inst_imm */
6979 /* Use the immediate opcodes if possible */
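/*
 * Illustrative annotation (not part of the original source): for
 *
 *     ldloc.0
 *     ldc.i4  42
 *     add
 *
 * the constant feeding sreg2 is folded into the operation, turning the
 * generic add into its immediate form (e.g. OP_IADD_IMM carrying 42),
 * and the now-unused ICONST is NOPed when that is safe.
 */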
6980 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6981 int imm_opcode;
6983 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6984 if (imm_opcode != -1) {
6985 ins->opcode = imm_opcode;
6986 if (sp [1]->opcode == OP_I8CONST) {
6987 #if SIZEOF_REGISTER == 8
6988 ins->inst_imm = sp [1]->inst_l;
6989 #else
6990 ins->inst_ls_word = sp [1]->inst_ls_word;
6991 ins->inst_ms_word = sp [1]->inst_ms_word;
6992 #endif
6994 else
6995 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6996 ins->sreg2 = -1;
6998 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6999 if (sp [1]->next == NULL)
7000 sp [1]->opcode = OP_NOP;
7003 MONO_ADD_INS ((cfg)->cbb, (ins));
7005 *sp++ = mono_decompose_opcode (cfg, ins);
7006 ip++;
7007 break;
7008 case CEE_NEG:
7009 case CEE_NOT:
7010 case CEE_CONV_I1:
7011 case CEE_CONV_I2:
7012 case CEE_CONV_I4:
7013 case CEE_CONV_R4:
7014 case CEE_CONV_R8:
7015 case CEE_CONV_U4:
7016 case CEE_CONV_I8:
7017 case CEE_CONV_U8:
7018 case CEE_CONV_OVF_I8:
7019 case CEE_CONV_OVF_U8:
7020 case CEE_CONV_R_UN:
7021 CHECK_STACK (1);
7023 /* Special case this earlier so we have long constants in the IR */
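/*
 * Illustrative annotation (not part of the original source):
 *
 *     ldc.i4  5
 *     conv.i8
 *
 * is folded here by mutating the ICONST on the stack into an OP_I8CONST
 * holding 5 (zero-extended for conv.u8, sign-extended for conv.i8),
 * instead of emitting a separate widening opcode.
 */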
7024 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7025 int data = sp [-1]->inst_c0;
7026 sp [-1]->opcode = OP_I8CONST;
7027 sp [-1]->type = STACK_I8;
7028 #if SIZEOF_REGISTER == 8
7029 if ((*ip) == CEE_CONV_U8)
7030 sp [-1]->inst_c0 = (guint32)data;
7031 else
7032 sp [-1]->inst_c0 = data;
7033 #else
7034 sp [-1]->inst_ls_word = data;
7035 if ((*ip) == CEE_CONV_U8)
7036 sp [-1]->inst_ms_word = 0;
7037 else
7038 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7039 #endif
7040 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7042 else {
7043 ADD_UNOP (*ip);
7045 ip++;
7046 break;
7047 case CEE_CONV_OVF_I4:
7048 case CEE_CONV_OVF_I1:
7049 case CEE_CONV_OVF_I2:
7050 case CEE_CONV_OVF_I:
7051 case CEE_CONV_OVF_U:
7052 CHECK_STACK (1);
7054 if (sp [-1]->type == STACK_R8) {
7055 ADD_UNOP (CEE_CONV_OVF_I8);
7056 ADD_UNOP (*ip);
7057 } else {
7058 ADD_UNOP (*ip);
7060 ip++;
7061 break;
7062 case CEE_CONV_OVF_U1:
7063 case CEE_CONV_OVF_U2:
7064 case CEE_CONV_OVF_U4:
7065 CHECK_STACK (1);
7067 if (sp [-1]->type == STACK_R8) {
7068 ADD_UNOP (CEE_CONV_OVF_U8);
7069 ADD_UNOP (*ip);
7070 } else {
7071 ADD_UNOP (*ip);
7073 ip++;
7074 break;
7075 case CEE_CONV_OVF_I1_UN:
7076 case CEE_CONV_OVF_I2_UN:
7077 case CEE_CONV_OVF_I4_UN:
7078 case CEE_CONV_OVF_I8_UN:
7079 case CEE_CONV_OVF_U1_UN:
7080 case CEE_CONV_OVF_U2_UN:
7081 case CEE_CONV_OVF_U4_UN:
7082 case CEE_CONV_OVF_U8_UN:
7083 case CEE_CONV_OVF_I_UN:
7084 case CEE_CONV_OVF_U_UN:
7085 case CEE_CONV_U2:
7086 case CEE_CONV_U1:
7087 case CEE_CONV_I:
7088 case CEE_CONV_U:
7089 CHECK_STACK (1);
7090 ADD_UNOP (*ip);
7091 ip++;
7092 break;
7093 case CEE_ADD_OVF:
7094 case CEE_ADD_OVF_UN:
7095 case CEE_MUL_OVF:
7096 case CEE_MUL_OVF_UN:
7097 case CEE_SUB_OVF:
7098 case CEE_SUB_OVF_UN:
7099 CHECK_STACK (2);
7100 ADD_BINOP (*ip);
7101 ip++;
7102 break;
7103 case CEE_CPOBJ:
7104 CHECK_OPSIZE (5);
7105 CHECK_STACK (2);
7106 token = read32 (ip + 1);
7107 klass = mini_get_class (method, token, generic_context);
7108 CHECK_TYPELOAD (klass);
7109 sp -= 2;
7110 if (generic_class_is_reference_type (cfg, klass)) {
7111 MonoInst *store, *load;
7112 int dreg = alloc_preg (cfg);
7114 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7115 load->flags |= ins_flag;
7116 MONO_ADD_INS (cfg->cbb, load);
7118 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7119 store->flags |= ins_flag;
7120 MONO_ADD_INS (cfg->cbb, store);
7121 } else {
7122 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7124 ins_flag = 0;
7125 ip += 5;
7126 break;
7127 case CEE_LDOBJ: {
7128 int loc_index = -1;
7129 int stloc_len = 0;
7131 CHECK_OPSIZE (5);
7132 CHECK_STACK (1);
7133 --sp;
7134 token = read32 (ip + 1);
7135 klass = mini_get_class (method, token, generic_context);
7136 CHECK_TYPELOAD (klass);
7138 /* Optimize the common ldobj+stloc combination */
7139 switch (ip [5]) {
7140 case CEE_STLOC_S:
7141 loc_index = ip [6];
7142 stloc_len = 2;
7143 break;
7144 case CEE_STLOC_0:
7145 case CEE_STLOC_1:
7146 case CEE_STLOC_2:
7147 case CEE_STLOC_3:
7148 loc_index = ip [5] - CEE_STLOC_0;
7149 stloc_len = 1;
7150 break;
7151 default:
7152 break;
7155 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7156 CHECK_LOCAL (loc_index);
7158 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7159 ins->dreg = cfg->locals [loc_index]->dreg;
7160 ip += 5;
7161 ip += stloc_len;
7162 break;
7165 /* Optimize the ldobj+stobj combination */
7166 /* The reference case ends up being a load+store anyway */
7167 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7168 CHECK_STACK (1);
7170 sp --;
7172 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7174 ip += 5 + 5;
7175 ins_flag = 0;
7176 break;
7179 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7180 *sp++ = ins;
7182 ip += 5;
7183 ins_flag = 0;
7184 inline_costs += 1;
7185 break;
7187 case CEE_LDSTR:
7188 CHECK_STACK_OVF (1);
7189 CHECK_OPSIZE (5);
7190 n = read32 (ip + 1);
7192 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7193 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7194 ins->type = STACK_OBJ;
7195 *sp = ins;
7197 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7198 MonoInst *iargs [1];
7200 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7201 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7202 } else {
7203 if (cfg->opt & MONO_OPT_SHARED) {
7204 MonoInst *iargs [3];
7206 if (cfg->compile_aot) {
7207 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7209 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7210 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7211 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7212 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7213 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7214 } else {
7215 if (bblock->out_of_line) {
7216 MonoInst *iargs [2];
7218 if (image == mono_defaults.corlib) {
7220 /* Avoid relocations in AOT and save some space by using a
7221 * version of helper_ldstr specialized to mscorlib. */
7223 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7224 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7225 } else {
7226 /* Avoid creating the string object */
7227 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7228 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7229 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7232 else
7233 if (cfg->compile_aot) {
7234 NEW_LDSTRCONST (cfg, ins, image, n);
7235 *sp = ins;
7236 MONO_ADD_INS (bblock, ins);
7238 else {
7239 NEW_PCONST (cfg, ins, NULL);
7240 ins->type = STACK_OBJ;
7241 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7242 *sp = ins;
7243 MONO_ADD_INS (bblock, ins);
7248 sp++;
7249 ip += 5;
7250 break;
7251 case CEE_NEWOBJ: {
7252 MonoInst *iargs [2];
7253 MonoMethodSignature *fsig;
7254 MonoInst this_ins;
7255 MonoInst *alloc;
7256 MonoInst *vtable_arg = NULL;
7258 CHECK_OPSIZE (5);
7259 token = read32 (ip + 1);
7260 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7261 if (!cmethod)
7262 goto load_error;
7263 fsig = mono_method_get_signature (cmethod, image, token);
7265 mono_save_token_info (cfg, image, token, cmethod);
7267 if (!mono_class_init (cmethod->klass))
7268 goto load_error;
7270 if (cfg->generic_sharing_context)
7271 context_used = mono_method_check_context_used (cmethod);
7273 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7274 if (check_linkdemand (cfg, method, cmethod))
7275 INLINE_FAILURE;
7276 CHECK_CFG_EXCEPTION;
7277 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7278 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7281 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7282 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7283 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7284 if (context_used) {
7285 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7286 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7287 } else {
7288 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7290 } else {
7291 if (context_used) {
7292 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7293 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7294 } else {
7295 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7297 CHECK_TYPELOAD (cmethod->klass);
7298 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7303 n = fsig->param_count;
7304 CHECK_STACK (n);
7307 /* Generate smaller code for the common newobj <exception> instruction in
7308 * argument checking code. */
7310 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7311 is_exception_class (cmethod->klass) && n <= 2 &&
7312 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7313 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7314 MonoInst *iargs [3];
7316 g_assert (!vtable_arg);
7318 sp -= n;
7320 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7321 switch (n) {
7322 case 0:
7323 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7324 break;
7325 case 1:
7326 iargs [1] = sp [0];
7327 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7328 break;
7329 case 2:
7330 iargs [1] = sp [0];
7331 iargs [2] = sp [1];
7332 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7333 break;
7334 default:
7335 g_assert_not_reached ();
7338 ip += 5;
7339 inline_costs += 5;
7340 break;
7343 /* move the args to allow room for 'this' in the first position */
7344 while (n--) {
7345 --sp;
7346 sp [1] = sp [0];
7349 /* check_call_signature () requires sp[0] to be set */
7350 this_ins.type = STACK_OBJ;
7351 sp [0] = &this_ins;
7352 if (check_call_signature (cfg, fsig, sp))
7353 UNVERIFIED;
7355 iargs [0] = NULL;
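/* The allocation strategy below depends on the target class:
 * System.Array subclasses go through the mono_array_new* icalls,
 * string ctors are invoked with a NULL 'this', valuetypes get a
 * zeroed local whose address is passed as 'this', and ordinary
 * classes are allocated with handle_alloc () (or from rgctx data
 * when generic sharing is in effect) before the real ctor is called. */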
7357 if (mini_class_is_system_array (cmethod->klass)) {
7358 g_assert (!vtable_arg);
7360 if (context_used) {
7361 *sp = emit_get_rgctx_method (cfg, context_used,
7362 cmethod, MONO_RGCTX_INFO_METHOD);
7363 } else {
7364 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7367 /* Avoid varargs in the common case */
7368 if (fsig->param_count == 1)
7369 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7370 else if (fsig->param_count == 2)
7371 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7372 else
7373 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7374 } else if (cmethod->string_ctor) {
7375 g_assert (!context_used);
7376 g_assert (!vtable_arg);
7377 /* we simply pass a null pointer */
7378 EMIT_NEW_PCONST (cfg, *sp, NULL);
7379 /* now call the string ctor */
7380 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7381 } else {
7382 MonoInst* callvirt_this_arg = NULL;
7384 if (cmethod->klass->valuetype) {
7385 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7386 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7387 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7389 alloc = NULL;
7392 /* The code generated by mini_emit_virtual_call () expects
7393 * iargs [0] to be a boxed instance, but luckily the vcall
7394 * will be transformed into a normal call there. */
7396 } else if (context_used) {
7397 MonoInst *data;
7398 int rgctx_info;
7400 if (cfg->opt & MONO_OPT_SHARED)
7401 rgctx_info = MONO_RGCTX_INFO_KLASS;
7402 else
7403 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7404 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7406 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7407 *sp = alloc;
7408 } else {
7409 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7411 CHECK_TYPELOAD (cmethod->klass);
7414 /* TypeInitializationExceptions thrown from the mono_runtime_class_init
7415 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7416 * As a workaround, we call class cctors before allocating objects. */
7418 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7419 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7420 if (cfg->verbose_level > 2)
7421 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7422 class_inits = g_slist_prepend (class_inits, vtable);
7425 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7426 *sp = alloc;
7429 if (alloc)
7430 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7432 /* Now call the actual ctor */
7433 /* Avoid virtual calls to ctors if possible */
7434 if (cmethod->klass->marshalbyref)
7435 callvirt_this_arg = sp [0];
7437 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7438 mono_method_check_inlining (cfg, cmethod) &&
7439 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7440 !g_list_find (dont_inline, cmethod)) {
7441 int costs;
7443 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7444 cfg->real_offset += 5;
7445 bblock = cfg->cbb;
7447 inline_costs += costs - 5;
7448 } else {
7449 INLINE_FAILURE;
7450 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7452 } else if (context_used &&
7453 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7454 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7455 MonoInst *cmethod_addr;
7457 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7458 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7460 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7461 } else {
7462 INLINE_FAILURE;
7463 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7464 callvirt_this_arg, NULL, vtable_arg);
7465 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7466 GENERIC_SHARING_FAILURE (*ip);
7470 if (alloc == NULL) {
7471 /* Valuetype */
7472 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7473 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7474 *sp++= ins;
7476 else
7477 *sp++ = alloc;
7479 ip += 5;
7480 inline_costs += 5;
7481 break;
7483 case CEE_CASTCLASS:
7484 CHECK_STACK (1);
7485 --sp;
7486 CHECK_OPSIZE (5);
7487 token = read32 (ip + 1);
7488 klass = mini_get_class (method, token, generic_context);
7489 CHECK_TYPELOAD (klass);
7490 if (sp [0]->type != STACK_OBJ)
7491 UNVERIFIED;
7493 if (cfg->generic_sharing_context)
7494 context_used = mono_class_check_context_used (klass);
7496 if (context_used) {
7497 MonoInst *args [2];
7499 /* obj */
7500 args [0] = *sp;
7502 /* klass */
7503 args [1] = emit_get_rgctx_klass (cfg, context_used,
7504 klass, MONO_RGCTX_INFO_KLASS);
7506 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7507 *sp ++ = ins;
7508 ip += 5;
7509 inline_costs += 2;
7510 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7511 MonoMethod *mono_castclass;
7512 MonoInst *iargs [1];
7513 int costs;
7515 mono_castclass = mono_marshal_get_castclass (klass);
7516 iargs [0] = sp [0];
7518 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7519 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7520 g_assert (costs > 0);
7522 ip += 5;
7523 cfg->real_offset += 5;
7524 bblock = cfg->cbb;
7526 *sp++ = iargs [0];
7528 inline_costs += costs;
7530 else {
7531 ins = handle_castclass (cfg, klass, *sp);
7532 bblock = cfg->cbb;
7533 *sp ++ = ins;
7534 ip += 5;
7536 break;
7537 case CEE_ISINST: {
7538 CHECK_STACK (1);
7539 --sp;
7540 CHECK_OPSIZE (5);
7541 token = read32 (ip + 1);
7542 klass = mini_get_class (method, token, generic_context);
7543 CHECK_TYPELOAD (klass);
7544 if (sp [0]->type != STACK_OBJ)
7545 UNVERIFIED;
7547 if (cfg->generic_sharing_context)
7548 context_used = mono_class_check_context_used (klass);
7550 if (context_used) {
7551 MonoInst *args [2];
7553 /* obj */
7554 args [0] = *sp;
7556 /* klass */
7557 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7559 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7560 sp++;
7561 ip += 5;
7562 inline_costs += 2;
7563 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7564 MonoMethod *mono_isinst;
7565 MonoInst *iargs [1];
7566 int costs;
7568 mono_isinst = mono_marshal_get_isinst (klass);
7569 iargs [0] = sp [0];
7571 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7572 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7573 g_assert (costs > 0);
7575 ip += 5;
7576 cfg->real_offset += 5;
7577 bblock = cfg->cbb;
7579 *sp++= iargs [0];
7581 inline_costs += costs;
7583 else {
7584 ins = handle_isinst (cfg, klass, *sp);
7585 bblock = cfg->cbb;
7586 *sp ++ = ins;
7587 ip += 5;
7589 break;
7591 case CEE_UNBOX_ANY: {
7592 CHECK_STACK (1);
7593 --sp;
7594 CHECK_OPSIZE (5);
7595 token = read32 (ip + 1);
7596 klass = mini_get_class (method, token, generic_context);
7597 CHECK_TYPELOAD (klass);
7599 mono_save_token_info (cfg, image, token, klass);
7601 if (cfg->generic_sharing_context)
7602 context_used = mono_class_check_context_used (klass);
7604 if (generic_class_is_reference_type (cfg, klass)) {
7605 /* CASTCLASS */
7606 if (context_used) {
7607 MonoInst *iargs [2];
7609 /* obj */
7610 iargs [0] = *sp;
7611 /* klass */
7612 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7613 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7614 *sp ++ = ins;
7615 ip += 5;
7616 inline_costs += 2;
7617 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7618 MonoMethod *mono_castclass;
7619 MonoInst *iargs [1];
7620 int costs;
7622 mono_castclass = mono_marshal_get_castclass (klass);
7623 iargs [0] = sp [0];
7625 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7626 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7628 g_assert (costs > 0);
7630 ip += 5;
7631 cfg->real_offset += 5;
7632 bblock = cfg->cbb;
7634 *sp++ = iargs [0];
7635 inline_costs += costs;
7636 } else {
7637 ins = handle_castclass (cfg, klass, *sp);
7638 bblock = cfg->cbb;
7639 *sp ++ = ins;
7640 ip += 5;
7642 break;
7645 if (mono_class_is_nullable (klass)) {
7646 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7647 *sp++= ins;
7648 ip += 5;
7649 break;
7652 /* UNBOX */
7653 ins = handle_unbox (cfg, klass, sp, context_used);
7654 *sp = ins;
7656 ip += 5;
7658 /* LDOBJ */
7659 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7660 *sp++ = ins;
7662 inline_costs += 2;
7663 break;
7665 case CEE_BOX: {
7666 MonoInst *val;
7668 CHECK_STACK (1);
7669 --sp;
7670 val = *sp;
7671 CHECK_OPSIZE (5);
7672 token = read32 (ip + 1);
7673 klass = mini_get_class (method, token, generic_context);
7674 CHECK_TYPELOAD (klass);
7676 mono_save_token_info (cfg, image, token, klass);
7678 if (cfg->generic_sharing_context)
7679 context_used = mono_class_check_context_used (klass);
7681 if (generic_class_is_reference_type (cfg, klass)) {
7682 *sp++ = val;
7683 ip += 5;
7684 break;
7687 if (klass == mono_defaults.void_class)
7688 UNVERIFIED;
7689 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7690 UNVERIFIED;
7691 /* frequent check in generic code: box (struct), brtrue */
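/* Boxing a non-nullable valuetype always produces a non-null
 * reference, so box followed by brtrue collapses into an
 * unconditional branch and the box itself can be elided. */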
7692 if (!mono_class_is_nullable (klass) &&
7693 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7694 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7695 ip += 5;
7696 MONO_INST_NEW (cfg, ins, OP_BR);
7697 if (*ip == CEE_BRTRUE_S) {
7698 CHECK_OPSIZE (2);
7699 ip++;
7700 target = ip + 1 + (signed char)(*ip);
7701 ip++;
7702 } else {
7703 CHECK_OPSIZE (5);
7704 ip++;
7705 target = ip + 4 + (gint)(read32 (ip));
7706 ip += 4;
7708 GET_BBLOCK (cfg, tblock, target);
7709 link_bblock (cfg, bblock, tblock);
7710 ins->inst_target_bb = tblock;
7711 GET_BBLOCK (cfg, tblock, ip);
7713 /* This leads to some inconsistency, since the two bblocks are
7714 * not really connected, but it is needed for handling stack
7715 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7716 * FIXME: This should only be needed if sp != stack_start, but that
7717 * doesn't work for some reason (test failure in mcs/tests on x86). */
7719 link_bblock (cfg, bblock, tblock);
7720 if (sp != stack_start) {
7721 handle_stack_args (cfg, stack_start, sp - stack_start);
7722 sp = stack_start;
7723 CHECK_UNVERIFIABLE (cfg);
7725 MONO_ADD_INS (bblock, ins);
7726 start_new_bblock = 1;
7727 break;
7730 if (context_used) {
7731 MonoInst *data;
7732 int rgctx_info;
7734 if (cfg->opt & MONO_OPT_SHARED)
7735 rgctx_info = MONO_RGCTX_INFO_KLASS;
7736 else
7737 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7738 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7739 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7740 } else {
7741 *sp++ = handle_box (cfg, val, klass);
7744 ip += 5;
7745 inline_costs += 1;
7746 break;
7748 case CEE_UNBOX: {
7749 CHECK_STACK (1);
7750 --sp;
7751 CHECK_OPSIZE (5);
7752 token = read32 (ip + 1);
7753 klass = mini_get_class (method, token, generic_context);
7754 CHECK_TYPELOAD (klass);
7756 mono_save_token_info (cfg, image, token, klass);
7758 if (cfg->generic_sharing_context)
7759 context_used = mono_class_check_context_used (klass);
7761 if (mono_class_is_nullable (klass)) {
7762 MonoInst *val;
7764 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7765 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7767 *sp++= ins;
7768 } else {
7769 ins = handle_unbox (cfg, klass, sp, context_used);
7770 *sp++ = ins;
7772 ip += 5;
7773 inline_costs += 2;
7774 break;
7776 case CEE_LDFLD:
7777 case CEE_LDFLDA:
7778 case CEE_STFLD: {
7779 MonoClassField *field;
7780 int costs;
7781 guint foffset;
7783 if (*ip == CEE_STFLD) {
7784 CHECK_STACK (2);
7785 sp -= 2;
7786 } else {
7787 CHECK_STACK (1);
7788 --sp;
7790 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7791 UNVERIFIED;
7792 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7793 UNVERIFIED;
7794 CHECK_OPSIZE (5);
7795 token = read32 (ip + 1);
7796 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7797 field = mono_method_get_wrapper_data (method, token);
7798 klass = field->parent;
7800 else {
7801 field = mono_field_from_token (image, token, &klass, generic_context);
7803 if (!field)
7804 goto load_error;
7805 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7806 FIELD_ACCESS_FAILURE;
7807 mono_class_init (klass);
7809 /* XXX this check is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7810 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7811 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7812 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip); */
7815 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7816 if (*ip == CEE_STFLD) {
7817 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7818 UNVERIFIED;
7819 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7820 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7821 MonoInst *iargs [5];
7823 iargs [0] = sp [0];
7824 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7825 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7826 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7827 field->offset);
7828 iargs [4] = sp [1];
7830 if (cfg->opt & MONO_OPT_INLINE) {
7831 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7832 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7833 g_assert (costs > 0);
7835 cfg->real_offset += 5;
7836 bblock = cfg->cbb;
7838 inline_costs += costs;
7839 } else {
7840 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7842 } else {
7843 MonoInst *store;
7845 #if HAVE_WRITE_BARRIERS
7846 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7847 /* insert call to write barrier */
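/* A store of constant null can never introduce a reference the
 * collector has to track, which is why the OP_PCONST == 0 case is
 * excluded from the barrier condition above. */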
7848 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7849 MonoInst *iargs [2];
7850 int dreg;
7852 dreg = alloc_preg (cfg);
7853 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7854 iargs [1] = sp [1];
7855 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7857 #endif
7859 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7861 store->flags |= ins_flag;
7863 ins_flag = 0;
7864 ip += 5;
7865 break;
7868 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7869 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7870 MonoInst *iargs [4];
7872 iargs [0] = sp [0];
7873 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7874 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7875 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7876 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7877 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7878 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7879 bblock = cfg->cbb;
7880 g_assert (costs > 0);
7882 cfg->real_offset += 5;
7884 *sp++ = iargs [0];
7886 inline_costs += costs;
7887 } else {
7888 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7889 *sp++ = ins;
7891 } else {
7892 if (sp [0]->type == STACK_VTYPE) {
7893 MonoInst *var;
7895 /* Have to compute the address of the variable */
7897 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7898 if (!var)
7899 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7900 else
7901 g_assert (var->klass == klass);
7903 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7904 sp [0] = ins;
7907 if (*ip == CEE_LDFLDA) {
7908 dreg = alloc_preg (cfg);
7910 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7911 ins->klass = mono_class_from_mono_type (field->type);
7912 ins->type = STACK_MP;
7913 *sp++ = ins;
7914 } else {
7915 MonoInst *load;
7917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7918 load->flags |= ins_flag;
7919 *sp++ = load;
7922 ins_flag = 0;
7923 ip += 5;
7924 break;
7926 case CEE_LDSFLD:
7927 case CEE_LDSFLDA:
7928 case CEE_STSFLD: {
7929 MonoClassField *field;
7930 gpointer addr = NULL;
7931 gboolean is_special_static;
7933 CHECK_OPSIZE (5);
7934 token = read32 (ip + 1);
7936 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7937 field = mono_method_get_wrapper_data (method, token);
7938 klass = field->parent;
7940 else
7941 field = mono_field_from_token (image, token, &klass, generic_context);
7942 if (!field)
7943 goto load_error;
7944 mono_class_init (klass);
7945 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7946 FIELD_ACCESS_FAILURE;
7948 /* if the class is Critical then transparent code cannot access its fields */
7949 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7950 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7953 /* We can only support shared generic static
7954 * field access on architectures where the
7955 * trampoline code has been extended to handle
7956 * the generic class init. */
7958 #ifndef MONO_ARCH_VTABLE_REG
7959 GENERIC_SHARING_FAILURE (*ip);
7960 #endif
7962 if (cfg->generic_sharing_context)
7963 context_used = mono_class_check_context_used (klass);
7965 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7967 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7968 * to be called here. */
7970 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7971 mono_class_vtable (cfg->domain, klass);
7972 CHECK_TYPELOAD (klass);
7974 mono_domain_lock (cfg->domain);
7975 if (cfg->domain->special_static_fields)
7976 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7977 mono_domain_unlock (cfg->domain);
7979 is_special_static = mono_class_field_is_special_static (field);
7981 /* Generate IR to compute the field address */
7983 if ((cfg->opt & MONO_OPT_SHARED) ||
7984 (cfg->compile_aot && is_special_static) ||
7985 (context_used && is_special_static)) {
7986 MonoInst *iargs [2];
7988 g_assert (field->parent);
7989 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7990 if (context_used) {
7991 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7992 field, MONO_RGCTX_INFO_CLASS_FIELD);
7993 } else {
7994 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7996 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7997 } else if (context_used) {
7998 MonoInst *static_data;
8001 /* g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8002 method->klass->name_space, method->klass->name, method->name,
8003 depth, field->offset); */
8006 if (mono_class_needs_cctor_run (klass, method)) {
8007 MonoCallInst *call;
8008 MonoInst *vtable;
8010 vtable = emit_get_rgctx_klass (cfg, context_used,
8011 klass, MONO_RGCTX_INFO_VTABLE);
8013 // FIXME: This doesn't work since it tries to pass the argument
8014 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8016 /* The vtable pointer is always passed in a register regardless of
8017 * the calling convention, so assign it manually, and make a call
8018 * using a signature without parameters. */
8020 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8021 #ifdef MONO_ARCH_VTABLE_REG
8022 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8023 cfg->uses_vtable_reg = TRUE;
8024 #else
8025 NOT_IMPLEMENTED;
8026 #endif
8030 /* The pointer we're computing here is
8032 * super_info.static_data + field->offset */
8034 static_data = emit_get_rgctx_klass (cfg, context_used,
8035 klass, MONO_RGCTX_INFO_STATIC_DATA);
8037 if (field->offset == 0) {
8038 ins = static_data;
8039 } else {
8040 int addr_reg = mono_alloc_preg (cfg);
8041 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8043 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8044 MonoInst *iargs [2];
8046 g_assert (field->parent);
8047 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8048 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8049 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8050 } else {
8051 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8053 CHECK_TYPELOAD (klass);
8054 if (!addr) {
8055 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8056 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8057 if (cfg->verbose_level > 2)
8058 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8059 class_inits = g_slist_prepend (class_inits, vtable);
8060 } else {
8061 if (cfg->run_cctors) {
8062 MonoException *ex;
8063 /* This makes it so that inlining cannot trigger: */
8064 /* too many apps depend on .cctors running */
8065 /* in a specific order... */
8066 if (! vtable->initialized)
8067 INLINE_FAILURE;
8068 ex = mono_runtime_class_init_full (vtable, FALSE);
8069 if (ex) {
8070 set_exception_object (cfg, ex);
8071 goto exception_exit;
8075 addr = (char*)vtable->data + field->offset;
8077 if (cfg->compile_aot)
8078 EMIT_NEW_SFLDACONST (cfg, ins, field);
8079 else
8080 EMIT_NEW_PCONST (cfg, ins, addr);
8081 } else {
8083 /* insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr)).
8084 * This could later be optimized to do just a couple of
8085 * memory dereferences with constant offsets. */
8087 MonoInst *iargs [1];
8088 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8089 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8093 /* Generate IR to do the actual load/store operation */
8095 if (*ip == CEE_LDSFLDA) {
8096 ins->klass = mono_class_from_mono_type (field->type);
8097 ins->type = STACK_PTR;
8098 *sp++ = ins;
8099 } else if (*ip == CEE_STSFLD) {
8100 MonoInst *store;
8101 CHECK_STACK (1);
8102 sp--;
8104 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8105 store->flags |= ins_flag;
8106 } else {
8107 gboolean is_const = FALSE;
8108 MonoVTable *vtable = NULL;
8110 if (!context_used) {
8111 vtable = mono_class_vtable (cfg->domain, klass);
8112 CHECK_TYPELOAD (klass);
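/* For initonly static fields of an already initialized class we can
 * read the current value at JIT time and emit it as a constant,
 * as long as we are not compiling shared/AOT or generic-shared code. */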
8114 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8115 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8116 gpointer addr = (char*)vtable->data + field->offset;
8117 int ro_type = field->type->type;
8118 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8119 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8121 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8122 is_const = TRUE;
8123 switch (ro_type) {
8124 case MONO_TYPE_BOOLEAN:
8125 case MONO_TYPE_U1:
8126 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8127 sp++;
8128 break;
8129 case MONO_TYPE_I1:
8130 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8131 sp++;
8132 break;
8133 case MONO_TYPE_CHAR:
8134 case MONO_TYPE_U2:
8135 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8136 sp++;
8137 break;
8138 case MONO_TYPE_I2:
8139 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8140 sp++;
8141 break;
8143 case MONO_TYPE_I4:
8144 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8145 sp++;
8146 break;
8147 case MONO_TYPE_U4:
8148 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8149 sp++;
8150 break;
8151 #ifndef HAVE_MOVING_COLLECTOR
8152 case MONO_TYPE_I:
8153 case MONO_TYPE_U:
8154 case MONO_TYPE_STRING:
8155 case MONO_TYPE_OBJECT:
8156 case MONO_TYPE_CLASS:
8157 case MONO_TYPE_SZARRAY:
8158 case MONO_TYPE_PTR:
8159 case MONO_TYPE_FNPTR:
8160 case MONO_TYPE_ARRAY:
8161 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8162 type_to_eval_stack_type ((cfg), field->type, *sp);
8163 sp++;
8164 break;
8165 #endif
8166 case MONO_TYPE_I8:
8167 case MONO_TYPE_U8:
8168 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8169 sp++;
8170 break;
8171 case MONO_TYPE_R4:
8172 case MONO_TYPE_R8:
8173 case MONO_TYPE_VALUETYPE:
8174 default:
8175 is_const = FALSE;
8176 break;
8180 if (!is_const) {
8181 MonoInst *load;
8183 CHECK_STACK_OVF (1);
8185 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8186 load->flags |= ins_flag;
8187 ins_flag = 0;
8188 *sp++ = load;
8191 ins_flag = 0;
8192 ip += 5;
8193 break;
8195 case CEE_STOBJ:
8196 CHECK_STACK (2);
8197 sp -= 2;
8198 CHECK_OPSIZE (5);
8199 token = read32 (ip + 1);
8200 klass = mini_get_class (method, token, generic_context);
8201 CHECK_TYPELOAD (klass);
8202 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8204 ins_flag = 0;
8205 ip += 5;
8206 inline_costs += 1;
8207 break;
8210 /* Array opcodes */
8212 case CEE_NEWARR: {
8213 MonoInst *len_ins;
8214 const char *data_ptr;
8215 int data_size = 0;
8216 guint32 field_token;
8218 CHECK_STACK (1);
8219 --sp;
8221 CHECK_OPSIZE (5);
8222 token = read32 (ip + 1);
8224 klass = mini_get_class (method, token, generic_context);
8225 CHECK_TYPELOAD (klass);
8227 if (cfg->generic_sharing_context)
8228 context_used = mono_class_check_context_used (klass);
8230 if (context_used) {
8231 MonoInst *args [2];
8233 /* FIXME: Decompose later to help abcrem */
8235 /* vtable */
8236 args [0] = emit_get_rgctx_klass (cfg, context_used,
8237 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8239 /* array len */
8240 args [1] = sp [0];
8242 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8243 } else {
8244 if (cfg->opt & MONO_OPT_SHARED) {
8245 /* Decompose now to avoid problems with references to the domainvar */
8246 MonoInst *iargs [3];
8248 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8249 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8250 iargs [2] = sp [0];
8252 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8253 } else {
8254 /* Decompose later since it is needed by abcrem */
8255 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8256 ins->dreg = alloc_preg (cfg);
8257 ins->sreg1 = sp [0]->dreg;
8258 ins->inst_newa_class = klass;
8259 ins->type = STACK_OBJ;
8260 ins->klass = klass;
8261 MONO_ADD_INS (cfg->cbb, ins);
8262 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8263 cfg->cbb->has_array_access = TRUE;
8265 /* Needed so mono_emit_load_get_addr () gets called */
8266 mono_get_got_var (cfg);
8270 len_ins = sp [0];
8271 ip += 5;
8272 *sp++ = ins;
8273 inline_costs += 1;
8276 /* we inline/optimize the initialization sequence if possible.
8277 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8278 * for small sizes open code the memcpy
8279 * ensure the rva field is big enough */
8281 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
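/* A NEWARR with a constant length whose data comes from a metadata
 * RVA blob (presumably the usual dup/ldtoken/call InitializeArray
 * sequence matched by initialize_array_data ()) is open coded as a
 * single memcpy into the fresh vector; the ip += 11 below then skips
 * the IL that would otherwise perform the initialization. */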
8282 MonoMethod *memcpy_method = get_memcpy_method ();
8283 MonoInst *iargs [3];
8284 int add_reg = alloc_preg (cfg);
8286 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8287 if (cfg->compile_aot) {
8288 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8289 } else {
8290 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8292 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8293 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8294 ip += 11;
8297 break;
8299 case CEE_LDLEN:
8300 CHECK_STACK (1);
8301 --sp;
8302 if (sp [0]->type != STACK_OBJ)
8303 UNVERIFIED;
8305 dreg = alloc_preg (cfg);
8306 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8307 ins->dreg = alloc_preg (cfg);
8308 ins->sreg1 = sp [0]->dreg;
8309 ins->type = STACK_I4;
8310 MONO_ADD_INS (cfg->cbb, ins);
8311 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8312 cfg->cbb->has_array_access = TRUE;
8313 ip ++;
8314 *sp++ = ins;
8315 break;
8316 case CEE_LDELEMA:
8317 CHECK_STACK (2);
8318 sp -= 2;
8319 CHECK_OPSIZE (5);
8320 if (sp [0]->type != STACK_OBJ)
8321 UNVERIFIED;
8323 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8325 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8326 CHECK_TYPELOAD (klass);
8327 /* we need to make sure that this array is exactly the type it needs
8328 * to be for correctness. the wrappers are lax with their usage
8329 * so we need to ignore them here */
8331 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8332 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8334 readonly = FALSE;
8335 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8336 *sp++ = ins;
8337 ip += 5;
8338 break;
8339 case CEE_LDELEM_ANY:
8340 case CEE_LDELEM_I1:
8341 case CEE_LDELEM_U1:
8342 case CEE_LDELEM_I2:
8343 case CEE_LDELEM_U2:
8344 case CEE_LDELEM_I4:
8345 case CEE_LDELEM_U4:
8346 case CEE_LDELEM_I8:
8347 case CEE_LDELEM_I:
8348 case CEE_LDELEM_R4:
8349 case CEE_LDELEM_R8:
8350 case CEE_LDELEM_REF: {
8351 MonoInst *addr;
8353 CHECK_STACK (2);
8354 sp -= 2;
8356 if (*ip == CEE_LDELEM_ANY) {
8357 CHECK_OPSIZE (5);
8358 token = read32 (ip + 1);
8359 klass = mini_get_class (method, token, generic_context);
8360 CHECK_TYPELOAD (klass);
8361 mono_class_init (klass);
8363 else
8364 klass = array_access_to_klass (*ip);
8366 if (sp [0]->type != STACK_OBJ)
8367 UNVERIFIED;
8369 cfg->flags |= MONO_CFG_HAS_LDELEMA;
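/* With a constant index the element sits at a fixed offset from the
 * array object, so only a bounds check plus a single load are needed;
 * otherwise compute the element address with the ldelema helper. */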
8371 if (sp [1]->opcode == OP_ICONST) {
8372 int array_reg = sp [0]->dreg;
8373 int index_reg = sp [1]->dreg;
8374 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8376 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8377 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8378 } else {
8379 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8382 *sp++ = ins;
8383 if (*ip == CEE_LDELEM_ANY)
8384 ip += 5;
8385 else
8386 ++ip;
8387 break;
8389 case CEE_STELEM_I:
8390 case CEE_STELEM_I1:
8391 case CEE_STELEM_I2:
8392 case CEE_STELEM_I4:
8393 case CEE_STELEM_I8:
8394 case CEE_STELEM_R4:
8395 case CEE_STELEM_R8:
8396 case CEE_STELEM_REF:
8397 case CEE_STELEM_ANY: {
8398 MonoInst *addr;
8400 CHECK_STACK (3);
8401 sp -= 3;
8403 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8405 if (*ip == CEE_STELEM_ANY) {
8406 CHECK_OPSIZE (5);
8407 token = read32 (ip + 1);
8408 klass = mini_get_class (method, token, generic_context);
8409 CHECK_TYPELOAD (klass);
8410 mono_class_init (klass);
8412 else
8413 klass = array_access_to_klass (*ip);
8415 if (sp [0]->type != STACK_OBJ)
8416 UNVERIFIED;
8418 /* storing a NULL doesn't need any of the complex checks in stelemref */
8419 if (generic_class_is_reference_type (cfg, klass) &&
8420 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8421 MonoMethod* helper = mono_marshal_get_stelemref ();
8422 MonoInst *iargs [3];
8424 if (sp [0]->type != STACK_OBJ)
8425 UNVERIFIED;
8426 if (sp [2]->type != STACK_OBJ)
8427 UNVERIFIED;
8429 iargs [2] = sp [2];
8430 iargs [1] = sp [1];
8431 iargs [0] = sp [0];
8433 mono_emit_method_call (cfg, helper, iargs, NULL);
8434 } else {
8435 if (sp [1]->opcode == OP_ICONST) {
8436 int array_reg = sp [0]->dreg;
8437 int index_reg = sp [1]->dreg;
8438 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8440 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8441 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8442 } else {
8443 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8444 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8448 if (*ip == CEE_STELEM_ANY)
8449 ip += 5;
8450 else
8451 ++ip;
8452 inline_costs += 1;
8453 break;
8455 case CEE_CKFINITE: {
8456 CHECK_STACK (1);
8457 --sp;
8459 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8460 ins->sreg1 = sp [0]->dreg;
8461 ins->dreg = alloc_freg (cfg);
8462 ins->type = STACK_R8;
8463 MONO_ADD_INS (bblock, ins);
8465 *sp++ = mono_decompose_opcode (cfg, ins);
8467 ++ip;
8468 break;
8470 case CEE_REFANYVAL: {
8471 MonoInst *src_var, *src;
8473 int klass_reg = alloc_preg (cfg);
8474 int dreg = alloc_preg (cfg);
8476 CHECK_STACK (1);
8477 MONO_INST_NEW (cfg, ins, *ip);
8478 --sp;
8479 CHECK_OPSIZE (5);
8480 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8481 CHECK_TYPELOAD (klass);
8482 mono_class_init (klass);
8484 if (cfg->generic_sharing_context)
8485 context_used = mono_class_check_context_used (klass);
8487 // FIXME:
8488 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8489 if (!src_var)
8490 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8491 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
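/* Check that the dynamic type recorded in the TypedReference matches
 * the requested class (raising InvalidCastException otherwise), then
 * return the address of its value field as a managed pointer. */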
8494 if (context_used) {
8495 MonoInst *klass_ins;
8497 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8498 klass, MONO_RGCTX_INFO_KLASS);
8500 // FIXME:
8501 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8502 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8503 } else {
8504 mini_emit_class_check (cfg, klass_reg, klass);
8506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8507 ins->type = STACK_MP;
8508 *sp++ = ins;
8509 ip += 5;
8510 break;
8512 case CEE_MKREFANY: {
8513 MonoInst *loc, *addr;
8515 CHECK_STACK (1);
8516 MONO_INST_NEW (cfg, ins, *ip);
8517 --sp;
8518 CHECK_OPSIZE (5);
8519 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8520 CHECK_TYPELOAD (klass);
8521 mono_class_init (klass);
8523 if (cfg->generic_sharing_context)
8524 context_used = mono_class_check_context_used (klass);
8526 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8527 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
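/* Build the TypedReference in a local: store the MonoClass, the
 * corresponding MonoType (its byval_arg) and the address of the
 * value into the klass, type and value fields respectively. */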
8529 if (context_used) {
8530 MonoInst *const_ins;
8531 int type_reg = alloc_preg (cfg);
8533 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8536 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8537 } else if (cfg->compile_aot) {
8538 int const_reg = alloc_preg (cfg);
8539 int type_reg = alloc_preg (cfg);
8541 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8545 } else {
8546 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8547 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8551 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8552 ins->type = STACK_VTYPE;
8553 ins->klass = mono_defaults.typed_reference_class;
8554 *sp++ = ins;
8555 ip += 5;
8556 break;
8558 case CEE_LDTOKEN: {
8559 gpointer handle;
8560 MonoClass *handle_class;
8562 CHECK_STACK_OVF (1);
8564 CHECK_OPSIZE (5);
8565 n = read32 (ip + 1);
8567 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8568 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8569 handle = mono_method_get_wrapper_data (method, n);
8570 handle_class = mono_method_get_wrapper_data (method, n + 1);
8571 if (handle_class == mono_defaults.typehandle_class)
8572 handle = &((MonoClass*)handle)->byval_arg;
8574 else {
8575 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8577 if (!handle)
8578 goto load_error;
8579 mono_class_init (handle_class);
8580 if (cfg->generic_sharing_context) {
8581 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8582 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8583 /* This case handles ldtoken
8584 of an open type, like for
8585 typeof(Gen<>). */
8586 context_used = 0;
8587 } else if (handle_class == mono_defaults.typehandle_class) {
8588 /* If we get a MONO_TYPE_CLASS
8589 then we need to provide the
8590 open type, not an
8591 instantiation of it. */
8592 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8593 context_used = 0;
8594 else
8595 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8596 } else if (handle_class == mono_defaults.fieldhandle_class)
8597 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8598 else if (handle_class == mono_defaults.methodhandle_class)
8599 context_used = mono_method_check_context_used (handle);
8600 else
8601 g_assert_not_reached ();
8604 if ((cfg->opt & MONO_OPT_SHARED) &&
8605 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8606 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8607 MonoInst *addr, *vtvar, *iargs [3];
8608 int method_context_used;
8610 if (cfg->generic_sharing_context)
8611 method_context_used = mono_method_check_context_used (method);
8612 else
8613 method_context_used = 0;
8615 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8617 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8618 EMIT_NEW_ICONST (cfg, iargs [1], n);
8619 if (method_context_used) {
8620 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8621 method, MONO_RGCTX_INFO_METHOD);
8622 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8623 } else {
8624 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8625 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8627 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8631 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8632 } else {
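/* Recognize the common 'ldtoken <type>' immediately followed by a
 * call to Type::GetTypeFromHandle and emit the System.Type object
 * (or its rgctx/AOT equivalent) directly, avoiding the runtime
 * handle-to-type round trip. */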
8633 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8634 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8635 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8636 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8637 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8638 MonoClass *tclass = mono_class_from_mono_type (handle);
8640 mono_class_init (tclass);
8641 if (context_used) {
8642 ins = emit_get_rgctx_klass (cfg, context_used,
8643 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8644 } else if (cfg->compile_aot) {
8645 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8646 } else {
8647 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8649 ins->type = STACK_OBJ;
8650 ins->klass = cmethod->klass;
8651 ip += 5;
8652 } else {
8653 MonoInst *addr, *vtvar;
8655 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8657 if (context_used) {
8658 if (handle_class == mono_defaults.typehandle_class) {
8659 ins = emit_get_rgctx_klass (cfg, context_used,
8660 mono_class_from_mono_type (handle),
8661 MONO_RGCTX_INFO_TYPE);
8662 } else if (handle_class == mono_defaults.methodhandle_class) {
8663 ins = emit_get_rgctx_method (cfg, context_used,
8664 handle, MONO_RGCTX_INFO_METHOD);
8665 } else if (handle_class == mono_defaults.fieldhandle_class) {
8666 ins = emit_get_rgctx_field (cfg, context_used,
8667 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8668 } else {
8669 g_assert_not_reached ();
8671 } else if (cfg->compile_aot) {
8672 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8673 } else {
8674 EMIT_NEW_PCONST (cfg, ins, handle);
8676 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8678 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8682 *sp++ = ins;
8683 ip += 5;
8684 break;
8686 case CEE_THROW:
8687 CHECK_STACK (1);
8688 MONO_INST_NEW (cfg, ins, OP_THROW);
8689 --sp;
8690 ins->sreg1 = sp [0]->dreg;
8691 ip++;
8692 bblock->out_of_line = TRUE;
8693 MONO_ADD_INS (bblock, ins);
8694 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8695 MONO_ADD_INS (bblock, ins);
8696 sp = stack_start;
8698 link_bblock (cfg, bblock, end_bblock);
8699 start_new_bblock = 1;
8700 break;
8701 case CEE_ENDFINALLY:
8702 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8703 MONO_ADD_INS (bblock, ins);
8704 ip++;
8705 start_new_bblock = 1;
8708 /* Control will leave the method so empty the stack, otherwise
8709 * the next basic block will start with a nonempty stack. */
8711 while (sp != stack_start) {
8712 sp--;
8714 break;
8715 case CEE_LEAVE:
8716 case CEE_LEAVE_S: {
8717 GList *handlers;
8719 if (*ip == CEE_LEAVE) {
8720 CHECK_OPSIZE (5);
8721 target = ip + 5 + (gint32)read32(ip + 1);
8722 } else {
8723 CHECK_OPSIZE (2);
8724 target = ip + 2 + (signed char)(ip [1]);
8727 /* empty the stack */
8728 while (sp != stack_start) {
8729 sp--;
8733 /* If this leave statement is in a catch block, check for a
8734 * pending exception, and rethrow it if necessary. */
8736 for (i = 0; i < header->num_clauses; ++i) {
8737 MonoExceptionClause *clause = &header->clauses [i];
8740 /* Use <= in the final comparison to handle clauses with multiple
8741 * leave statements, like in bug #78024.
8742 * The ordering of the exception clauses guarantees that we find the
8743 * innermost clause. */
8745 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8746 MonoInst *exc_ins;
8747 MonoBasicBlock *dont_throw;
8750 MonoInst *load;
8752 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8755 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8757 NEW_BBLOCK (cfg, dont_throw);
8760 /* Currently, we always rethrow the abort exception, despite the
8761 * fact that this is not correct. See thread6.cs for an example.
8762 * But propagating the abort exception is more important than
8763 * getting the semantics right. */
8765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8767 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8769 MONO_START_BB (cfg, dont_throw);
8770 bblock = cfg->cbb;
8774 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8775 GList *tmp;
8776 for (tmp = handlers; tmp; tmp = tmp->next) {
8777 tblock = tmp->data;
8778 link_bblock (cfg, bblock, tblock);
8779 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8780 ins->inst_target_bb = tblock;
8781 MONO_ADD_INS (bblock, ins);
8783 g_list_free (handlers);
8786 MONO_INST_NEW (cfg, ins, OP_BR);
8787 MONO_ADD_INS (bblock, ins);
8788 GET_BBLOCK (cfg, tblock, target);
8789 link_bblock (cfg, bblock, tblock);
8790 ins->inst_target_bb = tblock;
8791 start_new_bblock = 1;
8793 if (*ip == CEE_LEAVE)
8794 ip += 5;
8795 else
8796 ip += 2;
8798 break;
8802 /* Mono specific opcodes */
8804 case MONO_CUSTOM_PREFIX: {
8806 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8808 CHECK_OPSIZE (2);
8809 switch (ip [1]) {
8810 case CEE_MONO_ICALL: {
8811 gpointer func;
8812 MonoJitICallInfo *info;
8814 token = read32 (ip + 2);
8815 func = mono_method_get_wrapper_data (method, token);
8816 info = mono_find_jit_icall_by_addr (func);
8817 g_assert (info);
8819 CHECK_STACK (info->sig->param_count);
8820 sp -= info->sig->param_count;
8822 ins = mono_emit_jit_icall (cfg, info->func, sp);
8823 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8824 *sp++ = ins;
8826 ip += 6;
8827 inline_costs += 10 * num_calls++;
8829 break;
8831 case CEE_MONO_LDPTR: {
8832 gpointer ptr;
8834 CHECK_STACK_OVF (1);
8835 CHECK_OPSIZE (6);
8836 token = read32 (ip + 2);
8838 ptr = mono_method_get_wrapper_data (method, token);
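/* When AOT compiling an icall wrapper, the raw function pointer is
 * looked up by the wrapper's name so it can be emitted as a patchable
 * constant later instead of embedding a host address. */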
8839 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8840 MonoJitICallInfo *callinfo;
8841 const char *icall_name;
8843 icall_name = method->name + strlen ("__icall_wrapper_");
8844 g_assert (icall_name);
8845 callinfo = mono_find_jit_icall_by_name (icall_name);
8846 g_assert (callinfo);
8848 if (ptr == callinfo->func) {
8849 /* Will be transformed into an AOTCONST later */
8850 EMIT_NEW_PCONST (cfg, ins, ptr);
8851 *sp++ = ins;
8852 ip += 6;
8853 break;
8856 /* FIXME: Generalize this */
8857 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8858 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8859 *sp++ = ins;
8860 ip += 6;
8861 break;
8863 EMIT_NEW_PCONST (cfg, ins, ptr);
8864 *sp++ = ins;
8865 ip += 6;
8866 inline_costs += 10 * num_calls++;
8867 /* Can't embed random pointers into AOT code */
8868 cfg->disable_aot = 1;
8869 break;
8871 case CEE_MONO_ICALL_ADDR: {
8872 MonoMethod *cmethod;
8873 gpointer ptr;
8875 CHECK_STACK_OVF (1);
8876 CHECK_OPSIZE (6);
8877 token = read32 (ip + 2);
8879 cmethod = mono_method_get_wrapper_data (method, token);
8881 if (cfg->compile_aot) {
8882 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8883 } else {
8884 ptr = mono_lookup_internal_call (cmethod);
8885 g_assert (ptr);
8886 EMIT_NEW_PCONST (cfg, ins, ptr);
8888 *sp++ = ins;
8889 ip += 6;
8890 break;
8892 case CEE_MONO_VTADDR: {
8893 MonoInst *src_var, *src;
8895 CHECK_STACK (1);
8896 --sp;
8898 // FIXME:
8899 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8900 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8901 *sp++ = src;
8902 ip += 2;
8903 break;
8905 case CEE_MONO_NEWOBJ: {
8906 MonoInst *iargs [2];
8908 CHECK_STACK_OVF (1);
8909 CHECK_OPSIZE (6);
8910 token = read32 (ip + 2);
8911 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8912 mono_class_init (klass);
8913 NEW_DOMAINCONST (cfg, iargs [0]);
8914 MONO_ADD_INS (cfg->cbb, iargs [0]);
8915 NEW_CLASSCONST (cfg, iargs [1], klass);
8916 MONO_ADD_INS (cfg->cbb, iargs [1]);
8917 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8918 ip += 6;
8919 inline_costs += 10 * num_calls++;
8920 break;
8922 case CEE_MONO_OBJADDR:
8923 CHECK_STACK (1);
8924 --sp;
8925 MONO_INST_NEW (cfg, ins, OP_MOVE);
8926 ins->dreg = alloc_preg (cfg);
8927 ins->sreg1 = sp [0]->dreg;
8928 ins->type = STACK_MP;
8929 MONO_ADD_INS (cfg->cbb, ins);
8930 *sp++ = ins;
8931 ip += 2;
8932 break;
8933 case CEE_MONO_LDNATIVEOBJ:
8935 /* Similar to LDOBJ, but instead load the unmanaged
8936 * representation of the vtype to the stack. */
8938 CHECK_STACK (1);
8939 CHECK_OPSIZE (6);
8940 --sp;
8941 token = read32 (ip + 2);
8942 klass = mono_method_get_wrapper_data (method, token);
8943 g_assert (klass->valuetype);
8944 mono_class_init (klass);
8947 MonoInst *src, *dest, *temp;
8949 src = sp [0];
8950 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8951 temp->backend.is_pinvoke = 1;
8952 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8953 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8955 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8956 dest->type = STACK_VTYPE;
8957 dest->klass = klass;
8959 *sp ++ = dest;
8960 ip += 6;
8962 break;
8963 case CEE_MONO_RETOBJ: {
8965 /* Same as RET, but return the native representation of a vtype
8966 * to the caller. */
8968 g_assert (cfg->ret);
8969 g_assert (mono_method_signature (method)->pinvoke);
8970 CHECK_STACK (1);
8971 --sp;
8973 CHECK_OPSIZE (6);
8974 token = read32 (ip + 2);
8975 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8977 if (!cfg->vret_addr) {
8978 g_assert (cfg->ret_var_is_local);
8980 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8981 } else {
8982 EMIT_NEW_RETLOADA (cfg, ins);
8984 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8986 if (sp != stack_start)
8987 UNVERIFIED;
8989 MONO_INST_NEW (cfg, ins, OP_BR);
8990 ins->inst_target_bb = end_bblock;
8991 MONO_ADD_INS (bblock, ins);
8992 link_bblock (cfg, bblock, end_bblock);
8993 start_new_bblock = 1;
8994 ip += 6;
8995 break;
8997 case CEE_MONO_CISINST:
8998 case CEE_MONO_CCASTCLASS: {
8999 int token;
9000 CHECK_STACK (1);
9001 --sp;
9002 CHECK_OPSIZE (6);
9003 token = read32 (ip + 2);
9004 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9005 if (ip [1] == CEE_MONO_CISINST)
9006 ins = handle_cisinst (cfg, klass, sp [0]);
9007 else
9008 ins = handle_ccastclass (cfg, klass, sp [0]);
9009 bblock = cfg->cbb;
9010 *sp++ = ins;
9011 ip += 6;
9012 break;
9014 case CEE_MONO_SAVE_LMF:
9015 case CEE_MONO_RESTORE_LMF:
9016 #ifdef MONO_ARCH_HAVE_LMF_OPS
9017 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9018 MONO_ADD_INS (bblock, ins);
9019 cfg->need_lmf_area = TRUE;
9020 #endif
9021 ip += 2;
9022 break;
9023 case CEE_MONO_CLASSCONST:
9024 CHECK_STACK_OVF (1);
9025 CHECK_OPSIZE (6);
9026 token = read32 (ip + 2);
9027 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9028 *sp++ = ins;
9029 ip += 6;
9030 inline_costs += 10 * num_calls++;
9031 break;
9032 case CEE_MONO_NOT_TAKEN:
9033 bblock->out_of_line = TRUE;
9034 ip += 2;
9035 break;
9036 case CEE_MONO_TLS:
9037 CHECK_STACK_OVF (1);
9038 CHECK_OPSIZE (6);
9039 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9040 ins->dreg = alloc_preg (cfg);
9041 ins->inst_offset = (gint32)read32 (ip + 2);
9042 ins->type = STACK_PTR;
9043 MONO_ADD_INS (bblock, ins);
9044 *sp++ = ins;
9045 ip += 6;
9046 break;
9047 default:
9048 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9049 break;
9051 break;
9054 case CEE_PREFIX1: {
9055 CHECK_OPSIZE (2);
9056 switch (ip [1]) {
9057 case CEE_ARGLIST: {
9058 /* somewhat similar to LDTOKEN */
9059 MonoInst *addr, *vtvar;
9060 CHECK_STACK_OVF (1);
9061 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9063 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9064 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9066 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9067 ins->type = STACK_VTYPE;
9068 ins->klass = mono_defaults.argumenthandle_class;
9069 *sp++ = ins;
9070 ip += 2;
9071 break;
9073 case CEE_CEQ:
9074 case CEE_CGT:
9075 case CEE_CGT_UN:
9076 case CEE_CLT:
9077 case CEE_CLT_UN: {
9078 MonoInst *cmp;
9079 CHECK_STACK (2);
9081 /* The following transforms:
9082 * CEE_CEQ into OP_CEQ
9083 * CEE_CGT into OP_CGT
9084 * CEE_CGT_UN into OP_CGT_UN
9085 * CEE_CLT into OP_CLT
9086 * CEE_CLT_UN into OP_CLT_UN */
9088 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9090 MONO_INST_NEW (cfg, ins, cmp->opcode);
9091 sp -= 2;
9092 cmp->sreg1 = sp [0]->dreg;
9093 cmp->sreg2 = sp [1]->dreg;
9094 type_from_op (cmp, sp [0], sp [1]);
9095 CHECK_TYPE (cmp);
9096 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9097 cmp->opcode = OP_LCOMPARE;
9098 else if (sp [0]->type == STACK_R8)
9099 cmp->opcode = OP_FCOMPARE;
9100 else
9101 cmp->opcode = OP_ICOMPARE;
9102 MONO_ADD_INS (bblock, cmp);
9103 ins->type = STACK_I4;
9104 ins->dreg = alloc_dreg (cfg, ins->type);
9105 type_from_op (ins, sp [0], sp [1]);
9107 if (cmp->opcode == OP_FCOMPARE) {
9109 /* The backends expect the fceq opcodes to do the
9110 * comparison too. */
9112 cmp->opcode = OP_NOP;
9113 ins->sreg1 = cmp->sreg1;
9114 ins->sreg2 = cmp->sreg2;
9116 MONO_ADD_INS (bblock, ins);
9117 *sp++ = ins;
9118 ip += 2;
9119 break;
9121 case CEE_LDFTN: {
9122 MonoInst *argconst;
9123 MonoMethod *cil_method;
9124 gboolean needs_static_rgctx_invoke;
9126 CHECK_STACK_OVF (1);
9127 CHECK_OPSIZE (6);
9128 n = read32 (ip + 2);
9129 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9130 if (!cmethod)
9131 goto load_error;
9132 mono_class_init (cmethod->klass);
9134 mono_save_token_info (cfg, image, n, cmethod);
9136 if (cfg->generic_sharing_context)
9137 context_used = mono_method_check_context_used (cmethod);
9139 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9141 cil_method = cmethod;
9142 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9143 METHOD_ACCESS_FAILURE;
9145 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9146 if (check_linkdemand (cfg, method, cmethod))
9147 INLINE_FAILURE;
9148 CHECK_CFG_EXCEPTION;
9149 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9150 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9154 /* Optimize the common case of ldftn+delegate creation */
9156 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9157 /* FIXME: SGEN support */
9158 /* FIXME: handle shared static generic methods */
9159 /* FIXME: handle this in shared code */
9160 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9161 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9162 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9163 MonoInst *target_ins;
9164 MonoMethod *invoke;
9166 invoke = mono_get_delegate_invoke (ctor_method->klass);
9167 if (!invoke || !mono_method_signature (invoke))
9168 goto load_error;
9170 ip += 6;
9171 if (cfg->verbose_level > 3)
9172 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9173 target_ins = sp [-1];
9174 sp --;
9175 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9176 ip += 5;
9177 sp ++;
9178 break;
9181 #endif
9183 if (context_used) {
9184 if (needs_static_rgctx_invoke)
9185 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9187 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9188 } else if (needs_static_rgctx_invoke) {
9189 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9190 } else {
9191 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9193 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9194 *sp++ = ins;
9196 ip += 6;
9197 inline_costs += 10 * num_calls++;
9198 break;
9200 case CEE_LDVIRTFTN: {
9201 MonoInst *args [2];
9203 CHECK_STACK (1);
9204 CHECK_OPSIZE (6);
9205 n = read32 (ip + 2);
9206 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9207 if (!cmethod)
9208 goto load_error;
9209 mono_class_init (cmethod->klass);
9211 if (cfg->generic_sharing_context)
9212 context_used = mono_method_check_context_used (cmethod);
9214 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9215 if (check_linkdemand (cfg, method, cmethod))
9216 INLINE_FAILURE;
9217 CHECK_CFG_EXCEPTION;
9218 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9219 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9222 --sp;
9223 args [0] = *sp;
9225 if (context_used) {
9226 args [1] = emit_get_rgctx_method (cfg, context_used,
9227 cmethod, MONO_RGCTX_INFO_METHOD);
9228 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9229 } else {
9230 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9231 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9234 ip += 6;
9235 inline_costs += 10 * num_calls++;
9236 break;
9238 case CEE_LDARG:
9239 CHECK_STACK_OVF (1);
9240 CHECK_OPSIZE (4);
9241 n = read16 (ip + 2);
9242 CHECK_ARG (n);
9243 EMIT_NEW_ARGLOAD (cfg, ins, n);
9244 *sp++ = ins;
9245 ip += 4;
9246 break;
9247 case CEE_LDARGA:
9248 CHECK_STACK_OVF (1);
9249 CHECK_OPSIZE (4);
9250 n = read16 (ip + 2);
9251 CHECK_ARG (n);
9252 NEW_ARGLOADA (cfg, ins, n);
9253 MONO_ADD_INS (cfg->cbb, ins);
9254 *sp++ = ins;
9255 ip += 4;
9256 break;
9257 case CEE_STARG:
9258 CHECK_STACK (1);
9259 --sp;
9260 CHECK_OPSIZE (4);
9261 n = read16 (ip + 2);
9262 CHECK_ARG (n);
9263 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9264 UNVERIFIED;
9265 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9266 ip += 4;
9267 break;
9268 case CEE_LDLOC:
9269 CHECK_STACK_OVF (1);
9270 CHECK_OPSIZE (4);
9271 n = read16 (ip + 2);
9272 CHECK_LOCAL (n);
9273 EMIT_NEW_LOCLOAD (cfg, ins, n);
9274 *sp++ = ins;
9275 ip += 4;
9276 break;
9277 case CEE_LDLOCA: {
9278 unsigned char *tmp_ip;
9279 CHECK_STACK_OVF (1);
9280 CHECK_OPSIZE (4);
9281 n = read16 (ip + 2);
9282 CHECK_LOCAL (n);
9284 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9285 ip = tmp_ip;
9286 inline_costs += 1;
9287 break;
9290 EMIT_NEW_LOCLOADA (cfg, ins, n);
9291 *sp++ = ins;
9292 ip += 4;
9293 break;
9295 case CEE_STLOC:
9296 CHECK_STACK (1);
9297 --sp;
9298 CHECK_OPSIZE (4);
9299 n = read16 (ip + 2);
9300 CHECK_LOCAL (n);
9301 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9302 UNVERIFIED;
9303 emit_stloc_ir (cfg, sp, header, n);
9304 ip += 4;
9305 inline_costs += 1;
9306 break;
9307 case CEE_LOCALLOC:
9308 CHECK_STACK (1);
9309 --sp;
9310 if (sp != stack_start)
9311 UNVERIFIED;
9312 if (cfg->method != method)
9314 /* Inlining this into a loop in a parent could lead to
9315 * stack overflows, which is different behavior from the
9316 * non-inlined case, so disable inlining in this case. */
9318 goto inline_failure;
9320 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9321 ins->dreg = alloc_preg (cfg);
9322 ins->sreg1 = sp [0]->dreg;
9323 ins->type = STACK_PTR;
9324 MONO_ADD_INS (cfg->cbb, ins);
9326 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9327 if (header->init_locals)
9328 ins->flags |= MONO_INST_INIT;
9330 *sp++ = ins;
9331 ip += 2;
9332 break;
9333 case CEE_ENDFILTER: {
9334 MonoExceptionClause *clause, *nearest;
9335 int cc, nearest_num;
9337 CHECK_STACK (1);
9338 --sp;
9339 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9340 UNVERIFIED;
9341 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9342 ins->sreg1 = (*sp)->dreg;
9343 MONO_ADD_INS (bblock, ins);
9344 start_new_bblock = 1;
9345 ip += 2;
9347 nearest = NULL;
9348 nearest_num = 0;
9349 for (cc = 0; cc < header->num_clauses; ++cc) {
9350 clause = &header->clauses [cc];
9351 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9352 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9353 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9354 nearest = clause;
9355 nearest_num = cc;
9358 g_assert (nearest);
9359 if ((ip - header->code) != nearest->handler_offset)
9360 UNVERIFIED;
9362 break;
9364 case CEE_UNALIGNED_:
9365 ins_flag |= MONO_INST_UNALIGNED;
9366 /* FIXME: record alignment? we can assume 1 for now */
9367 CHECK_OPSIZE (3);
9368 ip += 3;
9369 break;
9370 case CEE_VOLATILE_:
9371 ins_flag |= MONO_INST_VOLATILE;
9372 ip += 2;
9373 break;
9374 case CEE_TAIL_:
9375 ins_flag |= MONO_INST_TAILCALL;
9376 cfg->flags |= MONO_CFG_HAS_TAIL;
9377 /* Can't inline tail calls at this time */
9378 inline_costs += 100000;
9379 ip += 2;
9380 break;
9381 case CEE_INITOBJ:
9382 CHECK_STACK (1);
9383 --sp;
9384 CHECK_OPSIZE (6);
9385 token = read32 (ip + 2);
9386 klass = mini_get_class (method, token, generic_context);
9387 CHECK_TYPELOAD (klass);
9388 if (generic_class_is_reference_type (cfg, klass))
9389 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9390 else
9391 mini_emit_initobj (cfg, *sp, NULL, klass);
9392 ip += 6;
9393 inline_costs += 1;
9394 break;
9395 case CEE_CONSTRAINED_:
9396 CHECK_OPSIZE (6);
9397 token = read32 (ip + 2);
9398 constrained_call = mono_class_get_full (image, token, generic_context);
9399 CHECK_TYPELOAD (constrained_call);
9400 ip += 6;
9401 break;
9402 case CEE_CPBLK:
9403 case CEE_INITBLK: {
9404 MonoInst *iargs [3];
9405 CHECK_STACK (3);
9406 sp -= 3;
9408 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9409 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9410 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9411 /* emit_memset only works when val == 0 */
9412 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9413 } else {
9414 iargs [0] = sp [0];
9415 iargs [1] = sp [1];
9416 iargs [2] = sp [2];
9417 if (ip [1] == CEE_CPBLK) {
9418 MonoMethod *memcpy_method = get_memcpy_method ();
9419 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9420 } else {
9421 MonoMethod *memset_method = get_memset_method ();
9422 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9425 ip += 2;
9426 inline_costs += 1;
9427 break;
9429 case CEE_NO_:
9430 CHECK_OPSIZE (3);
9431 if (ip [2] & 0x1)
9432 ins_flag |= MONO_INST_NOTYPECHECK;
9433 if (ip [2] & 0x2)
9434 ins_flag |= MONO_INST_NORANGECHECK;
9435 /* we ignore the no-nullcheck for now since we
9436 * really do it explicitly only when doing callvirt->call */
9438 ip += 3;
9439 break;
9440 case CEE_RETHROW: {
9441 MonoInst *load;
9442 int handler_offset = -1;
9444 for (i = 0; i < header->num_clauses; ++i) {
9445 MonoExceptionClause *clause = &header->clauses [i];
9446 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9447 handler_offset = clause->handler_offset;
9448 break;
9452 bblock->flags |= BB_EXCEPTION_UNSAFE;
9454 g_assert (handler_offset != -1);
9456 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9457 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9458 ins->sreg1 = load->dreg;
9459 MONO_ADD_INS (bblock, ins);
9460 sp = stack_start;
9461 link_bblock (cfg, bblock, end_bblock);
9462 start_new_bblock = 1;
9463 ip += 2;
9464 break;
9466 case CEE_SIZEOF: {
9467 guint32 align;
9468 int ialign;
9470 CHECK_STACK_OVF (1);
9471 CHECK_OPSIZE (6);
9472 token = read32 (ip + 2);
9473 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9474 MonoType *type = mono_type_create_from_typespec (image, token);
9475 token = mono_type_size (type, &ialign);
9476 } else {
9477 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9478 CHECK_TYPELOAD (klass);
9479 mono_class_init (klass);
9480 token = mono_class_value_size (klass, &align);
9482 EMIT_NEW_ICONST (cfg, ins, token);
9483 *sp++= ins;
9484 ip += 6;
9485 break;
9487 case CEE_REFANYTYPE: {
9488 MonoInst *src_var, *src;
9490 CHECK_STACK (1);
9491 --sp;
9493 // FIXME:
9494 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9495 if (!src_var)
9496 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9497 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9498 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9499 *sp++ = ins;
9500 ip += 2;
9501 break;
9503 case CEE_READONLY_:
9504 readonly = TRUE;
9505 ip += 2;
9506 break;
9507 default:
9508 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9510 break;
9512 default:
9513 g_error ("opcode 0x%02x not handled", *ip);
9516 if (start_new_bblock != 1)
9517 UNVERIFIED;
9519 bblock->cil_length = ip - bblock->cil_code;
9520 bblock->next_bb = end_bblock;
9522 if (cfg->method == method && cfg->domainvar) {
9523 MonoInst *store;
9524 MonoInst *get_domain;
9526 cfg->cbb = init_localsbb;
9528 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9529 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9531 else {
9532 get_domain->dreg = alloc_preg (cfg);
9533 MONO_ADD_INS (cfg->cbb, get_domain);
9535 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9536 MONO_ADD_INS (cfg->cbb, store);
9539 if (cfg->method == method && cfg->got_var)
9540 mono_emit_load_got_addr (cfg);
9542 if (header->init_locals) {
9543 MonoInst *store;
9545 cfg->cbb = init_localsbb;
9546 cfg->ip = NULL;
9547 for (i = 0; i < header->num_locals; ++i) {
9548 MonoType *ptype = header->locals [i];
9549 int t = ptype->type;
9550 dreg = cfg->locals [i]->dreg;
9552 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9553 t = mono_class_enum_basetype (ptype->data.klass)->type;
9554 if (ptype->byref) {
9555 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9556 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9557 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9558 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9559 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9560 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9561 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9562 ins->type = STACK_R8;
9563 ins->inst_p0 = (void*)&r8_0;
9564 ins->dreg = alloc_dreg (cfg, STACK_R8);
9565 MONO_ADD_INS (init_localsbb, ins);
9566 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9567 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9568 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9569 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9570 } else {
9571 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9576 cfg->ip = NULL;
9578 if (cfg->method == method) {
9579 MonoBasicBlock *bb;
9580 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9581 bb->region = mono_find_block_region (cfg, bb->real_offset);
9582 if (cfg->spvars)
9583 mono_create_spvar_for_region (cfg, bb->region);
9584 if (cfg->verbose_level > 2)
9585 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9589 g_slist_free (class_inits);
9590 dont_inline = g_list_remove (dont_inline, method);
9592 if (inline_costs < 0) {
9593 char *mname;
9595 /* Method is too large */
9596 mname = mono_method_full_name (method, TRUE);
9597 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9598 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9599 g_free (mname);
9600 return -1;
9603 if ((cfg->verbose_level > 2) && (cfg->method == method))
9604 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9606 return inline_costs;
9608 exception_exit:
9609 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9610 g_slist_free (class_inits);
9611 dont_inline = g_list_remove (dont_inline, method);
9612 return -1;
9614 inline_failure:
9615 g_slist_free (class_inits);
9616 dont_inline = g_list_remove (dont_inline, method);
9617 return -1;
9619 load_error:
9620 g_slist_free (class_inits);
9621 dont_inline = g_list_remove (dont_inline, method);
9622 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9623 return -1;
9625 unverified:
9626 g_slist_free (class_inits);
9627 dont_inline = g_list_remove (dont_inline, method);
9628 set_exception_type_from_invalid_il (cfg, method, ip);
9629 return -1;
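/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a _MEMBASE_REG store opcode to its _MEMBASE_IMM variant so that a
 * constant source can be stored as an immediate. Asserts if OPCODE has no
 * immediate form.
 */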
9632 static int
9633 store_membase_reg_to_store_membase_imm (int opcode)
9635 switch (opcode) {
9636 case OP_STORE_MEMBASE_REG:
9637 return OP_STORE_MEMBASE_IMM;
9638 case OP_STOREI1_MEMBASE_REG:
9639 return OP_STOREI1_MEMBASE_IMM;
9640 case OP_STOREI2_MEMBASE_REG:
9641 return OP_STOREI2_MEMBASE_IMM;
9642 case OP_STOREI4_MEMBASE_REG:
9643 return OP_STOREI4_MEMBASE_IMM;
9644 case OP_STOREI8_MEMBASE_REG:
9645 return OP_STOREI8_MEMBASE_IMM;
9646 default:
9647 g_assert_not_reached ();
9650 return -1;
9653 #endif /* DISABLE_JIT */
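/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate instead of a second
 * source register (e.g. OP_IADD -> OP_IADD_IMM), or -1 if no immediate form
 * exists.
 */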
9655 int
9656 mono_op_to_op_imm (int opcode)
9658 switch (opcode) {
9659 case OP_IADD:
9660 return OP_IADD_IMM;
9661 case OP_ISUB:
9662 return OP_ISUB_IMM;
9663 case OP_IDIV:
9664 return OP_IDIV_IMM;
9665 case OP_IDIV_UN:
9666 return OP_IDIV_UN_IMM;
9667 case OP_IREM:
9668 return OP_IREM_IMM;
9669 case OP_IREM_UN:
9670 return OP_IREM_UN_IMM;
9671 case OP_IMUL:
9672 return OP_IMUL_IMM;
9673 case OP_IAND:
9674 return OP_IAND_IMM;
9675 case OP_IOR:
9676 return OP_IOR_IMM;
9677 case OP_IXOR:
9678 return OP_IXOR_IMM;
9679 case OP_ISHL:
9680 return OP_ISHL_IMM;
9681 case OP_ISHR:
9682 return OP_ISHR_IMM;
9683 case OP_ISHR_UN:
9684 return OP_ISHR_UN_IMM;
9686 case OP_LADD:
9687 return OP_LADD_IMM;
9688 case OP_LSUB:
9689 return OP_LSUB_IMM;
9690 case OP_LAND:
9691 return OP_LAND_IMM;
9692 case OP_LOR:
9693 return OP_LOR_IMM;
9694 case OP_LXOR:
9695 return OP_LXOR_IMM;
9696 case OP_LSHL:
9697 return OP_LSHL_IMM;
9698 case OP_LSHR:
9699 return OP_LSHR_IMM;
9700 case OP_LSHR_UN:
9701 return OP_LSHR_UN_IMM;
9703 case OP_COMPARE:
9704 return OP_COMPARE_IMM;
9705 case OP_ICOMPARE:
9706 return OP_ICOMPARE_IMM;
9707 case OP_LCOMPARE:
9708 return OP_LCOMPARE_IMM;
9710 case OP_STORE_MEMBASE_REG:
9711 return OP_STORE_MEMBASE_IMM;
9712 case OP_STOREI1_MEMBASE_REG:
9713 return OP_STOREI1_MEMBASE_IMM;
9714 case OP_STOREI2_MEMBASE_REG:
9715 return OP_STOREI2_MEMBASE_IMM;
9716 case OP_STOREI4_MEMBASE_REG:
9717 return OP_STOREI4_MEMBASE_IMM;
9719 #if defined(__i386__) || defined (__x86_64__)
9720 case OP_X86_PUSH:
9721 return OP_X86_PUSH_IMM;
9722 case OP_X86_COMPARE_MEMBASE_REG:
9723 return OP_X86_COMPARE_MEMBASE_IMM;
9724 #endif
9725 #if defined(__x86_64__)
9726 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9727 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9728 #endif
9729 case OP_VOIDCALL_REG:
9730 return OP_VOIDCALL;
9731 case OP_CALL_REG:
9732 return OP_CALL;
9733 case OP_LCALL_REG:
9734 return OP_LCALL;
9735 case OP_FCALL_REG:
9736 return OP_FCALL;
9737 case OP_LOCALLOC:
9738 return OP_LOCALLOC_IMM;
9741 return -1;
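/* Map a CEE_LDIND_* opcode to the corresponding OP_LOAD*_MEMBASE opcode. */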
9744 static int
9745 ldind_to_load_membase (int opcode)
9747 switch (opcode) {
9748 case CEE_LDIND_I1:
9749 return OP_LOADI1_MEMBASE;
9750 case CEE_LDIND_U1:
9751 return OP_LOADU1_MEMBASE;
9752 case CEE_LDIND_I2:
9753 return OP_LOADI2_MEMBASE;
9754 case CEE_LDIND_U2:
9755 return OP_LOADU2_MEMBASE;
9756 case CEE_LDIND_I4:
9757 return OP_LOADI4_MEMBASE;
9758 case CEE_LDIND_U4:
9759 return OP_LOADU4_MEMBASE;
9760 case CEE_LDIND_I:
9761 return OP_LOAD_MEMBASE;
9762 case CEE_LDIND_REF:
9763 return OP_LOAD_MEMBASE;
9764 case CEE_LDIND_I8:
9765 return OP_LOADI8_MEMBASE;
9766 case CEE_LDIND_R4:
9767 return OP_LOADR4_MEMBASE;
9768 case CEE_LDIND_R8:
9769 return OP_LOADR8_MEMBASE;
9770 default:
9771 g_assert_not_reached ();
9774 return -1;
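/* Map a CEE_STIND_* opcode to the corresponding OP_STORE*_MEMBASE_REG opcode. */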
9777 static int
9778 stind_to_store_membase (int opcode)
9780 switch (opcode) {
9781 case CEE_STIND_I1:
9782 return OP_STOREI1_MEMBASE_REG;
9783 case CEE_STIND_I2:
9784 return OP_STOREI2_MEMBASE_REG;
9785 case CEE_STIND_I4:
9786 return OP_STOREI4_MEMBASE_REG;
9787 case CEE_STIND_I:
9788 case CEE_STIND_REF:
9789 return OP_STORE_MEMBASE_REG;
9790 case CEE_STIND_I8:
9791 return OP_STOREI8_MEMBASE_REG;
9792 case CEE_STIND_R4:
9793 return OP_STORER4_MEMBASE_REG;
9794 case CEE_STIND_R8:
9795 return OP_STORER8_MEMBASE_REG;
9796 default:
9797 g_assert_not_reached ();
9800 return -1;
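/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the _MEM variant of a _MEMBASE load, i.e. a load from an absolute
 * address instead of basereg+offset. Only provided for x86/amd64 here; -1 is
 * returned on other architectures.
 */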
9803 int
9804 mono_load_membase_to_load_mem (int opcode)
9806 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9807 #if defined(__i386__) || defined(__x86_64__)
9808 switch (opcode) {
9809 case OP_LOAD_MEMBASE:
9810 return OP_LOAD_MEM;
9811 case OP_LOADU1_MEMBASE:
9812 return OP_LOADU1_MEM;
9813 case OP_LOADU2_MEMBASE:
9814 return OP_LOADU2_MEM;
9815 case OP_LOADI4_MEMBASE:
9816 return OP_LOADI4_MEM;
9817 case OP_LOADU4_MEMBASE:
9818 return OP_LOADU4_MEM;
9819 #if SIZEOF_REGISTER == 8
9820 case OP_LOADI8_MEMBASE:
9821 return OP_LOADI8_MEM;
9822 #endif
9824 #endif
9826 return -1;
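/*
 * op_to_op_dest_membase:
 *
 *   Return an x86/amd64 opcode which performs OPCODE directly on a memory
 * destination, so that a read-modify-write of a stack slot (whose store would
 * use STORE_OPCODE) can be fused into a single instruction. Returns -1 if no
 * such form exists.
 */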
9829 static inline int
9830 op_to_op_dest_membase (int store_opcode, int opcode)
9832 #if defined(__i386__)
9833 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9834 return -1;
9836 switch (opcode) {
9837 case OP_IADD:
9838 return OP_X86_ADD_MEMBASE_REG;
9839 case OP_ISUB:
9840 return OP_X86_SUB_MEMBASE_REG;
9841 case OP_IAND:
9842 return OP_X86_AND_MEMBASE_REG;
9843 case OP_IOR:
9844 return OP_X86_OR_MEMBASE_REG;
9845 case OP_IXOR:
9846 return OP_X86_XOR_MEMBASE_REG;
9847 case OP_ADD_IMM:
9848 case OP_IADD_IMM:
9849 return OP_X86_ADD_MEMBASE_IMM;
9850 case OP_SUB_IMM:
9851 case OP_ISUB_IMM:
9852 return OP_X86_SUB_MEMBASE_IMM;
9853 case OP_AND_IMM:
9854 case OP_IAND_IMM:
9855 return OP_X86_AND_MEMBASE_IMM;
9856 case OP_OR_IMM:
9857 case OP_IOR_IMM:
9858 return OP_X86_OR_MEMBASE_IMM;
9859 case OP_XOR_IMM:
9860 case OP_IXOR_IMM:
9861 return OP_X86_XOR_MEMBASE_IMM;
9862 case OP_MOVE:
9863 return OP_NOP;
9865 #endif
9867 #if defined(__x86_64__)
9868 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9869 return -1;
9871 switch (opcode) {
9872 case OP_IADD:
9873 return OP_X86_ADD_MEMBASE_REG;
9874 case OP_ISUB:
9875 return OP_X86_SUB_MEMBASE_REG;
9876 case OP_IAND:
9877 return OP_X86_AND_MEMBASE_REG;
9878 case OP_IOR:
9879 return OP_X86_OR_MEMBASE_REG;
9880 case OP_IXOR:
9881 return OP_X86_XOR_MEMBASE_REG;
9882 case OP_IADD_IMM:
9883 return OP_X86_ADD_MEMBASE_IMM;
9884 case OP_ISUB_IMM:
9885 return OP_X86_SUB_MEMBASE_IMM;
9886 case OP_IAND_IMM:
9887 return OP_X86_AND_MEMBASE_IMM;
9888 case OP_IOR_IMM:
9889 return OP_X86_OR_MEMBASE_IMM;
9890 case OP_IXOR_IMM:
9891 return OP_X86_XOR_MEMBASE_IMM;
9892 case OP_LADD:
9893 return OP_AMD64_ADD_MEMBASE_REG;
9894 case OP_LSUB:
9895 return OP_AMD64_SUB_MEMBASE_REG;
9896 case OP_LAND:
9897 return OP_AMD64_AND_MEMBASE_REG;
9898 case OP_LOR:
9899 return OP_AMD64_OR_MEMBASE_REG;
9900 case OP_LXOR:
9901 return OP_AMD64_XOR_MEMBASE_REG;
9902 case OP_ADD_IMM:
9903 case OP_LADD_IMM:
9904 return OP_AMD64_ADD_MEMBASE_IMM;
9905 case OP_SUB_IMM:
9906 case OP_LSUB_IMM:
9907 return OP_AMD64_SUB_MEMBASE_IMM;
9908 case OP_AND_IMM:
9909 case OP_LAND_IMM:
9910 return OP_AMD64_AND_MEMBASE_IMM;
9911 case OP_OR_IMM:
9912 case OP_LOR_IMM:
9913 return OP_AMD64_OR_MEMBASE_IMM;
9914 case OP_XOR_IMM:
9915 case OP_LXOR_IMM:
9916 return OP_AMD64_XOR_MEMBASE_IMM;
9917 case OP_MOVE:
9918 return OP_NOP;
9920 #endif
9922 return -1;
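/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which computes OPCODE and writes the result directly to
 * memory (e.g. a setcc into a byte slot stored with STORE_OPCODE), or -1 if
 * no fused form exists.
 */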
9925 static inline int
9926 op_to_op_store_membase (int store_opcode, int opcode)
9928 #if defined(__i386__) || defined(__x86_64__)
9929 switch (opcode) {
9930 case OP_ICEQ:
9931 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9932 return OP_X86_SETEQ_MEMBASE;
9933 case OP_CNE:
9934 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9935 return OP_X86_SETNE_MEMBASE;
9937 #endif
9939 return -1;
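/*
 * op_to_op_src1_membase:
 *
 *   Return a variant of OPCODE which reads its first source operand directly
 * from memory (the value otherwise produced by LOAD_OPCODE), so the separate
 * load can be dropped. Returns -1 if the load can't be fused.
 */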
9942 static inline int
9943 op_to_op_src1_membase (int load_opcode, int opcode)
9945 #ifdef __i386__
9946 /* FIXME: This has sign extension issues */
9948 /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9949 return OP_X86_COMPARE_MEMBASE8_IMM; */
9952 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9953 return -1;
9955 switch (opcode) {
9956 case OP_X86_PUSH:
9957 return OP_X86_PUSH_MEMBASE;
9958 case OP_COMPARE_IMM:
9959 case OP_ICOMPARE_IMM:
9960 return OP_X86_COMPARE_MEMBASE_IMM;
9961 case OP_COMPARE:
9962 case OP_ICOMPARE:
9963 return OP_X86_COMPARE_MEMBASE_REG;
9965 #endif
9967 #ifdef __x86_64__
9968 /* FIXME: This has sign extension issues */
9970 /* if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9971 return OP_X86_COMPARE_MEMBASE8_IMM; */
9974 switch (opcode) {
9975 case OP_X86_PUSH:
9976 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9977 return OP_X86_PUSH_MEMBASE;
9978 break;
9979 /* FIXME: This only works for 32 bit immediates
9980 case OP_COMPARE_IMM:
9981 case OP_LCOMPARE_IMM:
9982 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9983 return OP_AMD64_COMPARE_MEMBASE_IMM; */
9985 case OP_ICOMPARE_IMM:
9986 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9987 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9988 break;
9989 case OP_COMPARE:
9990 case OP_LCOMPARE:
9991 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9992 return OP_AMD64_COMPARE_MEMBASE_REG;
9993 break;
9994 case OP_ICOMPARE:
9995 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9996 return OP_AMD64_ICOMPARE_MEMBASE_REG;
9997 break;
9999 #endif
10001 return -1;
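/* Same as op_to_op_src1_membase (), but fuse the load feeding the second source operand. */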
10004 static inline int
10005 op_to_op_src2_membase (int load_opcode, int opcode)
10007 #ifdef __i386__
10008 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10009 return -1;
10011 switch (opcode) {
10012 case OP_COMPARE:
10013 case OP_ICOMPARE:
10014 return OP_X86_COMPARE_REG_MEMBASE;
10015 case OP_IADD:
10016 return OP_X86_ADD_REG_MEMBASE;
10017 case OP_ISUB:
10018 return OP_X86_SUB_REG_MEMBASE;
10019 case OP_IAND:
10020 return OP_X86_AND_REG_MEMBASE;
10021 case OP_IOR:
10022 return OP_X86_OR_REG_MEMBASE;
10023 case OP_IXOR:
10024 return OP_X86_XOR_REG_MEMBASE;
10026 #endif
10028 #ifdef __x86_64__
10029 switch (opcode) {
10030 case OP_ICOMPARE:
10031 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10032 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10033 break;
10034 case OP_COMPARE:
10035 case OP_LCOMPARE:
10036 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10037 return OP_AMD64_COMPARE_REG_MEMBASE;
10038 break;
10039 case OP_IADD:
10040 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10041 return OP_X86_ADD_REG_MEMBASE;
10042 case OP_ISUB:
10043 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10044 return OP_X86_SUB_REG_MEMBASE;
10045 case OP_IAND:
10046 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10047 return OP_X86_AND_REG_MEMBASE;
10048 case OP_IOR:
10049 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10050 return OP_X86_OR_REG_MEMBASE;
10051 case OP_IXOR:
10052 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10053 return OP_X86_XOR_REG_MEMBASE;
10054 case OP_LADD:
10055 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10056 return OP_AMD64_ADD_REG_MEMBASE;
10057 case OP_LSUB:
10058 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10059 return OP_AMD64_SUB_REG_MEMBASE;
10060 case OP_LAND:
10061 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10062 return OP_AMD64_AND_REG_MEMBASE;
10063 case OP_LOR:
10064 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10065 return OP_AMD64_OR_REG_MEMBASE;
10066 case OP_LXOR:
10067 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10068 return OP_AMD64_XOR_REG_MEMBASE;
10070 #endif
10072 return -1;
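/*
 * mono_op_to_op_imm_noemul:
 *
 *   Same as mono_op_to_op_imm (), but return -1 for opcodes which are emulated
 * in software on this configuration (long shifts on 32 bit targets, integer
 * division/remainder when MONO_ARCH_EMULATE_MUL_DIV or MONO_ARCH_EMULATE_DIV
 * is defined).
 */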
10075 int
10076 mono_op_to_op_imm_noemul (int opcode)
10078 switch (opcode) {
10079 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10080 case OP_LSHR:
10081 case OP_LSHL:
10082 case OP_LSHR_UN:
10083 #endif
10084 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10085 case OP_IDIV:
10086 case OP_IDIV_UN:
10087 case OP_IREM:
10088 case OP_IREM_UN:
10089 #endif
10090 return -1;
10091 default:
10092 return mono_op_to_op_imm (opcode);
10096 #ifndef DISABLE_JIT
10099 /* mono_handle_global_vregs:
10101 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10102 * for them. */
10104 void
10105 mono_handle_global_vregs (MonoCompile *cfg)
10107 gint32 *vreg_to_bb;
10108 MonoBasicBlock *bb;
10109 int i, pos;
10111 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10113 #ifdef MONO_ARCH_SIMD_INTRINSICS
10114 if (cfg->uses_simd_intrinsics)
10115 mono_simd_simplify_indirection (cfg);
10116 #endif
10118 /* Find local vregs used in more than one bb */
10119 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10120 MonoInst *ins = bb->code;
10121 int block_num = bb->block_num;
10123 if (cfg->verbose_level > 2)
10124 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10126 cfg->cbb = bb;
10127 for (; ins; ins = ins->next) {
10128 const char *spec = INS_INFO (ins->opcode);
10129 int regtype, regindex;
10130 gint32 prev_bb;
10132 if (G_UNLIKELY (cfg->verbose_level > 2))
10133 mono_print_ins (ins);
10135 g_assert (ins->opcode >= MONO_CEE_LAST);
10137 for (regindex = 0; regindex < 4; regindex ++) {
10138 int vreg;
10140 if (regindex == 0) {
10141 regtype = spec [MONO_INST_DEST];
10142 if (regtype == ' ')
10143 continue;
10144 vreg = ins->dreg;
10145 } else if (regindex == 1) {
10146 regtype = spec [MONO_INST_SRC1];
10147 if (regtype == ' ')
10148 continue;
10149 vreg = ins->sreg1;
10150 } else if (regindex == 2) {
10151 regtype = spec [MONO_INST_SRC2];
10152 if (regtype == ' ')
10153 continue;
10154 vreg = ins->sreg2;
10155 } else if (regindex == 3) {
10156 regtype = spec [MONO_INST_SRC3];
10157 if (regtype == ' ')
10158 continue;
10159 vreg = ins->sreg3;
10162 #if SIZEOF_REGISTER == 4
10163 if (regtype == 'l') {
10165 /* Since some instructions reference the original long vreg,
10166 * and some reference the two component vregs, it is quite hard
10167 * to determine when it needs to be global. So be conservative. */
10169 if (!get_vreg_to_inst (cfg, vreg)) {
10170 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10172 if (cfg->verbose_level > 2)
10173 printf ("LONG VREG R%d made global.\n", vreg);
10177 /* Make the component vregs volatile since the optimizations can
10178 * get confused otherwise. */
10180 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10181 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10183 #endif
10185 g_assert (vreg != -1);
10187 prev_bb = vreg_to_bb [vreg];
10188 if (prev_bb == 0) {
10189 /* 0 is a valid block num */
10190 vreg_to_bb [vreg] = block_num + 1;
10191 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10192 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10193 continue;
10195 if (!get_vreg_to_inst (cfg, vreg)) {
10196 if (G_UNLIKELY (cfg->verbose_level > 2))
10197 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10199 switch (regtype) {
10200 case 'i':
10201 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10202 break;
10203 case 'f':
10204 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10205 break;
10206 case 'v':
10207 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10208 break;
10209 default:
10210 g_assert_not_reached ();
10214 /* Flag as having been used in more than one bb */
10215 vreg_to_bb [vreg] = -1;
10221 /* If a variable is used in only one bblock, convert it into a local vreg */
10222 for (i = 0; i < cfg->num_varinfo; i++) {
10223 MonoInst *var = cfg->varinfo [i];
10224 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10226 switch (var->type) {
10227 case STACK_I4:
10228 case STACK_OBJ:
10229 case STACK_PTR:
10230 case STACK_MP:
10231 case STACK_VTYPE:
10232 #if SIZEOF_REGISTER == 8
10233 case STACK_I8:
10234 #endif
10235 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10236 /* Enabling this screws up the fp stack on x86 */
10237 case STACK_R8:
10238 #endif
10239 /* Arguments are implicitly global */
10240 /* Putting R4 vars into registers doesn't work currently */
10241 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10243 /* Make sure that the variable's liveness interval doesn't contain a call, since
10244 * that would cause the lvreg to be spilled, making the whole optimization
10245 * useless. */
10247 /* This is too slow for JIT compilation */
10248 #if 0
10249 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10250 MonoInst *ins;
10251 int def_index, call_index, ins_index;
10252 gboolean spilled = FALSE;
10254 def_index = -1;
10255 call_index = -1;
10256 ins_index = 0;
10257 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10258 const char *spec = INS_INFO (ins->opcode);
10260 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10261 def_index = ins_index;
10263 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10264 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10265 if (call_index > def_index) {
10266 spilled = TRUE;
10267 break;
10271 if (MONO_IS_CALL (ins))
10272 call_index = ins_index;
10274 ins_index ++;
10277 if (spilled)
10278 break;
10280 #endif
10282 if (G_UNLIKELY (cfg->verbose_level > 2))
10283 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10284 var->flags |= MONO_INST_IS_DEAD;
10285 cfg->vreg_to_inst [var->dreg] = NULL;
10287 break;
10292 /* Compress the varinfo and vars tables so the liveness computation is faster and
10293 * takes up less space. */
10295 pos = 0;
10296 for (i = 0; i < cfg->num_varinfo; ++i) {
10297 MonoInst *var = cfg->varinfo [i];
10298 if (pos < i && cfg->locals_start == i)
10299 cfg->locals_start = pos;
10300 if (!(var->flags & MONO_INST_IS_DEAD)) {
10301 if (pos < i) {
10302 cfg->varinfo [pos] = cfg->varinfo [i];
10303 cfg->varinfo [pos]->inst_c0 = pos;
10304 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10305 cfg->vars [pos].idx = pos;
10306 #if SIZEOF_REGISTER == 4
10307 if (cfg->varinfo [pos]->type == STACK_I8) {
10308 /* Modify the two component vars too */
10309 MonoInst *var1;
10311 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10312 var1->inst_c0 = pos;
10313 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10314 var1->inst_c0 = pos;
10316 #endif
10318 pos ++;
10321 cfg->num_varinfo = pos;
10322 if (cfg->locals_start > cfg->num_varinfo)
10323 cfg->locals_start = cfg->num_varinfo;
10327 /* mono_spill_global_vars:
10329 * Generate spill code for variables which are not allocated to registers,
10330 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10331 * code is generated which could be optimized by the local optimization passes. */
10333 void
10334 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10336 MonoBasicBlock *bb;
10337 char spec2 [16];
10338 int orig_next_vreg;
10339 guint32 *vreg_to_lvreg;
10340 guint32 *lvregs;
10341 guint32 i, lvregs_len;
10342 gboolean dest_has_lvreg = FALSE;
10343 guint32 stacktypes [128];
10344 MonoInst **live_range_start, **live_range_end;
10345 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10347 *need_local_opts = FALSE;
10349 memset (spec2, 0, sizeof (spec2));
10351 /* FIXME: Move this function to mini.c */
10352 stacktypes ['i'] = STACK_PTR;
10353 stacktypes ['l'] = STACK_I8;
10354 stacktypes ['f'] = STACK_R8;
10355 #ifdef MONO_ARCH_SIMD_INTRINSICS
10356 stacktypes ['x'] = STACK_VTYPE;
10357 #endif
10359 #if SIZEOF_REGISTER == 4
10360 /* Create MonoInsts for longs */
10361 for (i = 0; i < cfg->num_varinfo; i++) {
10362 MonoInst *ins = cfg->varinfo [i];
10364 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10365 switch (ins->type) {
10366 #ifdef MONO_ARCH_SOFT_FLOAT
10367 case STACK_R8:
10368 #endif
10369 case STACK_I8: {
10370 MonoInst *tree;
10372 g_assert (ins->opcode == OP_REGOFFSET);
10374 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10375 g_assert (tree);
10376 tree->opcode = OP_REGOFFSET;
10377 tree->inst_basereg = ins->inst_basereg;
10378 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10380 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10381 g_assert (tree);
10382 tree->opcode = OP_REGOFFSET;
10383 tree->inst_basereg = ins->inst_basereg;
10384 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10385 break;
10387 default:
10388 break;
10392 #endif
10394 /* FIXME: widening and truncation */
10397 /* As an optimization, when a variable allocated to the stack is first loaded into
10398 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10399 * the variable again. */
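/*
 * For example (a hypothetical IR fragment, not taken from a real method):
 *
 *     loadi4_membase R10 <- [fp + 0x10]   (first use of the stack variable)
 *     int_add R11 <- R10 R12
 *     int_add R13 <- R10 R14              (R10 is reused, no second load)
 *
 * vreg_to_lvreg records the R10 mapping; it is flushed at the start of each
 * bblock and across calls below.
 */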
10401 orig_next_vreg = cfg->next_vreg;
10402 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10403 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10404 lvregs_len = 0;
10407 /* These arrays contain the first and last instructions accessing a given
10408 * variable.
10409 * Since we emit bblocks in the same order we process them here, and we
10410 * don't split live ranges, these will precisely describe the live range of
10411 * the variable, i.e. the instruction range where a valid value can be found
10412 * in the variable's location. */
10414 /* FIXME: Only do this if debugging info is requested */
10415 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10416 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10417 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10418 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10420 /* Add spill loads/stores */
10421 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10422 MonoInst *ins;
10424 if (cfg->verbose_level > 2)
10425 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10427 /* Clear vreg_to_lvreg array */
10428 for (i = 0; i < lvregs_len; i++)
10429 vreg_to_lvreg [lvregs [i]] = 0;
10430 lvregs_len = 0;
10432 cfg->cbb = bb;
10433 MONO_BB_FOR_EACH_INS (bb, ins) {
10434 const char *spec = INS_INFO (ins->opcode);
10435 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10436 gboolean store, no_lvreg;
10437 int sregs [MONO_MAX_SRC_REGS];
10439 if (G_UNLIKELY (cfg->verbose_level > 2))
10440 mono_print_ins (ins);
10442 if (ins->opcode == OP_NOP)
10443 continue;
10446 /* We handle LDADDR here as well, since it can only be decomposed
10447 * when variable addresses are known. */
10449 if (ins->opcode == OP_LDADDR) {
10450 MonoInst *var = ins->inst_p0;
10452 if (var->opcode == OP_VTARG_ADDR) {
10453 /* Happens on SPARC/S390 where vtypes are passed by reference */
10454 MonoInst *vtaddr = var->inst_left;
10455 if (vtaddr->opcode == OP_REGVAR) {
10456 ins->opcode = OP_MOVE;
10457 ins->sreg1 = vtaddr->dreg;
10459 else if (var->inst_left->opcode == OP_REGOFFSET) {
10460 ins->opcode = OP_LOAD_MEMBASE;
10461 ins->inst_basereg = vtaddr->inst_basereg;
10462 ins->inst_offset = vtaddr->inst_offset;
10463 } else
10464 NOT_IMPLEMENTED;
10465 } else {
10466 g_assert (var->opcode == OP_REGOFFSET);
10468 ins->opcode = OP_ADD_IMM;
10469 ins->sreg1 = var->inst_basereg;
10470 ins->inst_imm = var->inst_offset;
10473 *need_local_opts = TRUE;
10474 spec = INS_INFO (ins->opcode);
10477 if (ins->opcode < MONO_CEE_LAST) {
10478 mono_print_ins (ins);
10479 g_assert_not_reached ();
10483 /* Store opcodes have destbasereg in the dreg, but in reality, it is an
10484 * src register.
10485 * FIXME: */
10487 if (MONO_IS_STORE_MEMBASE (ins)) {
10488 tmp_reg = ins->dreg;
10489 ins->dreg = ins->sreg2;
10490 ins->sreg2 = tmp_reg;
10491 store = TRUE;
10493 spec2 [MONO_INST_DEST] = ' ';
10494 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10495 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10496 spec2 [MONO_INST_SRC3] = ' ';
10497 spec = spec2;
10498 } else if (MONO_IS_STORE_MEMINDEX (ins))
10499 g_assert_not_reached ();
10500 else
10501 store = FALSE;
10502 no_lvreg = FALSE;
10504 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10505 printf ("\t %.3s %d", spec, ins->dreg);
10506 num_sregs = mono_inst_get_src_registers (ins, sregs);
10507 for (srcindex = 0; srcindex < 3; ++srcindex)
10508 printf (" %d", sregs [srcindex]);
10509 printf ("\n");
10512 /***************/
10513 /* DREG */
10514 /***************/
10515 regtype = spec [MONO_INST_DEST];
10516 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10517 prev_dreg = -1;
10519 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10520 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10521 MonoInst *store_ins;
10522 int store_opcode;
10523 MonoInst *def_ins = ins;
10524 int dreg = ins->dreg; /* The original vreg */
10526 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10528 if (var->opcode == OP_REGVAR) {
10529 ins->dreg = var->dreg;
10530 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10532 /* Instead of emitting a load+store, use a _membase opcode. */
10534 g_assert (var->opcode == OP_REGOFFSET);
10535 if (ins->opcode == OP_MOVE) {
10536 NULLIFY_INS (ins);
10537 def_ins = NULL;
10538 } else {
10539 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10540 ins->inst_basereg = var->inst_basereg;
10541 ins->inst_offset = var->inst_offset;
10542 ins->dreg = -1;
10544 spec = INS_INFO (ins->opcode);
10545 } else {
10546 guint32 lvreg;
10548 g_assert (var->opcode == OP_REGOFFSET);
10550 prev_dreg = ins->dreg;
10552 /* Invalidate any previous lvreg for this vreg */
10553 vreg_to_lvreg [ins->dreg] = 0;
10555 lvreg = 0;
10557 #ifdef MONO_ARCH_SOFT_FLOAT
10558 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10559 regtype = 'l';
10560 store_opcode = OP_STOREI8_MEMBASE_REG;
10562 #endif
10564 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10566 if (regtype == 'l') {
10567 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10568 mono_bblock_insert_after_ins (bb, ins, store_ins);
10569 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10570 mono_bblock_insert_after_ins (bb, ins, store_ins);
10571 def_ins = store_ins;
10573 else {
10574 g_assert (store_opcode != OP_STOREV_MEMBASE);
10576 /* Try to fuse the store into the instruction itself */
10577 /* FIXME: Add more instructions */
10578 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10579 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10580 ins->inst_imm = ins->inst_c0;
10581 ins->inst_destbasereg = var->inst_basereg;
10582 ins->inst_offset = var->inst_offset;
10583 spec = INS_INFO (ins->opcode);
10584 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10585 ins->opcode = store_opcode;
10586 ins->inst_destbasereg = var->inst_basereg;
10587 ins->inst_offset = var->inst_offset;
10589 no_lvreg = TRUE;
10591 tmp_reg = ins->dreg;
10592 ins->dreg = ins->sreg2;
10593 ins->sreg2 = tmp_reg;
10594 store = TRUE;
10596 spec2 [MONO_INST_DEST] = ' ';
10597 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10598 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10599 spec2 [MONO_INST_SRC3] = ' ';
10600 spec = spec2;
10601 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10602 // FIXME: The backends expect the base reg to be in inst_basereg
10603 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10604 ins->dreg = -1;
10605 ins->inst_basereg = var->inst_basereg;
10606 ins->inst_offset = var->inst_offset;
10607 spec = INS_INFO (ins->opcode);
10608 } else {
10609 /* printf ("INS: "); mono_print_ins (ins); */
10610 /* Create a store instruction */
10611 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10613 /* Insert it after the instruction */
10614 mono_bblock_insert_after_ins (bb, ins, store_ins);
10616 def_ins = store_ins;
10619 /* We can't assign ins->dreg to var->dreg here, since the
10620 * sregs could use it. So set a flag, and do it after
10621 * the sregs. */
10623 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10624 dest_has_lvreg = TRUE;
10629 if (def_ins && !live_range_start [dreg]) {
10630 live_range_start [dreg] = def_ins;
10631 live_range_start_bb [dreg] = bb;
10635 /************/
10636 /* SREGS */
10637 /************/
10638 num_sregs = mono_inst_get_src_registers (ins, sregs);
10639 for (srcindex = 0; srcindex < 3; ++srcindex) {
10640 regtype = spec [MONO_INST_SRC1 + srcindex];
10641 sreg = sregs [srcindex];
10643 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10644 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10645 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10646 MonoInst *use_ins = ins;
10647 MonoInst *load_ins;
10648 guint32 load_opcode;
10650 if (var->opcode == OP_REGVAR) {
10651 sregs [srcindex] = var->dreg;
10652 //mono_inst_set_src_registers (ins, sregs);
10653 live_range_end [sreg] = use_ins;
10654 live_range_end_bb [sreg] = bb;
10655 continue;
10658 g_assert (var->opcode == OP_REGOFFSET);
10660 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10662 g_assert (load_opcode != OP_LOADV_MEMBASE);
10664 if (vreg_to_lvreg [sreg]) {
10665 g_assert (vreg_to_lvreg [sreg] != -1);
10667 /* The variable is already loaded to an lvreg */
10668 if (G_UNLIKELY (cfg->verbose_level > 2))
10669 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10670 sregs [srcindex] = vreg_to_lvreg [sreg];
10671 //mono_inst_set_src_registers (ins, sregs);
10672 continue;
10675 /* Try to fuse the load into the instruction */
10676 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10677 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10678 sregs [0] = var->inst_basereg;
10679 //mono_inst_set_src_registers (ins, sregs);
10680 ins->inst_offset = var->inst_offset;
10681 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10682 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10683 sregs [1] = var->inst_basereg;
10684 //mono_inst_set_src_registers (ins, sregs);
10685 ins->inst_offset = var->inst_offset;
10686 } else {
10687 if (MONO_IS_REAL_MOVE (ins)) {
10688 ins->opcode = OP_NOP;
10689 sreg = ins->dreg;
10690 } else {
10691 //printf ("%d ", srcindex); mono_print_ins (ins);
10693 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10695 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10696 if (var->dreg == prev_dreg) {
10698 /* sreg refers to the value loaded by the load
10699 * emitted below, but we need to use ins->dreg
10700 * since it refers to the store emitted earlier. */
10702 sreg = ins->dreg;
10704 g_assert (sreg != -1);
10705 vreg_to_lvreg [var->dreg] = sreg;
10706 g_assert (lvregs_len < 1024);
10707 lvregs [lvregs_len ++] = var->dreg;
10711 sregs [srcindex] = sreg;
10712 //mono_inst_set_src_registers (ins, sregs);
10714 if (regtype == 'l') {
10715 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10716 mono_bblock_insert_before_ins (bb, ins, load_ins);
10717 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10718 mono_bblock_insert_before_ins (bb, ins, load_ins);
10719 use_ins = load_ins;
10721 else {
10722 #if SIZEOF_REGISTER == 4
10723 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10724 #endif
10725 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10726 mono_bblock_insert_before_ins (bb, ins, load_ins);
10727 use_ins = load_ins;
10731 if (var->dreg < orig_next_vreg) {
10732 live_range_end [var->dreg] = use_ins;
10733 live_range_end_bb [var->dreg] = bb;
10737 mono_inst_set_src_registers (ins, sregs);
10739 if (dest_has_lvreg) {
10740 g_assert (ins->dreg != -1);
10741 vreg_to_lvreg [prev_dreg] = ins->dreg;
10742 g_assert (lvregs_len < 1024);
10743 lvregs [lvregs_len ++] = prev_dreg;
10744 dest_has_lvreg = FALSE;
10747 if (store) {
10748 tmp_reg = ins->dreg;
10749 ins->dreg = ins->sreg2;
10750 ins->sreg2 = tmp_reg;
10753 if (MONO_IS_CALL (ins)) {
10754 /* Clear vreg_to_lvreg array */
10755 for (i = 0; i < lvregs_len; i++)
10756 vreg_to_lvreg [lvregs [i]] = 0;
10757 lvregs_len = 0;
10758 } else if (ins->opcode == OP_NOP) {
10759 ins->dreg = -1;
10760 MONO_INST_NULLIFY_SREGS (ins);
10763 if (cfg->verbose_level > 2)
10764 mono_print_ins_index (1, ins);
10768 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10770 /* Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10771 * by storing the current native offset into MonoMethodVar->live_range_start/end. */
10773 for (i = 0; i < cfg->num_varinfo; ++i) {
10774 int vreg = MONO_VARINFO (cfg, i)->vreg;
10775 MonoInst *ins;
10777 if (live_range_start [vreg]) {
10778 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10779 ins->inst_c0 = i;
10780 ins->inst_c1 = vreg;
10781 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10783 if (live_range_end [vreg]) {
10784 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10785 ins->inst_c0 = i;
10786 ins->inst_c1 = vreg;
10787 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10790 #endif
10792 g_free (live_range_start);
10793 g_free (live_range_end);
10794 g_free (live_range_start_bb);
10795 g_free (live_range_end_bb);
10799 /* FIXME:
10800 * - use 'iadd' instead of 'int_add'
10801 * - handling ovf opcodes: decompose in method_to_ir.
10802 * - unify iregs/fregs
10803 * -> partly done, the missing parts are:
10804 * - a more complete unification would involve unifying the hregs as well, so
10805 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10806 * would no longer map to the machine hregs, so the code generators would need to
10807 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10808 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10809 * fp/non-fp branches speeds it up by about 15%.
10810 * - use sext/zext opcodes instead of shifts
10811 * - add OP_ICALL
10812 * - get rid of TEMPLOADs if possible and use vregs instead
10813 * - clean up usage of OP_P/OP_ opcodes
10814 * - cleanup usage of DUMMY_USE
10815 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10816 * stack
10817 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10818 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10819 * - make sure handle_stack_args () is called before the branch is emitted
10820 * - when the new IR is done, get rid of all unused stuff
10821 * - COMPARE/BEQ as separate instructions or unify them ?
10822 * - keeping them separate allows specialized compare instructions like
10823 * compare_imm, compare_membase
10824 * - most back ends unify fp compare+branch, fp compare+ceq
10825 * - integrate mono_save_args into inline_method
10826 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10827 * - handle long shift opts on 32 bit platforms somehow: they require
10828 * 3 sregs (2 for arg1 and 1 for arg2)
10829 * - make byref a 'normal' type.
10830 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10831 * variable if needed.
10832 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10833 * like inline_method.
10834 * - remove inlining restrictions
10835 * - fix LNEG and enable cfold of INEG
10836 * - generalize x86 optimizations like ldelema as a peephole optimization
10837 * - add store_mem_imm for amd64
10838 * - optimize the loading of the interruption flag in the managed->native wrappers
10839 * - avoid special handling of OP_NOP in passes
10840 * - move code inserting instructions into one function/macro.
10841 * - try a coalescing phase after liveness analysis
10842 * - add float -> vreg conversion + local optimizations on !x86
10843 * - figure out how to handle decomposed branches during optimizations, ie.
10844 * compare+branch, op_jump_table+op_br etc.
10845 * - promote RuntimeXHandles to vregs
10846 * - vtype cleanups:
10847 * - add a NEW_VARLOADA_VREG macro
10848 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10849 * accessing vtype fields.
10850 * - get rid of I8CONST on 64 bit platforms
10851 * - dealing with the increase in code size due to branches created during opcode
10852 * decomposition:
10853 * - use extended basic blocks
10854 * - all parts of the JIT
10855 * - handle_global_vregs () && local regalloc
10856 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10857 * - sources of increase in code size:
10858 * - vtypes
10859 * - long compares
10860 * - isinst and castclass
10861 * - lvregs not allocated to global registers even if used multiple times
10862 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10863 * meaningful.
10864 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10865 * - add all micro optimizations from the old JIT
10866 * - put tree optimizations into the deadce pass
10867 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10868 * specific function.
10869 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10870 * fcompare + branchCC.
10871 * - create a helper function for allocating a stack slot, taking into account
10872 * MONO_CFG_HAS_SPILLUP.
10873 * - merge r68207.
10874 * - merge the ia64 switch changes.
10875 * - optimize mono_regstate2_alloc_int/float.
10876 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10877 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10878 * parts of the tree could be separated by other instructions, killing the tree
10879 * arguments, or stores killing loads etc. Also, should we fold loads into other
10880 * instructions if the result of the load is used multiple times ?
10881 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10882 * - LAST MERGE: 108395.
10883 * - when returning vtypes in registers, generate IR and append it to the end of the
10884 * last bb instead of doing it in the epilog.
10885 * - change the store opcodes so they use sreg1 instead of dreg to store the base register. */
10890 /* NOTES
10891 -----
10893 - When to decompose opcodes:
10894 - earlier: this makes some optimizations hard to implement, since the low level IR
10895 no longer contains the necessary information. But it is easier to do.
10896 - later: harder to implement, enables more optimizations.
10897 - Branches inside bblocks:
10898 - created when decomposing complex opcodes.
10899 - branches to another bblock: harmless, but not tracked by the branch
10900 optimizations, so need to branch to a label at the start of the bblock.
10901 - branches to inside the same bblock: very problematic, trips up the local
10902 reg allocator. Can be fixed by splitting the current bblock, but that is a
10903 complex operation, since some local vregs can become global vregs etc.
10904 - Local/global vregs:
10905 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10906 local register allocator.
10907 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10908 structure, created by mono_create_var (). Assigned to hregs or the stack by
10909 the global register allocator.
10910 - When to do optimizations like alu->alu_imm:
10911 - earlier -> saves work later on since the IR will be smaller/simpler
10912 - later -> can work on more instructions
10913 - Handling of valuetypes:
10914 - When a vtype is pushed on the stack, a new temporary is created, an
10915 instruction computing its address (LDADDR) is emitted and pushed on
10916 the stack. Need to optimize cases when the vtype is used immediately as in
10917 argument passing, stloc etc.
10918 - Instead of the to_end stuff in the old JIT, simply call the function handling
10919 the values on the stack before emitting the last instruction of the bb. */
10922 #endif /* DISABLE_JIT */