/**
 * Emit memory access for the front-end.
 */

#include <config.h>
#include <mono/utils/mono-compiler.h>

#ifndef DISABLE_JIT

#include <mono/metadata/gc-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/utils/mono-memory-model.h>

#include "mini.h"
#include "ir-emit.h"
#include "jit-icalls.h"

#ifdef ENABLE_NETCORE
#define MAX_INLINE_COPIES 16
#else
#define MAX_INLINE_COPIES 10
#endif
#define MAX_INLINE_COPY_SIZE 10000
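
/*
 * mini_emit_memset:
 *
 *   Emit inlined IR that stores VAL (only 0 is currently supported) into SIZE bytes
 * at [DESTREG + OFFSET], unrolling into the widest stores that ALIGN and the store
 * offset allow, then finishing with narrower stores for the remainder.
 */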
void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	/*FIXME arbitrary hack to avoid unbounded code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (val == 0);
	g_assert (align > 0);

	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align % 2 == 1)
			goto set_1;
		if (align % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && align % 8 == 4)
			goto set_4;
	}
	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative in how we copy
	//We assume that src and dest are aligned to `align`, so a misaligned offset can only make things worse
	int offsets_mask;
	offsets_mask = offset & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto set_1;
		if (offsets_mask % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto set_4;
	}
	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}

set_4:
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}

set_2:
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}

set_1:
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
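
/*
 * mini_emit_memcpy:
 *
 *   Emit inlined IR that copies SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], picking the copy granularity from ALIGN and the offsets.
 */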
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	/*FIXME arbitrary hack to avoid unbounded code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (align > 0);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align == 4)
			goto copy_4;
		if (align == 2)
			goto copy_2;
		goto copy_1;
	}
	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative in how we copy
	//We assume that src and dest are aligned to `align`, so misaligned offsets can only make things worse
	int offsets_mask;
	offsets_mask = (doffset | soffset) & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto copy_1;
		if (offsets_mask % 4 == 2)
			goto copy_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto copy_4;
	}
	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}

copy_4:
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}

copy_2:
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}

copy_1:
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
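
/*
 * mini_emit_memcpy_internal:
 *
 *   Copy SIZE bytes (or SIZE_INS bytes when the size is only known at run time)
 * from SRC to DEST. Small constant-size copies are unrolled via mini_emit_memcpy ();
 * everything else becomes a call to the generic memcpy helper method.
 */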
static void
mini_emit_memcpy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when src/dest is OP_LDADDR */

	/* We can't do copies at a smaller granularity than the provided alignment */
	if (size_ins || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;
		iargs [1] = src;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;
		mono_emit_method_call (cfg, mini_get_memcpy_method (), iargs, NULL);
	} else {
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, size, align);
	}
}
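
/*
 * mini_emit_memset_internal:
 *
 *   Set SIZE bytes (or SIZE_INS bytes) at DEST to VALUE (or VALUE_INS). Only
 * constant-size zeroing is unrolled inline; every other case calls the memset
 * helper method.
 */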
static void
mini_emit_memset_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *value_ins, int value, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when dest is OP_LDADDR */

	/* We can't do copies at a smaller granularity than the provided alignment */
	if (value_ins || size_ins || value != 0 || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;

		if (!value_ins)
			EMIT_NEW_ICONST (cfg, value_ins, value);
		iargs [1] = value_ins;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;

		mono_emit_method_call (cfg, mini_get_memset_method (), iargs, NULL);
	} else {
		mini_emit_memset (cfg, dest->dreg, 0, size, value, align);
	}
}
static void
mini_emit_memcpy_const_size (MonoCompile *cfg, MonoInst *dest, MonoInst *src, int size, int align)
{
	mini_emit_memcpy_internal (cfg, dest, src, NULL, size, align);
}

static void
mini_emit_memset_const_size (MonoCompile *cfg, MonoInst *dest, int value, int size, int align)
{
	mini_emit_memset_internal (cfg, dest, NULL, value, NULL, size, align);
}
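
/*
 * create_write_barrier_bitmap:
 *
 *   Compute a bitmap of the pointer-sized slots of KLASS (starting at OFFSET)
 * which hold object references and therefore need a write barrier, recursing
 * into fields whose type itself contains references.
 */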
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset;
		if (mini_type_is_reference (mono_field_get_type_internal (field))) {
			g_assert ((foffset % TARGET_SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / TARGET_SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type_internal (field->type);
			if (m_class_has_references (field_class))
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
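
/*
 * mini_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled, write-barrier-aware copy of a small value type with
 * reference fields. Returns FALSE when the copy cannot be inlined (alignment
 * below sizeof (void*) or more than 5 pointer-sized slots), so the caller can
 * fall back to a helper call.
 */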
static gboolean
mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < TARGET_SIZEOF_VOID_P)
		return FALSE;

	if (size > 5 * TARGET_SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	while (size >= TARGET_SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			mini_emit_write_barrier (cfg, iargs [0], load_inst);

		offset += TARGET_SIZEOF_VOID_P;
		size -= TARGET_SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= TARGET_SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, TARGET_SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
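
/*
 * mini_emit_memory_copy_internal:
 *
 *   Copy a value of type KLASS from SRC to DEST, handling reference types,
 * gsharedvt types, and write barriers for value types with reference fields.
 */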
static void
mini_emit_memory_copy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, int explicit_align, gboolean native)
{
	MonoInst *iargs [4];
	int size;
	guint32 align = 0;
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;

	g_assert (klass);
	/*
	Fun fact about @native: it's not true that @klass has no references when @native is true.
	This happens in pinvoke2: marshal.c uses CEE_MONO_LDOBJNATIVE and passes klass.
	The actual data being copied will have no references, but @klass might.
	This means we can't assert !(klass->has_references && native).
	*/
	if (cfg->gshared)
		klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));

	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (mini_is_gsharedvt_klass (klass)) {
		g_assert (!native);
		size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
	}

	if (native)
		size = mono_class_native_size (klass, &align);
	else
		size = mono_class_value_size (klass, &align);

	if (!align)
		align = TARGET_SIZEOF_VOID_P;
	if (explicit_align)
		align = explicit_align;

	if (mini_type_is_reference (m_class_get_byval_arg (klass))) { // Refs *MUST* be naturally aligned
		MonoInst *store, *load;
		int dreg = alloc_ireg_ref (cfg);

		NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, src->dreg, 0);
		MONO_ADD_INS (cfg->cbb, load);

		NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, dest->dreg, 0, dreg);
		MONO_ADD_INS (cfg->cbb, store);

		mini_emit_write_barrier (cfg, dest, src);
		return;

	} else if (cfg->gen_write_barriers && (m_class_has_references (klass) || size_ins) && !native) { /* if native is true there should be no references in the struct */
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
		      (dest->opcode == OP_LDADDR))) {
			int context_used;

			iargs [0] = dest;
			iargs [1] = src;

			context_used = mini_class_check_context_used (cfg, klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, size, align)) {
			} else if (size_ins || align < TARGET_SIZEOF_VOID_P) {
				if (context_used) {
					iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				} else {
					iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
					if (!cfg->compile_aot)
						mono_class_compute_gc_descriptor (klass);
				}
				if (size_ins)
					mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
				else
					mono_emit_jit_icall (cfg, mono_value_copy_internal, iargs);
			} else {
				/* We don't unroll more than 5 stores to avoid code bloat. */

				/* This is harmless and simplifies mono_gc_get_range_copy_func */
				size += (TARGET_SIZEOF_VOID_P - 1);
				size &= ~(TARGET_SIZEOF_VOID_P - 1);

				EMIT_NEW_ICONST (cfg, iargs [2], size);
				mono_emit_jit_icall (cfg, mono_gc_wbarrier_range_copy, iargs);
			}
			return;
		}
	}
	if (size_ins) {
		iargs [0] = dest;
		iargs [1] = src;
		iargs [2] = size_ins;
		mini_emit_calli (cfg, mono_method_signature_internal (mini_get_memcpy_method ()), iargs, memcpy_ins, NULL, NULL);
	} else {
		mini_emit_memcpy_const_size (cfg, dest, src, size, align);
	}
}
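
/*
 * mini_emit_memory_load:
 *
 *   Emit a load of TYPE from [SRC + OFFSET], honoring the MONO_INST_UNALIGNED
 * and MONO_INST_VOLATILE flags in INS_FLAG. Returns the instruction whose dreg
 * holds the loaded value.
 */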
MonoInst*
mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag)
{
	MonoInst *ins;

	if (ins_flag & MONO_INST_UNALIGNED) {
		MonoInst *addr, *tmp_var;
		int align;
		int size = mono_type_size (type, &align);

		if (offset) {
			MonoInst *add_offset;
			NEW_BIALU_IMM (cfg, add_offset, OP_PADD_IMM, alloc_preg (cfg), src->dreg, offset);
			MONO_ADD_INS (cfg->cbb, add_offset);
			src = add_offset;
		}

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);

		mini_emit_memcpy_const_size (cfg, addr, src, size, 1);
		EMIT_NEW_TEMPLOAD (cfg, ins, tmp_var->inst_c0);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
	}
	ins->flags |= ins_flag;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
	}

	return ins;
}
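
/*
 * mini_emit_memory_store:
 *
 *   Emit a store of VALUE of type TYPE to the address in DEST, honoring the
 * MONO_INST_UNALIGNED and MONO_INST_VOLATILE flags and emitting a write barrier
 * for reference stores when needed.
 */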
void
mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag)
{
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	}

	if (ins_flag & MONO_INST_UNALIGNED) {
		MonoInst *addr, *mov, *tmp_var;

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_TEMPSTORE (cfg, mov, tmp_var->inst_c0, value);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
		mini_emit_memory_copy_internal (cfg, dest, addr, mono_class_from_mono_type_internal (type), 1, FALSE);
	} else {
		MonoInst *ins;

		/* FIXME: should check item at sp [1] is compatible with the type of the store. */
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, type, dest->dreg, 0, value->dreg);
		ins->flags |= ins_flag;
	}

	if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
		mini_type_is_reference (type) && !MONO_INS_IS_PCONST_NULL (value)) {
		/* insert call to write barrier */
		mini_emit_write_barrier (cfg, dest, value);
	}
}
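
/*
 * mini_emit_memory_copy_bytes:
 *
 *   Emit a raw byte copy of SIZE bytes from SRC to DEST (cpblk). Constant sizes
 * are unrolled when the intrinsics optimization is enabled; volatile copies are
 * bracketed with full memory barriers.
 */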
void
mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST)) {
		mini_emit_memcpy_const_size (cfg, dest, src, size->inst_c0, align);
	} else {
		mini_emit_memcpy_internal (cfg, dest, src, size, 0, align);
	}

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
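
/*
 * mini_emit_memory_init_bytes:
 *
 *   Emit a raw memset of SIZE bytes at DEST to VALUE (initblk). Only constant-size
 * zeroing is unrolled inline; other cases go through the memset helper.
 */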
void
mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	}

	//FIXME unrolled memset only supports zeroing
	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST) && (value->opcode == OP_ICONST) && (value->inst_c0 == 0)) {
		mini_emit_memset_const_size (cfg, dest, value->inst_c0, size->inst_c0, align);
	} else {
		mini_emit_memset_internal (cfg, dest, value, 0, size, 0, align);
	}
}
/*
 * If @klass is a valuetype, emit code to copy a value with source address in @src and destination address in @dest.
 * If @klass is a ref type, copy a pointer instead.
 */
void
mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag)
{
	int explicit_align = 0;
	if (ins_flag & MONO_INST_UNALIGNED)
		explicit_align = 1;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	mini_emit_memory_copy_internal (cfg, dest, src, klass, explicit_align, native);

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
#else /* !DISABLE_JIT */

MONO_EMPTY_SOURCE_FILE (memory_access);

#endif