[mini] Expand clr-memory-model effect (#17093)
mono-project.git / mono/mini/memory-access.c
/**
 * Emit memory access for the front-end.
 */

#include <config.h>
#include <mono/utils/mono-compiler.h>

#ifndef DISABLE_JIT

#include <mono/metadata/gc-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/utils/mono-memory-model.h>

#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"

#ifdef ENABLE_NETCORE
#define MAX_INLINE_COPIES 16
#else
#define MAX_INLINE_COPIES 10
#endif
#define MAX_INLINE_COPY_SIZE 10000
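
/*
 * mini_emit_memset:
 *
 *   Emit an unrolled memset of SIZE bytes at DESTREG+OFFSET. Only zeroing is
 * supported (VAL must be 0). The store width is narrowed when ALIGN or the
 * offset is smaller than the pointer size. For example, zeroing 12 bytes with
 * pointer alignment on a 64-bit target emits one 8 byte store followed by one
 * 4 byte store.
 */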
void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (val == 0);
	g_assert (align > 0);

	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align % 2 == 1)
			goto set_1;
		if (align % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && align % 8 == 4)
			goto set_4;
	}

	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative in how we copy.
	//We assume that input src and dest are aligned to `align`, so the offset can only make the alignment worse.
	int offsets_mask;
	offsets_mask = offset & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto set_1;
		if (offsets_mask % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto set_4;
	}

	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}

set_4:
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}

set_2:
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}

set_1:
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
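
/*
 * mini_emit_memcpy:
 *
 *   Emit an unrolled copy of SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET.
 * The chunk size is limited by ALIGN and by the misalignment of the offsets;
 * for example, a 10 byte copy with ALIGN == 2 becomes five 2 byte load/store
 * pairs.
 */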
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (align > 0);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align == 4)
			goto copy_4;
		if (align == 2)
			goto copy_2;
		goto copy_1;
	}

	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative in how we copy.
	//We assume that input src and dest are aligned to `align`, so the offsets can only make the alignment worse.
	int offsets_mask;
	offsets_mask = (doffset | soffset) & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto copy_1;
		if (offsets_mask % 4 == 2)
			goto copy_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto copy_4;
	}

	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}

copy_4:
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}

copy_2:
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}

copy_1:
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
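
/*
 * mini_emit_memcpy_internal:
 *
 *   Copy SIZE bytes (or SIZE_INS bytes when the size is only known at run time)
 * from SRC to DEST. Falls back to a call to the managed memcpy helper when the
 * size is dynamic, the unroll budget (MAX_INLINE_COPIES) would be exceeded, or
 * intrinsics are disabled; otherwise the copy is unrolled by mini_emit_memcpy.
 */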
static void
mini_emit_memcpy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when src/dest is OP_LDADDR */

	/* We can't do copies at a smaller granule than the provided alignment */
	if (size_ins || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;
		iargs [1] = src;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;
		mono_emit_method_call (cfg, mini_get_memcpy_method (), iargs, NULL);
	} else {
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, size, align);
	}
}
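
/*
 * mini_emit_memset_internal:
 *
 *   Set SIZE bytes (or SIZE_INS bytes) at DEST to VALUE (or VALUE_INS). Only a
 * constant zero value within the unroll budget is inlined; every other case is
 * handled by a call to the managed memset helper.
 */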
static void
mini_emit_memset_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *value_ins, int value, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when dest is OP_LDADDR */

	/* We can't do copies at a smaller granule than the provided alignment */
	if (value_ins || size_ins || value != 0 || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;

		if (!value_ins)
			EMIT_NEW_ICONST (cfg, value_ins, value);
		iargs [1] = value_ins;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;

		mono_emit_method_call (cfg, mini_get_memset_method (), iargs, NULL);
	} else {
		mini_emit_memset (cfg, dest->dreg, 0, size, value, align);
	}
}
static void
mini_emit_memcpy_const_size (MonoCompile *cfg, MonoInst *dest, MonoInst *src, int size, int align)
{
	mini_emit_memcpy_internal (cfg, dest, src, NULL, size, align);
}

static void
mini_emit_memset_const_size (MonoCompile *cfg, MonoInst *dest, int value, int size, int align)
{
	mini_emit_memset_internal (cfg, dest, NULL, value, NULL, size, align);
}
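
/*
 * create_write_barrier_bitmap:
 *
 *   Compute a bitmap of the pointer-sized slots of KLASS that hold managed
 * references, starting at OFFSET: bit i is set when slot i needs a write
 * barrier. Embedded value types with references are handled by recursion.
 */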
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject): field->offset;
		if (mini_type_is_reference (mono_field_get_type_internal (field))) {
			g_assert ((foffset % TARGET_SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / TARGET_SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type_internal (field->type);
			if (m_class_has_references (field_class))
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
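
/*
 * mini_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a value of type KLASS.
 * Returns FALSE (so the caller can fall back to a helper call) when the
 * alignment is below the pointer size or the value is larger than 5 pointer
 * words. Otherwise the value is copied one pointer word at a time, a write
 * barrier is emitted for every slot marked in the reference bitmap, and under
 * the CLR memory model a release barrier is emitted before the copy when the
 * first slot is a reference.
 */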
static gboolean
mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < TARGET_SIZEOF_VOID_P)
		return FALSE;

	if (size > 5 * TARGET_SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	if ((need_wb & 0x1) && mini_debug_options.clr_memory_model)
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

	while (size >= TARGET_SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			mini_emit_write_barrier (cfg, iargs [0], load_inst);

		offset += TARGET_SIZEOF_VOID_P;
		size -= TARGET_SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= TARGET_SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, TARGET_SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
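
/*
 * mini_emit_memory_copy_internal:
 *
 *   Copy a value of type KLASS from SRC to DEST. Reference types are copied as
 * a single pointer load/store with a write barrier (plus a release barrier
 * under the CLR memory model); value types with references use the
 * write-barrier-aware paths above; everything else is a plain memcpy. For
 * gsharedvt types the size and the memcpy routine are fetched from the RGCTX.
 */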
static void
mini_emit_memory_copy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, int explicit_align, gboolean native)
{
	MonoInst *iargs [4];
	int size;
	guint32 align = 0;
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;

	g_assert (klass);
	/*
	Fun fact about @native. It's false that @klass will have no refs when @native is true.
	This happens in pinvoke2. What happens is that marshal.c uses CEE_MONO_LDOBJNATIVE and passes a klass.
	The actual data being copied will have no refs, but @klass might.
	This means we can't assert !(klass->has_references && native).
	*/

	if (cfg->gshared)
		klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));

	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (mini_is_gsharedvt_klass (klass)) {
		g_assert (!native);
		size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
	}

	if (native)
		size = mono_class_native_size (klass, &align);
	else
		size = mono_class_value_size (klass, &align);

	if (!align)
		align = TARGET_SIZEOF_VOID_P;
	if (explicit_align)
		align = explicit_align;

	if (mini_type_is_reference (m_class_get_byval_arg (klass))) { // Refs *MUST* be naturally aligned
		MonoInst *store, *load;
		int dreg = alloc_ireg_ref (cfg);

		NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, src->dreg, 0);
		MONO_ADD_INS (cfg->cbb, load);

		if (mini_debug_options.clr_memory_model)
			mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

		NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, dest->dreg, 0, dreg);
		MONO_ADD_INS (cfg->cbb, store);

		mini_emit_write_barrier (cfg, dest, src);
		return;
	} else if (cfg->gen_write_barriers && (m_class_has_references (klass) || size_ins) && !native) { /* if native is true there should be no references in the struct */
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			int context_used;

			iargs [0] = dest;
			iargs [1] = src;

			context_used = mini_class_check_context_used (cfg, klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, size, align)) {
			} else if (size_ins || align < TARGET_SIZEOF_VOID_P) {
				if (context_used) {
					iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				} else {
					iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
					if (!cfg->compile_aot)
						mono_class_compute_gc_descriptor (klass);
				}
				if (size_ins)
					mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
				else
					mono_emit_jit_icall (cfg, mono_value_copy_internal, iargs);
			} else {
				/* We don't unroll more than 5 stores to avoid code bloat. */

				/* This is harmless and simplifies mono_gc_get_range_copy_func */
				size += (TARGET_SIZEOF_VOID_P - 1);
				size &= ~(TARGET_SIZEOF_VOID_P - 1);

				EMIT_NEW_ICONST (cfg, iargs [2], size);
				mono_emit_jit_icall (cfg, mono_gc_wbarrier_range_copy, iargs);
			}
			return;
		}
	}

	if (size_ins) {
		iargs [0] = dest;
		iargs [1] = src;
		iargs [2] = size_ins;
		mini_emit_calli (cfg, mono_method_signature_internal (mini_get_memcpy_method ()), iargs, memcpy_ins, NULL, NULL);
	} else {
		mini_emit_memcpy_const_size (cfg, dest, src, size, align);
	}
}
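
/*
 * mini_emit_memory_load:
 *
 *   Emit a load of TYPE from SRC+OFFSET honoring INS_FLAG. Unaligned loads are
 * performed by byte-copying into a properly aligned temporary and loading from
 * it; volatile loads are followed by an acquire barrier (ECMA-335 I.12.6.7).
 */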
MonoInst*
mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag)
{
	MonoInst *ins;

	if (ins_flag & MONO_INST_UNALIGNED) {
		MonoInst *addr, *tmp_var;
		int align;
		int size = mono_type_size (type, &align);

		if (offset) {
			MonoInst *add_offset;
			NEW_BIALU_IMM (cfg, add_offset, OP_PADD_IMM, alloc_preg (cfg), src->dreg, offset);
			MONO_ADD_INS (cfg->cbb, add_offset);
			src = add_offset;
		}

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);

		mini_emit_memcpy_const_size (cfg, addr, src, size, 1);
		EMIT_NEW_TEMPLOAD (cfg, ins, tmp_var->inst_c0);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
	}
	ins->flags |= ins_flag;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
	}

	return ins;
}
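
/*
 * mini_emit_memory_store:
 *
 *   Emit a store of VALUE of type TYPE to DEST honoring INS_FLAG. Volatile
 * stores are preceded by a release barrier, and so are reference stores when
 * the CLR memory model is enabled. Unaligned stores go through an aligned
 * temporary; reference stores get a write barrier unless the value is a known
 * null constant.
 */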
void
mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag)
{
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	} else if (mini_debug_options.clr_memory_model && mini_type_is_reference (type))
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

	if (ins_flag & MONO_INST_UNALIGNED) {
		MonoInst *addr, *mov, *tmp_var;

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_TEMPSTORE (cfg, mov, tmp_var->inst_c0, value);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
		mini_emit_memory_copy_internal (cfg, dest, addr, mono_class_from_mono_type_internal (type), 1, FALSE);
	} else {
		MonoInst *ins;

		/* FIXME: should check item at sp [1] is compatible with the type of the store. */
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, type, dest->dreg, 0, value->dreg);
		ins->flags |= ins_flag;
	}

	if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
		mini_type_is_reference (type) && !MONO_INS_IS_PCONST_NULL (value)) {
		/* insert call to write barrier */
		mini_emit_write_barrier (cfg, dest, value);
	}
}
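
/*
 * mini_emit_memory_copy_bytes:
 *
 *   Implement the cpblk opcode: copy SIZE bytes from SRC to DEST. Volatile
 * copies are bracketed by full barriers; constant sizes take the unrolled
 * path when intrinsics are enabled.
 */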
void
mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST)) {
		mini_emit_memcpy_const_size (cfg, dest, src, size->inst_c0, align);
	} else {
		mini_emit_memcpy_internal (cfg, dest, src, size, 0, align);
	}

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
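
/*
 * mini_emit_memory_init_bytes:
 *
 *   Implement the initblk opcode: set SIZE bytes at DEST to VALUE. Only a
 * constant zero with a constant size is unrolled inline; other cases call the
 * memset helper. Volatile initialization is preceded by a release barrier.
 */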
void
mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	}

	//FIXME unrolled memset only supports zeroing
	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST) && (value->opcode == OP_ICONST) && (value->inst_c0 == 0)) {
		mini_emit_memset_const_size (cfg, dest, value->inst_c0, size->inst_c0, align);
	} else {
		mini_emit_memset_internal (cfg, dest, value, 0, size, 0, align);
	}
}
/*
 * If @klass is a valuetype, emit code to copy a value with source address in @src and destination address in @dest.
 * If @klass is a ref type, copy a pointer instead.
 */
void
mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag)
{
	int explicit_align = 0;
	if (ins_flag & MONO_INST_UNALIGNED)
		explicit_align = 1;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	mini_emit_memory_copy_internal (cfg, dest, src, klass, explicit_align, native);

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
#else /* !DISABLE_JIT */

MONO_EMPTY_SOURCE_FILE (memory_access);
#endif