[LoongArch64] Part-5:add loongarch support in some files for LoongArch64. (#21769)
[mono-project.git] / mono / mini / memory-access.c
blob f65105085df00a8d41980517153e2b710a5c0ee5
/**
 * Emit memory access for the front-end.
 */
#include <config.h>
#include <mono/utils/mono-compiler.h>

#ifndef DISABLE_JIT

#include <mono/metadata/gc-internals.h>
#include <mono/metadata/abi-details.h>
#include <mono/utils/mono-memory-model.h>

#include "mini.h"
#include "mini-runtime.h"
#include "ir-emit.h"
#include "jit-icalls.h"
#define MAX_INLINE_COPIES 10
#define MAX_INLINE_COPY_SIZE 10000
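/*
 * MAX_INLINE_COPY_SIZE is the arbitrary bound asserted by the inline emitters below
 * to avoid unbounded code expansion. MAX_INLINE_COPIES caps how many unrolled
 * load/store pairs mini_emit_memcpy_internal / mini_emit_memset_internal will emit
 * inline before falling back to a call to mini_get_memcpy_method () or
 * mini_get_memset_method ().
 */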
void
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
{
	int val_reg;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (val == 0);
	g_assert (align > 0);

	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
		switch (size) {
		case 1:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			return;
		case 2:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			return;
		case 4:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
			return;
#if SIZEOF_REGISTER == 8
		case 8:
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
			return;
#endif
		}
	}

	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
	else
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align % 2 == 1)
			goto set_1;
		if (align % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && align % 8 == 4)
			goto set_4;
	}

	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative here.
	//We assume that the input dest is aligned to `align`, so the offset can only make it worse.
	int offsets_mask;
	offsets_mask = offset & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto set_1;
		if (offsets_mask % 4 == 2)
			goto set_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto set_4;
	}

	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
			offset += 8;
			size -= 8;
		}
	}

set_4:
	while (size >= 4) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		offset += 4;
		size -= 4;
	}

set_2:
	while (size >= 2) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		offset += 2;
		size -= 2;
	}

set_1:
	while (size >= 1) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
		offset += 1;
		size -= 1;
	}
}
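/*
 * A quick worked case (a sketch, assuming a 64-bit target, align = 8 and an aligned
 * offset): size = 14 misses the single-store fast path and instead emits one 8-byte,
 * one 4-byte and one 2-byte store of the zeroed val_reg as it falls through the
 * set_4/set_2/set_1 loops.
 */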
void
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
{
	int cur_reg;

	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < MAX_INLINE_COPY_SIZE);
	g_assert (align > 0);

	if (align < TARGET_SIZEOF_VOID_P) {
		if (align == 4)
			goto copy_4;
		if (align == 2)
			goto copy_2;
		goto copy_1;
	}

	//Unaligned offsets don't naturally happen in the runtime, so it's ok to be conservative in how we copy.
	//We assume that the input src and dest are aligned to `align`, so the offsets can only make it worse.
	int offsets_mask;
	offsets_mask = (doffset | soffset) & 0x7; //we only care about the misalignment part
	if (offsets_mask) {
		if (offsets_mask % 2 == 1)
			goto copy_1;
		if (offsets_mask % 4 == 2)
			goto copy_2;
		if (TARGET_SIZEOF_VOID_P == 8 && offsets_mask % 8 == 4)
			goto copy_4;
	}

	if (SIZEOF_REGISTER == 8) {
		while (size >= 8) {
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
			doffset += 8;
			soffset += 8;
			size -= 8;
		}
	}

copy_4:
	while (size >= 4) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 4;
		soffset += 4;
		size -= 4;
	}

copy_2:
	while (size >= 2) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 2;
		soffset += 2;
		size -= 2;
	}

copy_1:
	while (size >= 1) {
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
		doffset += 1;
		soffset += 1;
		size -= 1;
	}
}
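/*
 * The goto dispatch above in effect picks the largest granule that both the declared
 * alignment and the actual offsets allow. For example, with align = 8 but doffset = 2
 * and soffset = 0, offsets_mask is 2, so the copy enters at copy_2 and proceeds two
 * bytes at a time.
 */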
static void
mini_emit_memcpy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when src/dest is OP_LDADDR */

	/* We can't do copies at a smaller granule than the provided alignment */
	if (size_ins || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;
		iargs [1] = src;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;
		mono_emit_method_call (cfg, mini_get_memcpy_method (), iargs, NULL);
	} else {
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, size, align);
	}
}
static void
mini_emit_memset_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *value_ins, int value, MonoInst *size_ins, int size, int align)
{
	/* FIXME: Optimize the case when dest is OP_LDADDR */

	/* We can't do copies at a smaller granule than the provided alignment */
	if (value_ins || size_ins || value != 0 || (size / align > MAX_INLINE_COPIES) || !(cfg->opt & MONO_OPT_INTRINS)) {
		MonoInst *iargs [3];
		iargs [0] = dest;

		if (!value_ins)
			EMIT_NEW_ICONST (cfg, value_ins, value);
		iargs [1] = value_ins;

		if (!size_ins)
			EMIT_NEW_ICONST (cfg, size_ins, size);
		iargs [2] = size_ins;

		mono_emit_method_call (cfg, mini_get_memset_method (), iargs, NULL);
	} else {
		mini_emit_memset (cfg, dest->dreg, 0, size, value, align);
	}
}
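/*
 * Note that the inline branch above is only reachable with value == 0; non-zero
 * values take the mini_get_memset_method () call, which is consistent with the
 * g_assert (val == 0) in mini_emit_memset.
 */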
static void
mini_emit_memcpy_const_size (MonoCompile *cfg, MonoInst *dest, MonoInst *src, int size, int align)
{
	mini_emit_memcpy_internal (cfg, dest, src, NULL, size, align);
}

static void
mini_emit_memset_const_size (MonoCompile *cfg, MonoInst *dest, int value, int size, int align)
{
	mini_emit_memset_internal (cfg, dest, NULL, value, NULL, size, align);
}
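/*
 * These two wrappers are the constant-size entry points used further down by
 * mini_emit_memory_load, mini_emit_memory_copy_bytes, mini_emit_memory_init_bytes
 * and mini_emit_memory_copy_internal whenever the size is known at compile time.
 */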
static void
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
{
	MonoClassField *field;
	gpointer iter = NULL;

	while ((field = mono_class_get_fields_internal (klass, &iter))) {
		int foffset;

		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
			continue;
		foffset = m_class_is_valuetype (klass) ? field->offset - MONO_ABI_SIZEOF (MonoObject) : field->offset;
		if (mini_type_is_reference (mono_field_get_type_internal (field))) {
			g_assert ((foffset % TARGET_SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / TARGET_SIZEOF_VOID_P);
		} else {
			MonoClass *field_class = mono_class_from_mono_type_internal (field->type);
			if (cfg->gshared)
				field_class = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (field_class)));
			if (m_class_has_references (field_class))
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
		}
	}
}
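/*
 * The resulting bitmap has one bit per pointer-sized slot of the value, set when that
 * slot holds a reference. As a rough illustration (assuming the fields land in
 * declaration order at pointer-aligned offsets on a 64-bit target), a struct laid out
 * as { object a; long b; object c; } would yield 0b101: slots 0 and 2 need write
 * barriers, slot 1 does not.
 */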
static gboolean
mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
{
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;

	if (align == 0)
		align = 4;

	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < TARGET_SIZEOF_VOID_P)
		return FALSE;

	if (size > 5 * TARGET_SIZEOF_VOID_P)
		return FALSE;

	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);

	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	offset = 0;

	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);

	/*tmp = dreg*/
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);

	if ((need_wb & 0x1) && !mini_debug_options.weak_memory_model)
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

	while (size >= TARGET_SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);

		if (need_wb & 0x1)
			mini_emit_write_barrier (cfg, iargs [0], load_inst);

		offset += TARGET_SIZEOF_VOID_P;
		size -= TARGET_SIZEOF_VOID_P;
		need_wb >>= 1;

		/*tmp += sizeof (void*)*/
		if (size >= TARGET_SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, TARGET_SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
		}
	}

	/* Those cannot be references since size < sizeof (void*) */
	while (size >= 4) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 4;
		size -= 4;
	}

	while (size >= 2) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 2;
		size -= 2;
	}

	while (size >= 1) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
		offset += 1;
		size -= 1;
	}

	return TRUE;
}
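/*
 * A small worked case (a sketch, assuming a 64-bit target and default layout): for a
 * 16-byte struct whose only reference sits in the first pointer slot, need_wb is
 * 0b01, so the main loop emits two pointer-sized copies but calls
 * mini_emit_write_barrier only for the first one.
 */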
static void
mini_emit_memory_copy_internal (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, int explicit_align, gboolean native,
		gboolean stack_store)
{
	MonoInst *iargs [4];
	int size;
	guint32 align = 0;
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;

	g_assert (klass);
	/*
	Fun fact about @native: it's false that @klass will have no refs when @native is true.
	This happens in pinvoke2. What goes on is that marshal.c uses CEE_MONO_LDOBJNATIVE and passes klass.
	The actual data being copied will have no refs, but @klass might.
	This means we can't assert !(klass->has_references && native).
	*/
	if (cfg->gshared)
		klass = mono_class_from_mono_type_internal (mini_get_underlying_type (m_class_get_byval_arg (klass)));

	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */

	if (mini_is_gsharedvt_klass (klass)) {
		g_assert (!native);
		size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
	}

	if (native)
		size = mono_class_native_size (klass, &align);
	else
		size = mono_class_value_size (klass, &align);

	if (!align)
		align = TARGET_SIZEOF_VOID_P;
	if (explicit_align)
		align = explicit_align;

	if (mini_type_is_reference (m_class_get_byval_arg (klass))) { // Refs *MUST* be naturally aligned
		MonoInst *store, *load;
		int dreg = alloc_ireg_ref (cfg);

		NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, src->dreg, 0);
		MONO_ADD_INS (cfg->cbb, load);

		if (!mini_debug_options.weak_memory_model)
			mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

		NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, dest->dreg, 0, dreg);
		MONO_ADD_INS (cfg->cbb, store);

		mini_emit_write_barrier (cfg, dest, load);
		return;
	} else if (cfg->gen_write_barriers && (m_class_has_references (klass) || size_ins) &&
			!native && !stack_store) { /* if native is true there should be no references in the struct */
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			(dest->opcode == OP_LDADDR))) {
			int context_used;

			iargs [0] = dest;
			iargs [1] = src;

			context_used = mini_class_check_context_used (cfg, klass);

			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, size, align)) {
			} else if (size_ins || align < TARGET_SIZEOF_VOID_P) {
				if (context_used) {
					iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				} else {
					iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
					if (!cfg->compile_aot)
						mono_class_compute_gc_descriptor (klass);
				}
				if (size_ins)
					mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
				else
					mono_emit_jit_icall (cfg, mono_value_copy_internal, iargs);
			} else {
				/* We don't unroll more than 5 stores to avoid code bloat. */
				/* This is harmless and simplifies mono_gc_get_range_copy_func */
				size += (TARGET_SIZEOF_VOID_P - 1);
				size &= ~(TARGET_SIZEOF_VOID_P - 1);

				EMIT_NEW_ICONST (cfg, iargs [2], size);
				mono_emit_jit_icall (cfg, mono_gc_wbarrier_range_copy, iargs);
			}
			return;
		}
	}

	if (size_ins) {
		iargs [0] = dest;
		iargs [1] = src;
		iargs [2] = size_ins;
		mini_emit_calli (cfg, mono_method_signature_internal (mini_get_memcpy_method ()), iargs, memcpy_ins, NULL, NULL);
	} else {
		mini_emit_memcpy_const_size (cfg, dest, src, size, align);
	}
}
MonoInst*
mini_emit_memory_load (MonoCompile *cfg, MonoType *type, MonoInst *src, int offset, int ins_flag)
{
	MonoInst *ins;

	/* LLVM can handle unaligned loads and stores, so there's no reason to
	 * manually decompose an unaligned load here into a memcpy if we're
	 * using LLVM. */
	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
		MonoInst *addr, *tmp_var;
		int align;
		int size = mono_type_size (type, &align);

		if (offset) {
			MonoInst *add_offset;
			NEW_BIALU_IMM (cfg, add_offset, OP_PADD_IMM, alloc_preg (cfg), src->dreg, offset);
			MONO_ADD_INS (cfg->cbb, add_offset);
			src = add_offset;
		}

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);

		mini_emit_memcpy_const_size (cfg, addr, src, size, 1);
		EMIT_NEW_TEMPLOAD (cfg, ins, tmp_var->inst_c0);
	} else {
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, type, src->dreg, offset);
	}
	ins->flags |= ins_flag;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
	}

	return ins;
}
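/*
 * In other words, an unaligned load (outside LLVM) is lowered to a byte-granularity
 * mini_emit_memcpy_const_size into a stack temporary followed by an ordinary load of
 * that temporary; the acquire barrier for volatile loads is emitted afterwards in
 * either case.
 */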
void
mini_emit_memory_store (MonoCompile *cfg, MonoType *type, MonoInst *dest, MonoInst *value, int ins_flag)
{
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	} else if (!mini_debug_options.weak_memory_model && mini_type_is_reference (type) && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);

	MONO_EMIT_NULL_CHECK (cfg, dest->dreg, FALSE);

	if ((ins_flag & MONO_INST_UNALIGNED) && !COMPILE_LLVM (cfg)) {
		MonoInst *addr, *mov, *tmp_var;

		tmp_var = mono_compile_create_var (cfg, type, OP_LOCAL);
		EMIT_NEW_TEMPSTORE (cfg, mov, tmp_var->inst_c0, value);
		EMIT_NEW_VARLOADA (cfg, addr, tmp_var, tmp_var->inst_vtype);
		mini_emit_memory_copy_internal (cfg, dest, addr, mono_class_from_mono_type_internal (type), 1, FALSE, (ins_flag & MONO_INST_STACK_STORE) != 0);
	} else {
		MonoInst *ins;

		/* FIXME: should check item at sp [1] is compatible with the type of the store. */
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, type, dest->dreg, 0, value->dreg);
		ins->flags |= ins_flag;
	}

	if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
		mini_type_is_reference (type) && !MONO_INS_IS_PCONST_NULL (value)) {
		/* insert call to write barrier */
		mini_emit_write_barrier (cfg, dest, value);
	}
}
void
mini_emit_memory_copy_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST)) {
		mini_emit_memcpy_const_size (cfg, dest, src, size->inst_c0, align);
	} else {
		mini_emit_memcpy_internal (cfg, dest, src, size, 0, align);
	}

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
void
mini_emit_memory_init_bytes (MonoCompile *cfg, MonoInst *dest, MonoInst *value, MonoInst *size, int ins_flag)
{
	int align = (ins_flag & MONO_INST_UNALIGNED) ? 1 : TARGET_SIZEOF_VOID_P;

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
	}

	//FIXME unrolled memset only supports zeroing
	if ((cfg->opt & MONO_OPT_INTRINS) && (size->opcode == OP_ICONST) && (value->opcode == OP_ICONST) && (value->inst_c0 == 0)) {
		mini_emit_memset_const_size (cfg, dest, value->inst_c0, size->inst_c0, align);
	} else {
		mini_emit_memset_internal (cfg, dest, value, 0, size, 0, align);
	}
}
/*
 * If @klass is a valuetype, emit code to copy a value with source address in @src and destination address in @dest.
 * If @klass is a ref type, copy a pointer instead.
 */
void
mini_emit_memory_copy (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native, int ins_flag)
{
	int explicit_align = 0;
	if (ins_flag & MONO_INST_UNALIGNED)
		explicit_align = 1;

	/*
	 * FIXME: It's unclear whether we should be emitting both the acquire
	 * and release barriers for cpblk. It is technically both a load and
	 * store operation, so it seems like that's the sensible thing to do.
	 *
	 * FIXME: We emit full barriers on both sides of the operation for
	 * simplicity. We should have a separate atomic memcpy method instead.
	 */
	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}

	mini_emit_memory_copy_internal (cfg, dest, src, klass, explicit_align, native, (ins_flag & MONO_INST_STACK_STORE) != 0);

	if (ins_flag & MONO_INST_VOLATILE) {
		/* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
		mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
	}
}
#else /* !DISABLE_JIT */

MONO_EMPTY_SOURCE_FILE (memory_access);
#endif