/*
 * memfuncs.c: Our own bzero/memmove.
 *
 * Copyright (C) 2013-2015 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
/*
 * SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
 * need to copy or zero out memory in code that might be interrupted by collections. To
 * guarantee that those operations will not result in invalid pointers, we must do it
 * word-atomically.
 *
 * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
 * cases where one would assume so. For instance, some implementations (like Darwin's on
 * x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
 * the region preceding the first vector-aligned address. That region could be
 * word-aligned, but it would still be copied byte-wise.
 *
 * All our memory writes here are to "volatile" locations. This is so that C compilers
 * don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
 * do that.
 */
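/*
 * Illustrative sketch (not part of the original file): the hazardous pattern
 * the functions below avoid. If a collection samples dst between iterations,
 * it can observe a pointer whose already-copied low bytes belong to the new
 * value while the remaining bytes still belong to the old one - an invalid
 * pointer.
 *
 *     static void
 *     copy_pointer_bytewise (void **dst, void **src)
 *     {
 *         char *d = (char *)dst, *s = (char *)src;
 *         size_t i;
 *         for (i = 0; i < sizeof (void*); ++i)
 *             d [i] = s [i];  // a GC scan between iterations sees a torn pointer
 *     }
 */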
#include <config.h>
#include <glib.h>
#include <string.h>

#include "memfuncs.h"
#define ptr_mask ((sizeof (void*) - 1))
#define _toi(ptr) ((size_t)ptr)
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
#if SIZEOF_VOID_P == 4
#define bytes_to_words(n) ((size_t)(n) >> 2)
#elif SIZEOF_VOID_P == 8
#define bytes_to_words(n) ((size_t)(n) >> 3)
#else
#error We only support 32 and 64 bit architectures.
#endif
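/*
 * Worked examples (illustrative, assuming a 64-bit target where
 * SIZEOF_VOID_P == 8):
 *
 *     bytes_to_words (24)        == 3
 *     align_down ((void*)0x100f) == (void*)0x1008
 *     align_up ((void*)0x1001)   == (void*)0x1008
 */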
#define BZERO_WORDS(dest,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	int __n = (words);	\
	int __i;	\
	for (__i = 0; __i < __n; ++__i)	\
		__d [__i] = NULL;	\
	} while (0)
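/*
 * Illustrative expansion (assumption, not taken from the original source):
 * BZERO_WORDS (d, 2) is equivalent to two volatile word-sized stores, so each
 * pointer slot is cleared by a single write instead of byte by byte:
 *
 *     void * volatile *__d = (void* volatile*)(d);
 *     __d [0] = NULL;
 *     __d [1] = NULL;
 */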
/**
 * mono_gc_bzero_aligned:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 * The address of @dest MUST be aligned to word boundaries.
 *
 * FIXME borrow faster code from some BSD libc or bionic
 */
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
	volatile char *d = (char*)dest;
	size_t tail_bytes, word_bytes;

	g_assert (unaligned_bytes (dest) == 0);

	/* zero all whole words with BZERO_WORDS */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (d, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (d, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (d, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (d, 4);
		break;
	default:
		BZERO_WORDS (d, bytes_to_words (word_bytes));
	}

	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		d += word_bytes;
		do {
			*d++ = 0;
		} while (--tail_bytes);
	}
}
/**
 * mono_gc_bzero_atomic:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 *
 * Use this to zero memory without word tearing when dest is aligned.
 */
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
	if (unaligned_bytes (dest))
		memset (dest, 0, size);
	else
		mono_gc_bzero_aligned (dest, size);
}
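/*
 * Hypothetical usage sketch (the helper is illustrative, not part of this
 * file): zeroing a buffer whose alignment is only known at run time.
 * mono_gc_bzero_atomic takes the tearing-free path when the destination is
 * word-aligned and falls back to plain memset otherwise.
 *
 *     static void
 *     clear_buffer (void *data, size_t byte_len)
 *     {
 *         mono_gc_bzero_atomic (data, byte_len);  // aligned: word stores; unaligned: memset
 *     }
 */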
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);	\
	int __n = (int)(words);	\
	int __i;	\
	for (__i = 0; __i < __n; ++__i)	\
		__d [__i] = __s [__i];	\
	} while (0)

#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);	\
	int __n = (int)(words);	\
	int __i;	\
	for (__i = __n - 1; __i >= 0; --__i)	\
		__d [__i] = __s [__i];	\
	} while (0)
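/*
 * Worked example (illustrative): why overlapping moves need the downward
 * variant. With an 8-byte word, moving 3 words from 0x1000 to 0x1008 overlaps
 * by two words. Copying upward reads src [1] only after the store to dest [0]
 * (at 0x1008) has already overwritten it; copying downward reads every source
 * word before any store can clobber it:
 *
 *     upward:   dest [0] = src [0];  dest [1] = src [1];  // src [1] already clobbered
 *     downward: dest [2] = src [2];  dest [1] = src [1];  dest [0] = src [0];  // safe
 */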
/**
 * mono_gc_memmove_aligned:
 * @dest: destination of the move
 * @src: source
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned.
 */
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
	g_assert (unaligned_bytes (dest) == 0);
	g_assert (unaligned_bytes (src) == 0);

	/*
	 * If we're copying less than a word we don't need to worry about word tearing,
	 * so we bail out to memmove early.
	 */
	if (size < sizeof (void*)) {
		memmove (dest, src, size);
		return;
	}
	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 *
	 * Pointers to managed objects must always be stored at word-aligned addresses, so
	 * even if dest is misaligned, src will be misaligned by the same amount - this
	 * ensures proper atomicity of reads.
	 *
	 * We don't need a special case for when source and destination have different
	 * alignments, since those copies fall back to memmove, which must handle it.
	 */
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /* backward copy */
		volatile char *p = (char*)dest + size;
		char *s = (char*)src + size;
		char *start = (char*)dest;
		char *align_end = MAX ((char*)dest, (char*)align_down (p));
		char *word_start;
		size_t bytes_to_memmove;

		while (p > align_end)
			*--p = *--s;

		word_start = (char*)align_up (start);
		bytes_to_memmove = p - word_start;
		p -= bytes_to_memmove;
		s -= bytes_to_memmove;
		MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
	} else {
		volatile char *d = (char*)dest;
		const char *s = (const char*)src;
		size_t tail_bytes;

		/* copy all whole words with MEMMOVE_WORDS_UPWARD */
		MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));

		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			d += (size_t)align_down (size);
			s += (size_t)align_down (size);
			do {
				*d++ = *s++;
			} while (--tail_bytes);
		}
	}
}
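/*
 * Hypothetical usage sketch (function and parameters are illustrative):
 * shifting the elements of a word-aligned pointer array one slot down, an
 * overlapping forward move that must not tear the pointers it copies.
 *
 *     static void
 *     remove_first_slot (void **slots, size_t count)
 *     {
 *         mono_gc_memmove_aligned (slots, slots + 1, (count - 1) * sizeof (void*));
 *     }
 */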
/**
 * mono_gc_memmove_atomic:
 * @dest: destination of the move
 * @src: source
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned.
 */
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
	if (unaligned_bytes (_toi (dest) | _toi (src)))
		memmove (dest, src, size);
	else
		mono_gc_memmove_aligned (dest, src, size);
}
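/*
 * Hypothetical usage sketch (names are illustrative): copying a value-type
 * payload whose alignment is only known at run time. Managed pointers are
 * always stored word-aligned, so a misaligned region cannot contain them and
 * the plain memmove fallback is safe there.
 *
 *     static void
 *     copy_value (void *dest, const void *src, size_t size)
 *     {
 *         mono_gc_memmove_atomic (dest, src, size);
 *     }
 */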