/*
 * memfuncs.c: Our own bzero/memmove.
 *
 * Copyright (C) 2013-2015 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
/*
 * SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
 * need to copy or zero out memory in code that might be interrupted by collections. To
 * guarantee that those operations will not result in invalid pointers, we must do it
 * word-atomically.
 *
 * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
 * cases where one would assume so. For instance, some implementations (like Darwin's on
 * x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
 * the region preceding the first vector-aligned address. That region could be
 * word-aligned, but it would still be copied byte-wise.
 *
 * All our memory writes here are to "volatile" locations. This is so that C compilers
 * don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
 * do that.
 */

#include <config.h>	/* SIZEOF_VOID_P */
#include <glib.h>	/* g_assert, MAX */
#include <string.h>	/* memset, memmove */

#include "memfuncs.h"
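/*
 * Illustrative sketch (not from the original file): without "volatile", a
 * compiler may pattern-match a plain zeroing loop back into a memset() call,
 * losing the word-atomicity guarantee:
 *
 *	void naive_zero (void **p, size_t n)
 *	{
 *		size_t i;
 *		for (i = 0; i < n; ++i)
 *			p [i] = NULL;	// LLVM can lower this loop to memset()
 *	}
 *
 * Writing through a "void * volatile *" pointer, as the macros below do,
 * forces one word-sized store per iteration.
 */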
#define ptr_mask ((sizeof (void*) - 1))
#define _toi(ptr) ((size_t)ptr)
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
#if SIZEOF_VOID_P == 4
#define bytes_to_words(n) ((size_t)(n) >> 2)
#elif SIZEOF_VOID_P == 8
#define bytes_to_words(n) ((size_t)(n) >> 3)
#else
#error We only support 32 and 64 bit architectures.
#endif
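/*
 * Worked example (assuming a 64-bit target, so ptr_mask == 7):
 *
 *	unaligned_bytes (0x1003) == 3
 *	align_down (0x1003)      == (void*)0x1000
 *	align_up (0x1003)        == (void*)0x1008
 *	bytes_to_words (24)      == 3
 */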
#define BZERO_WORDS(dest,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		int __n = (int)(words);	\
		int __i;	\
		for (__i = 0; __i < __n; ++__i)	\
			__d [__i] = NULL;	\
	} while (0)
/**
 * mono_gc_bzero_aligned:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 * The address of @dest MUST be aligned to word boundaries.
 *
 * FIXME: borrow faster code from some BSD libc or bionic.
 */
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
	volatile char *d = (char*)dest;
	size_t tail_bytes, word_bytes;

	g_assert (unaligned_bytes (dest) == 0);
	/* zero all the aligned words */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (d, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (d, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (d, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (d, 4);
		break;
	default:
		BZERO_WORDS (d, bytes_to_words (word_bytes));
	}

	/* zero the unaligned tail byte-wise */
	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		d += word_bytes;
		do {
			*d++ = 0;
		} while (--tail_bytes);
	}
}
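/*
 * Usage sketch (hypothetical caller): clearing memory that a collection may
 * observe mid-operation, where the start address is known to be word-aligned:
 *
 *	mono_gc_bzero_aligned (region, region_size);
 *
 * When the alignment of the start address is not guaranteed, use
 * mono_gc_bzero_atomic() below, which falls back to memset() as needed.
 */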
/**
 * mono_gc_bzero_atomic:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 *
 * Use this to zero memory without word tearing when @dest is aligned;
 * if @dest is not word-aligned, this falls back to memset(), which makes
 * no atomicity guarantee.
 */
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
	if (unaligned_bytes (dest))
		memset (dest, 0, size);
	else
		mono_gc_bzero_aligned (dest, size);
}
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		void **__s = (void**)(src);	\
		int __n = (int)(words);	\
		int __i;	\
		for (__i = 0; __i < __n; ++__i)	\
			__d [__i] = __s [__i];	\
	} while (0)
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		void **__s = (void**)(src);	\
		int __n = (int)(words);	\
		int __i;	\
		for (__i = __n - 1; __i >= 0; --__i)	\
			__d [__i] = __s [__i];	\
	} while (0)
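/*
 * The downward variant is used for overlapping moves where dest > src:
 * copying the highest word first ensures no source word is overwritten
 * before it has been read.
 */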
/**
 * mono_gc_memmove_aligned:
 * @dest: destination of the move
 * @src: source
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned.
 */
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
	g_assert (unaligned_bytes (dest) == 0);
	g_assert (unaligned_bytes (src) == 0);
	/*
	 * If we're copying less than a word we don't need to worry about word
	 * tearing, so we bail out to memmove early.
	 */
	if (size < sizeof (void*)) {
		memmove (dest, src, size);
		return;
	}
	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 * Pointers to managed objects must always be stored at word-aligned addresses, so
	 * even if dest is misaligned, src will be misaligned by the same amount - this
	 * ensures proper atomicity of reads.
	 *
	 * We don't need to handle the case when source and destination have different
	 * alignments, since we only do word stores using memmove, which must handle it.
	 */
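	/*
	 * Worked example (hypothetical values, 64-bit): dest = 0x1008, src = 0x1000,
	 * size = 20.  dest > src and dest - src == 8 < 20, so we copy backward:
	 * the 4 tail bytes in [0x1018, 0x101C) are copied byte-wise from high to
	 * low, then the two whole words at 0x1010 and 0x1008 are copied
	 * word-atomically, highest word first.
	 */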
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
		volatile char *p = (char*)dest + size;
		char *s = (char*)src + size;
		char *start = (char*)dest;
		char *align_end = MAX ((char*)dest, (char*)align_down (p));
		char *word_start;
		size_t bytes_to_memmove;
		/* copy the unaligned tail byte-wise, from the end down to the last word boundary */
		while (p > align_end)
			*--p = *--s;

		/* then copy the remaining whole words, highest word first */
		word_start = (char *)align_up (start);
		bytes_to_memmove = p - word_start;
		p -= bytes_to_memmove;
		s -= bytes_to_memmove;
		MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
	} else {
		volatile char *d = (char*)dest;
		const char *s = (const char*)src;
		size_t tail_bytes;
		/* copy all the aligned words */
		MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));

		/* copy the unaligned tail byte-wise */
		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			d += (size_t)align_down (size);
			s += (size_t)align_down (size);
			do {
				*d++ = *s++;
			} while (--tail_bytes);
		}
	}
}
/**
 * mono_gc_memmove_atomic:
 * @dest: destination of the move
 * @src: source
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned;
 * if either pointer is not word-aligned, this falls back to memmove(), which
 * makes no atomicity guarantee.
 */
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
	if (unaligned_bytes (_toi (dest) | _toi (src)))
		memmove (dest, src, size);
	else
		mono_gc_memmove_aligned (dest, src, size);
}
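/*
 * Usage sketch (hypothetical buffers): the *_atomic entry points pick the
 * word-atomic path only when alignment allows it:
 *
 *	char *dst = ..., *src = ...;                // both word-aligned
 *	mono_gc_memmove_atomic (dst, src, 64);      // word-atomic copy
 *	mono_gc_memmove_atomic (dst + 1, src, 64);  // falls back to plain memmove()
 */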