[interp] Remove unreachable code (#12411)
[mono-project.git] / mono / utils / memfuncs.c
blobfc1f2c9e5d2b5ae1496bc370e50d934efeeb42e0
/**
 * \file
 * Our own bzero/memmove.
 *
 * Copyright (C) 2013-2015 Xamarin Inc
 *
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
/*
 * SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
 * need to copy or zero out memory in code that might be interrupted by collections. To
 * guarantee that those operations will not result in invalid pointers, we must do it
 * word-atomically.
 *
 * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
 * cases where one would assume so. For instance, some implementations (like Darwin's on
 * x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
 * the region preceding the first vector-aligned address. That region could be
 * word-aligned, but it would still be copied byte-wise.
 *
 * All our memory writes here are to "volatile" locations. This is so that C compilers
 * don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
 * do that.
 */
27 #include <config.h>
28 #include <glib.h>
29 #include <string.h>
31 #include "memfuncs.h"
/* Mask selecting the sub-word bits of an address (e.g. 7 on 64-bit). */
#define ptr_mask ((sizeof (void*) - 1))
/*
 * Convert a pointer (or integer) to size_t for address arithmetic.
 * The argument is parenthesized: without it, _toi (p + 1) would expand
 * to (size_t)p + 1, casting only `p` and then adding 1 to the integer
 * instead of converting the whole pointer expression.
 */
#define _toi(ptr) ((size_t)(ptr))
/* Number of bytes by which `ptr` sits above the previous word boundary (0 if aligned). */
#define unaligned_bytes(ptr) (_toi (ptr) & ptr_mask)
/* Round `ptr` down to the nearest word boundary. */
#define align_down(ptr) ((void*)(_toi (ptr) & ~ptr_mask))
/* Round `ptr` up to the nearest word boundary (identity if already aligned). */
#define align_up(ptr) ((void*) ((_toi (ptr) + ptr_mask) & ~ptr_mask))
38 #if SIZEOF_VOID_P == 4
39 #define bytes_to_words(n) ((size_t)(n) >> 2)
40 #elif SIZEOF_VOID_P == 8
41 #define bytes_to_words(n) ((size_t)(n) >> 3)
42 #else
43 #error We only support 32 and 64 bit architectures.
44 #endif
/*
 * Zero `words` pointer-sized slots starting at `dest`, one volatile word
 * store at a time so the compiler cannot lower the loop back to memset()
 * (which might tear words).
 * The count is kept as size_t: the previous `int` count could overflow or
 * truncate for regions larger than INT_MAX words, silently skipping the
 * clear (signed overflow is undefined behavior).
 */
#define BZERO_WORDS(dest,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		size_t __n = (size_t)(words);	\
		size_t __i;	\
		for (__i = 0; __i < __n; ++__i)	\
			__d [__i] = NULL;	\
	} while (0)
/**
 * mono_gc_bzero_aligned:
 * \param dest address to start to clear
 * \param size size of the region to clear
 *
 * Zero \p size bytes starting at \p dest, writing whole words at a time so
 * a concurrent collector never observes a half-cleared pointer slot.
 * The address of \p dest MUST be aligned to word boundaries
 *
 * FIXME borrow faster code from some BSD libc or bionic
 */
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
	volatile char *cursor = (char*)dest;
	size_t word_bytes, tail_bytes;

	g_assert (unaligned_bytes (dest) == 0);

	/* Clear the whole-word prefix; tiny counts get hand-unrolled cases. */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (cursor, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (cursor, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (cursor, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (cursor, 4);
		break;
	default:
		BZERO_WORDS (cursor, bytes_to_words (word_bytes));
	}

	/* Clear any leftover bytes that do not fill a whole word. */
	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		cursor += word_bytes;
		while (tail_bytes--)
			*cursor++ = 0;
	}
}
/**
 * mono_gc_bzero_atomic:
 * \param dest address to start to clear
 * \param size size of the region to clear
 *
 * Zero \p size bytes starting at \p dest.
 *
 * Use this to zero memory without word tearing when \p dest is aligned.
 */
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
	/* Word-atomic clearing is only possible (and only needed) on an
	 * aligned destination; otherwise fall back to plain memset(). */
	if (unaligned_bytes (dest) == 0)
		mono_gc_bzero_aligned (dest, size);
	else
		memset (dest, 0, size);
}
/*
 * Copy `words` pointer-sized slots from `src` to `dest`, lowest address
 * first, with volatile word stores so the compiler cannot replace the loop
 * with memcpy()/memmove() (which may tear words).
 * Safe for overlap only when dest <= src.
 * The count is kept as size_t: the previous cast to `int` could truncate
 * (or overflow, which is UB) for copies larger than INT_MAX words.
 */
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		void **__s = (void**)(src);	\
		size_t __n = (size_t)(words);	\
		size_t __i;	\
		for (__i = 0; __i < __n; ++__i)	\
			__d [__i] = __s [__i];	\
	} while (0)
/*
 * Copy `words` pointer-sized slots from `src` to `dest`, highest address
 * first, with volatile word stores so the compiler cannot replace the loop
 * with memcpy()/memmove() (which may tear words).
 * Safe for overlap only when dest >= src.
 * The count is kept as size_t (the previous `int` cast could truncate for
 * huge copies); the loop counts down with a post-decrement test instead of
 * a signed `>= 0` comparison, which would never terminate on unsigned.
 */
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
		void * volatile *__d = (void* volatile*)(dest);	\
		void **__s = (void**)(src);	\
		size_t __i = (size_t)(words);	\
		while (__i-- > 0)	\
			__d [__i] = __s [__i];	\
	} while (0)
/**
 * mono_gc_memmove_aligned:
 * \param dest destination of the move
 * \param src source
 * \param size size of the block to move
 *
 * Move \p size bytes from \p src to \p dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned
 */
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
	g_assert (unaligned_bytes (dest) == 0);
	g_assert (unaligned_bytes (src) == 0);

	/*
	 * A copy shorter than one word cannot tear a stored pointer, so the
	 * plain libc memmove is safe for it.
	 */
	if (size < sizeof (void*)) {
		memmove (dest, src, size);
		return;
	}

	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 * Pointers to managed objects must always be stored in word aligned addresses, so
	 * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
	 *
	 * We don't need to case when source and destination have different alignments since we only do word stores
	 * using memmove, which must handle it.
	 */
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) {
		/* Overlap with dest above src: must copy from the end downward. */
		volatile char *dp = (char*)dest + size;
		char *sp = (char*)src + size;
		char *region_start = (char*)dest;
		char *byte_copy_limit = MAX ((char*)dest, (char*)align_down (dp));
		char *words_begin;
		size_t word_region_bytes;

		/* Byte-copy the unaligned tail of the region first... */
		while (dp > byte_copy_limit)
			*--dp = *--sp;

		/* ...then move the aligned middle one whole word at a time. */
		words_begin = (char *)align_up (region_start);
		word_region_bytes = dp - words_begin;
		dp -= word_region_bytes;
		sp -= word_region_bytes;
		MEMMOVE_WORDS_DOWNWARD (dp, sp, bytes_to_words (word_region_bytes));
	} else {
		/* Forward copy: all whole words first... */
		volatile char *dp = (char*)dest;
		const char *sp = (const char*)src;
		size_t tail_bytes;

		MEMMOVE_WORDS_UPWARD (dp, sp, bytes_to_words (align_down (size)));

		/* ...then whatever bytes remain past the last full word. */
		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			dp += (size_t)align_down (size);
			sp += (size_t)align_down (size);
			do {
				*dp++ = *sp++;
			} while (--tail_bytes);
		}
	}
}
/**
 * mono_gc_memmove_atomic:
 * \param dest destination of the move
 * \param src source
 * \param size size of the block to move
 *
 * Move \p size bytes from \p src to \p dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned
 */
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
	/* Tearing-free copying needs both ends word aligned (checking the OR
	 * of both addresses covers them in one test); otherwise fall back to
	 * plain memmove(). */
	if (unaligned_bytes (_toi (dest) | _toi (src)) == 0)
		mono_gc_memmove_aligned (dest, src, size);
	else
		memmove (dest, src, size);
}