hphp/util/word-mem.h
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#pragma once

#include <limits>

#include <folly/Portability.h>

#include "hphp/util/assertions.h"
#include "hphp/util/portability.h"
extern "C" void* _memcpy8(void* dst, const void* src, size_t len);
extern "C" void* _memcpy16(void* dst, const void* src, size_t len);
extern "C" void _bcopy32(void* dst, const void* src, size_t len);
extern "C" void _bcopy_in_64(void* dst, const void* src, size_t lenIn64);
namespace HPHP {
/*
 * Specialized memcpy implementations that take advantage of the known
 * properties of length and alignment.
 *
 *  o memcpy8(dst, src, len) is equivalent to
 *        static_cast<char*>(memcpy(dst, src, (len + 7) / 8 * 8)) + len;
 *    it returns a char* pointing to dst[len] instead of dst, in order to
 *    ease its use in string operations.
 *
 *    Note that it could overrun the buffer by up to 7 bytes, depending on
 *    len and the alignment of the buffers.  When both src and dst are
 *    aligned to 8 bytes, it is safe.  It can also be used in other
 *    situations given sufficient readable space after the buffers.
 *
 *  o memcpy16(dst, src, len) is equivalent to
 *        assert(len > 0 && len % 16 == 0);
 *        memcpy(dst, src, len);
 *
 *  o bcopy32(dst, src, len) is equivalent to
 *        assert(len >= 32);
 *        memcpy(dst, src, len / 32 * 32);
 *    except that it returns void.
 *
 *  o bcopy_in_64(dst, src, lenIn64) is equivalent to
 *        assert(lenIn64 > 0);
 *        memcpy(dst, src, 64 * lenIn64);
 *    except that it returns void.
 */
inline char* memcpy8(void* dst, const void* src, size_t len) {
#if defined(__x86_64__)
  return reinterpret_cast<char*>(_memcpy8(dst, src, len));
#else
  memcpy(dst, src, len);
  return reinterpret_cast<char*>(dst) + len;
#endif
}
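
// A minimal usage sketch (illustrative, not part of the original header):
// because memcpy8() returns a pointer to dst[len], appended writes need no
// offset bookkeeping.  Assumes `dst` is 8-byte aligned and that all three
// buffers have at least 7 bytes of readable/writable slack past their last
// byte, per the overrun caveat in the comment block above.
inline char* appendTwoStrings(char* dst, const char* s1, size_t len1,
                              const char* s2, size_t len2) {
  char* p = memcpy8(dst, s1, len1);  // p == dst + len1
  // The second destination may be unaligned; this relies on the slack-space
  // allowance described above.
  return memcpy8(p, s2, len2);       // returns dst + len1 + len2
}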
inline char* memcpy16(void* dst, const void* src, size_t len) {
  assertx(len > 0 && len % 16 == 0);
#if defined(__x86_64__)
  return reinterpret_cast<char*>(_memcpy16(dst, src, len));
#else
  return reinterpret_cast<char*>(memcpy(dst, src, len));
#endif
}
inline void bcopy32(void* dst, const void* src, size_t len) {
  assertx(len >= 32);
#if defined(__x86_64__)
  _bcopy32(dst, src, len);
#else
  memcpy(dst, src, len / 32 * 32);
#endif
}
inline void bcopy_in_64(void* dst, const void* src, size_t lenIn64) {
  assertx(lenIn64 != 0);
#if defined(__x86_64__)
  _bcopy_in_64(dst, src, lenIn64);
#else
  memcpy(dst, src, lenIn64 * 64);
#endif
}
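
// A minimal sketch (illustrative, not from the original header) of how a
// caller might pair bcopy32() with a plain memcpy() for the tail, since
// bcopy32() only copies the 32-byte-aligned prefix of `len`:
inline void copyWithTail(void* dst, const void* src, size_t len) {
  if (len >= 32) bcopy32(dst, src, len);  // copies len / 32 * 32 bytes
  auto const done = len / 32 * 32;
  memcpy(static_cast<char*>(dst) + done,
         static_cast<const char*>(src) + done,
         len - done);                     // remaining 0..31 bytes
}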
// Inline assembly version to avoid a function call.
inline void bcopy32_inline(void* dst, const void* src, size_t len) {
  assertx(len >= 32);
#if defined(__x86_64__)
  // Copy 32 bytes per iteration using two unaligned 16-byte SSE moves.
  __asm__ __volatile__("shr    $5, %0\n"
                       ASM_LOCAL_LABEL("BCP32%=") ":\n"
                       "movdqu (%1), %%xmm0\n"
                       "movdqu 16(%1), %%xmm1\n"
                       "add    $32, %1\n"
                       "movdqu %%xmm0, (%2)\n"
                       "movdqu %%xmm1, 16(%2)\n"
                       "add    $32, %2\n"
                       "dec    %0\n"
                       "jg     " ASM_LOCAL_LABEL("BCP32%=") "\n"
                       : "+r"(len), "+r"(src), "+r"(dst)
                       :: "xmm0", "xmm1"
                      );
#elif defined(__aarch64__)
  // Copy 32 bytes per iteration using two load-pair/store-pair sequences.
  int64_t t3, t4, t5, t6, t7;
  __asm__ __volatile__("lsr %x0, %x0, #5\n"
                       "sub %x1, %x1, #16\n"
                       "sub %x2, %x2, #16\n"
                       ASM_LOCAL_LABEL("BCP32%=") ":\n"
                       "ldp %x3, %x4, [%x1, #16]\n"
                       "ldp %x5, %x6, [%x1, #32]!\n"
                       "stp %x3, %x4, [%x2, #16]\n"
                       "stp %x5, %x6, [%x2, #32]!\n"
                       "subs %x0, %x0, #1\n"
                       "bgt " ASM_LOCAL_LABEL("BCP32%=") "\n"
                       : "+r"(len), "+r"(src), "+r"(dst),
                         "=r"(t3), "=r"(t4), "=r"(t5), "=r"(t6), "=r"(t7)
                       :: "cc"
                      );
#else
  bcopy32(dst, src, len);
#endif
}
inline void memcpy16_inline(void* dst, const void* src, size_t len) {
  assertx(len >= 16 && len % 16 == 0);
#if defined(__x86_64__)
  // Copy the final 16 bytes first, then loop over 32-byte chunks from the
  // front; this covers a trailing 16-byte remainder without an extra branch.
  __asm__ __volatile__("movdqu -16(%1, %0), %%xmm0\n"
                       "movdqu %%xmm0, -16(%2, %0)\n"
                       "shr    $5, %0\n"
                       "jz     " ASM_LOCAL_LABEL("END%=") "\n"
                       ASM_LOCAL_LABEL("R32%=") ":\n"
                       "movdqu (%1), %%xmm0\n"
                       "movdqu 16(%1), %%xmm1\n"
                       "add    $32, %1\n"
                       "movdqu %%xmm0, (%2)\n"
                       "movdqu %%xmm1, 16(%2)\n"
                       "add    $32, %2\n"
                       "dec    %0\n"
                       "jg     " ASM_LOCAL_LABEL("R32%=") "\n"
                       ASM_LOCAL_LABEL("END%=") ":\n"
                       : "+r"(len), "+r"(src), "+r"(dst)
                       :: "xmm0", "xmm1"
                      );
#elif defined(__aarch64__)
  // Same scheme as above: last 16 bytes first, then 32 bytes per iteration.
  int64_t t3, t4, t5, t6, s1, d1, d2;
  __asm__ __volatile__("mov %x7, %x1\n"
                       "add %x1, %x1, %x0\n"
                       "ldp %x3, %x4, [%x1, #-16]!\n"
                       "mov %x8, %x2\n"
                       "add %x2, %x2, %x0\n"
                       "stp %x3, %x4, [%x2, #-16]!\n"
                       "lsr %x0, %x0, #5\n"
                       "cbz %x0, " ASM_LOCAL_LABEL("END%=") "\n"
                       "sub %x7, %x7, #16\n"
                       "sub %x8, %x8, #16\n"
                       ASM_LOCAL_LABEL("R32%=") ":\n"
                       "ldp %x3, %x4, [%x7, #16]\n"
                       "ldp %x5, %x6, [%x7, #32]!\n"
                       "stp %x3, %x4, [%x8, #16]\n"
                       "stp %x5, %x6, [%x8, #32]!\n"
                       "subs %x0, %x0, #1\n"
                       "bgt " ASM_LOCAL_LABEL("R32%=") "\n"
                       ASM_LOCAL_LABEL("END%=") ":\n"
                       : "+r"(len), "+r"(src), "+r"(dst),
                         "=r"(t3), "=r"(t4), "=r"(t5), "=r"(t6),
                         "=r"(s1), "=r"(d1), "=r"(d2)
                       :: "cc"
                      );
#else
  memcpy16(dst, src, len);
#endif
}
//////////////////////////////////////////////////////////////////////
/*
 * Word-at-a-time comparison for two strings of length `lenBytes'.  Returns
 * true if the regions are the same.  This should be invoked only when we
 * know the two strings have the same length.  It will not check for the
 * null terminator.
 *
 * Assumes that the buffer addresses are word aligned, and that it can read
 * lenBytes rounded up to a whole word.  This is possible in HPHP because we
 * always allocate whole numbers of words.  The final word compare is
 * adjusted to handle the slack in lenBytes so only the bytes we care about
 * are compared.
 */
ALWAYS_INLINE
bool wordsame(const void* mem1, const void* mem2, uint32_t lenBytes) {
  using T = uint64_t;
  auto constexpr DEBUG_ONLY W = sizeof(T);

  assert(reinterpret_cast<const uintptr_t>(mem1) % W == 0);
  assert(reinterpret_cast<const uintptr_t>(mem2) % W == 0);

  // ASan is less precise than valgrind and believes this function overruns
  // reads.
#if !FOLLY_SANITIZE

  // For speed, we count up towards 0 from -lenBytes * 8 in units of a word
  // of bits.  When we reach a value >= 0, that is the number of bits we
  // need to ignore in the last compare.  Since we're on a little-endian
  // architecture, we can do the ignoring by shifting left by that many
  // bits.  We also unroll the nBits increment from the first iteration,
  // because we can fold that calculation together with the multiply by 8
  // into a single lea instruction.
  const int32_t nBytes = -lenBytes;
  // We need to bail out early if len is 0, and we can save a test
  // instruction if we reuse the flags from the negation we just did.
  if (UNLIKELY(nBytes == 0)) return true;
  int64_t nBits = int64_t(nBytes) * 8 + (W * 8);

  // Use the base+index addressing mode in x86, so that we only need to
  // increment the base pointer in the loop.
  auto p1 = reinterpret_cast<intptr_t>(mem1);
  auto const diff = reinterpret_cast<intptr_t>(mem2) - p1;

  T data;
  do {
    data = *(reinterpret_cast<const T*>(p1));
    data ^= *(reinterpret_cast<const T*>(p1 + diff));
    if (nBits >= 0) {
      // As a note for future consideration, we could consider precomputing
      // a 64-bit mask, so that the fraction of the last qword can be
      // checked faster.  But that would require an additional register for
      // the mask.  So it depends on register pressure of the call site.
      return !(data << nBits);
    }
    p1 += W;
    nBits += W * 8;
  } while (data == 0);
  return false;

#else // FOLLY_SANITIZE

  return !memcmp(mem1, mem2, lenBytes);

#endif
}
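
// Worked example (illustrative, not from the original source): comparing
// two 11-byte buffers reads two whole 8-byte words from each side.  nBits
// starts at -11 * 8 + 64 = -24; after the first word it becomes 40, i.e.
// (16 - 11) * 8.  `data << 40` then drops the 5 slack bytes (the high bytes
// of the word on little-endian), so only bytes 8..10 take part in the final
// compare:
inline bool wordsameExample() {
  alignas(8) char a[16] = "hello world";  // 11 meaningful bytes, rest zero
  alignas(8) char b[16] = "hello world";
  b[12] = 'X';                            // differs only in the slack bytes
  return wordsame(a, b, 11);              // still true: slack is ignored
}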
/*
 * Like memcpy, but copies numT POD values 8 bytes at a time.
 * The actual number of bytes copied must be a multiple of 8.
 */
template<class T>
T* wordcpy(T* to, const T* from, size_t numT) {
  assert(numT < std::numeric_limits<int64_t>::max() &&
         (numT * sizeof(T)) % 8 == 0);
  size_t numWords = numT * sizeof(T) / 8;
  assert(numWords != 0);
  auto d = (int64_t*)to;
  auto s = (int64_t*)from;
  do {
    *d++ = *s++;
  } while (--numWords);
  return to;
}
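
// Illustrative usage sketch (assumption, not from the original header):
// wordcpy over an array of 16-byte trivially-copyable entries, where the
// total byte count 4 * 16 = 64 is a multiple of 8 as required.
struct WordcpyExampleEntry { uint64_t key; uint64_t val; };
inline void wordcpyExample(WordcpyExampleEntry (&dst)[4],
                           const WordcpyExampleEntry (&src)[4]) {
  wordcpy(dst, src, 4);  // copies 64 bytes in eight 8-byte word stores
}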
/*
 * Fills a memory area with ones, 8 bytes at a time.
 */
template<class T>
T* wordfillones(T* ptr, size_t numT) {
  assert(numT < std::numeric_limits<int64_t>::max() &&
         (numT * sizeof(T)) % 8 == 0);
  assert(numT != 0);
  auto numWords = numT * sizeof(T) / 8;
  auto d = (int64_t*)ptr;
  do {
    *d++ = -1;
  } while (--numWords);
  return ptr;
}
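
// Illustrative sketch (assumption, not from the original header): using
// wordfillones() as a word-granularity memset(ptr, 0xFF, ...), e.g. to
// poison a block before use.
inline void wordfillonesExample(uint64_t (&slab)[8]) {
  wordfillones(slab, 8);  // every byte of slab becomes 0xFF
}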
//////////////////////////////////////////////////////////////////////

}