fix build with UBSAN
[hiphop-php.git] / hphp / util / alloc.h
blob2776ec8e40da4c8975556bc021bf94db4a79323b
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #ifndef incl_HPHP_UTIL_ALLOC_H_
18 #define incl_HPHP_UTIL_ALLOC_H_
#include <stdint.h>

#include <atomic>
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <limits>
#include <string>
#include <utility>

#include <folly/Format.h>
#include <folly/Portability.h>
#include <folly/portability/PThread.h>

#include "hphp/util/assertions.h"
#include "hphp/util/exception.h"
30 #if defined(FOLLY_SANITIZE_ADDRESS) || defined(FOLLY_SANITIZE_THREAD) || \
31 defined(UNDEFINED_SANITIZER)
32 // ASan is less precise than valgrind so we'll need a superset of those tweaks
33 # define VALGRIND
34 // TODO: (t2869817) ASan doesn't play well with jemalloc
35 # ifdef USE_JEMALLOC
36 # undef USE_JEMALLOC
37 # endif
38 #endif
40 #ifdef USE_TCMALLOC
41 #include <gperftools/malloc_extension.h>
42 #endif
44 #ifndef USE_JEMALLOC
45 # ifdef __FreeBSD__
46 # include "stdlib.h"
47 # include "malloc_np.h"
48 # else
49 # include "malloc.h"
50 # endif
51 #else
52 # include <jemalloc/jemalloc.h>
53 #if (JEMALLOC_VERSION_MAJOR == 5) && defined(__linux__)
54 # define USE_JEMALLOC_EXTENT_HOOKS 1
55 # endif
56 # if (JEMALLOC_VERSION_MAJOR > 4)
57 # define JEMALLOC_NEW_ARENA_CMD "arenas.create"
58 # else
59 # define JEMALLOC_NEW_ARENA_CMD "arenas.extend"
60 # endif
61 #endif
63 #include "hphp/util/maphuge.h"
extern "C" {
#ifdef USE_TCMALLOC
#define MallocExtensionInstance _ZN15MallocExtension8instanceEv
MallocExtension* MallocExtensionInstance() __attribute__((__weak__));
#endif

#ifdef USE_JEMALLOC
// Weak declarations of the jemalloc control API, so the header compiles and
// links whether or not jemalloc is actually present; callers must null-check
// (see the assert in mallctlHelper below).
int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
            size_t newlen) __attribute__((__weak__));
int mallctlnametomib(const char *name, size_t* mibp, size_t*miblenp)
  __attribute__((__weak__));
int mallctlbymib(const size_t* mibp, size_t miblen, void *oldp,
                 size_t *oldlenp, void *newp, size_t newlen)
  __attribute__((__weak__));
void malloc_stats_print(void (*write_cb)(void *, const char *),
                        void *cbopaque, const char *opts)
  __attribute__((__weak__));
#endif
}

// Tag type selecting the non-null-checking placement-new overload below.
enum class NotNull {};

/*
 * The placement-new provided by the standard library is required by the
 * C++ specification to perform a null check because it is marked with noexcept
 * or throw() depending on the compiler version. This override of placement
 * new doesn't use either of these, so it is allowed to omit the null check.
 */
inline void* operator new(size_t, NotNull, void* location) {
  assert(location);
  return location;
}
98 namespace HPHP {
99 ///////////////////////////////////////////////////////////////////////////////
// Compile-time flag: true iff HHVM is built against jemalloc.
constexpr bool use_jemalloc =
#ifdef USE_JEMALLOC
  true
#else
  false
#endif
  ;

// ASAN modifies the generated code in ways that cause abnormally high C++
// stack usage.
constexpr size_t kStackSizeMinimum =
#ifdef FOLLY_SANITIZE_ADDRESS
  16 << 20;
#else
  8 << 20;
#endif
118 struct OutOfMemoryException : Exception {
119 explicit OutOfMemoryException(size_t size)
120 : Exception("Unable to allocate %zu bytes of memory", size) {}
121 EXCEPTION_COMMON_IMPL(OutOfMemoryException);
124 ///////////////////////////////////////////////////////////////////////////////
#ifdef USE_JEMALLOC
// Arena used for "low" memory (pointers that must fit in 32 bits), and the
// count of huge pages it may still claim.
extern unsigned low_arena;
extern std::atomic<int> low_huge_pages;

inline int low_mallocx_flags() {
  // Allocate from low_arena, and bypass the implicit tcache to assure that the
  // result actually comes from low_arena.
#ifdef MALLOCX_TCACHE_NONE
  return MALLOCX_ARENA(low_arena) | MALLOCX_TCACHE_NONE;
#else
  return MALLOCX_ARENA(low_arena);
#endif
}

#ifdef MALLOCX_TCACHE_NONE
inline constexpr int low_dallocx_flags() {
  // Bypass the implicit tcache for this deallocation.
  return MALLOCX_TCACHE_NONE;
}
#else
inline int low_dallocx_flags() {
  // Prior to the introduction of MALLOCX_TCACHE_NONE, explicitly specifying
  // MALLOCX_ARENA(a) caused jemalloc to bypass tcache.
  return MALLOCX_ARENA(low_arena);
}
#endif

#ifdef USE_JEMALLOC_EXTENT_HOOKS
// Arenas backed by 1G huge pages: one for low memory, one for general use.
extern unsigned low_huge1g_arena;
extern unsigned high_huge1g_arena;

// Explicit per-thread tcache for the huge arenas.
extern __thread int high_huge1g_tcache;

inline int low_mallocx_huge1g_flags() {
  // MALLOCX_TCACHE_NONE is introduced earlier than the chunk hook API
  return MALLOCX_ARENA(low_huge1g_arena) | MALLOCX_TCACHE_NONE;
}

inline constexpr int low_dallocx_huge1g_flags() {
  return MALLOCX_TCACHE_NONE;
}

inline int mallocx_huge1g_flags() {
  return MALLOCX_ARENA(high_huge1g_arena) | MALLOCX_TCACHE(high_huge1g_tcache);
}

inline int dallocx_huge1g_flags() {
  return MALLOCX_TCACHE(high_huge1g_tcache);
}

// Functions to manipulate the explicit tcaches for the huge arenas.
void thread_huge_tcache_create();       // tcache.create
void thread_huge_tcache_flush();        // tcache.flush
void thread_huge_tcache_destroy();      // tcache.destroy

#endif

#endif
/*
 * Allocate `size` bytes from the low (below 4G) arena when jemalloc is in
 * use; otherwise fall back to plain malloc(). Returns null on failure.
 */
inline void* low_malloc(size_t size) {
#ifndef USE_JEMALLOC
  return malloc(size);
#else
  extern void* low_malloc_impl(size_t size);
  return low_malloc_impl(size);
#endif
}
/*
 * Free memory obtained from low_malloc(). Safe to call with nullptr.
 */
inline void low_free(void* ptr) {
#ifndef USE_JEMALLOC
  free(ptr);
#else
  // dallocx has no null-pointer special case, so guard explicitly.
  if (ptr) dallocx(ptr, low_dallocx_flags());
#endif
}
/*
 * Set how many huge pages the low arena may use. No-op when built without
 * jemalloc.
 */
inline void low_malloc_huge_pages(int pages) {
#ifdef USE_JEMALLOC
  low_huge_pages = pages;
#endif
}

// Exclude the address range [start, end) from huge-page backing in the low
// arena.
void low_malloc_skip_huge(void* start, void* end);
212 inline void* low_malloc_data(size_t size) {
213 #ifndef USE_JEMALLOC_EXTENT_HOOKS
214 return low_malloc(size);
215 #else
216 extern void* low_malloc_huge1g_impl(size_t);
217 return low_malloc_huge1g_impl(size);
218 #endif
221 inline void low_free_data(void* ptr) {
222 #ifndef USE_JEMALLOC_EXTENT_HOOKS
223 low_free(ptr);
224 #else
225 if (ptr) dallocx(ptr, low_dallocx_huge1g_flags());
226 #endif
/*
 * Allocate from the high 1G-huge-page arena when extent hooks are available;
 * otherwise plain malloc(). Returns null on failure.
 */
inline void* malloc_huge(size_t size) {
#ifndef USE_JEMALLOC_EXTENT_HOOKS
  return malloc(size);
#else
  extern void* malloc_huge1g_impl(size_t);
  return malloc_huge1g_impl(size);
#endif
}
/*
 * Free memory obtained from malloc_huge(). Safe to call with nullptr.
 */
inline void free_huge(void* ptr) {
#ifndef USE_JEMALLOC_EXTENT_HOOKS
  free(ptr);
#else
  if (ptr) dallocx(ptr, dallocx_huge1g_flags());
#endif
}
248 * Safe memory allocation.
250 inline void* safe_malloc(size_t size) {
251 void* p = malloc(size);
252 if (!p) throw OutOfMemoryException(size);
253 return p;
256 inline void* safe_calloc(size_t count, size_t size) {
257 void* p = calloc(count, size);
258 if (!p) throw OutOfMemoryException(size);
259 return p;
262 inline void* safe_realloc(void* ptr, size_t size) {
263 ptr = realloc(ptr, size);
264 if (!ptr && size > 0) throw OutOfMemoryException(size);
265 return ptr;
/*
 * Counterpart of the safe_* allocators; simply forwards to free().
 */
inline void safe_free(void* ptr) {
  free(ptr);
}
/*
 * Instruct low level memory allocator to free memory back to system. Called
 * when thread's been idle and predicted to continue to be idle for a while.
 */
void flush_thread_caches();

/*
 * Instruct the kernel to free parts of the unused stack back to the system.
 * Like flush_thread_caches, this is called when the thread has been idle
 * and predicted to continue to be idle for a while.
 */
void flush_thread_stack();

/*
 * Free all unused memory back to system. On error, returns false and, if
 * not null, sets an error message in *errStr.
 */
bool purge_all(std::string* errStr = nullptr);
/*
 * Like scoped_ptr, but calls free() on destruct. Non-copyable; ownership may
 * be assigned at most once via operator=(void*).
 */
struct ScopedMem {
  ScopedMem(const ScopedMem&) = delete;
  ScopedMem& operator=(const ScopedMem&) = delete;

  ScopedMem() : m_ptr(nullptr) {}
  explicit ScopedMem(void* ptr) : m_ptr(ptr) {}
  ~ScopedMem() { free(m_ptr); }

  // Take ownership of `ptr`; only valid while no pointer is held yet.
  ScopedMem& operator=(void* ptr) {
    assert(!m_ptr);
    m_ptr = ptr;
    return *this;
  }

 private:
  void* m_ptr;
};
// Per-thread stack bounds, filled in by init_stack_limits(); presumably
// s_stackLimit is the lowest usable stack address — confirm in alloc.cpp.
extern __thread uintptr_t s_stackLimit;
extern __thread size_t s_stackSize;
void init_stack_limits(pthread_attr_t* attr);

// System page size, captured once at startup.
extern const size_t s_pageSize;
/*
 * The numa node this thread is bound to
 */
extern __thread int32_t s_numaNode;

/*
 * enable the numa support in hhvm,
 * and determine whether threads should default to using
 * local memory.
 */
void enable_numa(bool local);

/*
 * Set the thread affinity, and the jemalloc arena for the current
 * thread.
 * Also initializes s_numaNode
 */
void set_numa_binding(int node);
335 * mallctl wrappers.
339 * Call mallctl, reading/writing values of type <T> if out/in are non-null,
340 * respectively. Assert/log on error, depending on errOk.
342 template <typename T>
343 int mallctlHelper(const char *cmd, T* out, T* in, bool errOk) {
344 #ifdef USE_JEMALLOC
345 assert(mallctl != nullptr);
346 size_t outLen = sizeof(T);
347 int err = mallctl(cmd,
348 out, out ? &outLen : nullptr,
349 in, in ? sizeof(T) : 0);
350 assert(err != 0 || outLen == sizeof(T));
351 #else
352 int err = ENOENT;
353 #endif
354 if (err != 0) {
355 if (!errOk) {
356 std::string errStr =
357 folly::format("mallctl {}: {} ({})", cmd, strerror(err), err).str();
358 // Do not use Logger here because JEMallocInitializer() calls this
359 // function and JEMallocInitializer has the highest constructor priority.
360 // The static variables in Logger are not initialized yet.
361 fprintf(stderr, "%s\n", errStr.c_str());
363 always_assert(errOk || err == 0);
365 return err;
// Read the old value into *out while writing `in`.
template <typename T>
int mallctlReadWrite(const char *cmd, T* out, T in, bool errOk=false) {
  return mallctlHelper(cmd, out, &in, errOk);
}

// Read-only mallctl query.
template <typename T>
int mallctlRead(const char* cmd, T* out, bool errOk=false) {
  return mallctlHelper(cmd, out, static_cast<T*>(nullptr), errOk);
}

// Write-only mallctl update.
template <typename T>
int mallctlWrite(const char* cmd, T in, bool errOk=false) {
  return mallctlHelper(cmd, static_cast<T*>(nullptr), &in, errOk);
}

// Invoke a value-less mallctl command (e.g. "thread.tcache.flush").
int mallctlCall(const char* cmd, bool errOk=false);
/*
 * jemalloc pprof utility functions. Each returns a mallctl-style error code
 * (0 on success).
 */
int jemalloc_pprof_enable();
int jemalloc_pprof_disable();
int jemalloc_pprof_dump(const std::string& prefix, bool force);
392 template <class T>
393 struct LowAllocator {
394 typedef T value_type;
395 typedef T* pointer;
396 typedef const T* const_pointer;
397 typedef T& reference;
398 typedef const T& const_reference;
399 typedef std::size_t size_type;
400 typedef std::ptrdiff_t difference_type;
402 template <class U>
403 struct rebind { using other = LowAllocator<U>; };
405 pointer address(reference value) {
406 return &value;
408 const_pointer address(const_reference value) const {
409 return &value;
412 LowAllocator() noexcept {}
413 template<class U> LowAllocator(const LowAllocator<U>&) noexcept {}
414 ~LowAllocator() noexcept {}
416 size_type max_size() const {
417 return std::numeric_limits<std::size_t>::max() / sizeof(T);
420 pointer allocate(size_type num, const void* = nullptr) {
421 pointer ret = (pointer)low_malloc_data(num * sizeof(T));
422 return ret;
425 template<class U, class... Args>
426 void construct(U* p, Args&&... args) {
427 ::new ((void*)p) U(std::forward<Args>(args)...);
430 void destroy(pointer p) {
431 p->~T();
434 void deallocate(pointer p, size_type /*num*/) { low_free_data((void*)p); }
436 template<class U> bool operator==(const LowAllocator<U>&) const {
437 return true;
440 template<class U> bool operator!=(const LowAllocator<U>&) const {
441 return false;
445 template <class T>
446 struct HugeAllocator {
447 using value_type = T;
448 using pointer = T*;
449 using const_pointer = const T*;
450 using reference = T&;
451 using const_reference = const T&;
452 using size_type = std::size_t;
453 using difference_type = std::ptrdiff_t;
455 template <class U>
456 struct rebind { using other = HugeAllocator<U>; };
458 pointer address(reference value) {
459 return &value;
461 const_pointer address(const_reference value) const {
462 return &value;
465 HugeAllocator() noexcept {}
466 template<class U> explicit HugeAllocator(const HugeAllocator<U>&) noexcept {}
467 ~HugeAllocator() noexcept {}
469 size_type max_size() const {
470 return std::numeric_limits<std::size_t>::max() / sizeof(T);
473 pointer allocate(size_type num, const void* = nullptr) {
474 pointer ret = (pointer)malloc_huge(num * sizeof(T));
475 return ret;
478 template<class U, class... Args>
479 void construct(U* p, Args&&... args) {
480 ::new ((void*)p) U(std::forward<Args>(args)...);
483 void destroy(pointer p) {
484 p->~T();
487 void deallocate(pointer p, size_type /*num*/) { free_huge((void*)p); }
489 template<class U> bool operator==(const HugeAllocator<U>&) const {
490 return true;
493 template<class U> bool operator!=(const HugeAllocator<U>&) const {
494 return false;
/*
 * Information from /proc/self/status, along with other HHVM-specific memory
 * usage data. It is put here instead of in process.h, because we need to do
 * mallctl() to access jemalloc stats.
 *
 * Kernel documentation: http://man7.org/linux/man-pages/man5/proc.5.html
 */
struct MemStatus {
  int64_t VmSize{-1};                   // virtual memory size
  int64_t VmRSS{-1};                    // RSS, not including hugetlb pages
  int64_t VmHWM{-1};                    // peak RSS
  int64_t HugetlbPages{0};              // Hugetlb mappings (2M + 1G)

  // 'Real' memory usage that includes VMRSS and HugetlbPages, but excludes
  // unused space held by jemalloc. This is mostly used to track regressions.
  int64_t adjustedRSS{-1};

  // Constructor reads /proc/self/status and queries jemalloc to get the real
  // usage.
  MemStatus();

  // True when all fields were successfully parsed.
  bool valid() const {
    return VmSize > 0 && VmRSS > 0 && VmHWM > 0 && HugetlbPages >= 0;
  }
};
523 ///////////////////////////////////////////////////////////////////////////////
526 #endif // incl_HPHP_UTIL_ALLOC_H_