Add sub-controls for Hack array compat runtime checks
[hiphop-php.git] / hphp / runtime / base / rds.cpp
blob: 31a648b50f8bb4532732898ed4c8857af21578ea
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/base/rds.h"
#include "hphp/runtime/vm/vm-regs.h"

#include <cassert>
#include <cstdio>
#include <mutex>
#include <atomic>
#include <vector>

#ifndef _MSC_VER
#include <execinfo.h>
#endif

#include <folly/sorted_vector_types.h>
#include <folly/String.h>
#include <folly/Hash.h>
#include <folly/Bits.h>
#include <folly/portability/SysMman.h>

#include <tbb/concurrent_hash_map.h>

#include "hphp/util/logger.h"
#include "hphp/util/maphuge.h"
#include "hphp/util/numa.h"
#include "hphp/util/smalllocks.h"
#include "hphp/util/type-scan.h"

#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/vm/debug/debug.h"
#include "hphp/runtime/vm/treadmill.h"
#include "hphp/runtime/vm/jit/vm-protect.h"

namespace HPHP { namespace rds {

//////////////////////////////////////////////////////////////////////

namespace {

//////////////////////////////////////////////////////////////////////

using Guard = std::lock_guard<std::mutex>;

/*
 * This mutex protects actually allocating from RDS (the above
 * statics).  It is ordered *after* the locks in s_linkTable.
 */
std::mutex s_allocMutex;

//////////////////////////////////////////////////////////////////////
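
// Note: the following visitors operate on Symbol (a boost::variant over the
// per-kind keys such as StaticLocal and ClsConstant).  SymbolKind names the
// variant alternative, SymbolRep builds a human-readable string for it, and
// SymbolEq/SymbolHash provide the equality and hashing used to key the link
// table below.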
struct SymbolKind : boost::static_visitor<std::string> {
  std::string operator()(StaticLocal /*k*/) const { return "StaticLocal"; }
  std::string operator()(ClsConstant /*k*/) const { return "ClsConstant"; }
  std::string operator()(StaticMethod /*k*/) const { return "StaticMethod"; }
  std::string operator()(StaticMethodF /*k*/) const { return "StaticMethodF"; }
  std::string operator()(Profile /*k*/) const { return "Profile"; }
  std::string operator()(SPropCache /*k*/) const { return "SPropCache"; }
};

struct SymbolRep : boost::static_visitor<std::string> {
  std::string operator()(StaticLocal k) const {
    const Func* func = Func::fromFuncId(k.funcId);
    const Class* cls = getOwningClassForFunc(func);
    std::string name;
    if (cls != func->cls()) {
      name = cls->name()->toCppString() + "::" +
             func->name()->toCppString();
    } else {
      name = func->fullName()->toCppString();
    }
    return name + "::" + k.name->toCppString();
  }

  std::string operator()(ClsConstant k) const {
    return k.clsName->data() + std::string("::") + k.cnsName->data();
  }

  std::string operator()(StaticMethod k)  const { return k.name->data(); }
  std::string operator()(StaticMethodF k) const { return k.name->data(); }

  std::string operator()(Profile k) const {
    return folly::format(
      "{}:t{}:{}",
      k.name,
      k.transId,
      k.bcOff
    ).str();
  }

  std::string operator()(SPropCache k) const {
    return k.cls->name()->toCppString() + "::" +
           k.cls->staticProperties()[k.slot].name->toCppString();
  }
};

struct SymbolEq : boost::static_visitor<bool> {
  template<class T, class U>
  typename std::enable_if<
    !std::is_same<T,U>::value,
    bool
  >::type operator()(const T&, const U&) const { return false; }

  bool operator()(StaticLocal k1, StaticLocal k2) const {
    assert(k1.name->isStatic() && k2.name->isStatic());
    return k1.funcId == k2.funcId && k1.name == k2.name;
  }

  bool operator()(ClsConstant k1, ClsConstant k2) const {
    assert(k1.clsName->isStatic() && k1.cnsName->isStatic());
    assert(k2.clsName->isStatic() && k2.cnsName->isStatic());
    return k1.clsName->isame(k2.clsName) &&
           k1.cnsName == k2.cnsName;
  }

  bool operator()(Profile k1, Profile k2) const {
    assert(k1.name->isStatic() && k2.name->isStatic());
    return k1.transId == k2.transId &&
           k1.bcOff == k2.bcOff &&
           k1.name == k2.name;
  }

  template<class T>
  typename std::enable_if<
    std::is_same<T,StaticMethod>::value ||
    std::is_same<T,StaticMethodF>::value,
    bool
  >::type operator()(const T& t1, const T& t2) const {
    assert(t1.name->isStatic() && t2.name->isStatic());
    return t1.name->isame(t2.name);
  }

  bool operator()(SPropCache k1, SPropCache k2) const {
    return k1.cls == k2.cls && k1.slot == k2.slot;
  }
};

struct SymbolHash : boost::static_visitor<size_t> {
  size_t operator()(StaticLocal k) const {
    return folly::hash::hash_128_to_64(
      std::hash<FuncId>()(k.funcId),
      k.name->hash()
    );
  }

  size_t operator()(ClsConstant k) const {
    return folly::hash::hash_128_to_64(
      k.clsName->hash(),
      k.cnsName->hash()
    );
  }

  size_t operator()(Profile k) const {
    return folly::hash::hash_combine(
      k.transId,
      k.bcOff,
      k.name->hash()
    );
  }

  size_t operator()(StaticMethod k)  const { return k.name->hash(); }
  size_t operator()(StaticMethodF k) const { return k.name->hash(); }

  size_t operator()(SPropCache k) const {
    return folly::hash::hash_combine(
      k.cls, k.slot
    );
  }
};

struct HashCompare {
  bool equal(const Symbol& k1, const Symbol& k2) const {
    return boost::apply_visitor(SymbolEq(), k1, k2);
  }

  size_t hash(const Symbol& k) const {
    return boost::apply_visitor(SymbolHash(), k);
  }
};

using LinkTable = tbb::concurrent_hash_map<
  Symbol,
  Handle,
  HashCompare
>;
LinkTable s_linkTable;
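
// Reverse mapping from handles back to symbols.  bindImpl() only inserts into
// this table for handles whose types have scanners, so reverseLink() may
// return folly::none even for a bound handle.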
using RevLinkTable = tbb::concurrent_hash_map<Handle,Symbol>;
RevLinkTable s_handleTable;

//////////////////////////////////////////////////////////////////////

/*
 * Space wasted by alignment is tracked in these maps.  We don't bother with
 * free lists for local RDS because we aren't sensitive to its layout or
 * compactness.
 */
using FreeLists = folly::sorted_vector_map<unsigned,
                                           std::deque<rds::Handle>>;
FreeLists s_normal_free_lists;
FreeLists s_persistent_free_lists;

}

//////////////////////////////////////////////////////////////////////

namespace detail {

// Current allocation frontier for the non-persistent region.
size_t s_normal_frontier = sizeof(Header);

// Frontier and base of the persistent region.
size_t s_persistent_base = 0;
size_t s_persistent_frontier = 0;

// Frontier for the "local" part of the persistent region (data not
// shared between threads, but not zero'd)---downward-growing.
size_t s_local_frontier = 0;

AllocDescriptorList s_normal_alloc_descs;
AllocDescriptorList s_local_alloc_descs;

/*
 * Round base up to align, which must be a power of two.
 */
size_t roundUp(size_t base, size_t align) {
  assert(folly::isPowTwo(align));
  --align;
  return (base + align) & ~align;
}

/*
 * Add the given offset to the free list for its size.
 */
void addFreeBlock(FreeLists& lists, size_t where, size_t size) {
  if (size == 0) return;
  lists[size].emplace_back(where);
}

/*
 * Try to find a tracked free block of a suitable size.  If an oversized block
 * is found instead, the remaining space before and/or after the returned
 * space is re-added to the appropriate free lists.
 */
folly::Optional<Handle> findFreeBlock(FreeLists& lists, size_t size,
                                      size_t align) {
  for (auto it = lists.lower_bound(size); it != lists.end(); ++it) {
    for (auto list_it = it->second.begin();
         list_it != it->second.end();
         ++list_it) {
      auto const blockSize = it->first;
      auto const raw = *list_it;
      auto const end = raw + blockSize;

      auto const handle = roundUp(raw, align);

      if (handle + size > end) continue;
      it->second.erase(list_it);

      auto const headerSize = handle - raw;
      addFreeBlock(lists, raw, headerSize);

      auto const footerSize = blockSize - size - headerSize;
      addFreeBlock(lists, handle + size, footerSize);

      return handle;
    }
  }
  return folly::none;
}
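
/*
 * alloc() carves a new handle out of one of the three regions.  Normal
 * allocations additionally reserve room for a GenNumber immediately before
 * the returned handle (the "prefix" below); that gen number is what lets a
 * slot's per-request initialization state be tracked without zeroing the
 * whole section (see requestInit()).  Persistent allocations live in the
 * shared upper region, while Local allocations grow downward from
 * s_persistent_base and are per-thread but not zeroed per request.
 */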
Handle alloc(Mode mode, size_t numBytes,
             size_t align, type_scan::Index tyIndex) {
  switch (mode) {
    case Mode::Normal: {
      align = folly::nextPowTwo(std::max(align, alignof(GenNumber)));
      auto const prefix = roundUp(sizeof(GenNumber), align);
      auto const adjBytes = numBytes + prefix;
      always_assert(align <= adjBytes);

      if (auto free = findFreeBlock(s_normal_free_lists, adjBytes, align)) {
        auto const begin = *free;
        addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));
        auto const handle = begin + prefix;
        if (type_scan::hasScanner(tyIndex)) {
          s_normal_alloc_descs.push_back(
            AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
          );
        }
        return handle;
      }

      auto const oldFrontier = s_normal_frontier;
      s_normal_frontier = roundUp(s_normal_frontier, align);

      addFreeBlock(s_normal_free_lists, oldFrontier,
                   s_normal_frontier - oldFrontier);

      s_normal_frontier += adjBytes;
      if (debug && !jit::VMProtect::is_protected) {
        memset(
          (char*)(tl_base) + oldFrontier,
          kRDSTrashFill,
          s_normal_frontier - oldFrontier
        );
      }
      always_assert_flog(
        s_normal_frontier < s_local_frontier,
        "Ran out of RDS space (mode=Normal)"
      );

      auto const begin = s_normal_frontier - adjBytes;
      addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));

      auto const handle = begin + prefix;

      if (type_scan::hasScanner(tyIndex)) {
        s_normal_alloc_descs.push_back(
          AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
        );
      }
      return handle;
    }
    case Mode::Persistent: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);

      if (auto free = findFreeBlock(s_persistent_free_lists, numBytes, align)) {
        return *free;
      }

      // Note: it's ok not to zero new allocations, because we've never done
      // anything with this part of the page yet, so it must still be zero.
      auto const oldFrontier = s_persistent_frontier;
      s_persistent_frontier = roundUp(s_persistent_frontier, align);
      addFreeBlock(s_persistent_free_lists, oldFrontier,
                   s_persistent_frontier - oldFrontier);
      s_persistent_frontier += numBytes;

      always_assert_flog(
        s_persistent_frontier < RuntimeOption::EvalJitTargetCacheSize,
        "Ran out of RDS space (mode=Persistent)"
      );

      return s_persistent_frontier - numBytes;
    }
    case Mode::Local: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);

      auto& frontier = s_local_frontier;

      frontier -= numBytes;
      frontier &= ~(align - 1);

      always_assert_flog(
        frontier >= s_normal_frontier,
        "Ran out of RDS space (mode=Local)"
      );

      if (type_scan::hasScanner(tyIndex)) {
        s_local_alloc_descs.push_back(
          AllocDescriptor{Handle(frontier), uint32_t(numBytes), tyIndex}
        );
      }
      return frontier;
    }
  }

  not_reached();
}
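
/*
 * allocUnlocked() is the locking wrapper around alloc(); bindImpl() below
 * calls alloc() directly because it already holds s_allocMutex.
 */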
Handle allocUnlocked(Mode mode, size_t numBytes,
                     size_t align, type_scan::Index tyIndex) {
  Guard g(s_allocMutex);
  return alloc(mode, numBytes, align, tyIndex);
}
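
/*
 * bindImpl() does a double-checked lookup: probe s_linkTable without the
 * lock, then take s_allocMutex and probe again before allocating, so each
 * Symbol ends up bound to exactly one handle.
 */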
Handle bindImpl(Symbol key, Mode mode, size_t sizeBytes,
                size_t align, type_scan::Index tyIndex) {
  LinkTable::const_accessor acc;
  if (s_linkTable.find(acc, key)) return acc->second;

  Guard g(s_allocMutex);
  if (s_linkTable.find(acc, key)) return acc->second;

  auto const handle = alloc(mode, sizeBytes, align, tyIndex);
  recordRds(handle, sizeBytes, key);

  LinkTable::const_accessor insert_acc;
  // insert_acc lives until after s_handleTable is updated
  if (!s_linkTable.insert(insert_acc, LinkTable::value_type(key, handle))) {
    always_assert(0);
  }
  if (type_scan::hasScanner(tyIndex)) {
    s_handleTable.insert(std::make_pair(handle, key));
  }
  return handle;
}

Handle attachImpl(Symbol key) {
  LinkTable::const_accessor acc;
  if (s_linkTable.find(acc, key)) return acc->second;
  return kUninitHandle;
}
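
/*
 * bindOnLinkImpl() races threads to bind a lazily-bound Link.  The handle
 * moves from kUninitHandle to kBeingBound (won by exactly one thread via
 * compare_exchange), optionally to kBeingBoundWithWaiters if other threads
 * start waiting, and finally to the real handle; waiters block on a futex
 * until the winner publishes the handle.
 */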
NEVER_INLINE
void bindOnLinkImpl(std::atomic<Handle>& handle,
                    std::function<Handle()> fun,
                    type_scan::Index tyIndex) {
  Handle c = kUninitHandle;
  if (handle.compare_exchange_strong(c, kBeingBound,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed)) {
    // we flipped it from kUninitHandle, so we get to fill in the value.
    if (handle.exchange(fun(), std::memory_order_relaxed) ==
        kBeingBoundWithWaiters) {
      futex_wake(&handle, INT_MAX);
    }
    return;
  }
  // Someone else beat us to it, so wait until they've filled it in.
  if (c == kBeingBound) {
    handle.compare_exchange_strong(c, kBeingBoundWithWaiters,
                                   std::memory_order_relaxed,
                                   std::memory_order_relaxed);
  }
  while (handle.load(std::memory_order_relaxed) == kBeingBoundWithWaiters) {
    futex_wait(&handle, kBeingBoundWithWaiters);
  }
  assertx(isHandleBound(handle.load(std::memory_order_relaxed)));
}

NEVER_INLINE
void bindOnLinkImpl(std::atomic<Handle>& handle,
                    Mode mode,
                    size_t sizeBytes,
                    size_t align,
                    type_scan::Index tyIndex) {
  bindOnLinkImpl(handle,
                 [&] {
                   Guard g(s_allocMutex);
                   return alloc(mode, sizeBytes, align, tyIndex);
                 },
                 tyIndex);
}

}

void unbind(Symbol key, Handle handle) {
  Guard g(s_allocMutex);
  s_linkTable.erase(key);
  s_handleTable.erase(handle);
}

using namespace detail;

//////////////////////////////////////////////////////////////////////

__thread void* tl_base = nullptr;

THREAD_LOCAL_PROXY(ArrayData, s_constantsStorage);

// All threads' tl_bases are kept in a set, to allow iterating Local
// and Normal RDS sections across threads.
std::mutex s_tlBaseListLock;
std::vector<void*> s_tlBaseList;

//////////////////////////////////////////////////////////////////////

static size_t s_next_bit;
static size_t s_bits_to_go;
static int s_tc_fd;

// Mapping from names to targetcache locations.
typedef tbb::concurrent_hash_map<const StringData*, Handle,
        StringDataHashICompare>
  HandleMapIS;

typedef tbb::concurrent_hash_map<const StringData*, Handle,
        StringDataHashCompare>
  HandleMapCS;

//////////////////////////////////////////////////////////////////////
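
/*
 * Per-request initialization.  Rather than zeroing the whole normal section
 * on every request, each normal allocation carries a GenNumber; a slot is
 * considered initialized only if its gen number matches header()->currentGen,
 * so requestInit() just bumps the current gen (and only memsets when the
 * counter wraps back to kInvalidGenNumber, or in debug builds).
 */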
void requestInit() {
  assert(tl_base);

  s_constantsStorage.set(nullptr);
  assert(!s_constants().get());

  auto gen = header()->currentGen;
  memset(tl_base, 0, sizeof(Header));
  if (debug) {
    // Trash the normal section in debug mode, so that we can catch errors with
    // not checking the gen number quickly.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kRDSTrashFill,
      s_normal_frontier - sizeof(Header)
    );
    gen = 1;
  } else if (++gen == kInvalidGenNumber) {
    // If the current gen number has wrapped around back to the "invalid"
    // number, memset the entire normal section.  Once the current gen number
    // wraps, it becomes ambiguous whether any given gen number is up to date.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kInvalidGenNumber,
      s_normal_frontier - sizeof(Header)
    );
    ++gen;
  }
  header()->currentGen = gen;
}

void requestExit() {
  s_constantsStorage.set(nullptr); // it will be swept
  // Don't bother running the dtor ...
}

void flush() {
  if (madvise(tl_base, s_normal_frontier, MADV_DONTNEED) == -1) {
    Logger::Warning("RDS madvise failure: %s\n",
                    folly::errnoStr(errno).c_str());
  }
  size_t offset = s_local_frontier & ~0xfff;
  if (madvise(static_cast<char*>(tl_base) + offset,
              s_persistent_base - offset, MADV_DONTNEED)) {
    Logger::Warning("RDS local madvise failure: %s\n",
                    folly::errnoStr(errno).c_str());
  }
}

/* RDS Layout:
 * +-------------+ <-- tl_base
 * |  Header     |
 * +-------------+
 * |             |
 * |  Normal     | growing higher
 * |  region     | vvv
 * |             |
 * +-------------+ <-- tl_base + s_normal_frontier
 * | \ \ \ \ \ \ |
 * +-------------+ <-- tl_base + s_local_frontier
 * |             |
 * |  Local      | ^^^
 * |  region     | growing lower
 * |             |
 * +-------------+ <-- tl_base + s_persistent_base
 * |             |
 * | Persistent  | growing higher
 * |  region     | vvv
 * |             |
 * +-------------+ <-- tl_base + s_persistent_frontier
 * | \ \ \ \ \ \ |
 * +-------------+ higher addresses
 */

size_t usedBytes() {
  return s_normal_frontier;
}

size_t usedLocalBytes() {
  return s_persistent_base - s_local_frontier;
}

size_t usedPersistentBytes() {
  return s_persistent_frontier - s_persistent_base;
}

folly::Range<const char*> normalSection() {
  return {(const char*)tl_base, usedBytes()};
}

folly::Range<const char*> localSection() {
  return {(const char*)tl_base + s_local_frontier, usedLocalBytes()};
}

folly::Range<const char*> persistentSection() {
  return {(const char*)tl_base + s_persistent_base, usedPersistentBytes()};
}

Array& s_constants() {
  return *reinterpret_cast<Array*>(&s_constantsStorage.m_p);
}

//////////////////////////////////////////////////////////////////////

namespace {

constexpr std::size_t kAllocBitNumBytes = 8;

}

//////////////////////////////////////////////////////////////////////

GenNumber currentGenNumber() {
  return header()->currentGen;
}

Handle currentGenNumberHandle() {
  return offsetof(Header, currentGen);
}
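
/*
 * allocBit()/testAndSetBit() implement a simple bit allocator on top of
 * normal RDS: allocBit() grabs kAllocBitNumBytes (64 bits) of normal RDS at a
 * time and hands out bit indices; testAndSetBit() maps a bit index back to
 * its byte and mask, zeroing the backing block first if its gen number shows
 * it hasn't been touched this request.
 */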
size_t allocBit() {
  Guard g(s_allocMutex);
  if (s_bits_to_go == 0) {
    auto const handle = detail::alloc(
      Mode::Normal,
      kAllocBitNumBytes,
      kAllocBitNumBytes,
      type_scan::getIndexForScan<unsigned char[kAllocBitNumBytes]>()
    );
    s_next_bit = handle * CHAR_BIT;
    s_bits_to_go = kAllocBitNumBytes * CHAR_BIT;
    recordRds(handle, kAllocBitNumBytes, "Unknown", "bits");
  }
  s_bits_to_go--;
  return s_next_bit++;
}

bool testAndSetBit(size_t bit) {
  size_t block = bit / CHAR_BIT;
  unsigned char mask = 1 << (bit % CHAR_BIT);
  Handle handle = block & ~(kAllocBitNumBytes - 1);

  if (!isHandleInit(handle, NormalTag{})) {
    auto ptr = &handleToRef<unsigned char>(handle);
    for (size_t i = 0; i < kAllocBitNumBytes; ++i) ptr[i] = 0;
    initHandle(handle);
  }
  bool ret = handleToRef<unsigned char>(block) & mask;
  handleToRef<unsigned char>(block) |= mask;
  return ret;
}

bool isValidHandle(Handle handle) {
  return handle >= sizeof(Header) &&
    handle < RuntimeOption::EvalJitTargetCacheSize;
}
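
/*
 * Create the shared-memory backing for the persistent region: prefer a
 * shm_open() segment (normally on /dev/shm), falling back to an unlinked
 * temporary file in /tmp.  s_persistent_base is set to roughly 3/4 of
 * EvalJitTargetCacheSize, rounded down to a 4 KB boundary.
 */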
static void initPersistentCache() {
  Guard g(s_allocMutex);
  if (s_tc_fd) return;
  std::string tmpName = folly::sformat("/HHVM_tc{}", getpid());
  always_assert(tmpName.size() <= NAME_MAX);
  // Get a file descriptor to a shared memory object. This is normally located
  // in /dev/shm, which is a tmpfs filesystem that shouldn't run out of space
  // unlike /tmp
  s_tc_fd = shm_open(tmpName.c_str(),
                     O_RDWR | O_CREAT | O_EXCL,
                     S_IWUSR | S_IRUSR);
  s_persistent_base = RuntimeOption::EvalJitTargetCacheSize * 3 / 4;
  s_persistent_base -= s_persistent_base & (4 * 1024 - 1);
  if (s_tc_fd != -1) {
    shm_unlink(tmpName.c_str());
    if (ftruncate(s_tc_fd,
                  RuntimeOption::EvalJitTargetCacheSize - s_persistent_base)) {
      close(s_tc_fd);
      s_tc_fd = -1;
    }
  }
  if (s_tc_fd == -1) {
    // Fall back to a file in /tmp. If things don't work out now kill the
    // process.
    char tmpName[] = "/tmp/tcXXXXXX";
    s_tc_fd = mkstemp(tmpName);
    always_assert(s_tc_fd != -1);
    unlink(tmpName);
    auto const fail = ftruncate(s_tc_fd,
                                RuntimeOption::EvalJitTargetCacheSize
                                - s_persistent_base);
    always_assert(fail == 0);
  }
  s_local_frontier = s_persistent_frontier = s_persistent_base;
}

void threadInit(bool shouldRegister) {
  assert(tl_base == nullptr);

  if (!s_tc_fd) {
    initPersistentCache();
  }

  tl_base = mmap(nullptr, RuntimeOption::EvalJitTargetCacheSize,
                 PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
  always_assert_flog(
    tl_base != MAP_FAILED,
    "Failed to mmap persistent RDS region. errno = {}",
    folly::errnoStr(errno).c_str()
  );
#ifdef _MSC_VER
  // MapViewOfFileEx() requires "the specified memory region is not already in
  // use by the calling process" when mapping the shared area below. Otherwise
  // it will return MAP_FAILED. We first map the full size to make sure the
  // memory area is available. Then we unmap and map the lower portion of the
  // RDS at the same address.
  munmap(tl_base, RuntimeOption::EvalJitTargetCacheSize);
  void* tl_same = mmap(tl_base, s_persistent_base,
                       PROT_READ | PROT_WRITE,
                       MAP_ANON | MAP_PRIVATE | MAP_FIXED,
                       -1, 0);
  always_assert(tl_same == tl_base);
#endif
  numa_bind_to(tl_base, s_persistent_base, s_numaNode);
#ifdef NDEBUG
  // A huge-page RDS is incompatible with VMProtect in vm-regs.cpp
  if (RuntimeOption::EvalMapTgtCacheHuge) {
    hintHuge(tl_base, RuntimeOption::EvalJitTargetCacheSize);
  }
#endif

  if (shouldRegister) {
    Guard g(s_tlBaseListLock);
    assert(std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base) ==
           end(s_tlBaseList));
    s_tlBaseList.push_back(tl_base);
  }

  void* shared_base = (char*)tl_base + s_persistent_base;
  /*
   * Map the upper portion of the RDS to a shared area. This is used
   * for persistent classes and functions, so they are always defined,
   * and always visible to all threads.
   */
  void* mem = mmap(shared_base,
                   RuntimeOption::EvalJitTargetCacheSize - s_persistent_base,
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, s_tc_fd, 0);
  always_assert(mem == shared_base);

  if (RuntimeOption::EvalPerfDataMap) {
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + RuntimeOption::EvalJitTargetCacheSize,
      "rds");
  }

  header()->currentGen = 1;
}

void threadExit(bool shouldUnregister) {
  if (shouldUnregister) {
    Guard g(s_tlBaseListLock);
    auto it = std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base);
    if (it != end(s_tlBaseList)) {
      s_tlBaseList.erase(it);
    }
  }

  if (RuntimeOption::EvalPerfDataMap) {
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + RuntimeOption::EvalJitTargetCacheSize,
      "-rds");
  }

  auto const base = tl_base;
  auto do_unmap = [base] {
#ifdef _MSC_VER
    munmap(base, s_persistent_base);
    munmap((char*)base + s_persistent_base,
           RuntimeOption::EvalJitTargetCacheSize - s_persistent_base);
#else
    munmap(base, RuntimeOption::EvalJitTargetCacheSize);
#endif
  };

  // Other requests may be reading from this rds section via the s_tlBaseList.
  // We just removed ourself from the list now, but defer the unmap until after
  // any outstanding requests have completed.
  if (shouldUnregister) {
    Treadmill::enqueue(std::move(do_unmap));
  } else {
    do_unmap();
  }
}
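
/*
 * recordRds() publishes the address range of an RDS allocation to the debug
 * data map (only when EvalPerfDataMap is enabled, e.g. for perf-style
 * profiling tools).  For normal handles the range is widened to cover the
 * GenNumber that precedes the data.
 */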
void recordRds(Handle h, size_t size,
               const std::string& type, const std::string& msg) {
  if (RuntimeOption::EvalPerfDataMap) {
    if (isNormalHandle(h)) {
      h = genNumberHandleFrom(h);
      size += sizeof(GenNumber);
    }
    Debug::DebugInfo::recordDataMap(
      (char*)(intptr_t)h,
      (char*)(intptr_t)h + size,
      folly::format("rds+{}-{}", type, msg).str());
  }
}

void recordRds(Handle h, size_t size, const Symbol& sym) {
  if (RuntimeOption::EvalPerfDataMap) {
    recordRds(h, size,
              boost::apply_visitor(SymbolKind(), sym),
              boost::apply_visitor(SymbolRep(), sym));
  }
}

std::vector<void*> allTLBases() {
  Guard g(s_tlBaseListLock);
  return s_tlBaseList;
}

folly::Optional<Symbol> reverseLink(Handle handle) {
  RevLinkTable::const_accessor acc;
  if (s_handleTable.find(acc, handle)) {
    return acc->second;
  }
  return folly::none;
}

//////////////////////////////////////////////////////////////////////

}}