/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is       |
   | available through the world-wide-web at the following url:          |
   | http://www.php.net/license/3_01.txt                                 |
   | If you did not receive a copy of the PHP license and are unable to  |
   | obtain it through the world-wide-web, please send a note to         |
   | license@php.net so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/rds.h"

#include <atomic>
#include <climits>
#include <cstring>
#include <deque>
#include <mutex>
#include <vector>

#include <folly/Bits.h>
#include <folly/Hash.h>
#include <folly/portability/SysMman.h>
#include <folly/sorted_vector_types.h>
#include <folly/String.h>

#include <tbb/concurrent_hash_map.h>

#include "hphp/util/logger.h"
#include "hphp/util/maphuge.h"
#include "hphp/util/numa.h"
#include "hphp/util/smalllocks.h"
#include "hphp/util/type-scan.h"

#include "hphp/runtime/base/rds-header.h"
#include "hphp/runtime/base/rds-local.h"
#include "hphp/runtime/vm/debug/debug.h"
#include "hphp/runtime/vm/jit/mcgen.h"
#include "hphp/runtime/vm/jit/mcgen-translate.h"
#include "hphp/runtime/vm/jit/vm-protect.h"
#include "hphp/runtime/vm/treadmill.h"
#include "hphp/runtime/vm/vm-regs.h"
namespace HPHP { namespace rds {

//////////////////////////////////////////////////////////////////////

namespace {

//////////////////////////////////////////////////////////////////////
using Guard = std::lock_guard<std::mutex>;

/*
 * This mutex protects actually allocating from RDS (the above
 * statics). It is ordered *after* the locks in s_linkTable.
 */
std::mutex s_allocMutex;

//////////////////////////////////////////////////////////////////////
struct SymbolKind : boost::static_visitor<std::string> {
  std::string operator()(ClsConstant /*k*/) const { return "ClsConstant"; }
  std::string operator()(StaticMethod /*k*/) const { return "StaticMethod"; }
  std::string operator()(StaticMethodF /*k*/) const { return "StaticMethodF"; }
  template<typename T>
  std::string operator()(Profile<T> /*k*/) const { return "Profile"; }
  std::string operator()(SPropCache /*k*/) const { return "SPropCache"; }
  std::string operator()(StaticMemoValue) const { return "StaticMemoValue"; }
  std::string operator()(StaticMemoCache) const { return "StaticMemoCache"; }
  std::string operator()(LSBMemoValue) const { return "LSBMemoValue"; }
  std::string operator()(LSBMemoCache) const { return "LSBMemoCache"; }
};
struct SymbolRep : boost::static_visitor<std::string> {
  std::string operator()(ClsConstant k) const {
    return k.clsName->data() + std::string("::") + k.cnsName->data();
  }

  std::string operator()(StaticMethod k)  const { return k.name->data(); }
  std::string operator()(StaticMethodF k) const { return k.name->data(); }

  template<typename T>
  std::string operator()(Profile<T> k) const {
    return folly::sformat("{}:t{}:{}", k.name->data(), k.transId, k.bcOff);
  }

  std::string operator()(SPropCache k) const {
    return k.cls->name()->toCppString() + "::" +
           k.cls->staticProperties()[k.slot].name->toCppString();
  }

  std::string operator()(StaticMemoValue k) const {
    auto const func = Func::fromFuncId(k.funcId);
    return func->fullName()->toCppString();
  }
  std::string operator()(StaticMemoCache k) const {
    auto const func = Func::fromFuncId(k.funcId);
    return func->fullName()->toCppString();
  }

  std::string operator()(LSBMemoValue k) const {
    auto const clsName = k.cls->name()->toCppString();
    auto const funcName = Func::fromFuncId(k.funcId)->fullName()->toCppString();
    return clsName + "::" + funcName;
  }
  std::string operator()(LSBMemoCache k) const {
    auto const clsName = k.cls->name()->toCppString();
    auto const funcName = Func::fromFuncId(k.funcId)->fullName()->toCppString();
    return clsName + "::" + funcName;
  }
};
struct SymbolEq : boost::static_visitor<bool> {
  template<class T, class U>
  typename std::enable_if<
    !std::is_same<T,U>::value,
    bool
  >::type operator()(const T&, const U&) const { return false; }

  bool operator()(ClsConstant k1, ClsConstant k2) const {
    assertx(k1.clsName->isStatic() && k1.cnsName->isStatic());
    assertx(k2.clsName->isStatic() && k2.cnsName->isStatic());
    return k1.clsName->isame(k2.clsName) &&
           k1.cnsName == k2.cnsName;
  }

  template<typename T>
  bool operator()(Profile<T> k1, Profile<T> k2) const {
    assertx(k1.name->isStatic() && k2.name->isStatic());
    return k1.transId == k2.transId &&
           k1.bcOff == k2.bcOff &&
           k1.name == k2.name;
  }

  template<class T>
  typename std::enable_if<
    std::is_same<T,StaticMethod>::value ||
    std::is_same<T,StaticMethodF>::value,
    bool
  >::type operator()(const T& t1, const T& t2) const {
    assertx(t1.name->isStatic() && t2.name->isStatic());
    return t1.name->isame(t2.name);
  }

  bool operator()(SPropCache k1, SPropCache k2) const {
    return k1.cls == k2.cls && k1.slot == k2.slot;
  }

  bool operator()(StaticMemoValue k1, StaticMemoValue k2) const {
    return k1.funcId == k2.funcId;
  }

  bool operator()(StaticMemoCache k1, StaticMemoCache k2) const {
    return k1.funcId == k2.funcId;
  }

  bool operator()(LSBMemoValue k1, LSBMemoValue k2) const {
    return k1.cls == k2.cls && k1.funcId == k2.funcId;
  }

  bool operator()(LSBMemoCache k1, LSBMemoCache k2) const {
    return k1.cls == k2.cls && k1.funcId == k2.funcId;
  }
};
struct SymbolHash : boost::static_visitor<size_t> {
  size_t operator()(ClsConstant k) const {
    return folly::hash::hash_128_to_64(
      k.clsName->hash(),
      k.cnsName->hash()
    );
  }

  template<typename T>
  size_t operator()(Profile<T> k) const {
    return folly::hash::hash_combine(
      k.transId,
      k.bcOff,
      k.name->hash()
    );
  }

  size_t operator()(StaticMethod k)  const { return k.name->hash(); }
  size_t operator()(StaticMethodF k) const { return k.name->hash(); }

  size_t operator()(SPropCache k) const {
    return folly::hash::hash_combine(
      k.cls.get(), k.slot
    );
  }

  size_t operator()(StaticMemoValue k) const {
    return std::hash<FuncId>()(k.funcId);
  }
  size_t operator()(StaticMemoCache k) const {
    return std::hash<FuncId>()(k.funcId);
  }

  size_t operator()(LSBMemoValue k) const {
    return folly::hash::hash_combine(
      k.cls.get(), std::hash<FuncId>()(k.funcId)
    );
  }
  size_t operator()(LSBMemoCache k) const {
    return folly::hash::hash_combine(
      k.cls.get(), std::hash<FuncId>()(k.funcId)
    );
  }
};
struct HashCompare {
  bool equal(const Symbol& k1, const Symbol& k2) const {
    return boost::apply_visitor(SymbolEq(), k1, k2);
  }
  size_t hash(const Symbol& k) const {
    return boost::apply_visitor(SymbolHash(), k);
  }
};
struct LinkEntry {
  Handle   handle;
  uint32_t size;
};

using LinkTable = tbb::concurrent_hash_map<
  Symbol,
  LinkEntry,
  HashCompare
>;
LinkTable s_linkTable;

using RevLinkTable = tbb::concurrent_hash_map<Handle,Symbol>;
RevLinkTable s_handleTable;
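
// Illustrative sketch (not from the original source): tbb::concurrent_hash_map
// accessors both locate an entry and hold a lock on its bucket for as long as
// they stay in scope, which is what the bind/attach code below relies on:
//
//   LinkTable::const_accessor acc;       // lock released on destruction
//   if (s_linkTable.find(acc, sym)) {    // `sym` is a hypothetical Symbol
//     auto const h = acc->second.handle; // safe to read while acc is alive
//   }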
//////////////////////////////////////////////////////////////////////

/*
 * Space wasted by alignment is tracked in these maps. We don't bother with
 * free lists for local RDS because we aren't sensitive to its layout or
 * compactness.
 */
using FreeLists = folly::sorted_vector_map<unsigned,
                                           std::deque<rds::Handle>>;
FreeLists s_normal_free_lists;
FreeLists s_persistent_free_lists;
#if RDS_FIXED_PERSISTENT_BASE
// Allocate 2M from low memory each time.
constexpr size_t kPersistentChunkSize = 2u << 20;
#endif
//////////////////////////////////////////////////////////////////////

}  // anonymous namespace

namespace detail {

// Current allocation frontier for the non-persistent region.
size_t s_normal_frontier = sizeof(Header);

// Frontier for the "local" part of the persistent region (data not
// shared between threads, but not zero'd)---downward-growing.
size_t s_local_frontier = 0;
size_t s_local_base = 0;

#if !RDS_FIXED_PERSISTENT_BASE
uintptr_t s_persistent_base = 0;
size_t s_persistent_size = 0;
#else
// It is a constexpr equal to 0 defined in rds-inl.h
#endif

// The persistent region grows down from the frontier towards the limit; when
// it runs out of space, we allocate another chunk and redefine the frontier
// and the limit, as guarded by s_allocMutex.
uintptr_t s_persistent_frontier = 0;
uintptr_t s_persistent_limit = 0;

size_t s_persistent_usage = 0;

AllocDescriptorList s_normal_alloc_descs;
AllocDescriptorList s_local_alloc_descs;
/*
 * Round base up to align, which must be a power of two.
 */
size_t roundUp(size_t base, size_t align) {
  assertx(folly::isPowTwo(align));
  --align;
  return (base + align) & ~align;
}
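
// Worked example (not from the original source): roundUp(13, 8) decrements
// align to 7, then computes (13 + 7) & ~7 == 20 & ~7 == 16, the smallest
// multiple of 8 that is >= 13.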
/*
 * Add the given offset to the free list for its size.
 */
void addFreeBlock(FreeLists& lists, size_t where, size_t size) {
  if (size == 0) return;
  lists[size].emplace_back(where);
}
/*
 * Try to find a tracked free block of a suitable size. If an oversized block
 * is found instead, the remaining space before and/or after the return space
 * is re-added to the appropriate free lists.
 */
folly::Optional<Handle> findFreeBlock(FreeLists& lists, size_t size,
                                      size_t align) {
  for (auto it = lists.lower_bound(size); it != lists.end(); ++it) {
    auto const blockSize = it->first;
    for (auto list_it = it->second.begin();
         list_it != it->second.end();
         ++list_it) {
      auto const raw = static_cast<size_t>(*list_it);
      static_assert(sizeof(raw) > 4, "avoid 32-bit overflow");
      auto const end = raw + blockSize;
      auto const handle = roundUp(raw, align);

      if (handle + size > end) continue;
      it->second.erase(list_it);

      auto const headerSize = handle - raw;
      addFreeBlock(lists, raw, headerSize);

      auto const footerSize = blockSize - size - headerSize;
      addFreeBlock(lists, handle + size, footerSize);

      return Handle(handle);
    }
  }
  return folly::none;
}
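
// Worked example with hypothetical numbers: given a 32-byte free block at
// raw == 20 and a request of size == 16 with align == 8, end == 52 and
// handle == roundUp(20, 8) == 24. Since 24 + 16 <= 52 the block is taken;
// the 4-byte gap [20, 24) is re-listed as the "header" block and the
// 12-byte tail [40, 52) as the "footer" block.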
// Create a new chunk for use in persistent RDS, but don't add to
// 's_persistent_free_lists' yet.
NEVER_INLINE
void addNewPersistentChunk(size_t size) {
  assertx(size > 0 && size < kMaxHandle && size % 4096 == 0);
  auto const raw = static_cast<char*>(lower_malloc(size));
  auto const addr = reinterpret_cast<uintptr_t>(raw);
  memset(raw, 0, size);
#if !RDS_FIXED_PERSISTENT_BASE
  // This is only called once in processInit() if we don't have a persistent
  // base.
  always_assert(s_persistent_base == 0);
  s_persistent_limit = addr;
  s_persistent_frontier = addr + size;
  s_persistent_base = s_persistent_frontier - size4g;
#else
  always_assert_flog(addr >= kMinPersistentHandle && addr < size4g,
                     "failed to get a suitable address for persistent RDS");
  assertx(s_persistent_frontier >= s_persistent_limit);
  if (s_persistent_frontier != s_persistent_limit) {
    addFreeBlock(s_persistent_free_lists,
                 ptrToHandle<Mode::Persistent>(s_persistent_limit),
                 s_persistent_frontier - s_persistent_limit);
  }
  s_persistent_limit = addr;
  s_persistent_frontier = addr + size;
#endif
}
// Allocate a new slot for the given mode. The caller must hold s_allocMutex.
Handle alloc(Mode mode, size_t numBytes,
             size_t align, type_scan::Index tyIndex) {
  assertx(align <= 16);
  switch (mode) {
    case Mode::Normal: {
      align = folly::nextPowTwo(std::max(align, alignof(GenNumber)));
      auto const prefix = roundUp(sizeof(GenNumber), align);
      auto const adjBytes = numBytes + prefix;
      always_assert(align <= adjBytes);

      if (auto free = findFreeBlock(s_normal_free_lists, adjBytes, align)) {
        auto const begin = *free;
        addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));
        auto const handle = begin + prefix;
        if (type_scan::hasScanner(tyIndex)) {
          s_normal_alloc_descs.push_back(
            AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
          );
        }
        return handle;
      }

      auto const oldFrontier = s_normal_frontier;
      s_normal_frontier = roundUp(s_normal_frontier, align);

      addFreeBlock(s_normal_free_lists, oldFrontier,
                   s_normal_frontier - oldFrontier);
      s_normal_frontier += adjBytes;
      if (debug && !jit::VMProtect::is_protected) {
        memset(
          (char*)(tl_base) + oldFrontier,
          kRDSTrashFill,
          s_normal_frontier - oldFrontier
        );
      }
      always_assert_flog(
        s_normal_frontier < s_local_frontier,
        "Ran out of RDS space (mode=Normal)"
      );

      auto const begin = s_normal_frontier - adjBytes;
      addFreeBlock(s_normal_free_lists, begin, prefix - sizeof(GenNumber));

      auto const handle = begin + prefix;

      if (type_scan::hasScanner(tyIndex)) {
        s_normal_alloc_descs.push_back(
          AllocDescriptor{Handle(handle), uint32_t(numBytes), tyIndex}
        );
      }
      return handle;
    }
    case Mode::Persistent: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);
      s_persistent_usage += numBytes;

      if (auto free = findFreeBlock(s_persistent_free_lists, numBytes, align)) {
        return *free;
      }

      auto const newFrontier =
        (s_persistent_frontier - numBytes) & ~(align - 1);
      if (newFrontier >= s_persistent_limit) {
        s_persistent_frontier = newFrontier;
        return ptrToHandle<Mode::Persistent>(newFrontier);
      }

#if RDS_FIXED_PERSISTENT_BASE
      // Allocate on demand, add kPersistentChunkSize each time.
      assertx(numBytes <= kPersistentChunkSize);
      addNewPersistentChunk(kPersistentChunkSize);
      return alloc(mode, numBytes, align, tyIndex); // retry after a new chunk
#else
      // We reserved plenty of space in s_persistent_free_lists at the
      // beginning of the process, but maybe it is time to increase the size
      // in the configuration.
      always_assert_flog(
        false,
        "Ran out of RDS space (mode=Persistent)"
      );
#endif
    }
    case Mode::Local: {
      align = folly::nextPowTwo(align);
      always_assert(align <= numBytes);

      auto& frontier = s_local_frontier;

      frontier -= numBytes;
      frontier &= ~(align - 1);

      always_assert_flog(
        frontier >= s_normal_frontier,
        "Ran out of RDS space (mode=Local)"
      );

      if (type_scan::hasScanner(tyIndex)) {
        s_local_alloc_descs.push_back(
          AllocDescriptor{Handle(frontier), uint32_t(numBytes), tyIndex}
        );
      }

      return frontier;
    }
  }
  not_reached();
}
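
// Sketch of the Mode::Normal slot layout produced above (illustrative; the
// exact padding depends on align). With numBytes == 8 and align == 8,
// prefix == roundUp(sizeof(GenNumber), 8) == 8 and adjBytes == 16:
//
//   begin          begin + 7    handle == begin + prefix
//   v              v            v
//   +--------------+-----------+-----------------+
//   | 7 free bytes | GenNumber | 8 bytes of data |
//   +--------------+-----------+-----------------+
//
// The generation number sits immediately below the returned handle, so the
// runtime can check whether the slot was initialized in the current request.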
Handle allocUnlocked(Mode mode, size_t numBytes,
                     size_t align, type_scan::Index tyIndex) {
  Guard g(s_allocMutex);
  return alloc(mode, numBytes, align, tyIndex);
}
Handle bindImpl(Symbol key, Mode mode, size_t sizeBytes,
                size_t align, type_scan::Index tyIndex) {
  LinkTable::const_accessor acc;
  if (s_linkTable.find(acc, key)) return acc->second.handle;

  Guard g(s_allocMutex);
  if (s_linkTable.find(acc, key)) return acc->second.handle;

  auto const handle = alloc(mode, sizeBytes, align, tyIndex);
  recordRds(handle, sizeBytes, key);

  LinkTable::const_accessor insert_acc;
  // insert_acc lives until after s_handleTable is updated
  if (!s_linkTable.insert(
        insert_acc,
        LinkTable::value_type(key, {handle, safe_cast<uint32_t>(sizeBytes)}))) {
    always_assert(0);
  }
  if (type_scan::hasScanner(tyIndex)) {
    s_handleTable.insert(std::make_pair(handle, key));
  }
  return handle;
}
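
// The find/lock/find sequence above is double-checked locking. In pseudocode
// (names are illustrative, not part of this file):
//
//   if (table.find(acc, key)) return hit;   // lock-free fast path
//   Guard g(mutex);                         // serialize binders
//   if (table.find(acc, key)) return hit;   // re-check: we may have raced
//   ...allocate, record, and publish...     // at most one thread gets here
//
// The re-check is what makes concurrent bindImpl calls for the same Symbol
// return the same Handle.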
Handle attachImpl(Symbol key) {
  LinkTable::const_accessor acc;
  if (s_linkTable.find(acc, key)) return acc->second.handle;
  return kUninitHandle;
}
NEVER_INLINE
void bindOnLinkImpl(std::atomic<Handle>& handle, std::function<Handle()> fun,
                    const void* init, size_t size,
                    type_scan::Index /*tyIndex*/) {
  Handle c = kUninitHandle;
  if (handle.compare_exchange_strong(c, kBeingBound,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed)) {
    // we flipped it from kUninitHandle, so we get to fill in the value.
    auto const h = fun();
    if (size && isPersistentHandle(h)) {
      memcpy(handleToPtr<void, Mode::Persistent>(h), init, size);
    }
    if (handle.exchange(h, std::memory_order_relaxed) ==
        kBeingBoundWithWaiters) {
      futex_wake(&handle, INT_MAX);
    }
    return;
  }
  // Someone else beat us to it, so wait until they've filled it in.
  if (c == kBeingBound) {
    handle.compare_exchange_strong(c, kBeingBoundWithWaiters,
                                   std::memory_order_relaxed,
                                   std::memory_order_relaxed);
  }
  while (handle.load(std::memory_order_relaxed) == kBeingBoundWithWaiters) {
    futex_wait(&handle, kBeingBoundWithWaiters);
  }
  assertx(isHandleBound(handle.load(std::memory_order_relaxed)));
}
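
// Sketch of the binding state machine above (illustrative):
//
//   kUninitHandle --CAS--> kBeingBound --exchange--> real handle
//                               |                        ^
//                 losers' CAS   v                        |
//                      kBeingBoundWithWaiters -----------+
//
// Exactly one thread wins the first compare_exchange_strong and runs fun();
// each loser upgrades the sentinel to kBeingBoundWithWaiters and blocks in
// futex_wait until the winner publishes the real handle and calls futex_wake.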
NEVER_INLINE
void bindOnLinkImpl(std::atomic<Handle>& handle,
                    Mode mode, size_t sizeBytes, size_t align,
                    type_scan::Index tyIndex) {
  bindOnLinkImpl(handle,
                 [&] {
                   Guard g(s_allocMutex);
                   return alloc(mode, sizeBytes, align, tyIndex);
                 },
                 nullptr, 0, tyIndex);
}

}  // namespace detail
void unbind(Symbol key, Handle handle) {
  Guard g(s_allocMutex);
  s_linkTable.erase(key);
  s_handleTable.erase(handle);
}

using namespace detail;
void visitSymbols(std::function<void(const Symbol&,Handle,uint32_t)> fun) {
  Guard g(s_allocMutex);
  // make sure that find/count don't interfere with iteration.
  s_linkTable.rehash();
  for (auto it : s_linkTable) {
    fun(it.first, it.second.handle, it.second.size);
  }
}

//////////////////////////////////////////////////////////////////////
__thread void* tl_base = nullptr;

RDS_LOCAL_NO_CHECK(ArrayData*, s_constantsStorage)(nullptr);

rds::Link<bool, Mode::Persistent> s_persistentTrue;

// All threads' tl_bases are kept in a set, to allow iterating Local
// and Normal RDS sections across threads.
std::mutex s_tlBaseListLock;
std::vector<void*> s_tlBaseList;

//////////////////////////////////////////////////////////////////////
static size_t s_next_bit;
static size_t s_bits_to_go;

//////////////////////////////////////////////////////////////////////
void processInit() {
  assertx(!s_local_base);
  if (RuntimeOption::EvalJitTargetCacheSize > 1u << 30) {
    // The encoding of RDS handles requires that the normal and local regions
    // together be smaller than 1G.
    RuntimeOption::EvalJitTargetCacheSize = 1u << 30;
  }
  s_local_base = RuntimeOption::EvalJitTargetCacheSize * 3 / 4;
  s_local_frontier = s_local_base;

#if RDS_FIXED_PERSISTENT_BASE
  auto constexpr allocSize = kPersistentChunkSize;
#else
  auto const allocSize = RuntimeOption::EvalJitTargetCacheSize / 4;
#endif
  addNewPersistentChunk(allocSize);

  s_persistentTrue.bind(Mode::Persistent);
  *s_persistentTrue = true;
}
void requestInit() {
  assertx(tl_base);

  *s_constantsStorage = nullptr;
  assertx(!s_constants().get());

  auto gen = header()->currentGen;
  memset(tl_base, 0, sizeof(Header));
  if (debug) {
    // Trash the normal section in debug mode, so that we can catch errors with
    // not checking the gen number quickly.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kRDSTrashFill,
      s_normal_frontier - sizeof(Header)
    );
    gen = 1;
  } else if (++gen == kInvalidGenNumber) {
    // If the current gen number has wrapped around back to the "invalid"
    // number, memset the entire normal section. Once the current gen number
    // wraps, it becomes ambiguous whether any given gen number is up to date.
    memset(
      static_cast<char*>(tl_base) + sizeof(Header),
      kInvalidGenNumber,
      s_normal_frontier - sizeof(Header)
    );
    ++gen;
  }
  header()->currentGen = gen;
}

void requestExit() {
  *s_constantsStorage = nullptr; // it will be swept
  // Don't bother running the dtor ...
}
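
// Illustrative note on the wrap handling in requestInit above: a Normal slot
// counts as initialized iff its stored GenNumber equals header()->currentGen.
// If currentGen silently wrapped past kInvalidGenNumber, a slot last written
// many generations ago could compare equal again and read as spuriously
// valid, so the whole normal section is cleared at the wrap instead.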
void flush() {
  if (madvise(tl_base, s_normal_frontier, MADV_DONTNEED) == -1) {
    Logger::Warning("RDS madvise failure: %s\n",
                    folly::errnoStr(errno).c_str());
  }
  if (jit::mcgen::retranslateAllEnabled() &&
      !jit::mcgen::retranslateAllPending()) {
    size_t offset = s_local_frontier & ~0xfff;
    size_t protectedSpace = local::detail::s_usedbytes +
                            (-local::detail::s_usedbytes & 0xfff);
    if (madvise(static_cast<char*>(tl_base) + offset,
                s_local_base - protectedSpace - offset,
                MADV_DONTNEED)) {
      Logger::Warning("RDS local madvise failure: %s\n",
                      folly::errnoStr(errno).c_str());
    }
  }
}
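
// Worked example for the page math above (hypothetical value): if
// local::detail::s_usedbytes == 5000, then (-5000 & 0xfff) == 3192 and
// protectedSpace == 8192, i.e. the in-use local data rounded up to whole
// 4K pages, which the madvise range deliberately stops short of.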
/*
 * The RDS layout looks like this:
 *
 * +-------------+ <-- tl_base
 * |   Header    |
 * +-------------+
 * |             |
 * |   Normal    | growing higher
 * |   region    | vvv
 * |             |
 * +-------------+ <-- tl_base + s_normal_frontier
 * | \ \ \ \ \ \ |
 * +-------------+ <-- tl_base + s_local_frontier
 * |             |
 * |   Local     | ^^^
 * |   region    | growing lower
 * |             |
 * +-------------+ <-- tl_base + s_local_base
 * | \ \ \ \ \ \ |
 * +-------------+ higher addresses
 *
 * +-------------+ <--- s_persistent_base
 * |             |
 * | Persistent  | not necessarily contiguous when RDS_FIXED_PERSISTENT_BASE
 * |   region    |
 * |             |
 * +-------------+
 */
size_t usedBytes() {
  return s_normal_frontier;
}
size_t usedLocalBytes() {
  return s_local_base - s_local_frontier;
}

size_t usedPersistentBytes() {
  return s_persistent_usage;
}

folly::Range<const char*> normalSection() {
  return {(const char*)tl_base, usedBytes()};
}

folly::Range<const char*> localSection() {
  return {(const char*)tl_base + s_local_frontier, usedLocalBytes()};
}

Array& s_constants() {
  return *reinterpret_cast<Array*>(s_constantsStorage.get());
}

GenNumber currentGenNumber() {
  return header()->currentGen;
}

Handle currentGenNumberHandle() {
  return offsetof(Header, currentGen);
}
= 8;
736 Guard
g(s_allocMutex
);
737 if (s_bits_to_go
== 0) {
738 auto const handle
= detail::alloc(
742 type_scan::getIndexForScan
<unsigned char[kAllocBitNumBytes
]>()
744 s_next_bit
= handle
* CHAR_BIT
;
745 s_bits_to_go
= kAllocBitNumBytes
* CHAR_BIT
;
746 recordRds(handle
, kAllocBitNumBytes
, "Unknown", "bits");
bool testAndSetBit(size_t bit) {
  size_t block = bit / CHAR_BIT;
  unsigned char mask = 1 << (bit % CHAR_BIT);
  Handle handle = block & ~(kAllocBitNumBytes - 1);

  if (!isHandleInit(handle, NormalTag{})) {
    auto ptr = handleToPtr<unsigned char, Mode::Normal>(handle);
    memset(ptr, 0, kAllocBitNumBytes);
    initHandle(handle);
  }
  auto& ref = handleToRef<unsigned char, Mode::Normal>(block);
  bool ret = ref & mask;
  ref |= mask;
  return ret;
}
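
// Worked example: testAndSetBit(77) yields block == 77 / 8 == 9 and
// mask == 1 << (77 % 8) == 0x20, with handle == 9 & ~7 == 8 naming the
// 8-byte chunk that allocBit carved out. The chunk is zeroed and marked
// initialized the first time any of its 64 bits is tested in a request.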
bool isValidHandle(Handle handle) {
  return handle >= kMinPersistentHandle ||
         (handle >= sizeof(Header) && handle < s_normal_frontier) ||
         (handle >= s_local_frontier && handle < s_local_base);
}
void threadInit(bool shouldRegister) {
  if (!s_local_base) {
    processInit();
  }
  assertx(tl_base == nullptr);
  tl_base = mmap(nullptr, s_local_base, PROT_READ | PROT_WRITE,
                 MAP_ANON | MAP_PRIVATE, -1, 0);
  always_assert_flog(
    tl_base != MAP_FAILED,
    "Failed to mmap RDS region. errno = {}",
    folly::errnoStr(errno).c_str()
  );
  numa_bind_to(tl_base, s_local_base, s_numaNode);
#ifdef NDEBUG
  // A huge-page RDS is incompatible with VMProtect in vm-regs.cpp
  if (RuntimeOption::EvalMapTgtCacheHuge) {
    hintHuge(tl_base, s_local_base);
  }
#endif

  if (shouldRegister) {
    Guard g(s_tlBaseListLock);
    assertx(std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base) ==
            end(s_tlBaseList));
    s_tlBaseList.push_back(tl_base);
  }

  if (RuntimeOption::EvalPerfDataMap) {
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + s_local_base,
      "rds");
  }

  header()->currentGen = 1;
  if (shouldRegister) {
    local::init();
  }
}
void threadExit(bool shouldUnregister) {
  if (shouldUnregister) {
    local::fini(true);
    Guard g(s_tlBaseListLock);
    auto it = std::find(begin(s_tlBaseList), end(s_tlBaseList), tl_base);
    if (it != end(s_tlBaseList)) {
      s_tlBaseList.erase(it);
    }
  }

  if (RuntimeOption::EvalPerfDataMap) {
    Debug::DebugInfo::recordDataMap(
      tl_base,
      (char*)tl_base + s_local_base,
      "-rds");
  }

  auto const base = tl_base;
  auto do_unmap = [base] {
    munmap(base, s_local_base);
  };

  // Other requests may be reading from this rds section via the s_tlBaseList.
  // We just removed ourself from the list now, but defer the unmap until after
  // any outstanding requests have completed.
  if (shouldUnregister) {
    Treadmill::enqueue(std::move(do_unmap));
  } else {
    do_unmap();
  }
}
void recordRds(Handle h, size_t size,
               folly::StringPiece type, folly::StringPiece msg) {
  if (RuntimeOption::EvalPerfDataMap) {
    if (isNormalHandle(h)) {
      h = genNumberHandleFrom(h);
      size += sizeof(GenNumber);
    }
    Debug::DebugInfo::recordDataMap(
      (char*)(intptr_t)h,
      (char*)(intptr_t)h + size,
      folly::sformat("rds+{}-{}", type, msg));
  }
}
void recordRds(Handle h, size_t size, const Symbol& sym) {
  if (RuntimeOption::EvalPerfDataMap) {
    recordRds(h, size,
              boost::apply_visitor(SymbolKind(), sym),
              boost::apply_visitor(SymbolRep(), sym));
  }
}
std::vector<void*> allTLBases() {
  Guard g(s_tlBaseListLock);
  return s_tlBaseList;
}
folly::Optional<Symbol> reverseLink(Handle handle) {
  RevLinkTable::const_accessor acc;
  if (s_handleTable.find(acc, handle)) {
    return acc->second;
  }
  return folly::none;
}
//////////////////////////////////////////////////////////////////////

}}