/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#ifndef incl_HPHP_MEMORY_MANAGER_DEFS_H
#define incl_HPHP_MEMORY_MANAGER_DEFS_H

#include "hphp/runtime/base/apc-local-array.h"
#include "hphp/runtime/base/apc-local-array-defs.h"
#include "hphp/runtime/base/mixed-array-defs.h"
#include "hphp/runtime/base/packed-array-defs.h"
#include "hphp/runtime/base/set-array.h"

#include "hphp/runtime/vm/globals-array.h"
#include "hphp/runtime/vm/native-data.h"
#include "hphp/runtime/vm/resumable.h"

#include "hphp/runtime/ext/asio/ext_asio.h"
#include "hphp/runtime/ext/asio/ext_await-all-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_condition-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_external-thread-event-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_reschedule-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_sleep-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_static-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_async-generator-wait-handle.h"
#include "hphp/runtime/ext/asio/ext_wait-handle.h"
#include "hphp/runtime/ext/asio/ext_async-function-wait-handle.h"
#include "hphp/runtime/ext/collections/ext_collections-map.h"
#include "hphp/runtime/ext/collections/ext_collections-pair.h"
#include "hphp/runtime/ext/collections/ext_collections-set.h"
#include "hphp/runtime/ext/collections/ext_collections-vector.h"
#include "hphp/runtime/ext/collections/hash-collection.h"
#include "hphp/runtime/ext/std/ext_std_closure.h"
#include "hphp/util/bitset-utils.h"

namespace HPHP {
///////////////////////////////////////////////////////////////////////////////

using HdrBlock = MemRange<HeapObject*>;

// The first slab may be smaller than the normal size when it is
// preallocated. The heap scanner needs to know about it.
extern __thread MemBlock s_firstSlab;

/*
 * Struct Slab encapsulates the header attached to each large block of memory
 * used for allocating smaller blocks. The header contains a start-bit map,
 * used for quickly locating the start of an object, given an interior pointer
 * (a pointer to somewhere in the body). The start-bit map marks the location
 * of the start of each object. One bit represents kSmallSizeAlign bytes.
 */
struct alignas(kSmallSizeAlign) Slab : HeapObject {
  void init() {
    initHeader_32(HeaderKind::Slab, 0);
    static_assert(sizeof(*this) % kSmallSizeAlign == 0, "");
  }

  /*
   * Call fn(h) on each object in the slab, without calling allocSize(),
   * by scanning through the start-bits alone.
   */
  template<class Fn> void iter_starts(Fn fn) const;

  void setStart(const void* p) {
    bitvec_set(starts_, start_index(p));
  }

  /*
   * Set start bits for as many objects of the size class given by index
   * as fit between [start, end).
   */
  void setStarts(const void* start, const void* end, size_t nbytes,
                 size_t index);

  bool isStart(const void* p) const {
    return bitvec_test(starts_, start_index(p));
  }

  HeapObject* find(const void* ptr) const;

  static Slab* fromHeader(HeapObject* h) {
    assertx(h->kind() == HeaderKind::Slab);
    return reinterpret_cast<Slab*>(h);
  }

  size_t size() const {
    assertx(s_firstSlab.size <= kSlabSize);
    return (this == s_firstSlab.ptr) ? s_firstSlab.size : kSlabSize;
  }

  static Slab* fromPtr(const void* p) {
    static_assert(kSlabAlign == kSlabSize, "");
    auto slab = reinterpret_cast<Slab*>(uintptr_t(p) & ~(kSlabAlign - 1));
    assertx(slab->kind() == HeaderKind::Slab);
    return slab;
  }

  char* start() { return (char*)(this + 1); }
  char* end() { return (char*)this + size(); }
  const char* start() const { return (const char*)(this + 1); }
  const char* end() const { return (const char*)this + size(); }

  // access a whole start-bits word, for testing
  uint64_t start_bits(const void* p) const {
    return starts_[start_index(p) / kBitsPerStart];
  }

  void clearStarts() {
    memset(starts_, 0, sizeof(starts_));
  }

  static size_t start_index(const void* p) {
    return ((size_t)p & (kSlabSize - 1)) / kSmallSizeAlign;
  }

  static constexpr size_t kBitsPerStart = 64; // bits per starts_[] entry
  static constexpr auto kNumStarts = kSlabSize / kBitsPerStart /
                                     kSmallSizeAlign;

  // tables for bulk-initializing start bits in setStarts(). kNumMasks
  // covers the small size classes that span <= 64 start bits (up to 1K).
  static constexpr size_t kNumMasks = 20;
  static std::array<uint64_t,kNumMasks> masks_;
  static std::array<uint8_t,kNumMasks> shifts_;

  // 1 = a HeapObject starts at the corresponding address
  // 0 = in the middle of an object or free space
  uint64_t starts_[kNumStarts];
};

static_assert(kMaxSmallSize < kSlabSize - sizeof(Slab),
              "kMaxSmallSize must fit in Slab");

inline const Resumable* resumable(const HeapObject* h) {
  assertx(h->kind() == HeaderKind::AsyncFuncFrame);
  auto native = static_cast<const NativeNode*>(h);
  return reinterpret_cast<const Resumable*>(
    (const char*)native + native->obj_offset - sizeof(Resumable)
  );
}

inline Resumable* resumable(HeapObject* h) {
  assertx(h->kind() == HeaderKind::AsyncFuncFrame);
  auto native = static_cast<NativeNode*>(h);
  return reinterpret_cast<Resumable*>(
    (char*)native + native->obj_offset - sizeof(Resumable)
  );
}

inline const c_Awaitable* asyncFuncWH(const HeapObject* h) {
  assertx(resumable(h)->actRec()->func()->isAsyncFunction());
  auto native = static_cast<const NativeNode*>(h);
  auto obj = reinterpret_cast<const c_Awaitable*>(
    (const char*)native + native->obj_offset
  );
  assertx(obj->headerKind() == HeaderKind::AsyncFuncWH);
  return obj;
}

inline c_Awaitable* asyncFuncWH(HeapObject* h) {
  assertx(resumable(h)->actRec()->func()->isAsyncFunction());
  auto native = static_cast<NativeNode*>(h);
  auto obj = reinterpret_cast<c_Awaitable*>(
    (char*)native + native->obj_offset
  );
  assertx(obj->headerKind() == HeaderKind::AsyncFuncWH);
  return obj;
}

inline const ObjectData* closureObj(const HeapObject* h) {
  assertx(h->kind() == HeaderKind::ClosureHdr);
  auto closure_hdr = static_cast<const ClosureHdr*>(h);
  auto obj = reinterpret_cast<const ObjectData*>(closure_hdr + 1);
  assertx(obj->headerKind() == HeaderKind::Closure);
  return obj;
}

inline ObjectData* closureObj(HeapObject* h) {
  assertx(h->kind() == HeaderKind::ClosureHdr);
  auto closure_hdr = static_cast<ClosureHdr*>(h);
  auto obj = reinterpret_cast<ObjectData*>(closure_hdr + 1);
  assertx(obj->headerKind() == HeaderKind::Closure);
  return obj;
}

// If this header is one of the types that contains an ObjectData,
// return the (possibly inner-pointer) ObjectData*.
inline const ObjectData* innerObj(const HeapObject* h) {
  return isObjectKind(h->kind()) ? static_cast<const ObjectData*>(h) :
         h->kind() == HeaderKind::AsyncFuncFrame ? asyncFuncWH(h) :
         h->kind() == HeaderKind::NativeData ?
           Native::obj(static_cast<const NativeNode*>(h)) :
         h->kind() == HeaderKind::ClosureHdr ? closureObj(h) :
         nullptr;
}

// constexpr version of MemoryManager::sizeClass.
// requires sizeof(T) <= kMaxSmallSizeLookup
template<class T> constexpr size_t sizeClass() {
  return kSizeIndex2Size[
    kSmallSize2Index[(sizeof(T)-1) >> kLgSmallSizeQuantum]
  ];
}

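// For example (illustrative): the lookup maps the type's size to a size-class
// index via kSmallSize2Index, then back to that class's byte size, so the
// result is sizeof(T) rounded up to its small size class and is usable in
// constant expressions (as the kind_sizes table below relies on):
//
//   static_assert(sizeClass<RefData>() >= sizeof(RefData), "");
//   constexpr size_t n = sizeClass<c_Pair>(); // compile-time constant
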
/*
 * Return the size of h in bytes, rounded up to a size class if necessary.
 */
inline size_t allocSize(const HeapObject* h) {
  // Ordering depends on ext_wait-handle.h.
  static const uint16_t waithandle_sizes[] = {
    sizeClass<c_StaticWaitHandle>(),
    0, /* AsyncFunction */
    sizeClass<c_AsyncGeneratorWaitHandle>(),
    0, /* AwaitAll */
    sizeClass<c_ConditionWaitHandle>(),
    sizeClass<c_RescheduleWaitHandle>(),
    sizeClass<c_SleepWaitHandle>(),
    sizeClass<c_ExternalThreadEventWaitHandle>(),
  };

  // Ordering depends on header-kind.h.
  static constexpr uint16_t kind_sizes[] = {
    0, /* Packed */
    0, /* Mixed */
    sizeClass<ArrayData>(), /* Empty */
    0, /* APCLocalArray */
    sizeClass<GlobalsArray>(),
    0, /* Dict */
    0, /* VecArray */
    0, /* Keyset */
    0, /* String */
    0, /* Resource */
    sizeClass<RefData>(),
    0, /* Object */
    0, /* NativeObject */
    0, /* WaitHandle */
    sizeClass<c_AsyncFunctionWaitHandle>(),
    0, /* AwaitAllWH */
    0, /* Closure */
    sizeClass<c_Vector>(),
    sizeClass<c_Map>(),
    sizeClass<c_Set>(),
    sizeClass<c_Pair>(),
    sizeClass<c_ImmVector>(),
    sizeClass<c_ImmMap>(),
    sizeClass<c_ImmSet>(),
    0, /* AsyncFuncFrame */
    0, /* NativeData */
    0, /* ClosureHdr */
    0, /* Cpp */
    0, /* SmallMalloc */
    0, /* BigMalloc */
    0, /* BigObj */
    0, /* Free */
    0, /* Hole */
    sizeof(Slab), /* Slab */
  };

#define CHECKSIZE(knd, type) \
  static_assert(kind_sizes[(int)HeaderKind::knd] == sizeClass<type>(), #knd);
  CHECKSIZE(Empty, ArrayData)
  CHECKSIZE(Globals, GlobalsArray)
  CHECKSIZE(Ref, RefData)
  CHECKSIZE(AsyncFuncWH, c_AsyncFunctionWaitHandle)
  CHECKSIZE(Vector, c_Vector)
  CHECKSIZE(Map, c_Map)
  CHECKSIZE(Set, c_Set)
  CHECKSIZE(Pair, c_Pair)
  CHECKSIZE(ImmVector, c_ImmVector)
  CHECKSIZE(ImmMap, c_ImmMap)
  CHECKSIZE(ImmSet, c_ImmSet)
  static_assert(kind_sizes[(int)HeaderKind::Slab] == sizeof(Slab), "");
#undef CHECKSIZE

#define CHECKSIZE(knd) \
  static_assert(kind_sizes[(int)HeaderKind::knd] == 0, #knd);
  CHECKSIZE(Packed)
  CHECKSIZE(Mixed)
  CHECKSIZE(Dict)
  CHECKSIZE(VecArray)
  CHECKSIZE(Keyset)
  CHECKSIZE(Apc)
  CHECKSIZE(String)
  CHECKSIZE(Object)
  CHECKSIZE(Resource)
  CHECKSIZE(NativeObject)
  CHECKSIZE(WaitHandle)
  CHECKSIZE(AwaitAllWH)
  CHECKSIZE(Closure)
  CHECKSIZE(AsyncFuncFrame)
  CHECKSIZE(NativeData)
  CHECKSIZE(ClosureHdr)
  CHECKSIZE(Cpp)
  CHECKSIZE(SmallMalloc)
  CHECKSIZE(BigMalloc)
  CHECKSIZE(BigObj)
  CHECKSIZE(Free)
  CHECKSIZE(Hole)
#undef CHECKSIZE

  auto kind = h->kind();
  if (auto size = kind_sizes[(int)kind]) {
    return size;
  }

  // In the cases below, kinds that don't need size-class rounding will
  // return their size immediately; the rest break to the bottom, then
  // return a rounded-up size.
  size_t size = 0;
  switch (kind) {
    case HeaderKind::Packed:
    case HeaderKind::VecArray:
      // size = kSizeIndex2Size[h->aux16]
      size = PackedArray::heapSize(static_cast<const ArrayData*>(h));
      break;
    case HeaderKind::Mixed:
    case HeaderKind::Dict:
      // size = fn of h->m_scale
      size = static_cast<const MixedArray*>(h)->heapSize();
      break;
    case HeaderKind::Keyset:
      // size = fn of h->m_scale
      size = static_cast<const SetArray*>(h)->heapSize();
      break;
    case HeaderKind::Apc:
      // size = h->m_size * 16 + sizeof(APCLocalArray)
      size = static_cast<const APCLocalArray*>(h)->heapSize();
      break;
    case HeaderKind::String:
      // size = isFlat ? (isRefCounted ? table[aux16] : m_size + C) : proxy size
      size = static_cast<const StringData*>(h)->heapSize();
      break;
    case HeaderKind::Closure:
    case HeaderKind::Object:
      // size = h->m_cls->m_declProperties.m_extra * sz(TV) + sz(OD)
      // [ObjectData][props]
      size = static_cast<const ObjectData*>(h)->heapSize();
      break;
    case HeaderKind::ClosureHdr:
      // [ClosureHdr][ObjectData][use vars]
      size = static_cast<const ClosureHdr*>(h)->size();
      break;
    case HeaderKind::WaitHandle: {
      // size = table[h->whkind & mask]
      // [ObjectData][subclass]
      auto obj = static_cast<const ObjectData*>(h);
      auto whKind = wait_handle<c_Awaitable>(obj)->getKind();
      size = waithandle_sizes[(int)whKind];
      assertx(size != 0); // AsyncFuncFrame and AwaitAllWH have their own kinds
      assertx(size == MemoryManager::sizeClass(size));
      return size;
    }
    case HeaderKind::AwaitAllWH:
      // size = h->m_cap * sz(Node) + sz(c_AAWH)
      // [ObjectData][children...]
      size = static_cast<const c_AwaitAllWaitHandle*>(h)->heapSize();
      break;
    case HeaderKind::Resource:
      // [ResourceHdr][ResourceData subclass]
      size = static_cast<const ResourceHdr*>(h)->heapSize();
      break;
    case HeaderKind::Cpp:
    case HeaderKind::SmallMalloc: // [MallocNode][bytes...]
      // size = h->nbytes // 64-bit
      size = static_cast<const MallocNode*>(h)->nbytes;
      break;
    case HeaderKind::AsyncFuncFrame:
      // size = h->obj_offset + C // 32-bit
      // [NativeNode][locals][Resumable][c_AsyncFunctionWaitHandle]
      size = static_cast<const NativeNode*>(h)->obj_offset +
             sizeof(c_AsyncFunctionWaitHandle);
      break;
    case HeaderKind::NativeData: {
      // h->obj_offset + (h+h->obj_offset)->m_cls->m_extra * sz(TV) + sz(OD)
      // [NativeNode][NativeData][ObjectData][props] is one allocation.
      // For generators:
      // [NativeNode][NativeData<locals><Resumable><GeneratorData>][ObjectData]
      auto native = static_cast<const NativeNode*>(h);
      size = native->obj_offset + Native::obj(native)->heapSize();
      break;
    }
    case HeaderKind::BigObj:    // [MallocNode][HeapObject...]
    case HeaderKind::BigMalloc: // [MallocNode][raw bytes...]
      size = static_cast<const MallocNode*>(h)->nbytes;
      // not rounded up to a size class, because we never need to iterate over
      // more than one allocated block; this avoids calling nallocx().
      return size;
    case HeaderKind::Free:
    case HeaderKind::Hole:
      size = static_cast<const FreeNode*>(h)->size();
      // Free objects are guaranteed to be already size-class rounded;
      // Holes are not size-class rounded, but neither needs rounding up here.
      return size;
    case HeaderKind::Slab:
    case HeaderKind::NativeObject:
    case HeaderKind::AsyncFuncWH:
    case HeaderKind::Empty:
    case HeaderKind::Globals:
    case HeaderKind::Vector:
    case HeaderKind::Map:
    case HeaderKind::Set:
    case HeaderKind::Pair:
    case HeaderKind::ImmVector:
    case HeaderKind::ImmMap:
    case HeaderKind::ImmSet:
    case HeaderKind::Ref:
      // fixed-size kinds: already returned from the kind_sizes table above
      not_reached();
  }
  return MemoryManager::sizeClass(size);
}

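// Illustrative note on the final rounding step: kinds that break out of the
// switch report their raw payload size, which MemoryManager::sizeClass then
// rounds up to the size class the allocator actually handed out. A minimal
// sketch of the invariant, assuming h is a small (slab-allocated) MixedArray:
//
//   auto raw = static_cast<const MixedArray*>(h)->heapSize();
//   auto rounded = MemoryManager::sizeClass(raw);
//   assertx(rounded >= raw && rounded == allocSize(h));
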
///////////////////////////////////////////////////////////////////////////////

// call fn(h) on each object in the slab, without calling allocSize()
template<class Fn>
void Slab::iter_starts(Fn fn) const {
  auto ptr = (char*)this;
  for (auto it = starts_, end = starts_ + kNumStarts; it < end; ++it) {
    for (auto bits = *it; bits != 0; bits &= (bits - 1)) {
      auto k = ffs64(bits);
      auto h = (HeapObject*)(ptr + k * kSmallSizeAlign);
      fn(h);
    }
    ptr += kBitsPerStart * kSmallSizeAlign;
  }
}

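// The inner loop above uses two classic bit tricks: ffs64(bits) finds the
// index of the lowest set bit, and bits &= (bits - 1) clears it. A worked
// example on one start-bits word (values illustrative):
//
//   uint64_t bits = 0b10010;   // objects start at slots 1 and 4
//   ffs64(bits) == 1;          // visit slot 1
//   bits &= bits - 1;          // bits == 0b10000
//   ffs64(bits) == 4;          // visit slot 4
//   bits &= bits - 1;          // bits == 0, inner loop ends
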
/*
 * Search from ptr backwards for a HeapObject that contains ptr.
 * Returns the HeapObject* for the containing object, else nullptr.
 */
inline HeapObject* Slab::find(const void* ptr) const {
  auto const i = start_index(ptr);
  if (bitvec_test(starts_, i)) {
    return reinterpret_cast<HeapObject*>(
      uintptr_t(ptr) & ~(kSmallSizeAlign - 1)
    );
  }
  // compute a mask with 0s at i+1 and higher
  auto const mask = ~0ull >> (kBitsPerStart - 1 - i % kBitsPerStart);
  auto cursor = starts_ + i / kBitsPerStart;
  for (auto bits = *cursor & mask;; bits = *cursor) {
    if (bits) {
      auto k = fls64(bits);
      auto h = reinterpret_cast<HeapObject*>(
        (char*)this + ((cursor - starts_) * kBitsPerStart + k) * kSmallSizeAlign
      );
      auto size = allocSize(h);
      if ((char*)ptr >= (char*)h + size) break; // ptr is in free space past h
      return h;
    }
    if (--cursor < starts_) break;
  }
  return nullptr;
}

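// Worked example of the backward scan (values assumed for illustration):
// suppose ptr falls in start-bit slot i = 70, i.e. bit 6 of starts_[1].
//
//   auto mask = ~0ull >> (64 - 1 - 70 % 64); // == 0x7f, keeps bits 0..6
//
// fls64(starts_[1] & mask) then yields the highest start bit at or below
// slot 70; if the object found there ends before ptr, ptr points into free
// space and the scan reports nullptr instead of walking earlier objects.
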
/*
 * Set multiple start bits. See implementation notes near Slab::InitMasks
 * in memory-manager.cpp.
 */
inline void Slab::setStarts(const void* start, const void* end,
                            size_t nbytes, size_t index) {
  auto const start_bit = start_index(start);
  auto const end_bit = start_index((char*)end - kSmallSizeAlign) + 1;
  auto const nbits = nbytes / kSmallSizeAlign;
  if (nbits <= kBitsPerStart &&
      start_bit / kBitsPerStart < end_bit / kBitsPerStart) {
    assertx(index < kNumMasks);
    auto const mask = masks_[index];
    size_t const k = shifts_[index];
    // initially n = how much mask should be shifted to line up with start_bit
    auto n = start_bit % kBitsPerStart;
    // first word (containing start_bit) |= left-shifted mask
    auto s = &starts_[start_bit / kBitsPerStart];
    *s++ |= mask << n;
    // subsequently, n accumulates shifts required by the size class
    n = (n + k) % nbits;
    // store middle words with shifted mask, update shift amount each time
    for (auto e = &starts_[end_bit / kBitsPerStart]; s < e;) {
      *s++ |= mask << n;
      n = n + k >= nbits ? n + k - nbits : n + k; // n = (n+k) % nbits
    }
    // last word (containing end_bit) |= mask with end_bit and higher zeroed
    *s |= (mask << n) & ~(~0ull << end_bit % kBitsPerStart);
  } else {
    // Either the size class is too large to fit 1+ bits per 64-bit word,
    // or the ncontig range was too small. Set one bit at a time.
    for (auto i = start_bit; i < end_bit; i += nbits) {
      bitvec_set(starts_, i);
    }
  }
}

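// Illustrative example (values assumed, not taken from masks_/shifts_): for
// a 32-byte size class with kSmallSizeAlign == 16, each object spans
// nbits = 2 start-bit slots, so the replicated mask sets every other bit:
//
//   uint64_t mask = 0x5555555555555555ull;  // bits 0,2,4,...,62
//   starts_[w] |= mask << (start_bit % 64); // one store marks up to 32 objects
//
// versus up to 32 separate bitvec_set() calls in the fallback path.
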
template<class OnBig, class OnSlab>
void SparseHeap::iterate(OnBig onBig, OnSlab onSlab) {
  // slabs and bigs are sorted; walk through both in address order
  const auto SENTINEL = (HeapObject*) ~0LL;
  auto slab = std::begin(m_slabs);
  auto big = std::begin(m_bigs);
  auto slabend = std::end(m_slabs);
  auto bigend = std::end(m_bigs);
  while (slab != slabend || big != bigend) {
    HeapObject* slab_hdr = slab != slabend ? (HeapObject*)slab->ptr : SENTINEL;
    HeapObject* big_hdr = big != bigend ? *big : SENTINEL;
    assertx(slab_hdr < SENTINEL || big_hdr < SENTINEL);
    if (slab_hdr < big_hdr) {
      onSlab(slab_hdr, slab->size);
      ++slab;
    } else {
      assertx(big_hdr < slab_hdr);
      onBig(big_hdr, allocSize(big_hdr));
      ++big;
    }
  }
}

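// Usage sketch (illustrative lambdas, assuming a SparseHeap& heap): the two
// callbacks receive each region in strictly increasing address order.
//
//   heap.iterate(
//     [](HeapObject* big, size_t size) { /* one [MallocNode][...] block */ },
//     [](HeapObject* slab, size_t size) { /* one whole slab, incl. header */ }
//   );
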
template<class Fn> void SparseHeap::iterate(Fn fn) {
  // slabs and bigs are sorted; walk through both in address order
  const auto SENTINEL = (HeapObject*) ~0LL;
  auto slab = std::begin(m_slabs);
  auto big = std::begin(m_bigs);
  auto slabend = std::end(m_slabs);
  auto bigend = std::end(m_bigs);
  while (slab != slabend || big != bigend) {
    HeapObject* slab_hdr = slab != slabend ? (HeapObject*)slab->ptr : SENTINEL;
    HeapObject* big_hdr = big != bigend ? *big : SENTINEL;
    assertx(slab_hdr < SENTINEL || big_hdr < SENTINEL);
    HeapObject* h;
    HeapObject* end;
    if (slab_hdr < big_hdr) {
      h = slab_hdr;
      end = (HeapObject*)((char*)h + slab->size);
      ++slab;
      assertx(end <= big_hdr); // otherwise slab overlaps next big
    } else {
      h = big_hdr;
      end = nullptr; // ensure we don't loop below
      ++big;
    }
    do {
      auto size = allocSize(h);
      fn(h, size);
      h = (HeapObject*)((char*)h + size);
    } while (end && h < end);
    assertx(!end || h == end); // otherwise, last object was truncated
  }
}

template<class Fn> void MemoryManager::iterate(Fn fn) {
  m_heap.iterate([&](HeapObject* h, size_t allocSize) {
    if (h->kind() == HeaderKind::BigObj) {
      // skip the MallocNode prefix and expose the inner object
      h = static_cast<MallocNode*>(h) + 1;
      allocSize -= sizeof(MallocNode);
    } else if (h->kind() >= HeaderKind::Hole) {
      assertx(unsigned(h->kind()) < NumHeaderKinds);
      // no valid pointer can point here.
      return; // continue iterating
    }
    fn(h, allocSize);
  });
}

template<class Fn> void MemoryManager::forEachHeapObject(Fn fn) {
  iterate(fn);
}

template<class Fn> void MemoryManager::forEachObject(Fn fn) {
  if (debug) checkHeap("MemoryManager::forEachObject");
  std::vector<const ObjectData*> ptrs;
  forEachHeapObject([&](HeapObject* h, size_t) {
    if (auto obj = innerObj(h)) {
      ptrs.push_back(obj);
    }
  });
  for (auto ptr : ptrs) {
    fn(ptr);
  }
}

template<class Fn> void MemoryManager::sweepApcArrays(Fn fn) {
  for (size_t i = 0; i < m_apc_arrays.size();) {
    auto a = m_apc_arrays[i];
    if (fn(a)) {
      a->sweep();
      removeApcArray(a);
    } else {
      ++i;
    }
  }
}

template<class Fn> void MemoryManager::sweepApcStrings(Fn fn) {
  auto& head = getStringList();
  for (StringDataNode *next, *n = head.next; n != &head; n = next) {
    next = n->next;
    assertx(next && uintptr_t(next) != kSmallFreeWord);
    assertx(next && uintptr_t(next) != kMallocFreeWord);
    auto const s = StringData::node2str(n);
    if (fn(s)) {
      s->unProxy();
    }
  }
}

///////////////////////////////////////////////////////////////////////////////
}

#endif