/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/apc-gc-manager.h"
#include "hphp/runtime/base/req-containers.h"
#include "hphp/runtime/base/mixed-array-defs.h"
#include "hphp/runtime/base/memory-manager-defs.h"
#include "hphp/runtime/base/heap-scan.h"
#include "hphp/runtime/base/thread-info.h"
#include "hphp/runtime/base/heap-graph.h"
#include "hphp/runtime/base/weakref-data.h"
#include "hphp/runtime/ext/weakref/weakref-data-handle.h"
#include "hphp/runtime/vm/vm-regs.h"
#include "hphp/util/alloc.h"
#include "hphp/util/bloom-filter.h"
#include "hphp/util/cycles.h"
#include "hphp/util/process.h"
#include "hphp/util/ptr-map.h"
#include "hphp/util/struct-log.h"
#include "hphp/util/timer.h"
#include "hphp/util/trace.h"
#include "hphp/util/type-scan.h"

#include <boost/dynamic_bitset.hpp>
#include <folly/portability/Unistd.h>
#include <folly/Range.h>
struct Counter {
  size_t count{0};
  size_t bytes{0};
  void operator+=(size_t n) {
    bytes += n;
    count++;
  }
};
bool hasNativeData(const HeapObject* h) {
  return h->kind() == HeaderKind::NativeObject;
}
constexpr auto MinMark = GCBits(1);
constexpr auto MaxMark = GCBits(3);
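// Illustrative note (not in the original file): mark versions cycle
// MinMark -> ... -> MaxMark -> MinMark, so a HeapObject whose marks() field
// still carries the version of an earlier collection compares unequal to the
// current mark_version_ and simply reads as unmarked; no per-collection
// clearing pass over the heap is needed. A minimal sketch of the advance step
// performed inline in collectImpl() below:
//
//   auto next = (cur == MaxMark) ? MinMark : GCBits(uint8_t(cur) + 1);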
/*
 * Eval.EnableGC - Default value of the per-request MemoryManager::m_gc_enabled
 * flag. This flag can be dynamically set/cleared by PHP via
 * ini_set("zend.enable_gc"). In turn, m_gc_enabled enables automatic background
 * garbage collection. Regardless of its value, PHP can call gc_collect_cycles()
 * on demand.
 *
 * Eval.EagerGC - If set, trigger collection after every allocation, in debug
 * builds. Has no effect in opt builds or when m_gc_enabled == false.
 *
 * Eval.FilterGCPoints - If true, use a bloom filter to only do an eager
 * collection once per unique VMPC. This makes eager mode fast enough to be
 * usable for unit tests, and almost tolerable for large integration tests.
 *
 * Eval.GCSampleRate - per *request* sample rate to enable GC logging.
 * If coinflip is true, every GC for the current request will be logged.
 * Note this is not the per-collection sample rate: we do one coinflip per
 * request.
 *
 * Eval.GCMinTrigger - Minimum heap growth, in bytes since the last collection,
 * before triggering the next collection. See MemoryManager::updateNextGc().
 *
 * Eval.GCTriggerPct - Minimum heap growth, as a percent of remaining heap
 * space, before triggering the next collection. See updateNextGc().
 *
 * Eval.Quarantine - If true, objects swept by GC will be trash-filled and
 * leaked, never reallocated.
 *
 * Experimental options:
 *
 * Eval.GCForAPC - enable whole-process APC collection. See APCGCManager.
 * Eval.GCForAPCTrigger - trigger threshold; see APCGCManager.
 *
 * Eval.TwoPhaseGC - perform tracing in two phases, the second of which
 * must only encounter exactly-scanned pointers, to enable object copying.
 */
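/*
 * Worked example (illustrative, hypothetical values): with
 * Eval.GCTriggerPct = 0.5 and Eval.GCMinTrigger = 64 << 20, a request whose
 * remaining clearance (usage limit minus auxUsage minus mmUsage) is 1 GiB
 * schedules its next collection after max(512 MiB, 64 MiB) = 512 MiB of
 * additional mmUsage growth; see MemoryManager::updateNextGc() below.
 */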
/*
 * Collector state needed during a single whole-heap mark-sweep collection.
 */
struct Collector {
  explicit Collector(HeapImpl& heap, APCGCManager* apcgc, GCBits mark_version)
    : heap_(heap), mark_version_{mark_version}, apcgc_(apcgc)
  {}

  template<bool apcgc> void collect();
  void init();
  template<bool apcgc> void traceAll();
  template<bool apcgc> void traceConservative();
  template<bool apcgc> void traceExact();
  void sweep();

  // mark ambiguous pointers in the range [start,start+len)
  template<bool apcgc>
  void conservativeScan(const void* start, size_t len);

  bool marked(const HeapObject* h) {
    return h->marks() == mark_version_;
  }
  template<bool apcgc> void checkedEnqueue(const void* p);
  template<bool apcgc> void exactEnqueue(const void* p);
  HeapObject* find(const void*);

  size_t slab_index(const void* h) {
    assertx((char*)h >= (char*)slabs_range_.ptr &&
            (char*)h < (char*)slabs_range_.ptr + slabs_range_.size);
    return (uintptr_t(h) - uintptr_t(slabs_range_.ptr)) >> kLgSlabSize;
  }

  HeapImpl& heap_;
  GCBits const mark_version_;
  size_t num_small_{0}, num_big_{0}, num_slabs_{0};
  size_t marked_{0}, pinned_{0}, unknown_{0}; // object counts
  Counter cscanned_roots_, cscanned_; // bytes
  Counter xscanned_roots_, xscanned_; // bytes
  size_t init_ns_, initfree_ns_, roots_ns_{0}, mark_ns_{0}, sweep_ns_;
  size_t max_worklist_{0}; // max size of cwork_ + xwork_
  size_t freed_bytes_{0};
  PtrMap<const HeapObject*> ptrs_;
  MemBlock slabs_range_;
  boost::dynamic_bitset<> slab_map_; // 1 bit per 2M slab
  type_scan::Scanner type_scanner_;
  std::vector<const HeapObject*> cwork_, xwork_;
  APCGCManager* const apcgc_;
};
HeapObject* Collector::find(const void* ptr) {
  if (uintptr_t(ptr) - uintptr_t(slabs_range_.ptr) < slabs_range_.size &&
      slab_map_.test(slab_index(ptr))) {
    return Slab::fromPtr(ptr)->find(ptr);
  }
  return const_cast<HeapObject*>(ptrs_.start(ptr));
}
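// Worked example (illustrative, not from the original source): for an
// interior pointer p into an object allocated in a 2M slab, the range check
// plus slab_map_ bit test above routes the lookup to Slab::fromPtr(p)->find(p),
// which returns the containing HeapObject header. A pointer into a big
// (malloc'd) allocation misses the slab range and falls through to
// ptrs_.start(p), the region map built in init(). A pointer that hits neither
// structure yields nullptr, and the enqueue paths below ignore it.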
DEBUG_ONLY bool checkEnqueuedKind(const HeapObject* h) {
  switch (h->kind()) {
    case HeaderKind::Apc:
    case HeaderKind::Globals:
    case HeaderKind::Ref:
    case HeaderKind::Resource:
    case HeaderKind::Packed:
    case HeaderKind::Mixed:
    case HeaderKind::Dict:
    case HeaderKind::VecArray:
    case HeaderKind::Keyset:
    case HeaderKind::Empty:
    case HeaderKind::Cpp:
    case HeaderKind::SmallMalloc:
    case HeaderKind::BigMalloc:
    case HeaderKind::String:
      break;
    case HeaderKind::Free:
    case HeaderKind::Hole:
      // these can be on the worklist because we don't expect to find
      // dangling pointers. they are ignored when popped from the worklist.
      break;
    case HeaderKind::Object:
    case HeaderKind::Vector:
    case HeaderKind::Map:
    case HeaderKind::Set:
    case HeaderKind::Pair:
    case HeaderKind::ImmVector:
    case HeaderKind::ImmMap:
    case HeaderKind::ImmSet:
    case HeaderKind::WaitHandle:
    case HeaderKind::AwaitAllWH:
      // Object kinds. None of these have native-data, because if they
      // do, the mapped header should be for the NativeData prefix.
      assertx(!hasNativeData(h));
      break;
    case HeaderKind::AsyncFuncFrame:
    case HeaderKind::NativeData:
    case HeaderKind::ClosureHdr:
      // these have inner objects, but we queued the outer one.
      break;
    case HeaderKind::Closure:
    case HeaderKind::AsyncFuncWH:
    case HeaderKind::NativeObject:
      // These headers shouldn't be found during heap or slab iteration because
      // they are appended to ClosureHdr, AsyncFuncFrame, or NativeData.
    case HeaderKind::BigObj:
    case HeaderKind::Slab:
      // These header types are not allocated objects; they are handled
      // earlier and should never be queued on the gc worklist.
      always_assert(false && "bad header kind");
      break;
  }
  return true;
}
bool willScanConservative(const HeapObject* h) {
  return (h->kind() == HeaderKind::SmallMalloc ||
          h->kind() == HeaderKind::BigMalloc) &&
    type_scan::hasConservativeScanner(
      static_cast<const MallocNode*>(h)->typeIndex()
    );
}
template <bool apcgc>
void Collector::checkedEnqueue(const void* p) {
  if (auto h = find(p)) {
    // enqueue h the first time. If it's an object with no pointers (eg String),
    // we'll skip it when we process the queue.
    auto old = h->marks();
    if (old != mark_version_) {
      h->setmarks(mark_version_);
      ++marked_;
      auto& work = willScanConservative(h) ? cwork_ : xwork_;
      work.push_back(h);
      max_worklist_ = std::max(max_worklist_, cwork_.size() + xwork_.size());
      assertx(checkEnqueuedKind(h));
    }
  } else if (apcgc) {
    // If p doesn't belong to any APC data, APCGCManager won't do anything
    apcgc_->mark(p);
  }
}
// It is correct to ignore willScanConservative(h) in phase 2 because:
// * target is !type_scan::isKnownType, making it an "unknown" root,
//   and scanned & pinned in phase 1; OR
// * target is a marked (thus pinned) req::container buffer, found in phase 1,
//   so we can disregard this pointer to it, since it won't move; OR
// * target is an unmarked req::container buffer. p is a (possibly interior)
//   pointer into it. p shouldn't keep the buffer alive, since whoever
//   owns it will scan it using the container's iterator api; OR
// * p could be a stale pointer of any interesting type, that randomly
//   is pointing to recycled memory. Ignoring it is actually desirable.
template <bool apcgc>
void Collector::exactEnqueue(const void* p) {
  if (auto h = find(p)) {
    auto old = h->marks();
    if (old != mark_version_ && !willScanConservative(h)) {
      h->setmarks(mark_version_);
      ++marked_;
      xwork_.push_back(h);
      max_worklist_ = std::max(max_worklist_, xwork_.size());
      assertx(checkEnqueuedKind(h));
    }
  } else if (apcgc) {
    // If p doesn't belong to any APC data, APCGCManager won't do anything
    apcgc_->mark(p);
  }
}
// Mark ambiguous pointers in the range [start,start+len). If the start or
// end is a partial word, don't scan that word.
template <bool apcgc>
void FOLLY_DISABLE_ADDRESS_SANITIZER
Collector::conservativeScan(const void* start, size_t len) {
  constexpr uintptr_t M{7}; // word size - 1
  auto s = (char**)((uintptr_t(start) + M) & ~M); // round up
  auto e = (char**)((uintptr_t(start) + len) & ~M); // round down
  cscanned_ += uintptr_t(e) - uintptr_t(s);
  for (; s < e; s++) {
    checkedEnqueue<apcgc>(
      // Mask off the upper 16-bits to handle things like
      // DiscriminatedPtr which stores things up there.
      (void*)(uintptr_t(*s) & (-1ULL >> 16))
    );
  }
}
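// Worked example (illustrative): scanning a 0x2D-byte region starting at
// address 0x1003 with the rounding above gives
//   s = (0x1003 + 7) & ~7    = 0x1008   // first fully-contained word
//   e = (0x1003 + 0x2D) & ~7 = 0x1030   // one past the last full word
// so only the five aligned words in [0x1008, 0x1030) are treated as candidate
// pointers; the partial words at either end are skipped, as the comment on
// conservativeScan() states.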
inline int64_t cpu_ns() {
  return HPHP::Timer::GetThreadCPUTimeNanos();
}
/*
 * If we have non-conservative scanners, we must treat all unknown
 * type-index allocations in the heap as roots. Why? The generated
 * scanners will only report a pointer if it knows the pointer can point
 * to an object on the request heap. It does this by tracking all types
 * which are allocated via the allocation functions via the type-index
 * mechanism. If an allocation has an unknown type-index, then by definition
 * we don't know which type it contains, and therefore the auto-generated
 * scanners will never report a pointer to such a type.
 *
 * The only good way to solve this is to treat such allocations as roots
 * and conservatively scan them. If we're conservatively scanning everything,
 * we need to take no special action, as the above problem only applies to
 * auto-generated scanners.
 */
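// Illustrative sketch (not part of the original file): init() below handles
// such unknown allocations by pinning them and queuing them for conservative
// scanning, exactly as if they were roots:
//
//   if (!type_scan::isKnownType(node->typeIndex())) {
//     node->setmarks(mark_version_);  // pin for this collection
//     cwork_.push_back(node);         // scan its payload conservatively
//   }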
// Initially parse the heap to find valid objects and initialize metadata.
NEVER_INLINE void Collector::init() {
  auto const t0 = cpu_ns();
  SCOPE_EXIT { init_ns_ = cpu_ns() - t0; };
  tl_heap->initFree(); // calls HeapImpl::sort(), required below
  initfree_ns_ = cpu_ns() - t0;

  slabs_range_ = heap_.slab_range();
  slab_map_.resize((slabs_range_.size + kSlabSize - 1) >> kLgSlabSize);

  heap_.iterate(
    [&](HeapObject* h, size_t size) { // onBig
      if (h->kind() == HeaderKind::BigMalloc) {
        ptrs_.insert(h, size);
        if (!type_scan::isKnownType(static_cast<MallocNode*>(h)->typeIndex())) {
          ++unknown_;
          h->setmarks(mark_version_);
          cwork_.push_back(h);
        }
      } else {
        // put the inner big object in ptrs_ without the BigObj header
        assertx(h->kind() == HeaderKind::BigObj);
        ptrs_.insert(static_cast<MallocNode*>(h)+1, size - sizeof(MallocNode));
      }
    },
    [&](HeapObject* h, size_t size) { // onSlab
      slab_map_.set(slab_index(h));
    }
  );
  ptrs_.prepare();
}
// Collect the heap using mark/sweep.
//
// Init: prepare object-start bitmaps, and mark/enqueue unknown allocations.
//
// Trace (two-phase):
// 1. scan all conservative roots, or hybrid roots which might have
//    conservative fields. Also scan any conservative heap objects reached
//    via conservative scanning. After phase 1, all conservative scanning is
//    done and it's safe to move objects while tracing.
// 2. scan all exact roots and exact heap objects. Ignore any exactly-scanned
//    pointers to conservatively-scanned objects (see comments in
//    exactEnqueue()).
//
// Trace (one-phase). This is used if no exact type_scanners are available.
// 1. scan all roots, then the transitive closures of all heap objects,
//    with no moving.
//
// Sweep:
// 1. iterate through any tables holding "weak" pointers, clearing entries
//    if the target(s) aren't marked, including nulling out WeakRef objects.
// 2. free all unmarked objects, except SmallMalloc/BigMalloc nodes: we don't
//    sweep "unknown" allocations or req::container buffers, because we don't
//    expect to have found all pointers to them. Any other objects allocated
//    this way are treated similarly.
template <bool apcgc> void Collector::collect() {
  init();
  if (type_scan::hasNonConservative() && RuntimeOption::EvalTwoPhaseGC) {
    traceConservative<apcgc>();
    traceExact<apcgc>();
  } else {
    traceAll<apcgc>();
  }
  sweep();
}
// Phase 1: Scan only conservative or mixed conservative/exact roots, plus any
// malloc'd heap objects that are themselves fully conservatively scanned.
template <bool apcgc>
NEVER_INLINE void Collector::traceConservative() {
  auto finish = [&] {
    for (auto r : type_scanner_.m_conservative) {
      conservativeScan<apcgc>(r.first, r.second);
    }
    type_scanner_.m_conservative.clear();
    // Accumulate m_addrs until traceExact()
    // Accumulate m_weak until sweep()
  };
  auto const t0 = cpu_ns();
  iterateConservativeRoots(
    [&](const void* p, size_t size, type_scan::Index tyindex) {
      type_scanner_.scanByIndex(tyindex, p, size);
      finish();
    });
  auto const t1 = cpu_ns();
  roots_ns_ += t1 - t0;
  cscanned_roots_ = cscanned_;
  while (!cwork_.empty()) {
    auto h = cwork_.back();
    cwork_.pop_back();
    scanHeapObject(h, type_scanner_);
    finish();
  }
  mark_ns_ += cpu_ns() - t1;
  pinned_ = marked_;
}
// Phase 2: Scan pointers deferred from phase 1, exact roots, and the remainder
// of the heap, which is expected to be fully exactly-scannable. Assert if
// any conservatively-scanned regions are found in this phase. Any unmarked
// objects found in this phase may be safely copied.
template <bool apcgc>
NEVER_INLINE void Collector::traceExact() {
  auto finish = [&] {
    assertx(cwork_.empty() && type_scanner_.m_conservative.empty());
    for (auto addr : type_scanner_.m_addrs) {
      xscanned_ += sizeof(*addr);
      exactEnqueue<apcgc>(*addr);
    }
    type_scanner_.m_addrs.clear();
    // Accumulate m_weak until sweep()
  };
  auto const t0 = cpu_ns();
  finish(); // from phase 1
  iterateExactRoots(
    [&](const void* p, size_t size, type_scan::Index tyindex) {
      type_scanner_.scanByIndex(tyindex, p, size);
      finish();
    });
  auto const t1 = cpu_ns();
  roots_ns_ += t1 - t0;
  xscanned_roots_ = xscanned_;
  while (!xwork_.empty()) {
    auto h = xwork_.back();
    xwork_.pop_back();
    scanHeapObject(h, type_scanner_);
    finish();
  }
  mark_ns_ += cpu_ns() - t1;
}
// Scan all roots & heap in one pass
template <bool apcgc>
NEVER_INLINE void Collector::traceAll() {
  auto finish = [&] {
    for (auto r : type_scanner_.m_conservative) {
      conservativeScan<apcgc>(r.first, r.second);
    }
    type_scanner_.m_conservative.clear();
    for (auto addr : type_scanner_.m_addrs) {
      xscanned_ += sizeof(*addr);
      checkedEnqueue<apcgc>(*addr);
    }
    type_scanner_.m_addrs.clear();
    // Accumulate m_weak until sweep()
  };
  auto const t0 = cpu_ns();
  iterateRoots([&](const void* p, size_t size, type_scan::Index tyindex) {
    type_scanner_.scanByIndex(tyindex, p, size);
    finish();
  });
  auto const t1 = cpu_ns();
  roots_ns_ += t1 - t0;
  cscanned_roots_ = cscanned_;
  xscanned_roots_ = xscanned_;
  while (!cwork_.empty() || !xwork_.empty()) {
    auto& work = !cwork_.empty() ? cwork_ : xwork_;
    auto h = work.back();
    work.pop_back();
    scanHeapObject(h, type_scanner_);
    finish();
  }
  mark_ns_ += cpu_ns() - t1;
  pinned_ = marked_;
}
// Another pass through the heap, this time using the PtrMap we computed
// in init(). Free and maybe quarantine unmarked objects.
NEVER_INLINE void Collector::sweep() {
  auto& mm = *tl_heap;
  auto const t0 = cpu_ns();
  auto const usage0 = mm.currentUsage();
  MemoryManager::FreelistArray quarantine;
  if (RuntimeOption::EvalQuarantine) quarantine = mm.beginQuarantine();
  SCOPE_EXIT {
    if (RuntimeOption::EvalQuarantine) mm.endQuarantine(std::move(quarantine));
    freed_bytes_ = usage0 - mm.currentUsage();
    sweep_ns_ = cpu_ns() - t0;
    assertx(freed_bytes_ >= 0);
  };
  // Clear weak references as needed.
  for (auto w : type_scanner_.m_weak) {
    auto wref = static_cast<const WeakRefDataHandle*>(w);
    assertx(wref->acquire_count == 0);
    assertx(wref->wr_data);
    auto type = wref->wr_data->pointee.m_type;
    if (type == KindOfObject) {
      auto h = find(wref->wr_data->pointee.m_data.pobj);
      if (!marked(h)) {
        WeakRefData::invalidateWeakRef(uintptr_t(h));
        mm.reinitFree();
      }
      continue;
    }
    assertx(type == KindOfNull || type == KindOfUninit);
  }
  type_scanner_.m_weak.clear();
  bool need_reinit_free = false;
  g_context->sweepDynPropTable([&](const ObjectData* obj) {
    if (need_reinit_free) mm.reinitFree();
    auto h = find(obj);
    // if we return true, call reinitFree() before calling find() again,
    // to ensure the heap remains walkable.
    return need_reinit_free = !h || !marked(h);
  });

  mm.sweepApcArrays([&](APCLocalArray* a) {
    return !marked(a);
  });

  mm.sweepApcStrings([&](StringData* s) {
    return !marked(s);
  });

  mm.reinitFree();
  heap_.iterate(
    [&](HeapObject* big, size_t big_size) { // onBig
      ++num_big_;
      if (big->kind() == HeaderKind::BigObj) {
        HeapObject* h2 = static_cast<MallocNode*>(big) + 1;
        if (!marked(h2) && h2->kind() != HeaderKind::SmallMalloc) {
          mm.freeBigSize(h2);
        }
      }
    },
    [&](HeapObject* big, size_t /*big_size*/) { // onSlab
      ++num_slabs_;
      auto slab = Slab::fromHeader(big);
      slab->iter_starts([&](HeapObject* h) {
        ++num_small_;
        auto kind = h->kind();
        if (!isFreeKind(kind) && kind != HeaderKind::SmallMalloc &&
            !marked(h)) {
          mm.freeSmallSize(h, allocSize(h));
        }
      });
    });
  if (apcgc_) {
    // This should be removed once a global GC API is provided.
    // Currently we only do this sweeping when in script mode.
    apcgc_->sweep();
  }
}
thread_local bool t_eager_gc{false};
thread_local BloomFilter<256*1024> t_surprise_filter;

// Structured Logging

thread_local std::atomic<size_t> g_req_num;
__thread size_t t_req_num; // snapshot thread-local copy of g_req_num;
__thread size_t t_gc_num; // nth collection in this request.
__thread bool t_enable_samples;
__thread int64_t t_trigger;
__thread int64_t t_trigger_allocated;
__thread int64_t t_req_age;
__thread MemoryUsageStats t_pre_stats;
StructuredLogEntry logCommon() {
  StructuredLogEntry sample;
  sample.setInt("req_num", t_req_num);
  sample.setInt("memory_limit", tl_heap->getMemoryLimit());
  sample.setInt("usage", t_pre_stats.usage());
  sample.setInt("mm_usage", t_pre_stats.mmUsage());
  sample.setInt("mm_allocated", t_pre_stats.mmAllocated());
  sample.setInt("aux_usage", t_pre_stats.auxUsage());
  sample.setInt("mm_capacity", t_pre_stats.capacity());
  sample.setInt("peak_usage", t_pre_stats.peakUsage);
  sample.setInt("peak_capacity", t_pre_stats.peakCap);
  sample.setInt("total_alloc", t_pre_stats.totalAlloc);
  return sample;
}
void traceCollection(const Collector& collector) {
  constexpr auto MB = 1024 * 1024;
  auto const cscanned_heap = collector.cscanned_.bytes -
    collector.cscanned_roots_.bytes;
  auto const xscanned_heap = collector.xscanned_.bytes -
    collector.xscanned_roots_.bytes;
  auto const total_ns = collector.init_ns_ + collector.initfree_ns_ +
    collector.roots_ns_ + collector.mark_ns_ + collector.sweep_ns_;
  Trace::ftraceRelease(
    "gc age {}ms mmUsage {}M trigger {}M "
    "init {}ms mark {}ms sweep {}ms total {}ms "
    "marked {} pinned {} free {:.1f}M "
    "cscan-heap {:.1f}M xscan-heap {:.1f}M\n",
    t_req_age / 1000,
    t_pre_stats.mmUsage() / MB,
    t_trigger / MB,
    collector.init_ns_ / 1000000,
    collector.mark_ns_ / 1000000,
    collector.sweep_ns_ / 1000000,
    total_ns / 1000000,
    collector.marked_,
    collector.pinned_,
    double(collector.freed_bytes_) / MB,
    double(cscanned_heap) / MB,
    double(xscanned_heap) / MB
  );
}
void logCollection(const char* phase, const Collector& collector) {
  auto sample = logCommon();
  sample.setStr("phase", phase);
  std::string scanner(type_scan::hasNonConservative() ? "typescan" : "ts-cons");
  sample.setStr("scanner", !debug ? scanner : scanner + "-debug");
  sample.setInt("gc_num", t_gc_num);
  sample.setInt("req_age_micros", t_req_age);
  // timers of gc sub-phases
  sample.setInt("init_micros", collector.init_ns_/1000);
  sample.setInt("initfree_micros", collector.initfree_ns_/1000);
  sample.setInt("roots_micros", collector.roots_ns_/1000);
  sample.setInt("mark_micros", collector.mark_ns_/1000);
  sample.setInt("sweep_micros", collector.sweep_ns_/1000);
  // object metrics counted at sweep time
  sample.setInt("slab_count", collector.num_slabs_);
  sample.setInt("small_count", collector.num_small_);
  sample.setInt("big_count", collector.num_big_);
  // size metrics gathered during gc
  sample.setInt("allocd_span", collector.ptrs_.span().second);
  sample.setInt("marked_count", collector.marked_);
  sample.setInt("pinned_count", collector.pinned_);
  sample.setInt("unknown_count", collector.unknown_);
  sample.setInt("freed_bytes", collector.freed_bytes_);
  sample.setInt("trigger_bytes", t_trigger);
  sample.setInt("trigger_allocated", t_trigger_allocated);
  sample.setInt("cscanned_roots", collector.cscanned_roots_.bytes);
  sample.setInt("xscanned_roots", collector.xscanned_roots_.bytes);
  sample.setInt("cscanned_heap",
                collector.cscanned_.bytes - collector.cscanned_roots_.bytes);
  sample.setInt("xscanned_heap",
                collector.xscanned_.bytes - collector.xscanned_roots_.bytes);
  sample.setInt("rds_normal_size", rds::normalSection().size());
  sample.setInt("rds_normal_count", rds::detail::s_normal_alloc_descs.size());
  sample.setInt("rds_local_size", rds::localSection().size());
  sample.setInt("rds_local_count", rds::detail::s_local_alloc_descs.size());
  sample.setInt("max_worklist", collector.max_worklist_);
  StructuredLog::log("hhvm_gc", sample);
}
void collectImpl(HeapImpl& heap, const char* phase, GCBits& mark_version) {
  if (t_eager_gc && RuntimeOption::EvalFilterGCPoints) {
    t_eager_gc = false;
    auto pc = vmpc();
    if (t_surprise_filter.test(pc)) {
      if (RuntimeOption::EvalGCForAPC) {
        if (!APCGCManager::getInstance().excessedGCTriggerBar()) {
          return;
        }
      } else {
        return;
      }
    }
    t_surprise_filter.insert(pc);
    TRACE(2, "eager gc %s at %p\n", phase, pc);
  } else {
    TRACE(2, "normal gc %s at %p\n", phase, vmpc());
  }
  if (t_gc_num == 0) {
    t_enable_samples = StructuredLog::coinflip(RuntimeOption::EvalGCSampleRate);
  }
  t_pre_stats = tl_heap->getStatsCopy(); // don't check or trigger OOM
  mark_version = (mark_version == MaxMark) ? MinMark :
                 GCBits(uint8_t(mark_version) + 1);
  Collector collector(
    heap,
    RuntimeOption::EvalGCForAPC ? &APCGCManager::getInstance() : nullptr,
    mark_version
  );
  if (RuntimeOption::EvalGCForAPC) {
    collector.collect<true>();
  } else {
    collector.collect<false>();
  }
  if (Trace::moduleEnabledRelease(Trace::gc, 1)) {
    traceCollection(collector);
  }
  if (t_enable_samples) {
    logCollection(phase, collector);
  }
  ++t_gc_num;
}
void MemoryManager::resetGC() {
  t_req_num = ++g_req_num;
  t_gc_num = 0;
  if (rds::header()) updateNextGc();
}
void MemoryManager::resetEagerGC() {
  if (RuntimeOption::EvalEagerGC && RuntimeOption::EvalFilterGCPoints) {
    t_surprise_filter.clear();
  }
}
void MemoryManager::requestEagerGC() {
  if (RuntimeOption::EvalEagerGC && rds::header()) {
    t_eager_gc = true;
    setSurpriseFlag(PendingGCFlag);
  }
}
void MemoryManager::checkGC() {
  if (m_stats.mmUsage() > m_nextGC) {
    assertx(rds::header());
    setSurpriseFlag(PendingGCFlag);
    if (t_trigger_allocated == -1) {
      t_trigger_allocated = m_stats.mmAllocated();
    }
  }
}
/*
 * Compute the next threshold to trigger GC. We wish to ignore auxUsage
 * for the purpose of this calculation, even though auxUsage is counted
 * against the request for the sake of OOM. To accomplish this, subtract
 * auxUsage from the heap limit before our calculations.
 *
 * GC will then be triggered the next time we notice mmUsage > m_nextGC (see
 * checkGC()).
 */
void MemoryManager::updateNextGc() {
  t_trigger_allocated = -1;
  if (!isGCEnabled()) {
    m_nextGC = kNoNextGC;
    return;
  }

  auto const stats = getStatsCopy();
  auto const clearance =
    static_cast<uint64_t>(m_usageLimit) -
    stats.auxUsage() - stats.mmUsage();

  int64_t delta = clearance > std::numeric_limits<int64_t>::max() ?
    0 : clearance * RuntimeOption::EvalGCTriggerPct;
  delta = std::max(delta, RuntimeOption::EvalGCMinTrigger);
  m_nextGC = stats.mmUsage() + delta;
}
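// Worked example (illustrative, hypothetical values): with m_usageLimit of
// 128 MiB, auxUsage of 8 MiB and mmUsage of 20 MiB, clearance is 100 MiB;
// with EvalGCTriggerPct = 0.2 and EvalGCMinTrigger = 16 MiB, delta is
// max(20 MiB, 16 MiB) = 20 MiB, so m_nextGC becomes 40 MiB of mmUsage and
// checkGC() raises PendingGCFlag once m_stats.mmUsage() exceeds that.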
void MemoryManager::collect(const char* phase) {
  t_req_age = cpu_ns()/1000 - m_req_start_micros;
  t_trigger = m_nextGC;
  collectImpl(m_heap, phase, m_mark_version);
  updateNextGc();
}
void MemoryManager::setMemoryLimit(size_t limit) {
  assertx(limit <= (size_t)std::numeric_limits<int64_t>::max());
  m_usageLimit = limit;
  updateNextGc();
}