/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/base/memory-manager.h"

#include "hphp/runtime/base/builtin-functions.h"
#include "hphp/runtime/base/memory-profile.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/runtime/base/stack-logger.h"
#include "hphp/runtime/base/surprise-flags.h"
#include "hphp/runtime/base/sweepable.h"
#include "hphp/runtime/base/thread-info.h"
#include "hphp/runtime/server/http-server.h"

#include "hphp/util/alloc.h"
#include "hphp/util/logger.h"
#include "hphp/util/process.h"
#include "hphp/util/trace.h"

#include <folly/ScopeGuard.h>

#include "hphp/runtime/base/memory-manager-defs.h"
namespace HPHP {

TRACE_SET_MOD(smartalloc);

//////////////////////////////////////////////////////////////////////

std::atomic<MemoryManager::ReqProfContext*>
  MemoryManager::s_trigger{nullptr};
// generate mmap flags for contiguous heap
uint32_t getRequestHeapFlags() {
  struct stat buf;

  // check if MAP_UNINITIALIZED is supported
  auto mapUninitializedSupported =
    (stat("/sys/kernel/debug/fb_map_uninitialized", &buf) == 0);
  auto mmapFlags = MAP_NORESERVE | MAP_ANON | MAP_PRIVATE;

  /* Check whether mmap(2) supports the MAP_UNINITIALIZED flag. */
  if (mapUninitializedSupported) {
    mmapFlags |= MAP_UNINITIALIZED;
  }
  return mmapFlags;
}

static auto s_mmapFlags = getRequestHeapFlags();
bool MemoryManager::s_statsEnabled = false;
size_t MemoryManager::s_cactiveLimitCeiling = 0;

static size_t threadAllocatedpMib[2];
static size_t threadDeallocatedpMib[2];
static size_t statsCactiveMib[2];
static pthread_once_t threadStatsOnce = PTHREAD_ONCE_INIT;
void MemoryManager::threadStatsInit() {
  if (!mallctlnametomib) return;
  size_t miblen = sizeof(threadAllocatedpMib) / sizeof(size_t);
  if (mallctlnametomib("thread.allocatedp", threadAllocatedpMib, &miblen)) {
    return;
  }
  miblen = sizeof(threadDeallocatedpMib) / sizeof(size_t);
  if (mallctlnametomib("thread.deallocatedp", threadDeallocatedpMib, &miblen)) {
    return;
  }
  miblen = sizeof(statsCactiveMib) / sizeof(size_t);
  if (mallctlnametomib("stats.cactive", statsCactiveMib, &miblen)) {
    return;
  }
  MemoryManager::s_statsEnabled = true;

  // In threadStats() we wish to solve for cactiveLimit in:
  //
  //   footprint + cactiveLimit + headRoom == MemTotal
  //
  // However, headRoom comes from RuntimeOption::ServerMemoryHeadRoom, which
  // isn't initialized until after the code here runs.  Therefore, compute
  // s_cactiveLimitCeiling here in order to amortize the cost of introspecting
  // footprint and MemTotal.
  //
  //   cactiveLimit == (MemTotal - footprint) - headRoom
  //
  //   cactiveLimit == s_cactiveLimitCeiling - headRoom
  //
  //   s_cactiveLimitCeiling == MemTotal - footprint
  size_t footprint = Process::GetCodeFootprint(Process::GetProcessId());
  size_t MemTotal = 0;
#ifndef __APPLE__
  size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
  MemTotal = size_t(sysconf(_SC_PHYS_PAGES)) * pageSize;
#else
  int mib[2] = { CTL_HW, HW_MEMSIZE };
  u_int namelen = sizeof(mib) / sizeof(mib[0]);
  size_t len = sizeof(MemTotal);
  sysctl(mib, namelen, &MemTotal, &len, nullptr, 0);
#endif
  if (MemTotal > footprint) {
    MemoryManager::s_cactiveLimitCeiling = MemTotal - footprint;
  }
}
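// Worked example of the arithmetic above (illustrative values only, not
// measured): with MemTotal = 16 GiB and a code footprint of 1 GiB, this
// leaves s_cactiveLimitCeiling = 15 GiB. threadStats() below then subtracts
// headRoom (RuntimeOption::ServerMemoryHeadRoom, say 2 GiB) to arrive at
// cactiveLimit = 13 GiB, matching cactiveLimit == (MemTotal - footprint) -
// headRoom from the derivation in threadStatsInit().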
void MemoryManager::threadStats(uint64_t*& allocated, uint64_t*& deallocated,
                                size_t*& cactive, size_t& cactiveLimit) {
  pthread_once(&threadStatsOnce, threadStatsInit);
  if (!MemoryManager::s_statsEnabled) return;

  size_t len = sizeof(allocated);
  if (mallctlbymib(threadAllocatedpMib,
                   sizeof(threadAllocatedpMib) / sizeof(size_t),
                   &allocated, &len, nullptr, 0)) {
    not_reached();
  }

  len = sizeof(deallocated);
  if (mallctlbymib(threadDeallocatedpMib,
                   sizeof(threadDeallocatedpMib) / sizeof(size_t),
                   &deallocated, &len, nullptr, 0)) {
    not_reached();
  }

  len = sizeof(cactive);
  if (mallctlbymib(statsCactiveMib,
                   sizeof(statsCactiveMib) / sizeof(size_t),
                   &cactive, &len, nullptr, 0)) {
    not_reached();
  }

  int64_t headRoom = RuntimeOption::ServerMemoryHeadRoom;
  // Compute cactiveLimit based on s_cactiveLimitCeiling, as computed in
  // threadStatsInit().
  if (headRoom != 0 && headRoom < MemoryManager::s_cactiveLimitCeiling) {
    cactiveLimit = MemoryManager::s_cactiveLimitCeiling - headRoom;
  } else {
    cactiveLimit = std::numeric_limits<size_t>::max();
  }
}
static void* MemoryManagerInit() {
  // We store the free list pointers right at the start of each
  // object, overlapping SmartHeader.data, and we also clobber _count
  // as a free-object flag when the object is deallocated.  This
  // assert just makes sure they don't overflow.
  assert(FAST_REFCOUNT_OFFSET + sizeof(int) <=
         MemoryManager::smartSizeClass(1));
  MemoryManager::TlsWrapper tls;
  return (void*)tls.getNoCheck;
}

void* MemoryManager::TlsInitSetup = MemoryManagerInit();
void MemoryManager::Create(void* storage) {
  new (storage) MemoryManager();
}

void MemoryManager::Delete(MemoryManager* mm) {
  mm->~MemoryManager();
}

void MemoryManager::OnThreadExit(MemoryManager* mm) {
  mm->~MemoryManager();
}
MemoryManager::MemoryManager()
    : m_front(nullptr)
    , m_limit(nullptr)
    , m_sweeping(false) {
#ifdef USE_JEMALLOC
  threadStats(m_allocated, m_deallocated, m_cactive, m_cactiveLimit);
#endif
  resetStatsImpl(true);
  m_stats.maxBytes = std::numeric_limits<int64_t>::max();
  // make the circular-lists empty.
  m_strings.next = m_strings.prev = &m_strings;
  m_bypassSlabAlloc = RuntimeOption::DisableSmartAllocator;
}
void MemoryManager::resetRuntimeOptions() {
  if (debug) {
    checkHeap();
    // check that every allocation in heap has been freed before reset
    for (auto h = begin(), lim = end(); h != lim; ++h) {
      if (h->kind_ == HeaderKind::Debug) {
        auto h2 = h; ++h2;
        assert(h2->kind_ == HeaderKind::Free);
      } else {
        assert(h->kind_ == HeaderKind::Free ||
               h->kind_ == HeaderKind::Hole);
      }
    }
  }
  MemoryManager::TlsWrapper::destroy(); // ~MemoryManager()
  MemoryManager::TlsWrapper::getCheck(); // new MemoryManager()
}
void MemoryManager::resetStatsImpl(bool isInternalCall) {
#ifdef USE_JEMALLOC
  FTRACE(1, "resetStatsImpl({}) pre:\n", isInternalCall);
  FTRACE(1, "usage: {}\nalloc: {}\npeak usage: {}\npeak alloc: {}\n",
         m_stats.usage, m_stats.alloc, m_stats.peakUsage, m_stats.peakAlloc);
  FTRACE(1, "total alloc: {}\nje alloc: {}\nje dealloc: {}\n",
         m_stats.totalAlloc, m_prevAllocated, m_prevDeallocated);
  FTRACE(1, "je debt: {}\n\n", m_stats.jemallocDebt);
#else
  FTRACE(1, "resetStatsImpl({}) pre:\n"
         "usage: {}\nalloc: {}\npeak usage: {}\npeak alloc: {}\n\n",
         isInternalCall,
         m_stats.usage, m_stats.alloc, m_stats.peakUsage, m_stats.peakAlloc);
#endif
  if (isInternalCall) {
    m_statsIntervalActive = false;
    m_stats.usage = 0;
    m_stats.alloc = 0;
    m_stats.peakUsage = 0;
    m_stats.peakAlloc = 0;
    m_stats.totalAlloc = 0;
    m_stats.peakIntervalUsage = 0;
    m_stats.peakIntervalAlloc = 0;
#ifdef USE_JEMALLOC
    m_enableStatsSync = false;
  } else if (!m_enableStatsSync) {
#else
  } else {
#endif
    // This is only set by the jemalloc stats sync which we don't enable until
    // after this has been called.
    assert(m_stats.totalAlloc == 0);
#ifdef USE_JEMALLOC
    assert(m_stats.jemallocDebt >= m_stats.alloc);
#endif

    // The effect of this call is simply to ignore anything we've done *outside*
    // the smart allocator after we initialized, to avoid attributing shared
    // structure initialization that happens during init_thread_locals() to
    // this request.

    // We don't want to clear the other values because we do already have some
    // sized smart allocator usage and live slabs and wiping now will result in
    // negative values when we try to reconcile our accounting with jemalloc.
#ifdef USE_JEMALLOC
    // Anything that was definitively allocated by the smart allocator should
    // be counted in this number even if we're otherwise zeroing out the count.
    m_stats.totalAlloc = s_statsEnabled ? m_stats.jemallocDebt : 0;

    m_enableStatsSync = s_statsEnabled;
#else
    m_stats.totalAlloc = 0;
#endif
  }
#ifdef USE_JEMALLOC
  if (s_statsEnabled) {
    m_stats.jemallocDebt = 0;
    m_prevDeallocated = *m_deallocated;
    m_prevAllocated = *m_allocated;
  }

  FTRACE(1, "resetStatsImpl({}) post:\n", isInternalCall);
  FTRACE(1, "usage: {}\nalloc: {}\npeak usage: {}\npeak alloc: {}\n",
         m_stats.usage, m_stats.alloc, m_stats.peakUsage, m_stats.peakAlloc);
  FTRACE(1, "total alloc: {}\nje alloc: {}\nje dealloc: {}\n",
         m_stats.totalAlloc, m_prevAllocated, m_prevDeallocated);
  FTRACE(1, "je debt: {}\n\n", m_stats.jemallocDebt);
#else
  FTRACE(1, "resetStatsImpl({}) post:\n"
         "usage: {}\nalloc: {}\npeak usage: {}\npeak alloc: {}\n\n",
         isInternalCall,
         m_stats.usage, m_stats.alloc, m_stats.peakUsage, m_stats.peakAlloc);
#endif
}
void MemoryManager::refreshStatsHelperExceeded() {
  setSurpriseFlag(MemExceededFlag);
  m_couldOOM = false;
  if (RuntimeOption::LogNativeStackOnOOM) {
    log_native_stack("Exceeded memory limit");
  }
}
#ifdef USE_JEMALLOC
void MemoryManager::refreshStatsHelperStop() {
  HttpServer::Server->stop();
  // Increase the limit to the maximum possible value, so that this method
  // won't be called again.
  m_cactiveLimit = std::numeric_limits<size_t>::max();
}
#endif
/*
 * Refresh stats to reflect directly malloc()ed memory, and determine
 * whether the request memory limit has been exceeded.
 *
 * The stats parameter allows the updates to be applied to either
 * m_stats as in refreshStats() or to a separate MemoryUsageStats
 * struct as in getStatsSafe().
 *
 * The template variable live controls whether or not MemoryManager
 * member variables are updated and whether or not to call helper
 * methods in response to memory anomalies.
 */
template<bool live>
void MemoryManager::refreshStatsImpl(MemoryUsageStats& stats) {
#ifdef USE_JEMALLOC
  // Incrementally incorporate the difference between the previous and current
  // deltas into the memory usage statistic.  For reference, the total
  // malloced memory usage could be calculated as such, if delta0 were
  // recorded in resetStatsImpl():
  //
  //   int64 musage = delta - delta0;
  //
  // Note however, that SmartAllocator adds to m_stats.jemallocDebt
  // when it calls malloc(), so that this function can avoid
  // double-counting the malloced memory.  Thus musage in the example
  // code may well substantially exceed m_stats.usage.
  if (m_enableStatsSync) {
    uint64_t jeDeallocated = *m_deallocated;
    uint64_t jeAllocated = *m_allocated;

    // We can't currently handle wrapping so make sure this isn't happening.
    assert(jeAllocated >= 0 &&
           jeAllocated <= std::numeric_limits<int64_t>::max());
    assert(jeDeallocated >= 0 &&
           jeDeallocated <= std::numeric_limits<int64_t>::max());

    // This is the delta between the current and the previous jemalloc reading.
    int64_t jeMMDeltaAllocated =
      int64_t(jeAllocated) - int64_t(m_prevAllocated);

    FTRACE(1, "Before stats sync:\n");
    FTRACE(1, "je alloc:\ncurrent: {}\nprevious: {}\ndelta with MM: {}\n",
           jeAllocated, m_prevAllocated, jeAllocated - m_prevAllocated);
    FTRACE(1, "je dealloc:\ncurrent: {}\nprevious: {}\ndelta with MM: {}\n",
           jeDeallocated, m_prevDeallocated, jeDeallocated - m_prevDeallocated);
    FTRACE(1, "usage: {}\ntotal (je) alloc: {}\nje debt: {}\n",
           stats.usage, stats.totalAlloc, stats.jemallocDebt);

    if (!contiguous_heap) {
      // Since these deltas potentially include memory allocated from another
      // thread but deallocated on this one, it is possible for these numbers
      // to go negative.
      int64_t jeDeltaAllocated =
        int64_t(jeAllocated) - int64_t(jeDeallocated);
      int64_t mmDeltaAllocated =
        int64_t(m_prevAllocated) - int64_t(m_prevDeallocated);
      FTRACE(1, "je delta:\ncurrent: {}\nprevious: {}\n",
             jeDeltaAllocated, mmDeltaAllocated);

      // Subtract the old jemalloc adjustment (delta0) and add the current one
      // (delta) to arrive at the new combined usage number.
      stats.usage += jeDeltaAllocated - mmDeltaAllocated;
      // Remove the "debt" accrued from allocating the slabs so we don't double
      // count the slab-based allocations.
      stats.usage -= stats.jemallocDebt;
    }

    stats.jemallocDebt = 0;
    // We need to do the calculation instead of just setting it to jeAllocated
    // because of the MaskAlloc capability.
    stats.totalAlloc += jeMMDeltaAllocated;
    if (live) {
      m_prevAllocated = jeAllocated;
      m_prevDeallocated = jeDeallocated;
    }

    FTRACE(1, "After stats sync:\n");
    FTRACE(1, "usage: {}\ntotal (je) alloc: {}\n\n",
           stats.usage, stats.totalAlloc);
  }
#endif
  if (stats.usage > stats.peakUsage) {
    // NOTE: the peak memory usage monotonically increases, so there cannot
    // be a second OOM exception in one request.
    assert(stats.maxBytes > 0);
    if (live && m_couldOOM && stats.usage > stats.maxBytes) {
      refreshStatsHelperExceeded();
    }
    // Check whether the process's active memory limit has been exceeded, and
    // if so, stop the server.
    //
    // Only check whether the total memory limit was exceeded if this request
    // is at a new high water mark.  This check could be performed regardless
    // of this request's current memory usage (because other request threads
    // could be to blame for the increased memory usage), but doing so would
    // measurably increase computation for little benefit.
#ifdef USE_JEMALLOC
    // (*m_cactive) consistency is achieved via atomic operations.  The fact
    // that we do not use an atomic operation here means that we could get a
    // stale read, but in practice that poses no problems for how we are
    // using the value.
    if (live && s_statsEnabled && *m_cactive > m_cactiveLimit) {
      refreshStatsHelperStop();
    }
#endif
    stats.peakUsage = stats.usage;
  }
  if (live && m_statsIntervalActive) {
    if (stats.usage > stats.peakIntervalUsage) {
      stats.peakIntervalUsage = stats.usage;
    }
    if (stats.alloc > stats.peakIntervalAlloc) {
      stats.peakIntervalAlloc = stats.alloc;
    }
  }
}
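// Worked example of the stats sync above (hypothetical numbers): suppose the
// previous jemalloc readings were alloc=100, dealloc=40 and the current ones
// are alloc=160, dealloc=70. Then jeDeltaAllocated = 160 - 70 = 90 and
// mmDeltaAllocated = 100 - 40 = 60, so stats.usage grows by 90 - 60 = 30,
// less whatever portion of that 30 was already accounted for as slab "debt"
// in stats.jemallocDebt.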
template void MemoryManager::refreshStatsImpl<true>(MemoryUsageStats& stats);
template void MemoryManager::refreshStatsImpl<false>(MemoryUsageStats& stats);
void MemoryManager::sweep() {
  assert(!sweeping());
  if (debug) checkHeap();
  if (debug) traceHeap();
  m_sweeping = true;
  SCOPE_EXIT { m_sweeping = false; };
  DEBUG_ONLY size_t num_sweepables = 0, num_natives = 0;

  // iterate until both sweep lists are empty. Entries can be added or
  // removed from either list during sweeping.
  do {
    while (!m_sweepables.empty()) {
      num_sweepables++;
      auto obj = m_sweepables.next();
      obj->unregister();
      obj->sweep();
    }
    while (!m_natives.empty()) {
      num_natives++;
      assert(m_natives.back()->sweep_index == m_natives.size() - 1);
      auto node = m_natives.back();
      m_natives.pop_back();
      auto obj = Native::obj(node);
      auto ndi = obj->getVMClass()->getNativeDataInfo();
      ndi->sweep(obj);
      // trash the native data but leave the header and object parsable
      assert(memset(node+1, kSmartFreeFill, node->obj_offset - sizeof(*node)));
    }
  } while (!m_sweepables.empty());

  TRACE(1, "sweep: sweepable %lu native %lu\n", num_sweepables, num_natives);
  if (debug) checkHeap();
}
void MemoryManager::resetAllocator() {
  assert(m_natives.empty() && m_sweepables.empty());
  // decref apc strings and arrays referenced by this request
  DEBUG_ONLY auto napcs = m_apc_arrays.size();
  while (!m_apc_arrays.empty()) {
    auto a = m_apc_arrays.back();
    m_apc_arrays.pop_back();
    a->sweep();
  }
  DEBUG_ONLY auto nstrings = StringData::sweepAll();

  // free the heap
  m_heap.reset();

  // zero out freelists
  for (auto& i : m_freelists) i.head = nullptr;
  m_front = m_limit = 0;

  resetStatsImpl(true);
  TRACE(1, "reset: apc-arrays %lu strings %u\n", napcs, nstrings);
}
void MemoryManager::flush() {
  always_assert(empty());
  m_heap.flush();
  m_apc_arrays = std::vector<APCLocalArray*>();
  m_natives = std::vector<NativeNode*>();
}
/*
 * smart_malloc & friends implementation notes
 *
 * There are three kinds of smart mallocation:
 *
 *  a) Large allocations.  (size >= kMaxSmartSize)
 *
 *     In this case we behave as a wrapper around the normal libc
 *     malloc/free.  We insert a SweepNode header at the front of the
 *     allocation in order to find these at sweep time (end of
 *     request) so we can give them back to libc.
 *
 *  b) Size-tracked small allocations.
 *
 *     This is used for the generic case, for callers who can't tell
 *     us the size of the allocation at free time.
 *
 *     In this situation, we put a SmallNode header at the front of
 *     the block that tells us the size for when we need to free it
 *     later.  We differentiate this from a SweepNode (for a big
 *     allocation) by assuming that no SweepNode::prev will point to
 *     an address in the first kMaxSmartSize bytes of virtual address
 *     space.
 *
 *  c) Size-untracked small allocation
 *
 *     Many callers have an easy time telling you how big the object
 *     was when they need to free it.  In this case we can avoid the
 *     SmallNode, which saves us some memory and also lets us give
 *     out 16-byte aligned pointers easily.
 *
 *     We know when we have one of these because it has to be freed
 *     through a different entry point.  (E.g. MM().smartFreeSize or
 *     MM().smartFreeSizeBig.)
 *
 * When small blocks are freed (case b and c), they're placed in the
 * appropriate size-segregated freelist.  Large blocks are immediately
 * passed back to libc via free.
 *
 * There are currently two kinds of freelist entries: entries where
 * there is already a valid SmallNode on the list (case b), and
 * entries where there isn't (case c).  The reason for this is that
 * this way, when allocating for case b, you don't need to store the
 * SmallNode size again.  Much of the heap is going through case b at
 * the time of this writing, so it is a measurable regression to try
 * to just combine the free lists, but presumably we can move more to
 * case c and combine the lists eventually.
 */
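/*
 * Illustrative usage sketch of the entry points described above (a
 * hypothetical caller, shown for exposition only; not part of the runtime):
 *
 *   void* p = smart_malloc(100);   // case (b): size-tracked, SmallNode header
 *   p = smart_realloc(p, 300);     // may move; copies min(old, new) payload
 *   smart_free(p);                 // block returns to a size-class freelist
 *
 *   // case (c): the caller remembers the size, so no SmallNode is needed:
 *   void* q = MM().smartMallocSize(64);
 *   MM().smartFreeSize(q, 64);
 */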
inline void* MemoryManager::smartMalloc(size_t nbytes) {
  auto const nbytes_padded = nbytes + sizeof(SmallNode);
  if (LIKELY(nbytes_padded <= kMaxSmartSize)) {
    auto const ptr = static_cast<SmallNode*>(smartMallocSize(nbytes_padded));
    ptr->padbytes = nbytes_padded;
    ptr->kind = HeaderKind::SmallMalloc;
    return ptr + 1;
  }
  return smartMallocBig(nbytes);
}

union MallocNode {
  BigNode big;
  SmallNode small;
};

static_assert(sizeof(SmallNode) == sizeof(BigNode), "");
inline void MemoryManager::smartFree(void* ptr) {
  assert(ptr != 0);
  auto const n = static_cast<MallocNode*>(ptr) - 1;
  auto const padbytes = n->small.padbytes;
  if (LIKELY(padbytes <= kMaxSmartSize)) {
    return smartFreeSize(&n->small, n->small.padbytes);
  }
  m_heap.freeBig(ptr);
}
inline void* MemoryManager::smartRealloc(void* ptr, size_t nbytes) {
  FTRACE(3, "smartRealloc: {} to {}\n", ptr, nbytes);
  assert(nbytes > 0);
  auto const n = static_cast<MallocNode*>(ptr) - 1;
  if (LIKELY(n->small.padbytes <= kMaxSmartSize)) {
    void* newmem = smart_malloc(nbytes);
    auto const copySize = std::min(
      n->small.padbytes - sizeof(SmallNode),
      nbytes
    );
    newmem = memcpy(newmem, ptr, copySize);
    smart_free(ptr);
    return newmem;
  }
  // Ok, it's a big allocation.
  auto block = m_heap.resizeBig(ptr, nbytes);
  updateBigStats();
  return block.ptr;
}
DEBUG_ONLY const char* header_names[] = {
  "Packed", "Struct", "Mixed", "Empty", "Apc", "Globals", "Proxy",
  "String", "Object", "ResumableObj", "AwaitAllWH", "Resource", "Ref",
  "Resumable", "Native", "SmallMalloc", "BigMalloc", "BigObj",
  "Free", "Hole", "Debug"
};
static_assert(sizeof(header_names)/sizeof(*header_names) == NumHeaderKinds, "");
// Reverse lookup table from size class index back to block size.
struct SizeTable {
  size_t table[kNumSmartSizes];
  SizeTable() {
#define SMART_SIZE(i,d,s) table[i] = s;
    SMART_SIZES
#undef SMART_SIZE
    assert(table[27] == 4096 && table[28] == 0);
    // pick up where the macros left off
    auto i = 28;
    auto d = 1024;
    auto s = 4096;
    for (; i < kNumSmartSizes; d *= 2) {
      // each power of two size has 4 linear spaced size classes
      table[i++] = (s += d);
      if (i < kNumSmartSizes) table[i++] = (s += d);
      if (i < kNumSmartSizes) table[i++] = (s += d);
      if (i < kNumSmartSizes) table[i++] = (s += d);
    }
  }
  static_assert(LG_SMART_SIZES_PER_DOUBLING == 2, "");
};
SizeTable s_index2size;
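// Example of the layout the loop above produces (derived from the code,
// assuming the SMART_SIZE macros end at table[27] == 4096 as asserted): with
// s = 4096 and d = 1024 the next classes are 5120, 6144, 7168, 8192; then d
// doubles to 2048, giving 10240, 12288, 14336, 16384; and so on -- four
// linearly spaced classes per power-of-two doubling, consistent with
// LG_SMART_SIZES_PER_DOUBLING == 2.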
// initialize a Hole header in the unused memory between m_front and m_limit
void MemoryManager::initHole() {
  if ((char*)m_front < (char*)m_limit) {
    auto hdr = static_cast<FreeNode*>(m_front);
    hdr->kind = HeaderKind::Hole;
    hdr->size = (char*)m_limit - (char*)m_front;
  }
}
// initialize the FreeNode header on all freelist entries.
void MemoryManager::initFree() {
  for (size_t i = 0; i < kNumSmartSizes; i++) {
    auto size = s_index2size.table[i];
    for (auto n = m_freelists[i].head; n; n = n->next) {
      n->kind_size = HeaderKind::Free<<24 | size<<32;
    }
  }
}
BigHeap::iterator MemoryManager::begin() {
  initHole();
  initFree();
  return m_heap.begin();
}

BigHeap::iterator MemoryManager::end() {
  return m_heap.end();
}
// test iterating objects in slabs
void MemoryManager::checkHeap() {
  size_t bytes = 0;
  std::vector<Header*> hdrs;
  std::unordered_set<FreeNode*> free_blocks;
  std::unordered_set<APCLocalArray*> apc_arrays;
  std::unordered_set<StringData*> apc_strings;
  size_t counts[NumHeaderKinds];
  for (unsigned i = 0; i < NumHeaderKinds; i++) counts[i] = 0;
  for (auto h = begin(), lim = end(); h != lim; ++h) {
    hdrs.push_back(&*h);
    TRACE(2, "checkHeap: hdr %p\n", hdrs[hdrs.size()-1]);
    bytes += h->size();
    counts[(int)h->kind_]++;
    switch (h->kind_) {
      case HeaderKind::Debug: {
        // the next block's parsed size should agree with DebugHeader
        auto h2 = h; ++h2;
        if (h2 != lim) {
          assert(h2->kind_ != HeaderKind::Debug);
          assert(h->debug_.returnedCap ==
                 MemoryManager::smartSizeClass(h2->size()));
        }
        break;
      }
      case HeaderKind::Free:
        free_blocks.insert(&h->free_);
        break;
      case HeaderKind::Apc:
        apc_arrays.insert(&h->apc_);
        break;
      case HeaderKind::String:
        if (h->str_.isShared()) apc_strings.insert(&h->str_);
        break;
      case HeaderKind::Packed:
      case HeaderKind::Struct:
      case HeaderKind::Mixed:
      case HeaderKind::Empty:
      case HeaderKind::Globals:
      case HeaderKind::Proxy:
      case HeaderKind::Object:
      case HeaderKind::ResumableObj:
      case HeaderKind::AwaitAllWH:
      case HeaderKind::Resource:
      case HeaderKind::Ref:
      case HeaderKind::Resumable:
      case HeaderKind::Native:
      case HeaderKind::SmallMalloc:
      case HeaderKind::BigMalloc:
      case HeaderKind::BigObj:
      case HeaderKind::Hole:
        break;
    }
  }

  // check the free lists
  for (size_t i = 0; i < kNumSmartSizes; i++) {
    for (auto n = m_freelists[i].head; n; n = n->next) {
      assert(free_blocks.find(n) != free_blocks.end());
      free_blocks.erase(n);
    }
  }
  assert(free_blocks.empty());

  // check the apc array list
  for (auto a : m_apc_arrays) {
    assert(apc_arrays.find(a) != apc_arrays.end());
    apc_arrays.erase(a);
  }
  assert(apc_arrays.empty());

  // check the apc string list
  for (StringDataNode *next, *n = m_strings.next; n != &m_strings; n = next) {
    next = n->next;
    auto const s = StringData::node2str(n);
    assert(s->isShared());
    assert(apc_strings.find(s) != apc_strings.end());
    apc_strings.erase(s);
  }
  assert(apc_strings.empty());

  TRACE(1, "checkHeap: %lu objects %lu bytes\n", hdrs.size(), bytes);
  TRACE(1, "checkHeap-types: ");
  for (unsigned i = 0; i < NumHeaderKinds; ++i) {
    TRACE(1, "%s %lu%s", header_names[i], counts[i],
          (i + 1 < NumHeaderKinds ? " " : "\n"));
  }
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list.  Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  initHole(); // enable parsing the leftover space in the old slab
  if (debug && RuntimeOption::EvalCheckHeapOnAlloc) checkHeap();
  if (debug) traceHeap();
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmartSizeAlignMask) == 0);
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {})\n", bytes, index);
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    // smartMallocSizeBig already wraps its allocation in a debug header, but
    // the caller will try to do it again, so we need to adjust this pointer
    // before returning it.
    return ((char*)smartMallocSizeBig<false>(nbytes).ptr) - kDebugExtraSize;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
      nPrealloc = ((uintptr_t)m_limit - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    auto ptr = debugPostAllocate(p, usable, usable);
    debugPreFree(ptr, usable, usable);
    m_freelists[index].push(ptr, usable);
  }
  return ptr;
}
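// Preallocation arithmetic, by example (hypothetical limits): if nbytes = 64,
// kSmartPreallocCountLimit = 8 and kSmartPreallocBytesLimit = 4096, then
// 64 * 8 = 512 <= 4096, so nPrealloc = 8 and the loop above pushes the eight
// blocks following the returned one onto m_freelists[index], amortizing the
// next eight allocations of this size class across one slabAlloc entry.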
inline void MemoryManager::updateBigStats() {
  // If we are using jemalloc, it is keeping track of allocations outside of
  // the slabs and the usage so we should force this after an allocation that
  // was too large for one of the existing slabs. When we're not using jemalloc
  // this check won't do anything so avoid the extra overhead.
  if (use_jemalloc || UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
}
NEVER_INLINE
void* MemoryManager::smartMallocBig(size_t nbytes) {
  assert(nbytes > 0);
  auto block = m_heap.allocBig(nbytes, HeaderKind::BigMalloc);
  updateBigStats();
  return block.ptr;
}
template NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig<true>(size_t);
template NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig<false>(size_t);
template<bool callerSavesActualSize> NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
  auto block = m_heap.allocBig(debugAddExtra(bytes), HeaderKind::BigObj);
  auto szOut = debugRemoveExtra(block.size);
#ifdef USE_JEMALLOC
  // NB: We don't report the SweepNode size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust jemalloc otherwise we'll double count the direct allocation.
  m_stats.borrow(delta);
#else
  m_stats.usage += bytes;
#endif
  updateBigStats();
  auto ptrOut = debugPostAllocate(block.ptr, bytes, szOut);
  FTRACE(3, "smartMallocSizeBig: {} ({} requested, {} usable)\n",
         ptrOut, bytes, szOut);
  return {ptrOut, szOut};
}
NEVER_INLINE
void* MemoryManager::smartCallocBig(size_t totalbytes) {
  assert(totalbytes > 0);
  auto block = m_heap.callocBig(totalbytes);
  updateBigStats();
  return block.ptr;
}
// smart_malloc api entry points, with support for malloc/free corner cases.

void* smart_malloc(size_t nbytes) {
  auto& mm = MM();
  auto const size = std::max(nbytes, size_t(1));
  return mm.smartMalloc(size);
}

void* smart_calloc(size_t count, size_t nbytes) {
  auto& mm = MM();
  auto const totalBytes = std::max<size_t>(count * nbytes, 1);
  if (totalBytes <= kMaxSmartSize) {
    return memset(smart_malloc(totalBytes), 0, totalBytes);
  }
  return mm.smartCallocBig(totalBytes);
}

void* smart_realloc(void* ptr, size_t nbytes) {
  auto& mm = MM();
  if (!ptr) return smart_malloc(nbytes);
  if (!nbytes) {
    smart_free(ptr);
    return nullptr;
  }
  return mm.smartRealloc(ptr, nbytes);
}

void smart_free(void* ptr) {
  if (ptr) MM().smartFree(ptr);
}
//////////////////////////////////////////////////////////////////////

void MemoryManager::addNativeObject(NativeNode* node) {
  if (debug) for (DEBUG_ONLY auto n : m_natives) assert(n != node);
  node->sweep_index = m_natives.size();
  m_natives.push_back(node);
}

void MemoryManager::removeNativeObject(NativeNode* node) {
  assert(node->sweep_index < m_natives.size());
  assert(m_natives[node->sweep_index] == node);
  auto index = node->sweep_index;
  auto last = m_natives.back();
  m_natives[index] = last;
  m_natives.pop_back();
  last->sweep_index = index;
}

void MemoryManager::addApcArray(APCLocalArray* a) {
  a->m_sweep_index = m_apc_arrays.size();
  m_apc_arrays.push_back(a);
}

void MemoryManager::removeApcArray(APCLocalArray* a) {
  assert(a->m_sweep_index < m_apc_arrays.size());
  assert(m_apc_arrays[a->m_sweep_index] == a);
  auto index = a->m_sweep_index;
  auto last = m_apc_arrays.back();
  m_apc_arrays[index] = last;
  m_apc_arrays.pop_back();
  last->m_sweep_index = index;
}

void MemoryManager::addSweepable(Sweepable* obj) {
  obj->enlist(&m_sweepables);
}

// defined here because memory-manager.h includes sweepable.h
Sweepable::Sweepable() {
  MM().addSweepable(this);
}

//////////////////////////////////////////////////////////////////////
void* MemoryManager::debugPostAllocate(void* p,
                                       size_t bytes,
                                       size_t returnedCap) {
  auto const header = static_cast<DebugHeader*>(p);
  header->allocatedMagic = DebugHeader::kAllocatedMagic;
  header->kind = HeaderKind::Debug;
  header->requestedSize = bytes;
  header->returnedCap = returnedCap;
  return (void*)(uintptr_t(header) + kDebugExtraSize);
}

void* MemoryManager::debugPreFree(void* p,
                                  size_t bytes,
                                  size_t userSpecifiedBytes) {
  auto const header = reinterpret_cast<DebugHeader*>(uintptr_t(p) -
                                                     kDebugExtraSize);
  assert(checkPreFree(header, bytes, userSpecifiedBytes));
  header->requestedSize = DebugHeader::kFreedMagic;
  memset(p, kSmartFreeFill, bytes);
  return header;
}

bool MemoryManager::checkPreFree(DebugHeader* p,
                                 size_t bytes,
                                 size_t userSpecifiedBytes) const {
  assert(debug);
  assert(p->allocatedMagic == DebugHeader::kAllocatedMagic);

  if (userSpecifiedBytes != 0) {
    // For size-specified frees, the size they report when freeing
    // must be between the requested size and the actual capacity.
    assert(userSpecifiedBytes >= p->requestedSize &&
           userSpecifiedBytes <= p->returnedCap);
  }
  if (!m_bypassSlabAlloc && bytes != 0 && bytes <= kMaxSmartSize) {
    assert(m_heap.contains(p));
  }

  return true;
}
void MemoryManager::logAllocation(void* p, size_t bytes) {
  MemoryProfile::logAllocation(p, bytes);
}

void MemoryManager::logDeallocation(void* p) {
  MemoryProfile::logDeallocation(p);
}

void MemoryManager::resetCouldOOM(bool state) {
  clearSurpriseFlag(MemExceededFlag);
  m_couldOOM = state;
}
///////////////////////////////////////////////////////////////////////////////
// Request profiling.

bool MemoryManager::triggerProfiling(const std::string& filename) {
  auto trigger = new ReqProfContext();
  trigger->flag = true;
  trigger->filename = filename;

  ReqProfContext* expected = nullptr;

  if (!s_trigger.compare_exchange_strong(expected, trigger)) {
    delete trigger;
    return false;
  }
  return true;
}
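// The compare-exchange above makes triggering one-shot. A sketch of the
// intended behaviour (hypothetical callers, for illustration only): if two
// threads race on triggerProfiling("a.prof") and triggerProfiling("b.prof"),
// exactly one CAS succeeds and installs its context; the loser observes a
// non-null expected value, deletes its own allocation, and returns false.
// requestInit() below then claims the winner with exchange(nullptr), so at
// most one request consumes any given trigger.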
void MemoryManager::requestInit() {
  auto trigger = s_trigger.exchange(nullptr);

  // If the trigger has already been claimed, do nothing.
  if (trigger == nullptr) return;

  always_assert(MM().empty());

  // Initialize the request-local context from the trigger.
  auto& profctx = MM().m_profctx;
  assert(!profctx.flag);

  MM().m_bypassSlabAlloc = true;
  profctx = *trigger;
  delete trigger;

#ifdef USE_JEMALLOC
  bool active = true;
  size_t boolsz = sizeof(bool);

  // Reset jemalloc stats.
  if (mallctl("prof.reset", nullptr, nullptr, nullptr, 0)) {
    profctx = ReqProfContext{};
    return;
  }

  // Enable jemalloc thread-local heap dumps.
  if (mallctl("prof.active",
              &profctx.prof_active, &boolsz,
              &active, sizeof(bool))) {
    profctx = ReqProfContext{};
    return;
  }
  if (mallctl("thread.prof.active",
              &profctx.thread_prof_active, &boolsz,
              &active, sizeof(bool))) {
    mallctl("prof.active", nullptr, nullptr,
            &profctx.prof_active, sizeof(bool));
    profctx = ReqProfContext{};
    return;
  }
#endif
}
void MemoryManager::requestShutdown() {
  auto& profctx = MM().m_profctx;

  if (!profctx.flag) return;

#ifdef USE_JEMALLOC
  jemalloc_pprof_dump(profctx.filename, true);

  mallctl("thread.prof.active", nullptr, nullptr,
          &profctx.thread_prof_active, sizeof(bool));
  mallctl("prof.active", nullptr, nullptr,
          &profctx.prof_active, sizeof(bool));
#endif

  MM().m_bypassSlabAlloc = RuntimeOption::DisableSmartAllocator;
  profctx = ReqProfContext{};
}
///////////////////////////////////////////////////////////////////////////////

void BigHeap::reset() {
  TRACE(1, "BigHeap-reset: slabs %lu bigs %lu\n", m_slabs.size(),
        m_bigs.size());
  for (auto slab : m_slabs) {
    free(slab.ptr);
  }
  m_slabs.clear();
  for (auto n : m_bigs) {
    free(n);
  }
  m_bigs.clear();
}

void BigHeap::flush() {
  assert(empty());
  m_slabs = std::vector<MemBlock>{};
  m_bigs = std::vector<BigNode*>{};
}
MemBlock BigHeap::allocSlab(size_t size) {
  void* slab = safe_malloc(size);
  m_slabs.push_back({slab, size});
  return {slab, size};
}

void BigHeap::enlist(BigNode* n, HeaderKind kind, size_t size) {
  n->nbytes = size;
  n->kind = kind;
  n->index = m_bigs.size();
  m_bigs.push_back(n);
}
MemBlock BigHeap::allocBig(size_t bytes, HeaderKind kind) {
#ifdef USE_JEMALLOC
  auto n = static_cast<BigNode*>(mallocx(bytes + sizeof(BigNode), 0));
  auto cap = sallocx(n, 0);
#else
  auto cap = bytes + sizeof(BigNode);
  auto n = static_cast<BigNode*>(safe_malloc(cap));
#endif
  enlist(n, kind, cap);
  return {n + 1, cap - sizeof(BigNode)};
}
MemBlock BigHeap::callocBig(size_t nbytes) {
  auto cap = nbytes + sizeof(BigNode);
  auto const n = static_cast<BigNode*>(safe_calloc(cap, 1));
  enlist(n, HeaderKind::BigMalloc, cap);
  return {n + 1, nbytes};
}
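// Header arithmetic, by example (assuming sizeof(BigNode) == 16; the real
// size depends on the struct layout): under jemalloc, allocBig(100, kind)
// calls mallocx(116, 0); if sallocx reports a usable size of 128, the caller
// receives {n + 1, 112} -- a pointer just past the BigNode header and the
// capacity net of that header.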
bool BigHeap::contains(void* ptr) const {
  auto const ptrInt = reinterpret_cast<uintptr_t>(ptr);
  auto it = std::find_if(std::begin(m_slabs), std::end(m_slabs),
    [&] (MemBlock slab) {
      auto const baseInt = reinterpret_cast<uintptr_t>(slab.ptr);
      return ptrInt >= baseInt && ptrInt < baseInt + slab.size;
    }
  );
  return it != std::end(m_slabs);
}
void BigHeap::freeBig(void* ptr) {
  auto n = static_cast<BigNode*>(ptr) - 1;
  auto i = n->index;
  auto last = m_bigs.back();
  m_bigs[i] = last;
  last->index = i;
  m_bigs.pop_back();
  free(n);
}
MemBlock BigHeap::resizeBig(void* ptr, size_t newsize) {
  // Since we don't know how big it is (i.e. how much data we should memcpy),
  // we have no choice but to ask malloc to realloc for us.
  auto const n = static_cast<BigNode*>(ptr) - 1;
  auto const newNode = static_cast<BigNode*>(
    safe_realloc(n, newsize + sizeof(BigNode))
  );
  if (newNode != n) {
    m_bigs[newNode->index] = newNode;
  }
  return {newNode + 1, newsize};
}
BigHeap::iterator BigHeap::begin() {
  if (!m_slabs.empty()) return iterator{m_slabs.begin(), *this};
  return iterator{m_bigs.begin(), *this};
}

BigHeap::iterator BigHeap::end() {
  return iterator{m_bigs.end(), *this};
}

/////////////////////////////////////////////////////////////////////////
void ContiguousHeap::reset() {
  m_requestCount++;

  // if there is a new peak, store it
  if (m_peak < m_used) {
    m_peak = m_used;
    // convert usage to MB.. used later for comparison with water marks
    m_heapUsage = ((uintptr_t)m_peak - (uintptr_t) m_base) >> 20;
  }

  // should the heap be released back to the OS this request?
  bool resetHeap = false;

  // check if we are above low water mark
  if (m_heapUsage > RuntimeOption::HeapLowWaterMark) {
    // check if we are above high water mark
    if (m_heapUsage > RuntimeOption::HeapHighWaterMark) {
      // we are above high water mark... always reset
      resetHeap = true;
    } else {
      // if between watermarks, free based on request count and usage
      int requestCount = RuntimeOption::HeapResetCountBase;

      // Assumption : low and high water mark are power of 2 aligned
      for( auto resetStep = RuntimeOption::HeapHighWaterMark / 2 ;
           resetStep > m_heapUsage ;
           resetStep /= 2 ) {
        requestCount *= RuntimeOption::HeapResetCountMultiple;
      }
      if (requestCount <= m_requestCount) {
        resetHeap = true;
      }
    }
  }

  if (resetHeap) {
    auto oldPeak = m_peak;
    m_peak -= ((m_peak - m_base) / 2);
    m_peak = (char*)((uintptr_t)m_peak & ~(s_pageSize - 1));
    if (madvise(m_peak,
                (uintptr_t)oldPeak - (uintptr_t)m_peak,
                MADV_DONTNEED) == 0) {
      m_requestCount = 0;
      TRACE(1, "ContiguousHeap-reset: bytes %lu\n",
            (uintptr_t)m_end - (uintptr_t)m_peak);
    } else {
      TRACE(1,
            "ContiguousHeap-reset: madvise failed, trying again next request");
    }
  } else {
    TRACE(1, "ContiguousHeap-reset: nothing released");
  }

  m_used = m_base;
  m_freeList.next = nullptr;
  m_freeList.size = 0;
  always_assert(m_base);
  m_slabs.clear();
  m_bigs.clear();
}
void ContiguousHeap::flush() {
  madvise(m_base, m_peak-m_base, MADV_DONTNEED);
  m_used = m_peak = m_base;
  m_freeList.size = 0;
  m_freeList.next = nullptr;
  m_slabs = std::vector<MemBlock>{};
  m_bigs = std::vector<BigNode*>{};
}
MemBlock ContiguousHeap::allocSlab(size_t size) {
  size_t cap;
  void* slab = heapAlloc(size, cap);
  m_slabs.push_back({slab, cap});
  return {slab, cap};
}
MemBlock ContiguousHeap::allocBig(size_t bytes, HeaderKind kind) {
  size_t cap;
  auto n = static_cast<BigNode*>(heapAlloc(bytes + sizeof(BigNode), cap));
  enlist(n, kind, cap);
  return {n + 1, cap - sizeof(BigNode)};
}
MemBlock ContiguousHeap::callocBig(size_t nbytes) {
  size_t cap;
  auto const n = static_cast<BigNode*>(
      heapAlloc(nbytes + sizeof(BigNode), cap));
  // heapAlloc may recycle freelist memory, so zero it to keep calloc
  // semantics.
  memset(n, 0, cap);
  enlist(n, HeaderKind::BigMalloc, cap);
  return {n + 1, cap - sizeof(BigNode)};
}
bool ContiguousHeap::contains(void* ptr) const {
  auto const ptrInt = reinterpret_cast<uintptr_t>(ptr);
  return ptrInt >= reinterpret_cast<uintptr_t>(m_base) &&
         ptrInt < reinterpret_cast<uintptr_t>(m_used);
}
void ContiguousHeap::freeBig(void* ptr) {
  // remove from big list
  auto n = static_cast<BigNode*>(ptr) - 1;
  auto i = n->index;
  auto last = m_bigs.back();
  auto size = n->nbytes;
  m_bigs[i] = last;
  last->index = i;
  m_bigs.pop_back();

  // free heap space
  // freed nodes are stored in address ordered freelist
  auto free = &m_freeList;
  auto node = reinterpret_cast<FreeNode*>(n);
  node->size = size;
  while (free->next != nullptr && free->next < node) {
    free = free->next;
  }
  // Coalesce Nodes if possible with adjacent free nodes
  if ((uintptr_t)free + free->size + node->size == (uintptr_t)free->next) {
    free->size += node->size + free->next->size;
    free->next = free->next->next;
  } else if ((uintptr_t)free + free->size == (uintptr_t)ptr) {
    free->size += node->size;
  } else if ((uintptr_t)node + node->size == (uintptr_t)free->next){
    node->next = free->next->next;
    node->size += free->next->size;
    free->next = node;
  } else {
    node->next = free->next;
    free->next = node;
  }
}
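// Coalescing, by example (hypothetical addresses): with a free block at
// 0x1000 of size 0x1000 and its successor at 0x3000 of size 0x1000, freeing
// a block at 0x2000 of size 0x1000 hits the first case above: the
// predecessor absorbs both neighbours, becoming one block at 0x1000 of size
// 0x3000, and the 0x3000 node is unlinked. The remaining cases merge with
// only the predecessor, only the successor, or neither (a plain insertion
// that keeps the list address-ordered).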
MemBlock ContiguousHeap::resizeBig(void* ptr, size_t newsize) {
  // Since we don't know how big it is (i.e. how much data we should memcpy),
  // we have no choice but to ask malloc to realloc for us.
  auto const n = static_cast<BigNode*>(ptr) - 1;
  size_t cap;
  BigNode* newNode = nullptr;
  if (n->nbytes >= newsize + sizeof(BigNode)) {
    // the existing allocation is already large enough; reuse it
    newNode = n;
  } else {
    newNode = static_cast<BigNode*>(
      heapAlloc(newsize + sizeof(BigNode), cap)
    );
    memcpy(newNode, ptr, n->nbytes);
    newNode->nbytes = cap;
  }
  if (newNode != n) {
    m_bigs[newNode->index] = newNode;
  }
  return {newNode + 1, n->nbytes - sizeof(BigNode)};
}
void* ContiguousHeap::heapAlloc(size_t nbytes, size_t &cap) {
  if (UNLIKELY(!m_base)) {
    // Lazy allocation of heap
    createRequestHeap();
  }

  void* ptr = nullptr;
  auto alignedSize = (nbytes + s_pageSize - 1) & ~(s_pageSize - 1);

  // freeList is address ordered first fit
  auto prev = &m_freeList;
  auto cur = m_freeList.next;
  while (cur != nullptr ) {
    if (cur->size >= alignedSize &&
        cur->size < alignedSize + kMaxSmartSize) {
      // found freed heap node that fits allocation and doesn't need to split
      ptr = cur;
      prev->next = cur->next;
      cap = cur->size;
      return ptr;
    }
    if (cur->size > alignedSize) {
      // split free heap node
      ptr = cur;
      prev->next = reinterpret_cast<FreeNode*>(((char*)cur) + alignedSize);
      prev->next->next = cur->next;
      prev->next->size = cur->size - alignedSize;
      cap = alignedSize;
      return ptr;
    }
    prev = cur;
    cur = cur->next;
  }

  // no free node fit the request; bump-allocate from the end of the heap
  ptr = (void*)m_used;
  m_used += alignedSize;
  cap = alignedSize;
  if (UNLIKELY(m_used > m_end)) {
    always_assert_flog(0,
        "Heap address space exhausted\nbase:{}\nend:{}\nused{}",
        m_base, m_end, m_used);
    // Throw exception when t4840214 is fixed
    // throw FatalErrorException("Request heap out of memory");
  } else if (UNLIKELY(m_used > m_OOMMarker)) {
    setSurpriseFlag(MemExceededFlag);
  }
  return ptr;
}
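// First-fit walk, by example (hypothetical sizes, assuming s_pageSize ==
// 4096): a request for nbytes = 5000 rounds up to alignedSize = 8192. A free
// node of size 8192 (or anything below 8192 + kMaxSmartSize) is taken whole;
// a node of size 32768 is split, returning its first 8192 bytes and linking
// the 24576-byte remainder in its place; if no node fits, the block is
// carved from the bump region at m_used.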
void ContiguousHeap::createRequestHeap() {
  // convert the configured size to bytes
  m_contiguousHeapSize = RuntimeOption::HeapSizeMB * 1024 * 1024;

  if (( m_base = (char*)mmap(NULL,
                             m_contiguousHeapSize,
                             PROT_WRITE | PROT_READ,
                             s_mmapFlags,
                             -1,
                             0)) != MAP_FAILED) {
    m_used = m_base;
  } else {
    always_assert_flog(0, "Heap Creation Failed");
  }
  m_end = m_base + m_contiguousHeapSize;
  m_peak = m_base;
  m_OOMMarker = m_end - (m_contiguousHeapSize/2);
  m_freeList.next = nullptr;
  m_freeList.size = 0;
}

ContiguousHeap::~ContiguousHeap(){
  flush();
}

///////////////////////////////////////////////////////////////////////////////
}