1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
// Forward declarations of the AVM sampler hooks used to report every heap
// allocation/deallocation when AVMPLUS_SAMPLER is enabled.
// NOTE(review): extraction is corrupted (original line numbers fused into the
// text, lines split/missing); code left byte-identical, comments only.
43 #ifdef AVMPLUS_SAMPLER
46 void recordAllocationSample(const void* item
, size_t size
);
47 void recordDeallocationSample(const void* item
, size_t size
);
// Helpers that redirect GC policy-profiling log output into "gcbehavior.txt"
// (only built for MMGC_POLICY_PROFILING outside the shell build).
// NOTE(review): corrupted extraction -- the fopen-failure guard and the
// fclose path are among the missing lines; code left byte-identical.
51 #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
52 extern void RedirectLogOutput(void (*)(const char*));
// File handle shared by the log helpers below.
53 static FILE* fp
= NULL
;
// Callback handed to RedirectLogOutput; body not fully visible here.
55 void logToFile(const char* s
)
// Open the log file and start redirecting GC log output into it.
61 static void startGCLogToFile()
63 fp
= fopen("gcbehavior.txt", "w");
65 RedirectLogOutput(logToFile
);
// Stop redirection (NULL restores default log output).
68 static void endGCLogToFile()
70 RedirectLogOutput(NULL
);
76 #endif // MMGC_POLICY_PROFILING && !AVMSHELL_BUILD
// Static/global state for the GCHeap singleton.
// NOTE(review): corrupted extraction; code left byte-identical, comments only.
// The process-wide GCHeap singleton, created by GCHeap::Init below.
80 GCHeap
*GCHeap::instance
= NULL
;
81 bool GCHeap::instanceEnterLockInitialized
= false;
82 vmpi_spin_lock_t
GCHeap::instanceEnterLock
;
84 // GCHeap instance has the C++ runtime call dtor which causes problems
// Raw, 8-byte-aligned storage for the singleton; placement-new'ed in Init so
// no static destructor runs at process exit.
85 AVMPLUS_ALIGN8(uint8_t) heapSpace
[sizeof(GCHeap
)];
// Sentinel blockId marking regions that back LargeAlloc allocations.
87 const size_t kLargeItemBlockId
= ~0U;
89 size_t GCHeap::leakedBytes
;
91 #ifdef MMGC_MEMORY_PROFILER
// (MemoryProfiler*)-1 is the "not yet initialized" sentinel, distinct from
// NULL (= profiling disabled); see IsProfilerInitialized usage elsewhere.
92 MemoryProfiler
* GCHeap::profiler
= (MemoryProfiler
*)-1;
// GCHeapConfig constructor: establishes default heap policy (limits, VM-page
// behavior, stats flags, GC load factors) and then overrides heapLimit /
// heapSoftLimit from MMGC_HEAP_LIMIT / MMGC_HEAP_SOFT_LIMIT env vars.
// NOTE(review): corrupted extraction -- the NULL checks around the env values
// and several initializers are missing lines; code left byte-identical.
95 GCHeapConfig::GCHeapConfig() :
97 heapLimit(kDefaultHeapLimit
),
100 useVirtualMemory(VMPI_useVirtualMemory()),
101 trimVirtualMemory(true),
102 mergeContiguousRegions(VMPI_canMergeContiguousRegions()),
103 sloppyCommit(VMPI_canCommitAlreadyCommittedMemory()),
106 gcstats(false), // tracking
107 autoGCStats(false), // auto printing
108 #ifdef AVMSHELL_BUILD
109 gcbehavior(0), // controlled by command line switch
111 gcbehavior(2), // unconditional, if MMGC_POLICY_PROFILING is on
113 eagerSweeping(false),
114 #ifdef MMGC_HEAP_GRAPH
115 dumpFalsePositives(false),
117 gcLoadCeiling(1.0), // 1.0 is probably OK for desktop, maybe less so for mobile - more experiments needed
119 _checkFixedMemory(true) // See comment in GCHeap.h for why the default must be 'true'
121 // Bugzilla 544695 - large heaps need to be controlled more tightly than
122 // small heaps due to the reliance of the Player on the GC for removing some
123 // objects from the AS2 scriptThreadList and because people don't want to
124 // use as much memory as a single policy across all heap sizes would require.
125 // As reference counting takes care of a lot of storage management, there's
126 // little harm in running the incremental GC more aggressively in large
127 // heaps - most of the time is spent elsewhere.
129 // These numbers are not based on any profiling data but are intended to be
130 // approximately right. The 1.05 could be a little too tight; we'll see.
132 GCAssert(GCHeapConfig::kNumLoadFactors
>= 7);
// Piecewise GC load policy: gcLoad[i] applies while the heap is below
// gcLoadCutoff[i] (megablocks); larger heaps get tighter load factors.
134 gcLoad
[0] = 2.5; gcLoadCutoff
[0] = 10; // Breathing room for warmup
135 gcLoad
[1] = 2.0; gcLoadCutoff
[1] = 25; // Classical 2x factor
136 gcLoad
[2] = 1.75; gcLoadCutoff
[2] = 50; // Tighten
137 gcLoad
[3] = 1.5; gcLoadCutoff
[3] = 75; // the
138 gcLoad
[4] = 1.25; gcLoadCutoff
[4] = 100; // screws
139 gcLoad
[5] = 1.1; gcLoadCutoff
[5] = 150; // Large heaps are
140 gcLoad
[6] = 1.05; gcLoadCutoff
[6] = DBL_MAX
;// controlled (very) tightly
143 trimVirtualMemory
= false; // no need
// Environment-variable overrides; presumably guarded by NULL checks in the
// missing lines -- VMPI_strtol on NULL would otherwise be unsafe. TODO confirm
// against the pristine source.
145 const char *envValue
= VMPI_getenv("MMGC_HEAP_LIMIT");
147 heapLimit
= VMPI_strtol(envValue
, 0, 10);
148 envValue
= VMPI_getenv("MMGC_HEAP_SOFT_LIMIT");
150 heapSoftLimit
= VMPI_strtol(envValue
, 0, 10);
// Resets GCHeap's static state; under MMGC_MEMORY_PROFILER this restores the
// profiler pointer to its (MemoryProfiler*)-1 "uninitialized" sentinel.
// NOTE(review): corrupted extraction (braces/lines missing); code unchanged.
154 void GCHeap::ResetStatics()
157 #ifdef MMGC_MEMORY_PROFILER
158 if(profiler
&& IsProfilerInitialized())
160 profiler
= (MemoryProfiler
*)-1;
// Creates the GCHeap singleton via placement-new into the statically
// allocated, 8-byte-aligned heapSpace buffer (avoids a static destructor).
// Must be called exactly once: asserts instance is still NULL.
// NOTE(review): corrupted extraction (braces missing); code unchanged.
164 void GCHeap::Init(const GCHeapConfig
& config
)
166 GCAssert(instance
== NULL
);
167 void *p
= (void*)heapSpace
;
168 instance
= new (p
) GCHeap(config
);
// Tears down the singleton via DestroyInstance; returns a size_t --
// presumably the leaked-byte count, but the return statement is among the
// missing lines (TODO confirm against the pristine source).
171 size_t GCHeap::Destroy()
174 GCAssert(instance
!= NULL
);
175 instance
->DestroyInstance();
// GCHeap constructor: initializes member state, the spinlocks, the free
// lists, the initial heap region, FixedMalloc, and (when enabled) the memory
// profiler / policy-profiling log redirection.
// NOTE(review): corrupted extraction -- most of the initializer list and
// several statements are missing lines; code left byte-identical.
180 GCHeap::GCHeap(const GCHeapConfig
& c
)
181 : kNativePageSize(VMPI_getVMPageSize()),
191 externalCodeMemory(0),
193 m_notificationThread(NULL
),
199 #ifdef MMGC_MEMORY_PROFILER
203 #ifdef MMGC_POLICY_PROFILING
210 entryChecksEnabled(true),
211 abortStatusNotificationSent(false)
213 VMPI_lockInit(&m_spinlock
);
214 VMPI_lockInit(&gclog_spinlock
);
216 // ResetStatics should be called at the start here before using/initializing any statics
219 // Initialize free lists
220 HeapBlock
*block
= freelists
;
221 for (uint32_t i
=0; i
<kNumFreeLists
; i
++) {
222 block
->FreelistInit();
226 // Create the initial heap
228 MMGC_LOCK(m_spinlock
);
229 if (!ExpandHeap((int)config
.initialSize
))
235 fixedMalloc
.InitInstance(this);
239 #ifdef MMGC_MEMORY_PROFILER
240 //create profiler if turned on and if it is not already created
241 if(!IsProfilerInitialized())
248 hooksEnabled
= true; // set only after creating profiler
249 hasSpy
= VMPI_spySetup();
253 #ifdef MMGC_MEMORY_INFO
254 hooksEnabled
= true; // always track allocs in DEBUG builds
257 #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
// Tears down the heap: records leaked bytes, destroys FixedMalloc, sanity
// checks block accounting, logs any unfreed blocks, then destroys the
// spinlocks and the enter-frame thread-local.
// NOTE(review): corrupted extraction (many lines missing, e.g. region/heap
// teardown between the leak check and the lock teardown); code unchanged.
262 void GCHeap::DestroyInstance()
264 #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
271 leakedBytes
= GetFixedMalloc()->GetBytesInUse();
272 fixedMalloc
.DestroyInstance();
273 GCAssertMsg(leakedBytes
== 0 || GetStatus() == kMemAbort
, "Leaks!");
275 size_t internalNum
= AddrToBlock(blocks
)->size
+ numRegionBlocks
;
277 // numAlloc should just be the size of the HeapBlock's space
278 if(numAlloc
!= internalNum
&& status
!= kMemAbort
)
// Walk the block table and report anything still marked in use.
280 for (unsigned int i
=0; i
<blocksLen
; i
++)
282 HeapBlock
*block
= &blocks
[i
];
283 if(block
->inUse() && block
->baseAddr
&& block
->baseAddr
!= (char*)blocks
)
289 GCLog("Block 0x%x not freed\n", block
->baseAddr
);
291 #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
292 if(block
->allocTrace
)
293 PrintStackTrace(block
->allocTrace
);
300 #ifdef MMGC_MEMORY_PROFILER
301 hooksEnabled
= false;
310 // Acquire all the locks before destroying them to make reasonably
311 // sure we're the last consumers. This is probably not exactly
312 // right, see https://bugzilla.mozilla.org/show_bug.cgi?id=548347
313 // and linked bugs for a discussion. Note we can't acquire these
314 // much higher up because we get into situations where GCHeap code
315 // will want to lock these locks, but they are already locked.
317 VMPI_lockAcquire(&m_spinlock
);
318 VMPI_lockRelease(&m_spinlock
);
319 VMPI_lockDestroy(&m_spinlock
);
321 VMPI_lockAcquire(&gclog_spinlock
);
322 VMPI_lockRelease(&gclog_spinlock
);
323 VMPI_lockDestroy(&gclog_spinlock
);
326 enterFrame
->Destroy(); // Destroy the pointed-to value
327 enterFrame
.destroy(); // Destroy the thread-local itself
// Public block allocator: takes the heap lock (recursion-tolerant), tries
// AllocHelper, on failure signals listeners to free memory and retries,
// honors kCanFail / kNoOOMHandling / kZero / kExpand / kProfile flags,
// reports to the memory profiler and Valgrind, and zeroes memory on request.
// NOTE(review): corrupted extraction -- retry/abort control flow and several
// guards are missing lines; code left byte-identical, comments only.
330 void* GCHeap::Alloc(size_t size
, uint32_t flags
, size_t alignment
)
333 GCAssert(alignment
> 0);
336 // Alignment must be a power of 2
337 size_t a
= alignment
;
// Decode behavior flags once up front.
345 bool zero
= (flags
& kZero
) != 0;
346 bool expand
= (flags
& kExpand
) != 0;
348 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
// Temporarily disable OOM handling when the caller asked for it; restored
// below on every exit path visible here.
350 bool saved_oomHandling
= m_oomHandling
;
351 m_oomHandling
= saved_oomHandling
&& (flags
& kNoOOMHandling
) == 0;
353 baseAddr
= AllocHelper(size
, expand
, zero
, alignment
);
355 // If expand didn't work, or expand flag not set, then try to free
356 // memory then realloc from the heap
359 SendFreeMemorySignal(size
);
360 baseAddr
= AllocHelper(size
, expand
, zero
, alignment
);
363 // If we're still unable to allocate, we're done
366 if (flags
& kCanFail
)
368 m_oomHandling
= saved_oomHandling
;
377 #ifdef MMGC_MEMORY_PROFILER
378 if((flags
& kProfile
) && HooksEnabled() && profiler
) {
379 profiler
->RecordAllocation(baseAddr
, size
* kBlockSize
, size
* kBlockSize
);
383 // Only check for memory limits if we're allowing OOM notifications
386 CheckForMemoryLimitsExceeded();
389 m_oomHandling
= saved_oomHandling
;
392 GCAssert(Size(baseAddr
) == size
);
394 // Zero out the memory, if requested to do so
396 // These pages may have been seen by valgrind before and
397 // they become unaddressable when we last called
398 // FREELIKE_BLOCK or MEMPOOL_DESTROY, use MAKE_MEM_DEFINED
399 // to silence write to freed memory errors.
400 VALGRIND_MAKE_MEM_DEFINED(baseAddr
, size
* kBlockSize
);
401 VMPI_memset(baseAddr
, 0, size
* kBlockSize
);
402 // and then make the memory undefined again, we do this because
403 // we do this because either the VALGRIND_MALLOCLIKE_BLOCK call
404 // below will define it, or the suballocator will, ie this is
405 // here to keep the sub allocators honest.
406 VALGRIND_MAKE_MEM_UNDEFINED(baseAddr
, size
* kBlockSize
);
409 // Fail the allocation if we're a "canFail" allocation that has pushed beyond one of our limits.
410 if((flags
& kCanFail
) != 0 && (status
== kMemSoftLimit
|| SoftLimitExceeded() || HardLimitExceeded() ))
412 FreeInternal(baseAddr
, (flags
& kProfile
) != 0, m_oomHandling
);
416 // We utilize the "profile" flag to tell the difference
417 // between client requests and sub-allocator requests. Direct
418 // client requests are reported to valgrind here, sub
419 // allocators need to tell valgrind about memory themselves.
420 if ((flags
& kProfile
) != 0) {
421 VALGRIND_MALLOCLIKE_BLOCK(baseAddr
, size
* kBlockSize
, 0, (flags
&kZero
) != 0);
// Result must start on a block boundary that satisfies the requested
// block-granular alignment.
424 GCAssert(((uintptr_t)baseAddr
>> kBlockShift
) % alignment
== 0);
// Core allocation path: tries the free lists first; if allowed to expand,
// pre-signals memory pressure, refuses to pass the hard limit, routes very
// large virtual-memory requests to LargeAlloc, and otherwise expands the
// heap and retries. Returns the block's base address or NULL.
// NOTE(review): corrupted extraction (ExpandHeap call and early returns are
// missing lines); code left byte-identical, comments only.
428 void *GCHeap::AllocHelper(size_t size
, bool expand
, bool& zero
, size_t alignment
)
430 // first try to find it in our existing free memory
431 HeapBlock
*block
= AllocBlock(size
, zero
, alignment
);
433 // Try to expand if the flag is set
434 if(!block
&& expand
) {
436 // Look ahead at memory limits to see if we should trigger a free memory signal
437 if ( (HardLimitExceeded(size
) || SoftLimitExceeded(size
)))
439 SendFreeMemorySignal(size
);
442 // Don't allow our memory consumption to grow beyond hard limit
443 if(HardLimitExceeded(size
))
// Requests at/above kOSAllocThreshold bypass the block heap entirely when
// virtual memory is available.
446 if(size
>= kOSAllocThreshold
&& config
.useVirtualMemory
) {
447 return LargeAlloc(size
, alignment
);
450 block
= AllocBlock(size
, zero
, alignment
);
454 #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
455 if(profiler
&& block
)
456 block
->allocTrace
= profiler
->GetStackTrace();
459 return block
!= NULL
? block
->baseAddr
: NULL
;
// Accounts freshly allocated code memory against the heap limits; the
// gcheap_memory flag presumably selects which counter is bumped (the
// if/else between the two += lines is missing from this extraction --
// TODO confirm against the pristine source).
462 void GCHeap::SignalCodeMemoryAllocation(size_t size
, bool gcheap_memory
)
465 gcheapCodeMemory
+= size
;
467 externalCodeMemory
+= size
;
468 CheckForMemoryLimitsExceeded();
// Checks soft/hard memory limits; computes the overage (in blocks, counting
// external pressure) and asks listeners to free that much, then re-verifies
// both limits. No-op while aborting or already mid-notification.
// NOTE(review): corrupted extraction (overage declaration and braces are
// missing lines); code left byte-identical, comments only.
471 void GCHeap::CheckForMemoryLimitsExceeded()
474 // If we're already in the process of sending out memory notifications, don't bother verifying now.
475 if (status
== MMgc::kMemAbort
|| statusNotificationBeingSent())
479 if (SoftLimitExceeded())
481 overage
= GetTotalHeapSize() + externalPressure
/kBlockSize
- config
.heapSoftLimit
;
483 else if (HardLimitExceeded())
// Over the hard limit: target 10% below the limit, not just barely under.
485 overage
= (GetTotalHeapSize() + externalPressure
/kBlockSize
) - config
.heapLimit
+ (config
.heapLimit
/ 10);
490 SendFreeMemorySignal(overage
);
492 CheckForHardLimitExceeded();
494 CheckForSoftLimitExceeded(overage
);
// Frees a heap item: resolves it to a HeapBlock (or a LargeAlloc region),
// updates accounting, records the free with the profiler, notifies Valgrind,
// and restores the saved OOM-handling mode.
// NOTE(reviewy): corrupted extraction -- the large-vs-normal dispatch and the
// actual FreeBlock/LargeFree calls are missing lines; code unchanged.
498 void GCHeap::FreeInternal(const void *item
, bool profile
, bool oomHandling
)
502 // recursive free calls are allowed from StatusChangeNotify
503 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
// Narrow OOM handling to the intersection of current mode and caller's wish.
505 bool saved_oomHandling
= m_oomHandling
;
506 m_oomHandling
= saved_oomHandling
&& oomHandling
;
508 HeapBlock
*block
= AddrToBlock(item
);
// Large allocations have no HeapBlock; size comes from the region.
512 size
= LargeAllocSize(item
);
518 GCAssert(numAlloc
>= (unsigned int)size
);
521 #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
522 if(profiler
&& block
)
523 block
->freeTrace
= profiler
->GetStackTrace();
526 #ifdef MMGC_MEMORY_PROFILER
527 if(profile
&& HooksEnabled() && profiler
) {
528 profiler
->RecordDeallocation(item
, size
* kBlockSize
);
538 VALGRIND_FREELIKE_BLOCK(item
, 0);
540 m_oomHandling
= saved_oomHandling
;
// Returns committed-but-free heap pages to the OS. Computes how much to
// decommit (free-threshold, hard-limit and soft-limit cases), then walks the
// free lists from the largest sizes down, decommitting (or, when trimming is
// on and a whole region is free, releasing) blocks, merging neighbors, and
// finally re-checks whether status can return to normal.
// NOTE(review): corrupted extraction -- loop braces, several early-outs and
// the restart logic are missing lines; code left byte-identical. Do not edit
// this routine without the pristine file.
543 void GCHeap::Decommit()
545 // keep at least initialSize free
546 if(!config
.returnMemory
)
549 // don't decommit if OOM handling is disabled; there's a guard in the OOM code so this
550 // should never happen, but belt and suspenders...
554 size_t heapSize
= GetTotalHeapSize();
555 size_t freeSize
= GetFreeHeapSize();
557 size_t decommitSize
= 0;
558 // commit if > kDecommitThresholdPercentage is free
559 if(FreeMemoryExceedsDecommitThreshold())
561 decommitSize
= int((freeSize
* 100 - heapSize
* kDecommitThresholdPercentage
) / 100);
563 // If we're over the heapLimit, attempt to decommit enough to get just under the limit
564 else if ( (heapSize
> config
.heapLimit
) && ((heapSize
- freeSize
) < config
.heapLimit
))
566 decommitSize
= heapSize
- config
.heapLimit
+ 1;
569 // If we're over the SoftLimit, attempt to decommit enough to get just under the softLimit
570 else if ((config
.heapSoftLimit
!= 0) && (heapSize
> config
.heapSoftLimit
) && ((heapSize
- freeSize
) < config
.heapSoftLimit
))
572 decommitSize
= heapSize
- config
.heapSoftLimit
+ 1;
// Round small decommits up to kMinHeapIncrement when there's room.
578 if ((decommitSize
< (size_t)kMinHeapIncrement
) && (freeSize
> (size_t)kMinHeapIncrement
))
581 decommitSize
= kMinHeapIncrement
;
584 // Don't decommit more than our initial config size.
585 if (heapSize
- decommitSize
< config
.initialSize
)
587 decommitSize
= heapSize
- config
.initialSize
;
591 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
595 // search from the end of the free list so we decommit big blocks
596 HeapBlock
*freelist
= freelists
+kNumFreeLists
-1;
598 HeapBlock
*endOfBigFreelists
= &freelists
[GetFreeListIndex(1)];
600 for (; freelist
>= endOfBigFreelists
&& decommitSize
> 0; freelist
--)
603 // We may call RemoveLostBlock below which splits regions
604 // and may need to create a new one, don't let it expand
605 // though, expanding while Decommit'ing would be silly.
606 if(!EnsureFreeRegion(/*expand=*/false))
610 HeapBlock
*block
= freelist
;
611 while ((block
= block
->prev
) != freelist
&& decommitSize
> 0)
613 // decommitting already decommitted blocks doesn't help
614 // temporary replacement for commented out conditional below
615 GCAssert(block
->size
!= 0);
616 if(!block
->committed
/*|| block->size == 0*/)
619 if(config
.useVirtualMemory
)
621 RemoveFromList(block
);
// Only decommit as much as we still need: split off the excess.
622 if((size_t)block
->size
> decommitSize
)
624 HeapBlock
*newBlock
= Split(block
, (int)decommitSize
);
625 AddToFreeList(newBlock
);
628 Region
*region
= AddrToRegion(block
->baseAddr
);
629 if(config
.trimVirtualMemory
&&
630 freeSize
* 100 > heapSize
* kReleaseThresholdPercentage
&&
631 // if block is as big or bigger than a region, free the whole region
632 block
->baseAddr
<= region
->baseAddr
&&
633 region
->reserveTop
<= block
->endAddr() )
// Trim the block to the region's bounds before releasing the region.
636 if(block
->baseAddr
< region
->baseAddr
)
638 HeapBlock
*newBlock
= Split(block
, int((region
->baseAddr
- block
->baseAddr
) / kBlockSize
));
639 AddToFreeList(block
);
642 if(block
->endAddr() > region
->reserveTop
)
644 HeapBlock
*newBlock
= Split(block
, int((region
->reserveTop
- block
->baseAddr
) / kBlockSize
));
645 AddToFreeList(newBlock
);
648 decommitSize
-= block
->size
;
652 else if(VMPI_decommitMemory(block
->baseAddr
, block
->size
* kBlockSize
))
654 block
->committed
= false;
655 block
->dirty
= false;
656 decommitSize
-= block
->size
;
658 GCLog("decommitted %d page block from %p\n", block
->size
, block
->baseAddr
);
664 // this can happen on mac where we release and re-reserve the memory and another thread may steal it from us
665 RemoveLostBlock(block
);
668 // if the VM API's fail us bail
673 numDecommitted
+= block
->size
;
675 // merge with previous/next if not in use and not committed
676 HeapBlock
*prev
= block
- block
->sizePrevious
;
677 if(block
->sizePrevious
!= 0 && !prev
->committed
&& !prev
->inUse()) {
678 RemoveFromList(prev
);
680 prev
->size
+= block
->size
;
683 block
->sizePrevious
= 0;
689 HeapBlock
*next
= block
+ block
->size
;
690 if(next
->size
!= 0 && !next
->committed
&& !next
->inUse()) {
691 RemoveFromList(next
);
693 block
->size
+= next
->size
;
696 next
->sizePrevious
= 0;
// Keep the neighbor bookkeeping consistent after the merges above.
700 next
= block
+ block
->size
;
701 next
->sizePrevious
= block
->size
;
703 // add this block to the back of the bus to make sure we consume committed memory
705 HeapBlock
*backOfTheBus
= &freelists
[kNumFreeLists
-1];
706 HeapBlock
*pointToInsert
= backOfTheBus
;
707 while ((pointToInsert
= pointToInsert
->next
) != backOfTheBus
) {
708 if (pointToInsert
->size
>= block
->size
&& !pointToInsert
->committed
) {
712 AddToFreeList(block
, pointToInsert
);
714 // so we keep going through freelist properly
717 } else { // not using virtual memory
719 // if we aren't using mmap we can only do something if the block maps to a region
720 // that is completely empty
721 Region
*region
= AddrToRegion(block
->baseAddr
);
722 if(block
->baseAddr
== region
->baseAddr
&& // beginnings match
723 region
->commitTop
== block
->baseAddr
+ block
->size
*kBlockSize
) {
725 RemoveFromList(block
);
737 CheckForStatusReturnToNormal();
// Caller must hold m_spinlock. Early-outs when the hard limit is not
// exceeded; the abort/notification path is among the missing lines of this
// corrupted extraction (TODO confirm against the pristine source).
740 // m_spinlock is held
741 void GCHeap::CheckForHardLimitExceeded()
743 if (!HardLimitExceeded())
// Caller must hold m_spinlock. If the soft limit is configured, status is
// normal, and the limit is exceeded, logs a diagnostic and (unless a
// notification is already in flight) broadcasts kMemSoftLimit.
// NOTE(review): corrupted extraction (braces and one GCDebugMsg argument
// line are missing); code left byte-identical, comments only.
749 // m_spinlock is held
750 void GCHeap::CheckForSoftLimitExceeded(size_t request
)
752 if(config
.heapSoftLimit
== 0 || status
!= kMemNormal
|| !SoftLimitExceeded())
755 size_t externalBlocks
= externalPressure
/ kBlockSize
;
756 GCDebugMsg(false, "*** Alloc exceeded softlimit: ask for %u, usedheapsize =%u, totalHeap =%u, of which external =%u\n",
758 unsigned(GetUsedHeapSize() + externalBlocks
),
759 unsigned(GetTotalHeapSize() + externalBlocks
),
760 unsigned(externalBlocks
));
762 if(statusNotificationBeingSent())
765 StatusChangeNotify(kMemSoftLimit
);
// Caller must hold m_spinlock. Transitions status back to kMemNormal when we
// drop below the soft limit, or when the heap shrinks below ~90% of its max
// (see the *9 > *10 comparison).
// NOTE(review): corrupted extraction (braces and one GCDebugMsg argument
// line missing); code left byte-identical, comments only.
768 // m_spinlock is held
769 void GCHeap::CheckForStatusReturnToNormal()
771 if(!statusNotificationBeingSent() && statusNotNormalOrAbort())
773 size_t externalBlocks
= externalPressure
/ kBlockSize
;
774 size_t total
= GetTotalHeapSize() + externalBlocks
;
776 // return to normal if we drop below heapSoftLimit
777 if(config
.heapSoftLimit
!= 0 && status
== kMemSoftLimit
)
779 if (!SoftLimitExceeded())
781 size_t used
= GetUsedHeapSize() + externalBlocks
;
782 GCDebugMsg(false, "### Alloc dropped below softlimit: usedheapsize =%u, totalHeap =%u, of which external =%u\n",
785 unsigned(externalBlocks
) );
786 StatusChangeNotify(kMemNormal
);
789 // or if we shrink to below %10 of the max
790 else if ((maxTotalHeapSize
/ kBlockSize
+ externalBlocks
) * 9 > total
* 10)
791 StatusChangeNotify(kMemNormal
);
// Removes a block whose backing memory was lost (e.g. on Mac another thread
// re-reserved released pages). Handles the easy case where the block exactly
// covers a region, splits blocks that span multiple regions (recursing on
// each piece), then shrinks/splits the owning region and finally removes the
// block via a temporary Region so RemoveBlock's invariants hold.
// NOTE(review): corrupted extraction -- braces, early returns and some
// region bookkeeping are missing lines; code left byte-identical.
797 void GCHeap::RemoveLostBlock(HeapBlock
*block
)
800 GCLog("Removing block %p %d\n", block
->baseAddr
, block
->size
);
805 Region
*region
= AddrToRegion(block
->baseAddr
);
// Fast path: block covers the whole region -> plain RemoveBlock (no release;
// the memory is already gone).
806 if(region
->baseAddr
== block
->baseAddr
&& region
->reserveTop
== block
->endAddr()) {
807 RemoveBlock(block
, /*release*/false);
812 while(AddrToRegion(block
->baseAddr
) != AddrToRegion(block
->endAddr()-1)) {
813 // split block into parts mapping to regions
814 Region
*r
= AddrToRegion(block
->baseAddr
);
815 size_t numBlocks
= (r
->commitTop
- block
->baseAddr
) / kBlockSize
;
816 char *next
= Split(block
, numBlocks
)->baseAddr
;
818 RemoveLostBlock(block
);
819 block
= AddrToBlock(next
);
822 Region
*region
= AddrToRegion(block
->baseAddr
);
823 // save these off since we'll potentially shift region
824 char *regionBaseAddr
= region
->baseAddr
;
825 size_t regionBlockId
= region
->blockId
;
827 // if we don't line up with beginning or end we need a new region
828 if(block
->baseAddr
!= region
->baseAddr
&& region
->commitTop
!= block
->endAddr()) {
830 GCAssertMsg(HaveFreeRegion(), "Decommit was supposed to ensure this!");
// Carve a new region for the tail that survives past the lost block.
832 NewRegion(block
->endAddr(), region
->reserveTop
,
833 region
->commitTop
> block
->endAddr() ? region
->commitTop
: block
->endAddr(),
834 region
->blockId
+ (block
->endAddr() - region
->baseAddr
) / kBlockSize
);
836 if(region
->baseAddr
!= block
->baseAddr
) {
837 // end this region at the start of block going away
838 region
->reserveTop
= block
->baseAddr
;
839 if(region
->commitTop
> block
->baseAddr
)
840 region
->commitTop
= block
->baseAddr
;
843 } else if(region
->baseAddr
== block
->baseAddr
) {
// Lost block at the front of the region: slide the region start forward.
844 region
->blockId
+= block
->size
;
845 region
->baseAddr
= block
->endAddr();
846 } else if(region
->commitTop
== block
->endAddr()) {
847 // end this region at the start of block going away
848 region
->reserveTop
= block
->baseAddr
;
849 if(region
->commitTop
> block
->baseAddr
)
850 region
->commitTop
= block
->baseAddr
;
852 GCAssertMsg(false, "This shouldn't be possible");
856 // create temporary region for this block
857 Region
temp(this, block
->baseAddr
, block
->endAddr(), block
->endAddr(), regionBlockId
+ (block
->baseAddr
- regionBaseAddr
) / kBlockSize
);
859 RemoveBlock(block
, /*release*/false);
861 // pop temp from freelist, put there by RemoveBlock
862 freeRegion
= *(Region
**)freeRegion
;
867 // doing this here is an extra validation step
// Removes a block (which must exactly cover its region) from the block
// table: decides whether a sentinel entry must be added or removed to keep
// the "non-contiguous neighbors are separated by a sentinel" invariant,
// compacts the blocks array with memmove, fixes up every freelist's
// prev/next pointers and every region's blockId, then removes the region.
// NOTE(review): corrupted extraction -- the newBlocksLen adjustment branches,
// loop braces and parts of the fix-up loops are missing lines; code left
// byte-identical. Extremely order-sensitive; do not edit without the
// pristine file.
877 void GCHeap::RemoveBlock(HeapBlock
*block
, bool release
)
879 Region
*region
= AddrToRegion(block
->baseAddr
);
881 GCAssert(region
->baseAddr
== block
->baseAddr
);
882 GCAssert(region
->reserveTop
== block
->endAddr());
884 size_t newBlocksLen
= blocksLen
- block
->size
;
886 HeapBlock
*nextBlock
= block
+ block
->size
;
888 bool need_sentinel
= false;
889 bool remove_sentinel
= false;
891 if( block
->sizePrevious
&& nextBlock
->size
) {
892 // This block is contiguous with the blocks before and after it
893 // so we need to add a sentinel
894 need_sentinel
= true;
896 else if ( !block
->sizePrevious
&& !nextBlock
->size
) {
897 // the block is not contigous with the block before or after it - we need to remove a sentinel
898 // since there would already be one on each side.
899 remove_sentinel
= true;
902 // update nextblock's sizePrevious
903 nextBlock
->sizePrevious
= need_sentinel
? 0 : block
->sizePrevious
;
905 // Add space for the sentinel - the remaining blocks won't be contiguous
908 else if(remove_sentinel
)
911 // just re-use blocks; small wastage possible
912 HeapBlock
*newBlocks
= blocks
;
914 // the memmove will overwrite this so save it
915 size_t blockSize
= block
->size
;
917 size_t offset
= int(block
-blocks
);
918 int32_t sen_offset
= 0;
919 HeapBlock
*src
= block
+ block
->size
;
921 if( need_sentinel
) {
// Write the sentinel in place of the removed block's first entry.
922 offset
= int(block
-blocks
)+1;
924 HeapBlock
* sentinel
= newBlocks
+ (block
-blocks
);
925 sentinel
->baseAddr
= NULL
;
927 sentinel
->sizePrevious
= block
->sizePrevious
;
928 sentinel
->prev
= NULL
;
929 sentinel
->next
= NULL
;
930 #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
931 sentinel
->allocTrace
= 0;
934 else if( remove_sentinel
) {
935 // skip trailing sentinel
// Slide everything after the removed block (and optional sentinel) down.
941 int lastChunkSize
= int((blocks
+ blocksLen
) - src
);
942 GCAssert(lastChunkSize
+ offset
== newBlocksLen
);
943 memmove(newBlocks
+ offset
, src
, lastChunkSize
* sizeof(HeapBlock
));
945 // Fix up the prev/next pointers of each freelist. This is a little more complicated
946 // than the similiar code in ExpandHeap because blocks after the one we are free'ing
947 // are sliding down by block->size
948 HeapBlock
*fl
= freelists
;
949 for (uint32_t i
=0; i
<kNumFreeLists
; i
++) {
950 HeapBlock
*temp
= fl
;
952 if (temp
->prev
!= fl
) {
953 if(temp
->prev
> block
) {
954 temp
->prev
= newBlocks
+ (temp
->prev
-blocks
-blockSize
) + sen_offset
;
957 if (temp
->next
!= fl
) {
958 if(temp
->next
> block
) {
959 temp
->next
= newBlocks
+ (temp
->next
-blocks
-blockSize
) + sen_offset
;
962 } while ((temp
= temp
->next
) != fl
);
966 // Need to decrement blockId for regions in blocks after block
967 Region
*r
= lastRegion
;
969 if(r
->blockId
> region
->blockId
&& r
->blockId
!= kLargeItemBlockId
) {
970 r
->blockId
-= (blockSize
-sen_offset
);
975 blocksLen
= newBlocksLen
;
976 RemoveRegion(region
, release
);
978 // make sure we did everything correctly
980 ValidateHeapBlocks();
// Debug validator for the blocks table: walks every HeapBlock and asserts
// region blockIds match, neighbor size/sizePrevious links agree, every
// non-contiguous boundary carries a sentinel (baseAddr==NULL, size==0), and
// the walk terminates exactly at blocksLen.
// NOTE(review): corrupted extraction (loop braces and the block-advance
// statement are missing lines); code left byte-identical, comments only.
983 void GCHeap::ValidateHeapBlocks()
986 // iterate through HeapBlocks making sure:
987 // non-contiguous regions have a sentinel
988 HeapBlock
*block
= blocks
;
989 while(block
- blocks
< (intptr_t)blocksLen
) {
990 Region
*r
= AddrToRegion(block
->baseAddr
);
991 if(r
&& r
->baseAddr
== block
->baseAddr
)
992 GCAssert(r
->blockId
== (size_t)(block
-blocks
));
994 HeapBlock
*next
= NULL
;
996 next
= block
+ block
->size
;
997 GCAssert(next
- blocks
< (intptr_t)blocksLen
);
998 GCAssert(next
->sizePrevious
== block
->size
);
1000 HeapBlock
*prev
= NULL
;
1001 if(block
->sizePrevious
) {
1002 prev
= block
- block
->sizePrevious
;
1003 GCAssert(prev
- blocks
>= 0);
1004 GCAssert(prev
->size
== block
->sizePrevious
);
1005 } else if(block
!= blocks
) {
1006 // I have no prev and I'm not the first, check sentinel
1007 HeapBlock
*sentinel
= block
-1;
1008 GCAssert(sentinel
->baseAddr
== NULL
);
1009 GCAssert(sentinel
->size
== 0);
1010 GCAssert(sentinel
->sizePrevious
!= 0);
1012 if(block
->baseAddr
) {
1014 GCAssert(block
->baseAddr
== prev
->baseAddr
+ (kBlockSize
* prev
->size
));
1016 // we should always end on a sentinel
1017 GCAssert(next
- blocks
< (int)blocksLen
);
1019 // block is a sentinel
1020 GCAssert(block
->size
== 0);
1021 // FIXME: the following asserts are firing and we need to understand why, could be bugs
1022 // make sure last block ends at commitTop
1023 Region
*prevRegion
= AddrToRegion(prev
->baseAddr
+ (prev
->size
*kBlockSize
) - 1);
1024 GCAssert(prev
->baseAddr
+ (prev
->size
*kBlockSize
) == prevRegion
->commitTop
);
1026 // either we've reached the end or the next isn't a sentinel
1027 GCAssert(block
- blocks
== (intptr_t)blocksLen
|| block
->size
!= 0);
1030 GCAssert(block
- blocks
== (intptr_t)blocksLen
);
// Maps an address to the Region containing it ([baseAddr, reserveTop)) by
// linearly scanning the region list from lastRegion backwards; presumably
// returns NULL when no region matches (the return lines are missing from
// this corrupted extraction -- TODO confirm).
1034 GCHeap::Region
*GCHeap::AddrToRegion(const void *item
) const
1036 // Linear search of regions list to find this address.
1037 // The regions list should usually be pretty short.
1038 for (Region
*region
= lastRegion
;
1040 region
= region
->prev
)
1042 if (item
>= region
->baseAddr
&& item
< region
->reserveTop
) {
// Maps an address to its HeapBlock via the owning region's blockId plus the
// block offset within the region. Large allocations (kLargeItemBlockId) have
// no HeapBlock; that early-out's return is among this extraction's missing
// lines. Code left byte-identical, comments only.
1049 GCHeap::HeapBlock
* GCHeap::AddrToBlock(const void *item
) const
1051 Region
*region
= AddrToRegion(item
);
1053 if(region
->blockId
== kLargeItemBlockId
)
1055 size_t index
= ((char*)item
- region
->baseAddr
) / kBlockSize
;
1056 HeapBlock
*b
= blocks
+ region
->blockId
+ index
;
// The computed entry must actually span the queried address.
1057 GCAssert(item
>= b
->baseAddr
&& item
< b
->baseAddr
+ b
->size
* GCHeap::kBlockSize
);
// Thread-safe size query for a block-aligned address: takes the heap lock,
// resolves the HeapBlock, and for large allocations falls back to
// LargeAllocSize via the region's kLargeItemBlockId marker.
// NOTE(review): corrupted extraction (normal-block return missing); code
// left byte-identical, comments only.
1063 size_t GCHeap::SafeSize(const void *item
)
1065 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
// Callers must pass a block-aligned pointer.
1066 GCAssert((uintptr_t(item
) & kOffsetMask
) == 0);
1067 HeapBlock
*block
= AddrToBlock(item
);
1070 Region
*r
= AddrToRegion(item
);
1071 if(r
&& r
->blockId
== kLargeItemBlockId
)
1072 return LargeAllocSize(item
);
// NOTE(review): corrupted extraction (braces missing); code unchanged.
1076 // Return the number of blocks of slop at the beginning of an object
1077 // starting at baseAddr for the given alignment. Alignment is a
1078 // number of blocks and must be a power of 2. baseAddr must
1079 // point to the beginning of a block.
1081 static inline size_t alignmentSlop(char* baseAddr
, size_t alignment
)
// (alignment - (blockIndex mod alignment)) mod alignment, via bit masks.
1083 return (alignment
- (size_t)(((uintptr_t)baseAddr
>> GCHeap::kBlockShift
) & (alignment
- 1))) & (alignment
- 1);
// Allocates a large item directly from the OS (virtual memory required):
// reserves sizeInBytes, and if the reservation isn't block-aligned for the
// requested alignment, re-reserves with (alignment-1) extra blocks of slack,
// commits only the aligned span, records a region marked kLargeItemBlockId,
// and updates the large-allocation accounting.
// NOTE(review): corrupted extraction -- failure-path returns and the
// NewRegion call head are missing lines; code left byte-identical.
1086 void *GCHeap::LargeAlloc(size_t size
, size_t alignment
)
1088 GCAssert(config
.useVirtualMemory
);
1090 size_t sizeInBytes
= size
* kBlockSize
;
1092 if(!EnsureFreeRegion(true))
1095 char* addr
= (char*)VMPI_reserveMemoryRegion(NULL
, sizeInBytes
);
1100 size_t unalignedSize
= sizeInBytes
;
1102 if(alignmentSlop(addr
, alignment
) != 0) {
// First try wasn't aligned: give it back and over-reserve to guarantee an
// aligned sub-span exists.
1103 VMPI_releaseMemoryRegion(addr
, sizeInBytes
);
1104 unalignedSize
= sizeInBytes
+ (alignment
-1) * kBlockSize
;
1105 addr
= (char*)VMPI_reserveMemoryRegion(NULL
, unalignedSize
);
1110 char *alignedAddr
= addr
+ alignmentSlop(addr
, alignment
) * kBlockSize
;
1111 if(!VMPI_commitMemory(alignedAddr
, sizeInBytes
)) {
1112 VMPI_releaseMemoryRegion(addr
, sizeInBytes
);
1116 // Note that we don't actually track the beginning of the committed region
1117 // LargeFree doesn't need it.
1119 addr
+ unalignedSize
, // reserveTop
1120 alignedAddr
+ sizeInBytes
, // commitTop
1122 largeAllocs
+= size
;
1123 CheckForNewMaxTotalHeapSize();
// Frees a LargeAlloc item: updates accounting, then releases the *entire*
// reserved region (which may start below item due to alignment slack) and
// removes the region record.
// NOTE(review): corrupted extraction (braces missing); code unchanged.
1128 void GCHeap::LargeFree(const void *item
)
1130 GCAssert(config
.useVirtualMemory
);
1132 size_t size
= LargeAllocSize(item
);
1133 largeAllocs
-= size
;
1134 Region
*r
= AddrToRegion(item
);
1135 // Must use r->baseAddr which may be less than item due to alignment,
1136 // and we must calculate full size
1137 VMPI_releaseMemoryRegion(r
->baseAddr
, r
->reserveTop
- r
->baseAddr
);
1138 RemoveRegion(r
, false);
// Free-list search: starting at the size-appropriate list, prefers a single
// committed block big enough for size plus alignment slop; failing that (and
// when using virtual memory) looks for a run of free decommitted+committed
// neighbors that together are large enough, to be recommitted as one block
// by CreateCommittedBlock.
// NOTE(review): corrupted extraction -- loop braces, freelist advance and
// final return are missing lines; code left byte-identical, comments only.
1141 GCHeap::HeapBlock
* GCHeap::AllocBlock(size_t size
, bool& zero
, size_t alignment
)
1143 uint32_t startList
= GetFreeListIndex(size
);
1144 HeapBlock
*freelist
= &freelists
[startList
];
1146 HeapBlock
*decommittedSuitableBlock
= NULL
;
1148 // Search for a big enough block in the free lists
1150 for (uint32_t i
= startList
; i
< kNumFreeLists
; i
++)
1152 HeapBlock
*block
= freelist
;
1153 while ((block
= block
->next
) != freelist
)
1155 // Prefer a single committed block that is at least large enough.
1157 if (block
->size
>= size
+ alignmentSlop(block
->baseAddr
, alignment
) && block
->committed
) {
1158 RemoveFromList(block
);
1159 return AllocCommittedBlock(block
, size
, zero
, alignment
);
1162 // Look for a sequence of decommitted and committed blocks that together would
1163 // be large enough, in case a single committed block can't be found.
1165 if(config
.useVirtualMemory
&& decommittedSuitableBlock
== NULL
&& !block
->committed
)
1167 // Note, 'block' should be invariant throughout this scope, it's the block
1168 // whose successors and predecessors we're inspecting
1170 GCAssert(!block
->inUse());
1172 size_t totalSize
= block
->size
;
1173 HeapBlock
*firstFree
= block
;
// Slop must be recomputed every time firstFree moves.
1174 size_t firstSlop
= alignmentSlop(firstFree
->baseAddr
, alignment
);
1176 // Coalesce with predecessors
1177 while(totalSize
< size
+ firstSlop
&& firstFree
->sizePrevious
!= 0)
1179 HeapBlock
*prevBlock
= firstFree
- firstFree
->sizePrevious
;
// Stop at in-use blocks and sentinels (size==0).
1180 if(prevBlock
->inUse() || prevBlock
->size
== 0)
1182 totalSize
+= prevBlock
->size
;
1183 firstFree
= prevBlock
;
1184 firstSlop
= alignmentSlop(firstFree
->baseAddr
, alignment
);
1187 // Coalesce with successors
1188 HeapBlock
*nextBlock
= block
+ block
->size
;
1189 while (totalSize
< size
+ firstSlop
&& !(nextBlock
->inUse() || nextBlock
->size
== 0)) {
1190 totalSize
+= nextBlock
->size
;
1191 nextBlock
= nextBlock
+ nextBlock
->size
;
1194 // Keep it if it's large enough
1195 if(totalSize
>= size
+ firstSlop
)
1196 decommittedSuitableBlock
= firstFree
;
1202 // We only get here if we couldn't find a single committed large enough block.
1204 if (decommittedSuitableBlock
!= NULL
)
1205 return AllocCommittedBlock(CreateCommittedBlock(decommittedSuitableBlock
, size
, alignment
),
1213 GCHeap::HeapBlock
* GCHeap::AllocCommittedBlock(HeapBlock
* block
, size_t size
, bool& zero
, size_t alignment
)
1215 GCAssert(block
->committed
);
1216 GCAssert(block
->size
>= size
);
1217 GCAssert(block
->inUse());
1219 size_t slop
= alignmentSlop(block
->baseAddr
, alignment
);
1223 HeapBlock
*oldBlock
= block
;
1224 block
= Split(block
, slop
);
1225 AddToFreeList(oldBlock
);
1226 GCAssert(alignmentSlop(block
->baseAddr
, alignment
) == 0);
1227 GCAssert(block
->size
>= size
);
1230 if(block
->size
> size
)
1232 HeapBlock
*newBlock
= Split(block
, size
);
1233 AddToFreeList(newBlock
);
1238 zero
= block
->dirty
&& zero
;
1245 const uint32_t* base_u
;
1247 base_c
= block
->baseAddr
;
1248 GCAssert(*base_u
== 0);
1254 // Turn a sequence of committed and uncommitted blocks into a single committed
1255 // block that's at least large enough to satisfy the request.
1257 GCHeap::HeapBlock
* GCHeap::CreateCommittedBlock(HeapBlock
* block
, size_t size
, size_t alignment
)
1259 RemoveFromList(block
);
1261 // We'll need to allocate extra space to account for the space that will
1262 // later be removed from the start of the block.
1264 size
+= alignmentSlop(block
->baseAddr
, alignment
);
1266 // If the first block is too small then coalesce it with the following blocks
1267 // to create a block that's large enough. Some parts of the total block may
1268 // already be committed. If the platform allows it we commit the entire
1269 // range with one call even if parts were committed before, on the assumption
1270 // that that is faster than several commit() calls, one for each decommitted
1271 // block. (We have no current data to support that; now == 201-03-19.)
1273 if(block
->size
< size
)
1275 size_t amountRecommitted
= block
->committed
? 0 : block
->size
;
1276 bool dirty
= block
->dirty
;
1278 // The first block needs to be committed when sloppyCommit is disabled.
1279 if(!config
.sloppyCommit
&& !block
->committed
)
1282 while(block
->size
< size
)
1284 // Coalesce the next block into this one.
1286 HeapBlock
*nextBlock
= block
+ block
->size
;
1287 RemoveFromList(nextBlock
);
1289 if (nextBlock
->committed
)
1290 dirty
= dirty
|| nextBlock
->dirty
;
1293 if (block
->size
+ nextBlock
->size
>= size
) // Last block?
1294 PruneDecommittedBlock(nextBlock
, block
->size
+ nextBlock
->size
, size
);
1296 amountRecommitted
+= nextBlock
->size
;
1298 if (!config
.sloppyCommit
)
1302 block
->size
+= nextBlock
->size
;
1304 nextBlock
->size
= 0;
1305 nextBlock
->baseAddr
= 0;
1306 nextBlock
->sizePrevious
= 0;
1309 (block
+ block
->size
)->sizePrevious
= block
->size
;
1311 GCAssert(amountRecommitted
> 0);
1313 if (config
.sloppyCommit
)
1315 block
->dirty
= dirty
;
1319 PruneDecommittedBlock(block
, block
->size
, size
);
1323 GCAssert(block
->size
>= size
&& block
->committed
);
1330 // If the tail of a coalesced block is decommitted and committing it creates
1331 // a block that's too large for the request then we may wish to split the tail
1332 // before committing it in order to avoid committing memory we won't need.
1334 // 'available' is the amount of memory available including the memory in 'block',
1335 // and 'request' is the amount of memory required.
1337 void GCHeap::PruneDecommittedBlock(HeapBlock
* block
, size_t available
, size_t request
)
1339 GCAssert(available
>= request
);
1340 GCAssert(!block
->committed
);
1342 size_t toCommit
= request
> kMinHeapIncrement
? request
: kMinHeapIncrement
;
1343 size_t leftOver
= available
- request
;
1345 if (available
> toCommit
&& leftOver
> 0)
1347 HeapBlock
*newBlock
= Split(block
, block
->size
- leftOver
);
1348 AddToFreeList(newBlock
);
1352 GCHeap::HeapBlock
*GCHeap::Split(HeapBlock
*block
, size_t size
)
1354 GCAssert(block
->size
> size
);
1355 HeapBlock
*newBlock
= block
+ size
;
1356 newBlock
->Init(block
->baseAddr
+ kBlockSize
* size
, block
->size
- size
, block
->dirty
);
1357 newBlock
->sizePrevious
= size
;
1358 newBlock
->committed
= block
->committed
;
1361 // Update sizePrevious in block after that
1362 HeapBlock
*nextBlock
= newBlock
+ newBlock
->size
;
1363 nextBlock
->sizePrevious
= newBlock
->size
;
1368 void GCHeap::Commit(HeapBlock
*block
)
1370 GCAssert(config
.sloppyCommit
|| !block
->committed
);
1372 if(!VMPI_commitMemory(block
->baseAddr
, block
->size
* kBlockSize
))
1376 if(config
.verbose
) {
1377 GCLog("recommitted %d pages\n", block
->size
);
1380 numDecommitted
-= block
->size
;
1381 block
->committed
= true;
1382 block
->dirty
= VMPI_areNewPagesDirty();
1386 // Non-debug version in GCHeap.h
1387 void GCHeap::CheckFreelist()
1389 HeapBlock
*freelist
= freelists
;
1390 for (uint32_t i
= 0; i
< kNumFreeLists
; i
++)
1392 HeapBlock
*block
= freelist
;
1393 while((block
= block
->next
) != freelist
)
1395 GCAssert(block
!= block
->next
);
1396 GCAssert(block
!= block
->next
->next
|| block
->next
== freelist
);
1398 // Coalescing is eager so no block on the list should have adjacent blocks
1399 // that are also on the free list and in the same committed state
1401 if(block
->sizePrevious
)
1403 HeapBlock
*prev
= block
- block
->sizePrevious
;
1404 GCAssert(block
->sizePrevious
== prev
->size
);
1405 GCAssert(prev
->inUse() || prev
->size
== 0 || prev
->committed
!= block
->committed
);
1408 HeapBlock
*next
= block
+ block
->size
;
1409 GCAssert(next
->inUse() || next
->size
== 0 || next
->committed
!= block
->committed
);
1415 // Debugging code to find problems with block/region layout
1416 // This code is slow, but can be useful for tracking down issues
1417 // It verifies that the memory for each block corresponds to one or more regions
1418 // and that each region points to a valid starting block
1419 Region
* r
= lastRegion
;
1423 for(block_idx
= 0; block_idx
< blocksLen
; ++block_idx
){
1424 HeapBlock
* b
= blocks
+ block_idx
;
1429 int contig_size
= 0;
1433 if(b
->baseAddr
>= r
->baseAddr
&& b
->baseAddr
< r
->reserveTop
) {
1434 // starts in this region
1435 char* end
= b
->baseAddr
+ b
->size
*kBlockSize
;
1436 if(end
> (r
->reserveTop
+ contig_size
) ){
1437 GCLog("error, block %d %p %d did not find a matching region\n", block_idx
, b
->baseAddr
, b
->size
);
1438 GCLog("Started in region %p - %p, contig size: %d\n", r
->baseAddr
, r
->reserveTop
, contig_size
);
1443 else if( r
->prev
&& r
->prev
->reserveTop
==r
->baseAddr
){
1444 contig_size
+=r
->reserveTop
- r
->baseAddr
;
1456 if(!blocks
[r
->blockId
].size
){
1457 for( int i
= r
->blockId
-1; i
>= 0 ; --i
)
1458 if( blocks
[i
].size
){
1459 //Look for spanning blocks
1460 if( ((blocks
[i
].baseAddr
+ blocks
[i
].size
*kBlockSize
) <= r
->baseAddr
) ) {
1461 GCLog("Invalid block id for region %p-%p %d\n", r
->baseAddr
, r
->reserveTop
, i
);
1474 GCLog("%p - %p\n", r
->baseAddr
, r
->reserveTop
);
1477 for(int b
= 0; b
< blocksLen
; ++b
){
1480 GCLog("%d %p %d\n", b
, blocks
[b
].baseAddr
, blocks
[b
].size
);
1488 bool GCHeap::BlocksAreContiguous(void *item1
, void *item2
)
1490 Region
*r1
= AddrToRegion(item1
);
1491 Region
*r2
= AddrToRegion(item2
);
1492 return r1
== r2
|| r1
->reserveTop
== r2
->baseAddr
;
1495 void GCHeap::AddToFreeList(HeapBlock
*block
, HeapBlock
* pointToInsert
)
1499 block
->next
= pointToInsert
;
1500 block
->prev
= pointToInsert
->prev
;
1501 block
->prev
->next
= block
;
1502 pointToInsert
->prev
= block
;
1507 void GCHeap::AddToFreeList(HeapBlock
* block
, bool makeDirty
)
1509 GCAssert(block
->size
!= 0);
1511 // Try to coalesce a committed block with its committed non-sentinel predecessor
1512 if(block
->committed
&& block
->sizePrevious
)
1514 HeapBlock
*prevBlock
= block
- block
->sizePrevious
;
1515 GCAssert(prevBlock
->size
> 0 || !prevBlock
->committed
);
1517 if (!prevBlock
->inUse() && prevBlock
->committed
)
1519 // Remove predecessor block from free list
1520 RemoveFromList(prevBlock
);
1522 // Increase size of predecessor block
1523 prevBlock
->size
+= block
->size
;
1526 block
->sizePrevious
= 0;
1527 block
->baseAddr
= 0;
1530 makeDirty
= makeDirty
|| block
->dirty
;
1534 // Try to coalesce a committed block with its committed non-sentinel successor
1535 if (block
->committed
)
1537 HeapBlock
*nextBlock
= block
+ block
->size
;
1538 // This is not correct - sentinels are not necessarily committed. We
1539 // may or may not want to fix that.
1540 //GCAssert(nextBlock->size > 0 || !nextBlock->committed);
1542 if (!nextBlock
->inUse() && nextBlock
->committed
) {
1543 // Remove successor block from free list
1544 RemoveFromList(nextBlock
);
1546 // Increase size of current block
1547 block
->size
+= nextBlock
->size
;
1548 nextBlock
->size
= 0;
1549 nextBlock
->baseAddr
= 0;
1550 nextBlock
->sizePrevious
= 0;
1551 makeDirty
= makeDirty
|| nextBlock
->dirty
;
1555 // Update sizePrevious in the next block
1556 HeapBlock
*nextBlock
= block
+ block
->size
;
1557 nextBlock
->sizePrevious
= block
->size
;
1559 block
->dirty
= block
->dirty
|| makeDirty
;
1561 // Add the coalesced block to the right free list, in the right
1562 // position. Free lists are ordered by increasing block size.
1564 int index
= GetFreeListIndex(block
->size
);
1565 HeapBlock
*freelist
= &freelists
[index
];
1566 HeapBlock
*pointToInsert
= freelist
;
1568 // If the block size is below kUniqueThreshold then its free list
1569 // will have blocks of only one size and no search is needed.
1571 if (block
->size
>= kUniqueThreshold
) {
1572 while ((pointToInsert
= pointToInsert
->next
) != freelist
) {
1573 if (pointToInsert
->size
>= block
->size
) {
1579 AddToFreeList(block
, pointToInsert
);
1583 void GCHeap::FreeBlock(HeapBlock
*block
)
1585 GCAssert(block
->inUse());
1588 // trash it. fb == free block
1589 if (!RUNNING_ON_VALGRIND
)
1590 VMPI_memset(block
->baseAddr
, uint8_t(MMFreedPoison
), block
->size
* kBlockSize
);
1593 AddToFreeList(block
, true);
1596 void GCHeap::CheckForNewMaxTotalHeapSize()
1598 // The guard on instance being non-NULL is a hack, to be fixed later (now=2009-07-20).
1599 // Some VMPI layers (WinMo is at least one of them) try to grab the GCHeap instance to get
1600 // at the map of private pages. But the GCHeap instance is not available during the initial
1601 // call to ExpandHeap. So sidestep that problem here.
1603 if (instance
!= NULL
) {
1604 // GetTotalHeapSize is probably fairly cheap; even so this strikes me
1605 // as a bit of a hack.
1606 size_t heapSizeNow
= GetTotalHeapSize() * kBlockSize
;
1607 if (heapSizeNow
> maxTotalHeapSize
) {
1608 maxTotalHeapSize
= heapSizeNow
;
1609 #ifdef MMGC_POLICY_PROFILING
1610 maxPrivateMemory
= VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize();
1616 bool GCHeap::ExpandHeap( size_t askSize
)
1618 bool bRetVal
= ExpandHeapInternal(askSize
);
1619 CheckForNewMaxTotalHeapSize();
1623 bool GCHeap::HardLimitExceeded(size_t additionalAllocationAmt
)
1625 return GetTotalHeapSize() + externalPressure
/kBlockSize
+ additionalAllocationAmt
> config
.heapLimit
;
1628 bool GCHeap::SoftLimitExceeded(size_t additionalAllocationAmt
)
1630 if (config
.heapSoftLimit
== 0) return false;
1631 return GetTotalHeapSize() + externalPressure
/kBlockSize
+ additionalAllocationAmt
> config
.heapSoftLimit
;
// Round _s up to the next multiple of _inc (integral operands, _inc > 0).
// Every macro parameter is parenthesized so that expression arguments such as
// roundUp(x + y, a + b) expand correctly; the previous expansion applied '/'
// and '+' directly to the raw tokens, silently miscomputing for compound
// arguments (e.g. roundUp(10, 2 + 3) expanded to ((10 + 2+3 - 1) / 2+3) * 2+3).
#define roundUp(_s, _inc) ((((_s) + (_inc) - 1) / (_inc)) * (_inc))
1636 bool GCHeap::ExpandHeapInternal(size_t askSize
)
1638 size_t size
= askSize
;
1641 // Turn this switch on to test bridging of contiguous
1643 bool test_bridging
= false;
1644 size_t defaultReserve
= test_bridging
? (size
+kMinHeapIncrement
) : kDefaultReserve
;
1646 const size_t defaultReserve
= kDefaultReserve
;
1649 char *baseAddr
= NULL
;
1650 char *newRegionAddr
= NULL
;
1651 size_t newRegionSize
= 0;
1652 bool contiguous
= false;
1653 size_t commitAvail
= 0;
1655 // Round up to the nearest kMinHeapIncrement
1656 size
= roundUp(size
, kMinHeapIncrement
);
1658 // when we allocate a new region the space needed for the HeapBlocks, if it won't fit
1659 // in existing space it must fit in new space so we may need to increase the new space
1661 HeapBlock
*newBlocks
= blocks
;
1663 if(blocksLen
!= 0 || // first time through just take what we need out of initialSize instead of adjusting
1664 config
.initialSize
== 0) // unless initializeSize is zero of course
1666 int extraBlocks
= 1; // for potential new sentinel
1667 if(nextRegion
== NULL
) {
1668 extraBlocks
++; // may need a new page for regions
1670 size_t curHeapBlocksSize
= blocks
? AddrToBlock(blocks
)->size
: 0;
1671 size_t newHeapBlocksSize
= numHeapBlocksToNumBlocks(blocksLen
+ size
+ extraBlocks
);
1673 // size is based on newSize and vice versa, loop to settle (typically one loop, sometimes two)
1674 while(newHeapBlocksSize
> curHeapBlocksSize
)
1676 // use askSize so HeapBlock's can fit in rounding slop
1677 size
= roundUp(askSize
+ newHeapBlocksSize
+ extraBlocks
, kMinHeapIncrement
);
1679 // tells us use new memory for blocks below
1682 // since newSize is based on size we have to repeat in case it changes
1683 curHeapBlocksSize
= newHeapBlocksSize
;
1684 newHeapBlocksSize
= numHeapBlocksToNumBlocks(blocksLen
+ size
+ extraBlocks
);
1688 // At this point we have adjusted/calculated the final size that would need to be committed.
1689 // We need to check that against the hardlimit to see if we are going to go above it.
1690 if (HardLimitExceeded(size
))
1693 if(config
.useVirtualMemory
)
1695 Region
*region
= lastRegion
;
1698 commitAvail
= (int)((region
->reserveTop
- region
->commitTop
) / kBlockSize
);
1700 // Can this request be satisfied purely by committing more memory that
1701 // is already reserved?
1702 if (size
<= commitAvail
) {
1703 if (VMPI_commitMemory(region
->commitTop
, size
* kBlockSize
))
1706 baseAddr
= region
->commitTop
;
1708 // check for continuity, we can only be contiguous with the end since
1709 // we don't have a "block insert" facility
1710 HeapBlock
*last
= &blocks
[blocksLen
-1] - blocks
[blocksLen
-1].sizePrevious
;
1711 contiguous
= last
->baseAddr
+ last
->size
* kBlockSize
== baseAddr
;
1713 // Update the commit top.
1714 region
->commitTop
+= size
*kBlockSize
;
1716 // Go set up the block list.
1721 // If we can't commit memory we've already reserved,
1722 // no other trick is going to work. Fail.
1727 // Try to reserve a region contiguous to the last region.
1729 // - Try for the "default reservation size" if it's larger than
1730 // the requested block.
1731 if (defaultReserve
> size
) {
1732 newRegionAddr
= (char*) VMPI_reserveMemoryRegion(region
->reserveTop
,
1733 defaultReserve
* kBlockSize
);
1734 newRegionSize
= defaultReserve
;
1737 // - If the default reservation size didn't work or isn't big
1738 // enough, go for the exact amount requested, minus the
1739 // committable space in the current region.
1740 if (newRegionAddr
== NULL
) {
1741 newRegionAddr
= (char*) VMPI_reserveMemoryRegion(region
->reserveTop
,
1742 (size
- commitAvail
)*kBlockSize
);
1743 newRegionSize
= size
- commitAvail
;
1745 // check for contiguity
1746 if(newRegionAddr
&& newRegionAddr
!= region
->reserveTop
) {
1747 // we can't use this space since we need commitAvail from prev region to meet
1748 // the size requested, toss it
1749 ReleaseMemory(newRegionAddr
, newRegionSize
*kBlockSize
);
1750 newRegionAddr
= NULL
;
1755 if (newRegionAddr
== region
->reserveTop
) // we'll use the region below as a separate region if its not contiguous
1757 // We were able to reserve some space.
1759 // Commit available space from the existing region.
1760 if (commitAvail
!= 0) {
1761 if (!VMPI_commitMemory(region
->commitTop
, commitAvail
* kBlockSize
))
1763 // We couldn't commit even this space. We're doomed.
1764 // Un-reserve the space we just reserved and fail.
1765 ReleaseMemory(newRegionAddr
, newRegionSize
);
1770 // Commit needed space from the new region.
1771 if (!VMPI_commitMemory(newRegionAddr
, (size
- commitAvail
) * kBlockSize
))
1773 // We couldn't commit this space. We can't meet the
1774 // request. Un-commit any memory we just committed,
1775 // un-reserve any memory we just reserved, and fail.
1776 if (commitAvail
!= 0) {
1777 VMPI_decommitMemory(region
->commitTop
,
1778 commitAvail
* kBlockSize
);
1780 ReleaseMemory(newRegionAddr
,
1781 (size
-commitAvail
)*kBlockSize
);
1785 // We successfully reserved a new contiguous region
1786 // and committed the memory we need. Finish up.
1787 baseAddr
= region
->commitTop
;
1788 region
->commitTop
= lastRegion
->reserveTop
;
1790 // check for continuity, we can only be contiguous with the end since
1791 // we don't have a "block insert" facility
1792 HeapBlock
*last
= &blocks
[blocksLen
-1] - blocks
[blocksLen
-1].sizePrevious
;
1793 contiguous
= last
->baseAddr
+ last
->size
* kBlockSize
== baseAddr
;
1799 // We were unable to allocate a contiguous region, or there
1800 // was no existing region to be contiguous to because this
1801 // is the first-ever expansion. Allocate a non-contiguous region.
1803 // Don't use any of the available space in the current region.
1806 // - Go for the default reservation size unless the requested
1808 if (newRegionAddr
== NULL
&& size
< defaultReserve
) {
1809 newRegionAddr
= (char*) VMPI_reserveMemoryRegion(NULL
,
1810 defaultReserve
*kBlockSize
);
1811 newRegionSize
= defaultReserve
;
1814 // - If that failed or the requested size is bigger than default,
1815 // go for the requested size exactly.
1816 if (newRegionAddr
== NULL
) {
1817 newRegionAddr
= (char*) VMPI_reserveMemoryRegion(NULL
,
1819 newRegionSize
= size
;
1822 // - If that didn't work, give up.
1823 if (newRegionAddr
== NULL
) {
1827 // - Try to commit the memory.
1828 if (VMPI_commitMemory(newRegionAddr
,
1829 size
*kBlockSize
) == 0)
1831 // Failed. Un-reserve the memory and fail.
1832 ReleaseMemory(newRegionAddr
, newRegionSize
*kBlockSize
);
1836 // If we got here, we've successfully allocated a
1837 // non-contiguous region.
1838 baseAddr
= newRegionAddr
;
1844 // Allocate the requested amount of space as a new region.
1845 newRegionAddr
= (char*)VMPI_allocateAlignedMemory(size
* kBlockSize
);
1846 baseAddr
= newRegionAddr
;
1847 newRegionSize
= size
;
1849 // If that didn't work, give up.
1850 if (newRegionAddr
== NULL
) {
1857 // If we were able to allocate a contiguous block, remove
1858 // the old top sentinel.
1863 // Expand the block list.
1864 size_t newBlocksLen
= blocksLen
+ size
;
1866 // Add space for the "top" sentinel
1870 newBlocks
= (HeapBlock
*)(void *)baseAddr
;
1873 // Copy all the existing blocks.
1874 if (blocks
&& blocks
!= newBlocks
) {
1875 VMPI_memcpy(newBlocks
, blocks
, blocksLen
* sizeof(HeapBlock
));
1877 // Fix up the prev/next pointers of each freelist.
1878 HeapBlock
*freelist
= freelists
;
1879 for (uint32_t i
=0; i
<kNumFreeLists
; i
++) {
1880 HeapBlock
*temp
= freelist
;
1882 if (temp
->prev
!= freelist
) {
1883 temp
->prev
= newBlocks
+ (temp
->prev
-blocks
);
1885 if (temp
->next
!= freelist
) {
1886 temp
->next
= newBlocks
+ (temp
->next
-blocks
);
1888 } while ((temp
= temp
->next
) != freelist
);
1894 // Create a single free block for the new space,
1895 // and add it to the free list.
1896 HeapBlock
*block
= newBlocks
+blocksLen
;
1897 block
->Init(baseAddr
, size
, newPagesDirty());
1899 // link up contiguous blocks
1900 if(blocksLen
&& contiguous
)
1902 // search backwards for first real block
1903 HeapBlock
*b
= &blocks
[blocksLen
-1];
1907 GCAssert(b
>= blocks
);
1909 block
->sizePrevious
= b
->size
;
1910 GCAssert((block
- block
->sizePrevious
)->size
== b
->size
);
1913 // if baseAddr was used for HeapBlocks split
1914 if((char*)newBlocks
== baseAddr
)
1916 size_t numBlocksNeededForHeapBlocks
= numHeapBlocksToNumBlocks(newBlocksLen
);
1917 HeapBlock
*next
= Split(block
, numBlocksNeededForHeapBlocks
);
1918 // this space counts as used space
1919 numAlloc
+= numBlocksNeededForHeapBlocks
;
1923 // get space for region allocations
1924 if(nextRegion
== NULL
) {
1925 nextRegion
= (Region
*)(void *)block
->baseAddr
;
1926 HeapBlock
*next
= Split(block
, 1);
1927 // this space counts as used space
1933 // Save off and add after initializing all blocks.
1934 HeapBlock
*newBlock
= block
;
1936 // Initialize the rest of the new blocks to empty.
1937 size_t freeBlockSize
= block
->size
;
1939 for (uint32_t i
=1; i
< freeBlockSize
; i
++) {
1944 // Fill in the sentinel for the top of the heap.
1947 block
->sizePrevious
= freeBlockSize
;
1949 AddToFreeList(newBlock
);
1951 // save for free'ing
1952 void *oldBlocks
= blocks
;
1955 blocksLen
= newBlocksLen
;
1957 // free old blocks space using new blocks (FreeBlock poisons blocks so can't use old blocks)
1958 if (oldBlocks
&& oldBlocks
!= newBlocks
) {
1959 HeapBlock
*oldBlocksHB
= AddrToBlock(oldBlocks
);
1960 numAlloc
-= oldBlocksHB
->size
;
1961 FreeBlock(oldBlocksHB
);
1964 // If we created a new region, save the base address so we can free later.
1965 if (newRegionAddr
) {
1966 /* The mergeContiguousRegions bit is broken, since we
1967 loop over all regions we may be contiguous with an
1968 existing older HeapBlock and we don't support inserting a
new address range arbitrarily into the HeapBlock
1970 array (contiguous regions must be contiguous heap
blocks vis-a-vis the region block id)
1973 config.mergeContiguousRegions) {
1974 lastRegion->reserveTop += newRegionSize*kBlockSize;
1975 lastRegion->commitTop +=
1976 (size-commitAvail)*kBlockSize;
1979 Region
*newRegion
= NewRegion(newRegionAddr
, // baseAddr
1980 newRegionAddr
+newRegionSize
*kBlockSize
, // reserve top
1981 newRegionAddr
+(size
-commitAvail
)*kBlockSize
, // commit top
1982 newBlocksLen
-(size
-commitAvail
)-1); // block id
1985 GCLog("reserved new region, %p - %p %s\n",
1986 newRegion
->baseAddr
,
1987 newRegion
->reserveTop
,
1988 contiguous
? "contiguous" : "non-contiguous");
1994 if(config
.verbose
) {
1995 GCLog("heap expanded by %d pages\n", size
);
1998 ValidateHeapBlocks();
2004 void GCHeap::RemoveRegion(Region
*region
, bool release
)
2006 Region
**next
= &lastRegion
;
2007 while(*next
!= region
)
2008 next
= &((*next
)->prev
);
2009 *next
= region
->prev
;
2011 ReleaseMemory(region
->baseAddr
,
2012 region
->reserveTop
-region
->baseAddr
);
2014 if(config
.verbose
) {
2015 GCLog("unreserved region 0x%p - 0x%p (commitTop: %p)\n", region
->baseAddr
, region
->reserveTop
, region
->commitTop
);
2021 void GCHeap::FreeAll()
2023 // Release all of the heap regions
2024 while (lastRegion
!= NULL
) {
2025 Region
*region
= lastRegion
;
2026 lastRegion
= lastRegion
->prev
;
2027 if(region
->blockId
== kLargeItemBlockId
) {
2028 // leaks can happen during abort
2029 GCAssertMsg(status
== kMemAbort
, "Allocation of large object not freed");
2030 VMPI_releaseMemoryRegion(region
->baseAddr
, region
->reserveTop
- region
->baseAddr
);
2032 ReleaseMemory(region
->baseAddr
,
2033 region
->reserveTop
-region
->baseAddr
);
2039 void GCHeap::AllocHook(const void *item
, size_t askSize
, size_t gotSize
)
2044 #ifdef MMGC_MEMORY_PROFILER
2049 profiler
->RecordAllocation(item
, askSize
, gotSize
);
2052 #ifdef MMGC_MEMORY_INFO
2053 DebugDecorate(item
, gotSize
);
2055 #ifdef AVMPLUS_SAMPLER
2056 avmplus::recordAllocationSample(item
, gotSize
);
2060 void GCHeap::FinalizeHook(const void *item
, size_t size
)
2062 (void)item
,(void)size
;
2063 #ifdef MMGC_MEMORY_PROFILER
2065 profiler
->RecordDeallocation(item
, size
);
2068 #ifdef AVMPLUS_SAMPLER
2069 avmplus::recordDeallocationSample(item
, size
);
2073 void GCHeap::FreeHook(const void *item
, size_t size
, int poison
)
2075 (void)poison
,(void)item
,(void)size
;
2076 #ifdef MMGC_MEMORY_INFO
2077 DebugFree(item
, poison
, size
, true);
2081 void GCHeap::PseudoFreeHook(const void *item
, size_t size
, int poison
)
2083 (void)poison
,(void)item
,(void)size
;
2084 #ifdef MMGC_MEMORY_INFO
2085 DebugFree(item
, poison
, size
, false);
2088 #endif // MMGC_HOOKS
2090 EnterFrame::EnterFrame() :
2093 m_abortUnwindList(NULL
),
2097 GCHeap
*heap
= GCHeap::GetGCHeap();
2098 EnterFrame
*ef
= m_previous
= heap
->GetEnterFrame();
2100 if(ef
&& ef
->Suspended()) {
2101 // propagate the active gc from the suspended frame
2102 m_gc
= ef
->GetActiveGC();
2105 if(ef
== NULL
|| ef
->Suspended()) {
2111 // this is the first thing we run after the Abort longjmp
2112 EnterFrame::~EnterFrame()
2115 GCHeap
*heap
= m_heap
;
2116 // this prevents us from doing multiple jumps in case leave results in more allocations
2122 void EnterFrame::UnwindAllObjects()
2124 while(m_abortUnwindList
)
2126 AbortUnwindObject
*previous
= m_abortUnwindList
;
2127 m_abortUnwindList
->Unwind();
2128 // The unwind call may remove the handler or may leave it on this list. If it leaves it, then make sure to advance the list,
2129 // otherwise, the list will automatically advance if it's removed.
2130 if (m_abortUnwindList
== previous
)
2132 m_abortUnwindList
= m_abortUnwindList
->next
;
2137 void EnterFrame::AddAbortUnwindObject(AbortUnwindObject
*obj
)
2139 GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(obj
), "obj can't be added to list twice!");
2140 // Push it on the front
2141 obj
->next
= m_abortUnwindList
;
2142 if (m_abortUnwindList
)
2144 m_abortUnwindList
->previous
= obj
;
2146 m_abortUnwindList
= obj
;
2149 void EnterFrame::RemoveAbortUnwindObject(AbortUnwindObject
*obj
)
2151 GCAssertMsg(obj
== m_abortUnwindList
|| obj
->previous
!= NULL
, "Object not in list");
2153 if (obj
== m_abortUnwindList
)
2155 m_abortUnwindList
= obj
->next
;
2158 if (obj
->previous
!= NULL
)
2160 (obj
->previous
)->next
= obj
->next
;
2162 if (obj
->next
!= NULL
)
2164 (obj
->next
)->previous
= obj
->previous
;
2168 obj
->previous
= NULL
;
2173 AbortUnwindObject::~AbortUnwindObject()
2175 GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(this), "RemoveAbortUnwindObject not called, dangling pointer in list.");
2179 bool EnterFrame::IsAbortUnwindObjectInList(AbortUnwindObject
*obj
)
2181 GCHeap
*heap
= GCHeap::GetGCHeap();
2183 if(heap
&& (frame
= heap
->GetEnterFrame()) != NULL
)
2185 AbortUnwindObject
*list
= frame
->m_abortUnwindList
;
2196 SuspendEnterFrame::SuspendEnterFrame() : m_ef(NULL
)
2198 GCHeap
*heap
= GCHeap::GetGCHeap();
2200 EnterFrame
*ef
= heap
->GetEnterFrame();
2208 SuspendEnterFrame::~SuspendEnterFrame()
2212 GCHeap
*heap
= GCHeap::GetGCHeap();
2213 GCAssertMsg(heap
->GetEnterFrame() == m_ef
, "EnterFrame's not unwound properly");
2214 if(heap
->GetStatus() == kMemAbort
)
2218 void GCHeap::SystemOOMEvent(size_t size
, int attempt
)
2220 if (attempt
== 0 && !statusNotificationBeingSent())
2221 SendFreeMemorySignal(size
/kBlockSize
+ 1);
2227 void GCHeap::SignalObjectTooLarge()
2229 GCLog("Implementation limit exceeded: attempting to allocate too-large object\n");
2230 GetGCHeap()->Abort();
2234 void GCHeap::SignalInconsistentHeapState(const char* reason
)
2236 GCAssert(!"Inconsistent heap state; aborting");
2237 GCLog("Inconsistent heap state: %s\n", reason
);
2238 GetGCHeap()->Abort();
2242 void GCHeap::SignalExternalAllocation(size_t nbytes
)
2244 GCHeap
* heap
= GetGCHeap();
2246 MMGC_LOCK_ALLOW_RECURSION(heap
->m_spinlock
, heap
->m_notificationThread
);
2248 heap
->externalPressure
+= nbytes
;
2250 heap
->CheckForMemoryLimitsExceeded();
2255 void GCHeap::SignalExternalDeallocation(size_t nbytes
)
2257 GCHeap
* heap
= GetGCHeap();
2259 MMGC_LOCK_ALLOW_RECURSION(heap
->m_spinlock
, heap
->m_notificationThread
);
2261 heap
->externalPressure
-= nbytes
;
2262 heap
->CheckForStatusReturnToNormal();
2266 void GCHeap::SignalExternalFreeMemory(size_t minimumBytesToFree
/*= kMaxObjectSize */)
2268 GCHeap
* heap
= GetGCHeap();
2269 GCAssertMsg(heap
!= NULL
, "GCHeap not valid!");
2271 MMGC_LOCK_ALLOW_RECURSION(heap
->m_spinlock
, heap
->m_notificationThread
);
2273 // When calling SendFreeMemorySignal with kMaxObjectSize it will try to release
2274 // as much memory as possible. Otherwise it interprets the parameter as number
2275 // of blocks to be freed. This function uses bytes instead. The value is converted
2276 // to blocks here, except when kMaxObjectSize has been passed in so that it will
2277 // still trigger freeing maximum amount of memory. The division may lose some
2278 // precision, but SendFreeMemorySignal adds one more block to the requested amount
2280 heap
->SendFreeMemorySignal((minimumBytesToFree
!= kMaxObjectSize
) ? minimumBytesToFree
/ GCHeap::kBlockSize
: minimumBytesToFree
);
2283 // This can *always* be called. It will clean up the state on the current thread
2284 // if appropriate, otherwise do nothing. It *must* be called by host code if the
2285 // host code jumps past an MMGC_ENTER instance. (The Flash player does that, in
2286 // some circumstances.)
2289 void GCHeap::SignalImminentAbort()
2291 if (instance
== NULL
)
2293 EnterFrame
* ef
= GetGCHeap()->GetEnterFrame();
2297 // We don't know if we're holding the lock but we can release it anyhow,
2298 // on the assumption that this operation will not cause problems if the
2299 // lock is not held or is held by another thread.
2301 // Release lock so we don't deadlock if exit or longjmp end up coming
2302 // back to GCHeap (all callers must have this lock).
2304 VMPI_lockRelease(&instance
->m_spinlock
);
2306 // If the current thread is holding a lock for a GC that's not currently active on the thread
2307 // then break the lock: the current thread is collecting in that GC, but the Abort has cancelled
2309 ef
->UnwindAllObjects();
2311 // Clear the enterFrame because we're jumping past MMGC_ENTER.
2312 GetGCHeap()->enterFrame
= NULL
;
2315 void GCHeap::Abort()
2318 EnterFrame
*ef
= enterFrame
;
2320 // If we hit abort, we need to turn m_oomHandling back on so that listeners are guaranteed to get this signal
2321 // We also need to set m_notoficationThread to NULL in case we hit abort while we were processing another memory status change
2322 m_oomHandling
= true;
2323 m_notificationThread
= NULL
;
2325 GCLog("error: out of memory\n");
2327 // release lock so we don't deadlock if exit or longjmp end up coming
2328 // back to GCHeap (all callers must have this lock)
2329 VMPI_lockRelease(&m_spinlock
);
2331 // Lock must not be held when we call VMPI_exit, deadlocks ensue on Linux
2332 if(config
.OOMExitCode
!= 0)
2334 VMPI_exit(config
.OOMExitCode
);
2339 // Guard against repeated jumps: ef->m_heap doubles as a flag. We go Abort->longjmp->~EnterFrame->Leave
2340 // and Leave calls StatusChangeNotify and the host code might do another allocation during shutdown
2341 // in which case we want to go to VMPI_abort instead. At that point m_heap will be NULL and the right
2343 if (ef
->m_heap
!= NULL
)
2345 ef
->UnwindAllObjects();
2346 VMPI_longjmpNoUnwind(ef
->jmpbuf
, 1);
2349 GCAssertMsg(false, "MMGC_ENTER missing or we allocated more memory trying to shutdown");
2353 void GCHeap::Enter(EnterFrame
*frame
)
2359 void GCHeap::Leave()
2362 MMGC_LOCK(m_spinlock
);
2364 if(status
== kMemAbort
&& !abortStatusNotificationSent
) {
2365 abortStatusNotificationSent
= true;
2366 StatusChangeNotify(kMemAbort
);
2372 // do this after StatusChangeNotify it affects ShouldNotEnter
2374 // need to check if enterFrame is valid, it might have been nulled out by SignalImminentAbort
2375 EnterFrame
* enter
= enterFrame
;
2377 enterFrame
= enter
->Previous();
2381 // last one out of the pool pulls the plug
2382 if(status
== kMemAbort
&& enterCount
== 0 && abortStatusNotificationSent
&& preventDestruct
== 0) {
2387 void GCHeap::log_percentage(const char *name
, size_t bytes
, size_t bytes_compare
)
2389 bytes_compare
= size_t((bytes
*100.0)/bytes_compare
);
2391 GCLog("%s %u (%.1fM) %u%%\n", name
, (unsigned int)(bytes
/ GCHeap::kBlockSize
), bytes
* 1.0 / (1024*1024), (unsigned int)(bytes_compare
));
2393 GCLog("%s %u (%uK) %u%%\n", name
, (unsigned int)(bytes
/ GCHeap::kBlockSize
), (unsigned int)(bytes
/ 1024), (unsigned int)(bytes_compare
));
// Logs a full memory report: private/OS pages, GCHeap totals, per-GC
// usage, and (unless the profiler is active) percentage "gross stats".
// NOTE(review): many original lines are missing from this extract (the
// embedded numbering skips), including loop braces and #else/#endif lines.
2397 void GCHeap::DumpMemoryInfo()
2399 MMGC_LOCK(m_spinlock
);
// Process-private resident bytes as reported by the OS.
2400 size_t priv
= VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize();
// Total bytes under GCHeap management.
2401 size_t mmgc
= GetTotalHeapSize() * GCHeap::kBlockSize
;
// Bytes handed to FixedMalloc (unmanaged allocations).
2402 size_t unmanaged
= GetFixedMalloc()->GetTotalSize() * GCHeap::kBlockSize
;
2403 size_t fixed_alloced
;
2404 size_t fixed_asksize
;
2405 GetFixedMalloc()->GetUsageInfo(fixed_asksize
, fixed_alloced
);
// Accumulators summed over all registered collectors.
2408 size_t gc_allocated_total
=0;
2409 size_t gc_ask_total
= 0;
2410 size_t gc_count
= 0;
2411 BasicListIterator
<GC
*> iter(gcManager
.gcs());
// Walk every registered GC and log / accumulate its usage.
2413 while((gc
= iter
.next()) != NULL
)
2415 #ifdef MMGC_MEMORY_PROFILER
2416 GCLog("[mem] GC 0x%p:%s\n", (void*)gc
, GetAllocationName(gc
));
2418 GCLog("[mem] GC 0x%p\n", (void*)gc
);
2420 gc
->DumpMemoryInfo();
2424 gc
->GetUsageInfo(ask
, allocated
);
2425 gc_ask_total
+= ask
;
2426 gc_allocated_total
+= allocated
;
2429 gc_total
+= gc
->GetNumBlocks() * kBlockSize
;
2432 #ifdef MMGC_MEMORY_PROFILER
2433 fixedMalloc
.DumpMemoryInfo();
2436 // Gross stats are not meaningful if the profiler is running, see bugzilla 490014.
2437 // Disabling their printing is just an expedient fix to avoid misleading data being
2438 // printed. There are other, more complicated, fixes we should adopt.
2440 GCLog("[mem] ------- gross stats -----\n");
2441 #ifdef MMGC_MEMORY_PROFILER
// Only print percentages when no profiler is attached (see note above).
2442 if (GCHeap::GetGCHeap()->GetProfiler() == NULL
)
// Each line is "bytes as a percentage of the second argument".
2445 log_percentage("[mem] private", priv
, priv
);
2446 log_percentage("[mem]\t mmgc", mmgc
, priv
);
2447 log_percentage("[mem]\t\t unmanaged", unmanaged
, priv
);
2448 log_percentage("[mem]\t\t managed", gc_total
, priv
);
2449 log_percentage("[mem]\t\t free", (size_t)GetFreeHeapSize() * GCHeap::kBlockSize
, priv
);
2450 log_percentage("[mem]\t other", priv
- mmgc
, priv
);
2451 log_percentage("[mem] \tunmanaged overhead ", unmanaged
-fixed_alloced
, unmanaged
);
2452 log_percentage("[mem] \tmanaged overhead ", gc_total
- gc_allocated_total
, gc_total
);
2453 #ifdef MMGC_MEMORY_PROFILER
// Ask-size vs. allocated-size wastage is only known with the profiler built in.
2456 log_percentage("[mem] \tunmanaged internal wastage", fixed_alloced
- fixed_asksize
, fixed_alloced
);
2457 log_percentage("[mem] \tmanaged internal wastage", gc_allocated_total
- gc_ask_total
, gc_allocated_total
);
2460 GCLog("[mem] number of collectors %u\n", unsigned(gc_count
));
2462 #ifdef MMGC_MEMORY_PROFILER
2464 GCLog("[mem] No gross stats available when profiler is enabled.\n");
2466 GCLog("[mem] -------- gross stats end -----\n");
2468 #ifdef MMGC_MEMORY_PROFILER
// Logs a run of `count` copies of character `c`; used by DumpHeapRep to
// draw the per-block heap map.
2477 void GCHeap::LogChar(char c
, size_t count
)
// Small runs use a stack buffer (`tmp`, declared on a missing original
// line); larger runs heap-allocate count+1 bytes for the NUL.
// NOTE(review): the VMPI_alloc failure check, the NUL termination, the
// GCLog call, and the matching VMPI_free are on original lines missing
// from this extract — confirm against the full file.
2480 char* buf
= count
< 100 ? tmp
: (char*)VMPI_alloc(count
+1);
2483 VMPI_memset(buf
, c
, count
);
// Dumps an ASCII picture of the heap: one line per region (sorted by base
// address) followed by a per-block map of free/committed/uncommitted.
// NOTE(review): this extract is lossy — loop braces, the region-count
// pass, and several branch bodies sit on missing original lines.
2491 void GCHeap::DumpHeapRep()
2493 Region
**regions
= NULL
;
2494 Region
*r
= lastRegion
;
// Legend for the output that follows.
2497 GCLog("Heap representation format: \n");
2498 GCLog("region base address - commitTop/reserveTop\n");
2499 GCLog("[0 == free, 1 == committed, - = uncommitted]*\n");
2501 // count and sort regions
2506 regions
= (Region
**) VMPI_alloc(sizeof(Region
*)*numRegions
);
// Bail silently if the scratch array cannot be allocated.
2507 if (regions
== NULL
)
// Insertion sort of the region list (newest first) into ascending
// base-address order.
2510 for(int i
=0; i
< numRegions
; i
++, r
= r
->prev
) {
2512 for(int j
=0; j
< i
; j
++) {
2513 if(r
->baseAddr
< regions
[j
]->baseAddr
) {
// NOTE(review): "®ions" below is a mis-encoding of "&regions"
// ("&reg" swallowed by an HTML-entity pass) — restore when re-flowing.
2514 memmove(®ions
[j
+1], ®ions
[j
], sizeof(Region
*) * (i
- j
));
2519 regions
[insert
] = r
;
// A block may span a region boundary; spanningBlock carries the
// tail of such a block into the next region's map.
2522 HeapBlock
*spanningBlock
= NULL
;
2523 for(int i
=0; i
< numRegions
; i
++)
2526 GCLog("0x%p - 0x%p/0x%p\n", r
->baseAddr
, r
->commitTop
, r
->reserveTop
);
2528 char *addr
= r
->baseAddr
;
// Sanity: a carried-over block must start before this region and
// extend into it.
2531 GCAssert(spanningBlock
->baseAddr
+ (spanningBlock
->size
* kBlockSize
) > r
->baseAddr
);
2532 GCAssert(spanningBlock
->baseAddr
< r
->baseAddr
);
2533 char *end
= spanningBlock
->baseAddr
+ (spanningBlock
->size
* kBlockSize
);
// Clip the spanning block's extent to this region.
2534 if(end
> r
->reserveTop
)
2535 end
= r
->reserveTop
;
2537 LogChar(spanningBlock
->inUse() ? '1' : '0', (end
- addr
)/kBlockSize
);
// Spanning block fully consumed inside this region.
2540 if(addr
== spanningBlock
->baseAddr
+ (spanningBlock
->size
* kBlockSize
))
2541 spanningBlock
= NULL
;
// Walk the committed portion block-descriptor by block-descriptor.
2544 while(addr
!= r
->commitTop
&& (hb
= AddrToBlock(addr
)) != NULL
) {
2545 GCAssert(hb
->size
!= 0);
2549 else if(hb
->committed
)
// Emit one map character per kBlockSize page of this HeapBlock,
// stopping early if the block runs past reserveTop.
2554 for(i
=0, n
=hb
->size
; i
< n
; i
++, addr
+= GCHeap::kBlockSize
) {
2555 if(addr
== r
->reserveTop
) {
// Whatever remains up to reserveTop is reserved-but-uncommitted.
2565 LogChar('-', (r
->reserveTop
- addr
) / kBlockSize
);
2572 #ifdef MMGC_MEMORY_PROFILER
// Creates the MemoryProfiler if profiling is enabled for this build/run;
// must only be called once (asserted below).
2575 void GCHeap::InitProfiler()
2577 GCAssert(IsProfilerInitialized() == false);
// MMGC_MEMORY_INFO builds force profiling on; otherwise the runtime
// decides via VMPI_isMemoryProfilingEnabled().
2580 #ifdef MMGC_MEMORY_INFO
2581 bool profilingEnabled
= true;
2583 bool profilingEnabled
= VMPI_isMemoryProfilingEnabled();
2585 if(profilingEnabled
)
2587 profiler
= new MemoryProfiler();
2591 #endif //MMGC_MEMORY_PROFILER
2593 #ifdef MMGC_MEMORY_PROFILER
2594 #ifdef MMGC_USE_SYSTEM_MALLOC
// Profiler hook for allocations made through the system allocator;
// records the allocation with the allocator-reported true size.
2596 void GCHeap::TrackSystemAlloc(void *addr
, size_t askSize
)
// Recursion-tolerant lock: the profiler itself may allocate.
2598 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
2599 if(!IsProfilerInitialized())
// NOTE(review): original lines 2600-2601 are missing here; they likely
// lazily initialize the profiler and guard on `profiler` — confirm.
2602 profiler
->RecordAllocation(addr
, askSize
, VMPI_size(addr
));
2605 void GCHeap::TrackSystemFree(void *addr
)
2607 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
2608 if(addr
&& profiler
)
2609 profiler
->RecordDeallocation(addr
, VMPI_size(addr
));
2612 #endif //MMGC_USE_SYSTEM_MALLOC
2613 #endif // MMGC_MEMORY_PROFILER
// Returns a chunk of address space to the OS, using the mechanism that
// matches how it was obtained (virtual-memory region vs. aligned malloc).
2615 void GCHeap::ReleaseMemory(char *address
, size_t size
)
2617 if(config
.useVirtualMemory
) {
2618 bool success
= VMPI_releaseMemoryRegion(address
, size
);
// NOTE(review): the lines consuming `success` (original 2619-2621,
// presumably an assert) and the else introducing the aligned-memory
// path are missing from this extract — confirm against the full file.
2622 VMPI_releaseAlignedMemory(address
);
2626 void GCManager::destroy()
2628 collectors
.Destroy();
2631 void GCManager::signalStartCollection(GC
* gc
)
2633 BasicListIterator
<GC
*> iter(collectors
);
2635 while((otherGC
= iter
.next()) != NULL
)
2636 otherGC
->policy
.signalStartCollection(gc
);
2639 void GCManager::signalEndCollection(GC
* gc
)
2641 BasicListIterator
<GC
*> iter(collectors
);
2643 while((otherGC
= iter
.next()) != NULL
)
2644 otherGC
->policy
.signalStartCollection(gc
);
2647 /* this method is the heart of the OOM system.
2648 its here that we call out to the mutator which may call
2649 back in to free memory or try to get more.
2651 Note! The caller needs to hold on to the m_spinlock before calling this!
// Asks registered OOM callbacks to free memory until at least
// minimumBlocksToFree blocks have been recovered or the callbacks
// are exhausted. Caller holds m_spinlock (see note above).
2654 void GCHeap::SendFreeMemorySignal(size_t minimumBlocksToFree
)
2656 // If we're already in the process of sending out memory notifications, don't bother verifying now.
2657 // Also, we only want to send the "free memory" signal while our memory is in a normal state. Once
2658 // we've entered softLimit or abort state, we want to allow the softlimit or abort processing to return
2659 // the heap to normal before continuing.
2661 if (statusNotificationBeingSent() || status
!= kMemNormal
|| !m_oomHandling
)
// Mark this thread as the notifier; doubles as the re-entrancy guard
// that statusNotificationBeingSent() checks.
2664 m_notificationThread
= VMPI_currentThread();
// Snapshot the total footprint (heap + external pressure, in blocks)
// so progress can be measured after each callback runs.
2666 size_t startingTotal
= GetTotalHeapSize() + externalPressure
/ kBlockSize
;
2667 size_t currentTotal
= startingTotal
;
2669 BasicListIterator
<OOMCallback
*> iter(callbacks
);
2670 OOMCallback
*cb
= NULL
;
2671 bool bContinue
= true;
// NOTE(review): the do { ... } opening and the iter.next() fetch of cb
// sit on original lines missing from this extract.
// Drop the spinlock while calling out: the callback may re-enter the
// heap to free or allocate memory.
2676 VMPI_lockRelease(&m_spinlock
);
2677 cb
->memoryStatusChange(kFreeMemoryIfPossible
, kFreeMemoryIfPossible
);
2678 VMPI_lockAcquire(&m_spinlock
);
// Re-measure after the callback to see how much was freed.
2681 currentTotal
= GetTotalHeapSize() + externalPressure
/ kBlockSize
;
2683 // If we've freed MORE than the minimum amount, we can stop freeing
2684 if ((startingTotal
- currentTotal
) > minimumBlocksToFree
)
2689 } while(cb
!= NULL
&& bContinue
);
// Remember where we stopped so the next signal resumes fairly.
2691 iter
.MarkCursorInList();
// Clear the notifier/re-entrancy guard.
2693 m_notificationThread
= NULL
;
// Transitions the heap to memory status `to` and notifies every
// registered OOM callback of the old->new status change.
// NOTE(review): caller appears to hold m_spinlock (it is released and
// re-acquired around each callback below) — confirm against callers.
2696 void GCHeap::StatusChangeNotify(MemoryStatus to
)
2698 // If we're already in the process of sending this notification, don't resend
2699 if ((statusNotificationBeingSent() && to
== status
) || !m_oomHandling
)
// Mark this thread as the notifier (re-entrancy guard).
2702 m_notificationThread
= VMPI_currentThread();
2704 MemoryStatus oldStatus
= status
;
2707 BasicListIterator
<OOMCallback
*> iter(callbacks
);
2708 OOMCallback
*cb
= NULL
;
// NOTE(review): the do { ... } opening and the iter.next() fetch of cb
// sit on original lines missing from this extract.
// Drop the spinlock while calling out; the callback may re-enter.
2715 VMPI_lockRelease(&m_spinlock
);
2716 cb
->memoryStatusChange(oldStatus
, to
);
2717 VMPI_lockAcquire(&m_spinlock
);
2719 } while(cb
!= NULL
);
// Clear the notifier guard, then see if the pressure relief brought
// the heap back to normal.
2722 m_notificationThread
= NULL
;
2724 CheckForStatusReturnToNormal();
// True when a thread must not enter the heap: the heap is gone, or an
// abort is in progress and this is not the aborting call stack.
2728 bool GCHeap::ShouldNotEnter()
2730 // don't enter if the heap is already gone or we're aborting but not on the aborting call stack in a nested enter call
2731 GCHeap
*heap
= GetGCHeap();
// NOTE(review): the start of the return expression (original line 2732,
// presumably "return heap == NULL ||") is missing from this extract.
2733 (heap
->GetStatus() == kMemAbort
&&
2734 (heap
->GetEnterFrame() == NULL
|| heap
->GetEnterFrame()->Suspended())))
2739 bool GCHeap::IsAddressInHeap(void *addr
)
2741 void *block
= (void*)(uintptr_t(addr
) & kBlockMask
);
2742 return SafeSize(block
) != (size_t)-1;
2745 // Every new GC must register itself with the GCHeap.
2746 void GCHeap::AddGC(GC
*gc
)
2748 bool bAdded
= false;
2750 MMGC_LOCK(m_spinlock
);
2751 // hack to allow GCManager's list back in for list mem operations
// Temporarily pose as the notification thread so the manager's list
// can allocate while the lock is held, then restore the real value.
2752 vmpi_thread_t notificationThreadSave
= m_notificationThread
;
2753 m_notificationThread
= VMPI_currentThread();
2754 bAdded
= gcManager
.tryAddGC(gc
);
2755 m_notificationThread
= notificationThreadSave
;
// NOTE(review): the consumer of bAdded (original lines 2756-2762,
// presumably the failure/abort path when registration fails) is missing
// from this extract — confirm against the full file.
2763 // When the GC is destroyed it must remove itself from the GCHeap.
2764 void GCHeap::RemoveGC(GC
*gc
)
2766 MMGC_LOCK_ALLOW_RECURSION(m_spinlock
, m_notificationThread
);
2767 // hack to allow GCManager's list back in for list mem operations
2768 vmpi_thread_t notificationThreadSave
= m_notificationThread
;
2769 m_notificationThread
= VMPI_currentThread();
2770 gcManager
.removeGC(gc
);
2771 m_notificationThread
= notificationThreadSave
;
2772 EnterFrame
* ef
= GetEnterFrame();
2773 if (ef
&& ef
->GetActiveGC() == gc
)
2774 ef
->SetActiveGC(NULL
);
// Registers an OOM notification callback that will receive
// memoryStatusChange() calls from the heap.
2777 void GCHeap::AddOOMCallback(OOMCallback
*p
)
2779 bool bAdded
= false;
2781 MMGC_LOCK(m_spinlock
);
2782 // hack to allow GCManager's list back in for list mem operations
// Temporarily pose as the notification thread so the callback list can
// allocate while the lock is held, then restore the real value.
2783 vmpi_thread_t notificationThreadSave
= m_notificationThread
;
2784 m_notificationThread
= VMPI_currentThread();
2785 bAdded
= callbacks
.TryAdd(p
);
2786 m_notificationThread
= notificationThreadSave
;
// NOTE(review): the consumer of bAdded (original lines 2787-2793,
// presumably the failure/abort path) is missing from this extract.
2794 void GCHeap::RemoveOOMCallback(OOMCallback
*p
)
2796 MMGC_LOCK(m_spinlock
);
2797 // hack to allow GCManager's list back in for list mem operations
2798 vmpi_thread_t notificationThreadSave
= m_notificationThread
;
2799 m_notificationThread
= VMPI_currentThread();
2800 callbacks
.Remove(p
);
2801 m_notificationThread
= notificationThreadSave
;
// Ensures a spare Region descriptor is available for the next region
// creation, carving a fresh block for descriptors when none are free.
// NOTE(review): this extract is lossy — the function's braces, the `zero`
// local feeding AllocBlock, and the return paths are on missing lines.
2804 bool GCHeap::EnsureFreeRegion(bool allowExpansion
)
2806 if(!HaveFreeRegion()) {
// Grab one block to hold fresh Region descriptors.
2808 HeapBlock
*block
= AllocBlock(1, zero
, 1);
2810 nextRegion
= (Region
*)(void *)block
->baseAddr
;
2811 } else if(allowExpansion
) {
2813 // We must have hit the hard limit or OS limit
2814 if(nextRegion
== NULL
)
// Obtains a Region descriptor — from the free list when possible,
// otherwise carved from the descriptor page at nextRegion — and
// placement-constructs it for [baseAddr, rTop).
// NOTE(review): the branch structure and the return of r sit on original
// lines missing from this extract.
2821 GCHeap::Region
*GCHeap::NewRegion(char *baseAddr
, char *rTop
, char *cTop
, size_t blockId
)
// Pop a recycled descriptor off the intrusive free list (the first
// word of a free Region points at the next free one).
2823 Region
*r
= freeRegion
;
2825 freeRegion
= *(Region
**)freeRegion
;
// If the descriptor page lacks room for one more Region, force
// ExpandHeap to allocate a fresh page next time.
2828 if(roundUp((uintptr_t)nextRegion
, kBlockSize
) - (uintptr_t)nextRegion
< sizeof(Region
))
2829 nextRegion
= NULL
; // fresh page allocated in ExpandHeap
// Construct the Region in place; the ctor links it as lastRegion.
2831 new (r
) Region(this, baseAddr
, rTop
, cTop
, blockId
);
// Returns a Region descriptor to the intrusive free list, unlinking it
// from the region chain first.
// NOTE(review): lossy extract — the guard around lastRegion (r must be
// the tail for this unlink to be correct), the else branch, and the
// freeRegion update sit partly on missing original lines.
2835 void GCHeap::FreeRegion(Region
*r
)
2838 lastRegion
= r
->prev
;
// Thread r onto the free list: its first word now points at the old head.
2839 *(Region
**)r
= freeRegion
;
2845 void GCHeap::EnterLockInit()
2847 if (!instanceEnterLockInitialized
)
2849 instanceEnterLockInitialized
= true;
2850 VMPI_lockInit(&instanceEnterLock
);
// Destroys the global instance-enter lock created by EnterLockInit() and
// clears the flag so a later EnterLockInit() could recreate it.
2855 void GCHeap::EnterLockDestroy()
2857 GCAssert(instanceEnterLockInitialized
);
2858 VMPI_lockDestroy(&instanceEnterLock
);
2859 instanceEnterLockInitialized
= false;
// Region constructor: records the region's extent and chains it in as the
// heap's newest (last) region.
// NOTE(review): the rest of the initializer list (original lines
// 2864-2868, presumably baseAddr/reserveTop/commitTop/blockId members) is
// missing from this extract.
2862 GCHeap::Region::Region(GCHeap
*heap
, char *baseAddr
, char *rTop
, char *cTop
, size_t blockId
)
2863 : prev(heap
->lastRegion
),
// Link self as the head of the heap's region chain.
2869 heap
->lastRegion
= this;
2873 void GCHeap::CheckForOOMAbortAllocation()
2875 if(m_notificationThread
== VMPI_currentThread() && status
== kMemAbort
)
2876 GCAssertMsg(false, "Its not legal to perform allocations during OOM kMemAbort callback");
2880 bool GCHeap::QueryCanReturnToNormal()
2882 // must be below soft limit _AND_ above decommit threshold
2883 return GetUsedHeapSize() + externalPressure
/kBlockSize
< config
.heapSoftLimit
&&
2884 FreeMemoryExceedsDecommitThreshold();
2887 bool GCHeap::FreeMemoryExceedsDecommitThreshold()
2889 return GetFreeHeapSize() * 100 > GetTotalHeapSize() * kDecommitThresholdPercentage
;