/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine.].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2006
 * the Initial Developer. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifdef AVMPLUS_SAMPLER
void recordAllocationSample(const void* item, size_t size);
void recordDeallocationSample(const void* item, size_t size);

#if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
extern void RedirectLogOutput(void (*)(const char*));
static FILE* fp = NULL;

void logToFile(const char* s)

static void startGCLogToFile()
    fp = fopen("gcbehavior.txt", "w");
    RedirectLogOutput(logToFile);

static void endGCLogToFile()
    RedirectLogOutput(NULL);
#endif // MMGC_POLICY_PROFILING && !AVMSHELL_BUILD
GCHeap *GCHeap::instance = NULL;
bool GCHeap::instanceEnterLockInitialized = false;
vmpi_spin_lock_t GCHeap::instanceEnterLock;

// GCHeap instance has the C++ runtime call dtor which causes problems
AVMPLUS_ALIGN8(uint8_t) heapSpace[sizeof(GCHeap)];

const size_t kLargeItemBlockId = ~0U;

size_t GCHeap::leakedBytes;

#ifdef MMGC_MEMORY_PROFILER
MemoryProfiler* GCHeap::profiler = (MemoryProfiler*)-1;
GCHeapConfig::GCHeapConfig() :
    heapLimit(kDefaultHeapLimit),
    dispersiveAdversarial(0), // 0 means dispersive allocation is off.
    useVirtualMemory(VMPI_useVirtualMemory()),
    trimVirtualMemory(true),
    mergeContiguousRegions(VMPI_canMergeContiguousRegions()),
    sloppyCommit(VMPI_canCommitAlreadyCommittedMemory()),
    gcstats(false), // tracking
    autoGCStats(false), // auto printing
#ifdef AVMSHELL_BUILD
    gcbehavior(0),  // controlled by command line switch
    gcbehavior(2),  // unconditional, if MMGC_POLICY_PROFILING is on
    eagerSweeping(false),
#ifdef MMGC_HEAP_GRAPH
    dumpFalsePositives(false),
    gcLoadCeiling(1.0),     // 1.0 is probably OK for desktop, maybe less so for mobile - more experiments needed
    _checkFixedMemory(true) // See comment in GCHeap.h for why the default must be 'true'

    // Bugzilla 544695 - large heaps need to be controlled more tightly than
    // small heaps due to the reliance of the Player on the GC for removing some
    // objects from the AS2 scriptThreadList and because people don't want to
    // use as much memory as a single policy across all heap sizes would require.
    // As reference counting takes care of a lot of storage management, there's
    // little harm in running the incremental GC more aggressively in large
    // heaps - most of the time is spent elsewhere.

    // These numbers are not based on any profiling data but are intended to be
    // approximately right. The 1.05 could be a little too tight; we'll see.

    GCAssert(GCHeapConfig::kNumLoadFactors >= 7);

    gcLoad[0] = 2.5;  gcLoadCutoff[0] = 10;      // Breathing room for warmup
    gcLoad[1] = 2.0;  gcLoadCutoff[1] = 25;      // Classical 2x factor
    gcLoad[2] = 1.75; gcLoadCutoff[2] = 50;      // Tighten
    gcLoad[3] = 1.5;  gcLoadCutoff[3] = 75;      //   the
    gcLoad[4] = 1.25; gcLoadCutoff[4] = 100;     //     screws
    gcLoad[5] = 1.1;  gcLoadCutoff[5] = 150;     // Large heaps are
    gcLoad[6] = 1.05; gcLoadCutoff[6] = DBL_MAX; // controlled (very) tightly
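
    // Illustrative sketch (not part of the build): how a load/cutoff table like
    // the one above is meant to be read.  The real consumer of gcLoad and
    // gcLoadCutoff is the GC policy code elsewhere in MMgc; the helper below only
    // shows the intended interpretation -- take the load factor of the first row
    // whose cutoff has not yet been exceeded by the heap size.  The function name,
    // the MB unit and the linear scan are assumptions for this example.
#if 0
    static double exampleLoadFactorFor(double heapSizeMB,
                                       const double* gcLoad,
                                       const double* gcLoadCutoff,
                                       size_t numLoadFactors)
    {
        for (size_t i = 0; i < numLoadFactors; i++)
            if (heapSizeMB <= gcLoadCutoff[i])   // e.g. a 30MB heap falls in the 25..50 row -> 1.75
                return gcLoad[i];
        return gcLoad[numLoadFactors - 1];       // last row's cutoff is DBL_MAX, so this is unreachable
    }
#endif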
    trimVirtualMemory = false; // no need

    const char *envValue = VMPI_getenv("MMGC_HEAP_LIMIT");
    heapLimit = VMPI_strtol(envValue, 0, 10);
    envValue = VMPI_getenv("MMGC_HEAP_SOFT_LIMIT");
    heapSoftLimit = VMPI_strtol(envValue, 0, 10);
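
    // Example (values are illustrative): setting MMGC_HEAP_LIMIT=65536 in the
    // environment before startup overrides heapLimit, and MMGC_HEAP_SOFT_LIMIT
    // likewise overrides heapSoftLimit.  The values are parsed with VMPI_strtol
    // in base 10 and, as the limits are used elsewhere in this file, are
    // expressed in blocks rather than bytes.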
void GCHeap::ResetStatics()
#ifdef MMGC_MEMORY_PROFILER
    if(profiler && IsProfilerInitialized())
    profiler = (MemoryProfiler*)-1;
void GCHeap::Init(const GCHeapConfig& config)
    GCAssert(instance == NULL);
    void *p = (void*)heapSpace;
    instance = new (p) GCHeap(config);

size_t GCHeap::Destroy()
    GCAssert(instance != NULL);
    instance->DestroyInstance();
GCHeap::GCHeap(const GCHeapConfig& c)
    : kNativePageSize(VMPI_getVMPageSize()),
      externalCodeMemory(0),
      m_notificationThread(NULL),
#ifdef MMGC_MEMORY_PROFILER
#ifdef MMGC_POLICY_PROFILING
      entryChecksEnabled(true),
      abortStatusNotificationSent(false)

    VMPI_lockInit(&m_spinlock);
    VMPI_lockInit(&gclog_spinlock);

    // ResetStatics should be called at the start here before using/initializing any statics

    // Initialize free lists
    HeapBlock *block = freelists;
    for (uint32_t i=0; i<kNumFreeLists; i++) {
        block->FreelistInit();

    // Create the initial heap
    MMGC_LOCK(m_spinlock);
    if (!ExpandHeap((int)config.initialSize))

    fixedMalloc.InitInstance(this);

#ifdef MMGC_MEMORY_PROFILER
    // create profiler if turned on and if it is not already created
    if(!IsProfilerInitialized())

    hooksEnabled = true; // set only after creating profiler
    hasSpy = VMPI_spySetup();

#ifdef MMGC_MEMORY_INFO
    hooksEnabled = true; // always track allocs in DEBUG builds

#if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
void GCHeap::DestroyInstance()
#if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD

    leakedBytes = GetFixedMalloc()->GetBytesInUse();
    fixedMalloc.DestroyInstance();
    GCAssertMsg(leakedBytes == 0 || GetStatus() == kMemAbort, "Leaks!");

    size_t internalNum = AddrToBlock(blocks)->size + numRegionBlocks;

    // numAlloc should just be the size of the HeapBlock's space
    if(numAlloc != internalNum && status != kMemAbort)
        for (unsigned int i=0; i<blocksLen; i++)
            HeapBlock *block = &blocks[i];
            if(block->inUse() && block->baseAddr && block->baseAddr != (char*)blocks)
                GCLog("Block 0x%x not freed\n", block->baseAddr);
#if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
                if(block->allocTrace)
                    PrintStackTrace(block->allocTrace);

#ifdef MMGC_MEMORY_PROFILER
    hooksEnabled = false;

    // Acquire all the locks before destroying them to make reasonably
    // sure we're the last consumers.  This is probably not exactly
    // right, see https://bugzilla.mozilla.org/show_bug.cgi?id=548347
    // and linked bugs for a discussion.  Note we can't acquire these
    // much higher up because we get into situations where GCHeap code
    // will want to lock these locks, but they are already locked.

    VMPI_lockAcquire(&m_spinlock);
    VMPI_lockRelease(&m_spinlock);
    VMPI_lockDestroy(&m_spinlock);

    VMPI_lockAcquire(&gclog_spinlock);
    VMPI_lockRelease(&gclog_spinlock);
    VMPI_lockDestroy(&gclog_spinlock);

    enterFrame->Destroy();  // Destroy the pointed-to value
    enterFrame.destroy();   // Destroy the thread-local itself
void* GCHeap::Alloc(size_t size, uint32_t flags, size_t alignment)
    GCAssert(alignment > 0);

    // Alignment must be a power of 2
    size_t a = alignment;

    bool zero = (flags & kZero) != 0;
    bool expand = (flags & kExpand) != 0;

    MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);

    bool saved_oomHandling = m_oomHandling;
    m_oomHandling = saved_oomHandling && (flags & kNoOOMHandling) == 0;

    baseAddr = AllocHelper(size, expand, zero, alignment);

    // If expand didn't work, or expand flag not set, then try to free
    // memory then realloc from the heap
    SendFreeMemorySignal(size);
    baseAddr = AllocHelper(size, expand, zero, alignment);

    // If we're still unable to allocate, we're done
    if (flags & kCanFail)
        m_oomHandling = saved_oomHandling;

#ifdef MMGC_MEMORY_PROFILER
    if((flags & kProfile) && HooksEnabled() && profiler) {
        profiler->RecordAllocation(baseAddr, size * kBlockSize, size * kBlockSize);

    // Only check for memory limits if we're allowing OOM notifications
    CheckForMemoryLimitsExceeded();

    m_oomHandling = saved_oomHandling;

    GCAssert(Size(baseAddr) == size);

    // Zero out the memory, if requested to do so
    // These pages may have been seen by valgrind before and
    // they become unaddressable when we last called
    // FREELIKE_BLOCK or MEMPOOL_DESTROY; use MAKE_MEM_DEFINED
    // to silence write-to-freed-memory errors.
    VALGRIND_MAKE_MEM_DEFINED(baseAddr, size * kBlockSize);
    VMPI_memset(baseAddr, 0, size * kBlockSize);
    // and then make the memory undefined again; we do this because either
    // the VALGRIND_MALLOCLIKE_BLOCK call below will define it, or the
    // suballocator will, i.e. this is here to keep the sub-allocators honest.
    VALGRIND_MAKE_MEM_UNDEFINED(baseAddr, size * kBlockSize);

    // Fail the allocation if we're a "canFail" allocation that has pushed beyond one of our limits.
    if((flags & kCanFail) != 0 && (status == kMemSoftLimit || SoftLimitExceeded() || HardLimitExceeded()))
        FreeInternal(baseAddr, (flags & kProfile) != 0, m_oomHandling);

    // We utilize the "profile" flag to tell the difference
    // between client requests and sub-allocator requests.  Direct
    // client requests are reported to valgrind here; sub-
    // allocators need to tell valgrind about memory themselves.
    if ((flags & kProfile) != 0) {
        VALGRIND_MALLOCLIKE_BLOCK(baseAddr, size * kBlockSize, 0, (flags&kZero) != 0);

    GCAssert(((uintptr_t)baseAddr >> kBlockShift) % alignment == 0);
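
// Illustrative usage sketch (not compiled): how a caller might exercise the flag
// bits that Alloc() inspects above.  kExpand, kZero, kCanFail and kProfile are the
// flags referenced in this function; the two-block request, the alignment of 1 and
// the NULL handling are example usage, not code from this file.
#if 0
static void* exampleBlockRequest(MMgc::GCHeap* heap)
{
    // Ask for 2 blocks, zeroed, allowing expansion but tolerating failure.
    void* p = heap->Alloc(2,
                          MMgc::GCHeap::kExpand | MMgc::GCHeap::kZero | MMgc::GCHeap::kCanFail,
                          /*alignment=*/1);
    if (p == NULL) {
        // With kCanFail the caller handles OOM itself instead of relying on Abort().
    }
    return p;
}
#endif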
void *GCHeap::AllocHelper(size_t size, bool expand, bool& zero, size_t alignment)
    // first try to find it in our existing free memory
    HeapBlock *block = AllocBlock(size, zero, alignment);

    // Try to expand if the flag is set
    if(!block && expand) {

        // Look ahead at memory limits to see if we should trigger a free memory signal
        if ( (HardLimitExceeded(size) || SoftLimitExceeded(size)))
            SendFreeMemorySignal(size);

        // Don't allow our memory consumption to grow beyond hard limit
        if(HardLimitExceeded(size))

        if(size >= kOSAllocThreshold && config.useVirtualMemory) {
            return LargeAlloc(size, alignment);

        block = AllocBlock(size, zero, alignment);

#if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
    if(profiler && block)
        block->allocTrace = profiler->GetStackTrace();

    return block != NULL ? block->baseAddr : NULL;
void GCHeap::SignalCodeMemoryAllocation(size_t size, bool gcheap_memory)
    gcheapCodeMemory += size;
    externalCodeMemory += size;
    CheckForMemoryLimitsExceeded();
void GCHeap::CheckForMemoryLimitsExceeded()
    // If we're already in the process of sending out memory notifications, don't bother verifying now.
    if (status == MMgc::kMemAbort || statusNotificationBeingSent())

    if (SoftLimitExceeded())
        overage = GetTotalHeapSize() + externalPressure/kBlockSize - config.heapSoftLimit;
    else if (HardLimitExceeded())
        overage = (GetTotalHeapSize() + externalPressure/kBlockSize) - config.heapLimit + (config.heapLimit / 10);

    SendFreeMemorySignal(overage);

    CheckForHardLimitExceeded();

    CheckForSoftLimitExceeded(overage);
void GCHeap::FreeInternal(const void *item, bool profile, bool oomHandling)
    // recursive free calls are allowed from StatusChangeNotify
    MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);

    bool saved_oomHandling = m_oomHandling;
    m_oomHandling = saved_oomHandling && oomHandling;

    HeapBlock *block = AddrToBlock(item);

    size = LargeAllocSize(item);

    GCAssert(numAlloc >= (unsigned int)size);

#if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
    if(profiler && block)
        block->freeTrace = profiler->GetStackTrace();

#ifdef MMGC_MEMORY_PROFILER
    if(profile && HooksEnabled() && profiler) {
        profiler->RecordDeallocation(item, size * kBlockSize);

    VALGRIND_FREELIKE_BLOCK(item, 0);

    m_oomHandling = saved_oomHandling;
void GCHeap::Decommit()
    // keep at least initialSize free
    if(!config.returnMemory)

    // don't decommit if OOM handling is disabled; there's a guard in the OOM code so this
    // should never happen, but belt and suspenders...

    size_t heapSize = GetTotalHeapSize();
    size_t freeSize = GetFreeHeapSize();

    size_t decommitSize = 0;
    // decommit if > kDecommitThresholdPercentage is free
    if(FreeMemoryExceedsDecommitThreshold())
        decommitSize = int((freeSize * 100 - heapSize * kDecommitThresholdPercentage) / 100);
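
    // Worked example of the formula above (numbers illustrative; assuming
    // kDecommitThresholdPercentage is the percentage of the heap that is allowed
    // to sit free): with heapSize = 1000 blocks, freeSize = 400 blocks and a
    // threshold of 25%, decommitSize = (400*100 - 1000*25)/100 = 150 blocks --
    // exactly enough that the remaining 250 free blocks are 25% of the
    // pre-decommit heap size.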
    // If we're over the heapLimit, attempt to decommit enough to get just under the limit
    else if ( (heapSize > config.heapLimit) && ((heapSize - freeSize) < config.heapLimit))
        decommitSize = heapSize - config.heapLimit + 1;
    // If we're over the SoftLimit, attempt to decommit enough to get just under the softLimit
    else if ((config.heapSoftLimit != 0) && (heapSize > config.heapSoftLimit) && ((heapSize - freeSize) < config.heapSoftLimit))
        decommitSize = heapSize - config.heapSoftLimit + 1;

    if ((decommitSize < (size_t)kMinHeapIncrement) && (freeSize > (size_t)kMinHeapIncrement))
        decommitSize = kMinHeapIncrement;

    // Don't decommit more than our initial config size.
    if (heapSize - decommitSize < config.initialSize)
        decommitSize = heapSize - config.initialSize;

    MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);

    // search from the end of the free list so we decommit big blocks
    HeapBlock *freelist = freelists+kNumFreeLists-1;

    HeapBlock *endOfBigFreelists = &freelists[GetFreeListIndex(1)];

    for (; freelist >= endOfBigFreelists && decommitSize > 0; freelist--)

        // We may call RemoveLostBlock below which splits regions
        // and may need to create a new one; don't let it expand
        // though, expanding while Decommit'ing would be silly.
        if(!EnsureFreeRegion(/*expand=*/false))

        HeapBlock *block = freelist;
        while ((block = block->prev) != freelist && decommitSize > 0)

            // decommitting already decommitted blocks doesn't help
            // temporary replacement for commented out conditional below
            GCAssert(block->size != 0);
            if(!block->committed /*|| block->size == 0*/)

            if(config.useVirtualMemory)
                RemoveFromList(block);
                if((size_t)block->size > decommitSize)
                    HeapBlock *newBlock = Split(block, (int)decommitSize);
                    AddToFreeList(newBlock);

                Region *region = AddrToRegion(block->baseAddr);
                if(config.trimVirtualMemory &&
                   freeSize * 100 > heapSize * kReleaseThresholdPercentage &&
                   // if block is as big or bigger than a region, free the whole region
                   block->baseAddr <= region->baseAddr &&
                   region->reserveTop <= block->endAddr() )

                    if(block->baseAddr < region->baseAddr)
                        HeapBlock *newBlock = Split(block, int((region->baseAddr - block->baseAddr) / kBlockSize));
                        AddToFreeList(block);

                    if(block->endAddr() > region->reserveTop)
                        HeapBlock *newBlock = Split(block, int((region->reserveTop - block->baseAddr) / kBlockSize));
                        AddToFreeList(newBlock);

                    decommitSize -= block->size;

                else if(VMPI_decommitMemory(block->baseAddr, block->size * kBlockSize))
                    block->committed = false;
                    block->dirty = false;
                    decommitSize -= block->size;
                    GCLog("decommitted %d page block from %p\n", block->size, block->baseAddr);

                    // this can happen on mac where we release and re-reserve the memory and another thread may steal it from us
                    RemoveLostBlock(block);

                // if the VM APIs fail us, bail

                numDecommitted += block->size;

                // merge with previous/next if not in use and not committed
                HeapBlock *prev = block - block->sizePrevious;
                if(block->sizePrevious != 0 && !prev->committed && !prev->inUse()) {
                    RemoveFromList(prev);

                    prev->size += block->size;

                    block->sizePrevious = 0;

                HeapBlock *next = block + block->size;
                if(next->size != 0 && !next->committed && !next->inUse()) {
                    RemoveFromList(next);

                    block->size += next->size;

                    next->sizePrevious = 0;

                next = block + block->size;
                next->sizePrevious = block->size;

                // add this block to the back of the bus to make sure we consume committed memory
                HeapBlock *backOfTheBus = &freelists[kNumFreeLists-1];
                HeapBlock *pointToInsert = backOfTheBus;
                while ((pointToInsert = pointToInsert->next) != backOfTheBus) {
                    if (pointToInsert->size >= block->size && !pointToInsert->committed) {

                AddToFreeList(block, pointToInsert);

                // so we keep going through the freelist properly

            } else { // not using virtual memory
                // if we aren't using mmap we can only do something if the block maps to a region
                // that is completely empty
                Region *region = AddrToRegion(block->baseAddr);
                if(block->baseAddr == region->baseAddr && // beginnings match
                   region->commitTop == block->baseAddr + block->size*kBlockSize) {
                    RemoveFromList(block);

    CheckForStatusReturnToNormal();
// m_spinlock is held
void GCHeap::CheckForHardLimitExceeded()
    if (!HardLimitExceeded())

// m_spinlock is held
void GCHeap::CheckForSoftLimitExceeded(size_t request)
    if(config.heapSoftLimit == 0 || status != kMemNormal || !SoftLimitExceeded())

    size_t externalBlocks = externalPressure / kBlockSize;
    GCDebugMsg(false, "*** Alloc exceeded softlimit: ask for %u, usedheapsize =%u, totalHeap =%u, of which external =%u\n",
               unsigned(GetUsedHeapSize() + externalBlocks),
               unsigned(GetTotalHeapSize() + externalBlocks),
               unsigned(externalBlocks));

    if(statusNotificationBeingSent())

    StatusChangeNotify(kMemSoftLimit);
// m_spinlock is held
void GCHeap::CheckForStatusReturnToNormal()
    if(!statusNotificationBeingSent() && statusNotNormalOrAbort())

        size_t externalBlocks = externalPressure / kBlockSize;
        size_t total = GetTotalHeapSize() + externalBlocks;

        // return to normal if we drop below heapSoftLimit
        if(config.heapSoftLimit != 0 && status == kMemSoftLimit)
            if (!SoftLimitExceeded())
                size_t used = GetUsedHeapSize() + externalBlocks;
                GCDebugMsg(false, "### Alloc dropped below softlimit: usedheapsize =%u, totalHeap =%u, of which external =%u\n",
                           unsigned(externalBlocks) );
                StatusChangeNotify(kMemNormal);

        // or if we drop more than 10% below the historical maximum
        else if ((maxTotalHeapSize / kBlockSize + externalBlocks) * 9 > total * 10)
            StatusChangeNotify(kMemNormal);
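
// Worked example of the condition above: the integer comparison
// (maxTotalHeapSize/kBlockSize + externalBlocks) * 9 > total * 10 tests
// total < 0.9 * max without floating point.  With a historical maximum of
// 1000 blocks and no external pressure, a current total of 899 blocks passes
// (9000 > 8990) and the status returns to normal, while 900 blocks does not
// (9000 > 9000 is false).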
void GCHeap::RemoveLostBlock(HeapBlock *block)
    GCLog("Removing block %p %d\n", block->baseAddr, block->size);

    Region *region = AddrToRegion(block->baseAddr);
    if(region->baseAddr == block->baseAddr && region->reserveTop == block->endAddr()) {
        RemoveBlock(block, /*release*/false);

    while(AddrToRegion(block->baseAddr) != AddrToRegion(block->endAddr()-1)) {
        // split block into parts mapping to regions
        Region *r = AddrToRegion(block->baseAddr);
        size_t numBlocks = (r->commitTop - block->baseAddr) / kBlockSize;
        char *next = Split(block, numBlocks)->baseAddr;
        RemoveLostBlock(block);
        block = AddrToBlock(next);

    Region *region = AddrToRegion(block->baseAddr);
    // save these off since we'll potentially shift region
    char *regionBaseAddr = region->baseAddr;
    size_t regionBlockId = region->blockId;

    // if we don't line up with beginning or end we need a new region
    if(block->baseAddr != region->baseAddr && region->commitTop != block->endAddr()) {
        GCAssertMsg(HaveFreeRegion(), "Decommit was supposed to ensure this!");
        NewRegion(block->endAddr(), region->reserveTop,
                  region->commitTop > block->endAddr() ? region->commitTop : block->endAddr(),
                  region->blockId + (block->endAddr() - region->baseAddr) / kBlockSize);
        if(region->baseAddr != block->baseAddr) {
            // end this region at the start of block going away
            region->reserveTop = block->baseAddr;
            if(region->commitTop > block->baseAddr)
                region->commitTop = block->baseAddr;

    } else if(region->baseAddr == block->baseAddr) {
        region->blockId += block->size;
        region->baseAddr = block->endAddr();
    } else if(region->commitTop == block->endAddr()) {
        // end this region at the start of block going away
        region->reserveTop = block->baseAddr;
        if(region->commitTop > block->baseAddr)
            region->commitTop = block->baseAddr;

        GCAssertMsg(false, "This shouldn't be possible");

    // create temporary region for this block
    Region temp(this, block->baseAddr, block->endAddr(), block->endAddr(), regionBlockId + (block->baseAddr - regionBaseAddr) / kBlockSize);

    RemoveBlock(block, /*release*/false);

    // pop temp from freelist, put there by RemoveBlock
    freeRegion = *(Region**)freeRegion;

    // doing this here is an extra validation step
void GCHeap::RemoveBlock(HeapBlock *block, bool release)
    Region *region = AddrToRegion(block->baseAddr);

    GCAssert(region->baseAddr == block->baseAddr);
    GCAssert(region->reserveTop == block->endAddr());

    size_t newBlocksLen = blocksLen - block->size;

    HeapBlock *nextBlock = block + block->size;

    bool need_sentinel = false;
    bool remove_sentinel = false;

    if( block->sizePrevious && nextBlock->size ) {
        // This block is contiguous with the blocks before and after it
        // so we need to add a sentinel
        need_sentinel = true;

    else if ( !block->sizePrevious && !nextBlock->size ) {
        // the block is not contiguous with the block before or after it - we need to remove a sentinel
        // since there would already be one on each side.
        remove_sentinel = true;

    // update nextblock's sizePrevious
    nextBlock->sizePrevious = need_sentinel ? 0 : block->sizePrevious;

    // Add space for the sentinel - the remaining blocks won't be contiguous
    else if(remove_sentinel)

    // just re-use blocks; small wastage possible
    HeapBlock *newBlocks = blocks;

    // the memmove will overwrite this so save it
    size_t blockSize = block->size;

    size_t offset = int(block-blocks);
    int32_t sen_offset = 0;
    HeapBlock *src = block + block->size;

    if( need_sentinel ) {
        offset = int(block-blocks)+1;
        HeapBlock* sentinel = newBlocks + (block-blocks);
        sentinel->baseAddr = NULL;
        sentinel->sizePrevious = block->sizePrevious;
        sentinel->prev = NULL;
        sentinel->next = NULL;
#if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO)
        sentinel->allocTrace = 0;

    else if( remove_sentinel ) {
        // skip trailing sentinel

    int lastChunkSize = int((blocks + blocksLen) - src);
    GCAssert(lastChunkSize + offset == newBlocksLen);
    memmove(newBlocks + offset, src, lastChunkSize * sizeof(HeapBlock));

    // Fix up the prev/next pointers of each freelist.  This is a little more complicated
    // than the similar code in ExpandHeap because blocks after the one we are freeing
    // are sliding down by block->size
    HeapBlock *fl = freelists;
    for (uint32_t i=0; i<kNumFreeLists; i++) {
        HeapBlock *temp = fl;
            if (temp->prev != fl) {
                if(temp->prev > block) {
                    temp->prev = newBlocks + (temp->prev-blocks-blockSize) + sen_offset;

            if (temp->next != fl) {
                if(temp->next > block) {
                    temp->next = newBlocks + (temp->next-blocks-blockSize) + sen_offset;

        } while ((temp = temp->next) != fl);

    // Need to decrement blockId for regions in blocks after block
    Region *r = lastRegion;
        if(r->blockId > region->blockId && r->blockId != kLargeItemBlockId) {
            r->blockId -= (blockSize-sen_offset);

    blocksLen = newBlocksLen;
    RemoveRegion(region, release);

    // make sure we did everything correctly
    ValidateHeapBlocks();
void GCHeap::ValidateHeapBlocks()
    // iterate through HeapBlocks making sure:
    // non-contiguous regions have a sentinel
    HeapBlock *block = blocks;
    while(block - blocks < (intptr_t)blocksLen) {
        Region *r = AddrToRegion(block->baseAddr);
        if(r && r->baseAddr == block->baseAddr)
            GCAssert(r->blockId == (size_t)(block-blocks));

        HeapBlock *next = NULL;
            next = block + block->size;
            GCAssert(next - blocks < (intptr_t)blocksLen);
            GCAssert(next->sizePrevious == block->size);

        HeapBlock *prev = NULL;
        if(block->sizePrevious) {
            prev = block - block->sizePrevious;
            GCAssert(prev - blocks >= 0);
            GCAssert(prev->size == block->sizePrevious);
        } else if(block != blocks) {
            // I have no prev and I'm not the first, check sentinel
            HeapBlock *sentinel = block-1;
            GCAssert(sentinel->baseAddr == NULL);
            GCAssert(sentinel->size == 0);
            GCAssert(sentinel->sizePrevious != 0);

        if(block->baseAddr) {
            GCAssert(block->baseAddr == prev->baseAddr + (kBlockSize * prev->size));

            // we should always end on a sentinel
            GCAssert(next - blocks < (int)blocksLen);

            // block is a sentinel
            GCAssert(block->size == 0);
            // FIXME: the following asserts are firing and we need to understand why, could be bugs
            // make sure last block ends at commitTop
            Region *prevRegion = AddrToRegion(prev->baseAddr + (prev->size*kBlockSize) - 1);
            GCAssert(prev->baseAddr + (prev->size*kBlockSize) == prevRegion->commitTop);

        // either we've reached the end or the next isn't a sentinel
        GCAssert(block - blocks == (intptr_t)blocksLen || block->size != 0);

    GCAssert(block - blocks == (intptr_t)blocksLen);
GCHeap::Region *GCHeap::AddrToRegion(const void *item) const
    // Linear search of regions list to find this address.
    // The regions list should usually be pretty short.
    for (Region *region = lastRegion;
         region = region->prev)
        if (item >= region->baseAddr && item < region->reserveTop) {
GCHeap::HeapBlock* GCHeap::AddrToBlock(const void *item) const
    Region *region = AddrToRegion(item);
    if(region->blockId == kLargeItemBlockId)

    size_t index = ((char*)item - region->baseAddr) / kBlockSize;
    HeapBlock *b = blocks + region->blockId + index;
    GCAssert(item >= b->baseAddr && item < b->baseAddr + b->size * GCHeap::kBlockSize);
size_t GCHeap::SafeSize(const void *item)
    MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
    GCAssert((uintptr_t(item) & kOffsetMask) == 0);
    HeapBlock *block = AddrToBlock(item);

    Region *r = AddrToRegion(item);
    if(r && r->blockId == kLargeItemBlockId)
        return LargeAllocSize(item);
// Return the number of blocks of slop at the beginning of an object
// starting at baseAddr for the given alignment.  Alignment is a
// number of blocks and must be a power of 2.  baseAddr must
// point to the beginning of a block.

static inline size_t alignmentSlop(char* baseAddr, size_t alignment)
    return (alignment - (size_t)(((uintptr_t)baseAddr >> GCHeap::kBlockShift) & (alignment - 1))) & (alignment - 1);
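
// Worked example of the slop computation above, assuming kBlockShift is the log2
// of the block size (so baseAddr >> kBlockShift is a block number): with
// alignment = 4 blocks and a baseAddr whose block number is 13, (13 & 3) == 1 and
// slop = (4 - 1) & 3 = 3, so skipping 3 blocks lands on block 16, which is
// 4-block aligned.  A baseAddr already on a multiple of 4 gives (4 - 0) & 3 = 0,
// i.e. no slop.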
// Reserves region of size == sizeInBytes, while attempting to
// insert filler >= fill_sz bytes between pairs of consecutively
// reserved regions.  (Goal: exercise address space extremities
// w/o actually committing memory within the filler area itself.)
static char* reserveSomeRegionDispersively(size_t fill_sz, size_t sizeInBytes) {
    static bool retLowAddr = false; // each call toggles low/high.

    void *mem0 = VMPI_reserveMemoryRegion(NULL, fill_sz);
    void *mem1 = VMPI_reserveMemoryRegion(NULL, fill_sz);

    if ((retLowAddr && mem0 > mem1) || ( !retLowAddr && mem0 < mem1)) {
        void *swap_tmp = mem0;

    VMPI_releaseMemoryRegion(mem0, fill_sz);
    char *addr = (char*)VMPI_reserveMemoryRegion(mem0, sizeInBytes);
    VMPI_releaseMemoryRegion(mem1, fill_sz);

        addr = (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);

    retLowAddr = ! retLowAddr;
REALLY_INLINE char *GCHeap::ReserveSomeRegion(size_t sizeInBytes)
    if (!config.dispersiveAdversarial)
        return (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);

    return reserveSomeRegionDispersively(config.dispersiveAdversarial,

    return (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);
void *GCHeap::LargeAlloc(size_t size, size_t alignment)
    GCAssert(config.useVirtualMemory);

    size_t sizeInBytes = size * kBlockSize;

    if(!EnsureFreeRegion(true))

    char* addr = ReserveSomeRegion(sizeInBytes);

    size_t unalignedSize = sizeInBytes;

    if(alignmentSlop(addr, alignment) != 0) {
        VMPI_releaseMemoryRegion(addr, sizeInBytes);
        unalignedSize = sizeInBytes + (alignment-1) * kBlockSize;
        addr = ReserveSomeRegion(unalignedSize);

    char *alignedAddr = addr + alignmentSlop(addr, alignment) * kBlockSize;
    if(!VMPI_commitMemory(alignedAddr, sizeInBytes)) {
        VMPI_releaseMemoryRegion(addr, sizeInBytes);

    // Note that we don't actually track the beginning of the committed region;
    // LargeFree doesn't need it.
              addr + unalignedSize,       // reserveTop
              alignedAddr + sizeInBytes,  // commitTop

    largeAllocs += size;
    CheckForNewMaxTotalHeapSize();
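
    // Illustrative arithmetic for the alignment handling above: if the first
    // reservation is not aligned, the code re-reserves sizeInBytes plus
    // (alignment-1) extra blocks so that an aligned start is guaranteed to exist
    // somewhere inside the reservation.  E.g. for size = 8 blocks and
    // alignment = 4, unalignedSize covers 8 + 3 = 11 blocks of address space, at
    // most 3 leading blocks are skipped (alignmentSlop), and exactly sizeInBytes
    // is committed starting at alignedAddr.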
void GCHeap::LargeFree(const void *item)
    GCAssert(config.useVirtualMemory);

    size_t size = LargeAllocSize(item);
    largeAllocs -= size;
    Region *r = AddrToRegion(item);
    // Must use r->baseAddr which may be less than item due to alignment,
    // and we must calculate full size
    VMPI_releaseMemoryRegion(r->baseAddr, r->reserveTop - r->baseAddr);
    RemoveRegion(r, false);
GCHeap::HeapBlock* GCHeap::AllocBlock(size_t size, bool& zero, size_t alignment)
    uint32_t startList = GetFreeListIndex(size);
    HeapBlock *freelist = &freelists[startList];

    HeapBlock *decommittedSuitableBlock = NULL;

    // Search for a big enough block in the free lists
    for (uint32_t i = startList; i < kNumFreeLists; i++)
        HeapBlock *block = freelist;
        while ((block = block->next) != freelist)

            // Prefer a single committed block that is at least large enough.
            if (block->size >= size + alignmentSlop(block->baseAddr, alignment) && block->committed) {
                RemoveFromList(block);
                return AllocCommittedBlock(block, size, zero, alignment);

            // Look for a sequence of decommitted and committed blocks that together would
            // be large enough, in case a single committed block can't be found.
            if(config.useVirtualMemory && decommittedSuitableBlock == NULL && !block->committed)

                // Note, 'block' should be invariant throughout this scope, it's the block
                // whose successors and predecessors we're inspecting
                GCAssert(!block->inUse());

                size_t totalSize = block->size;
                HeapBlock *firstFree = block;
                size_t firstSlop = alignmentSlop(firstFree->baseAddr, alignment);

                // Coalesce with predecessors
                while(totalSize < size + firstSlop && firstFree->sizePrevious != 0)
                    HeapBlock *prevBlock = firstFree - firstFree->sizePrevious;
                    if(prevBlock->inUse() || prevBlock->size == 0)
                    totalSize += prevBlock->size;
                    firstFree = prevBlock;
                    firstSlop = alignmentSlop(firstFree->baseAddr, alignment);

                // Coalesce with successors
                HeapBlock *nextBlock = block + block->size;
                while (totalSize < size + firstSlop && !(nextBlock->inUse() || nextBlock->size == 0)) {
                    totalSize += nextBlock->size;
                    nextBlock = nextBlock + nextBlock->size;

                // Keep it if it's large enough
                if(totalSize >= size + firstSlop)
                    decommittedSuitableBlock = firstFree;

    // We only get here if we couldn't find a single committed large enough block.
    if (decommittedSuitableBlock != NULL)
        return AllocCommittedBlock(CreateCommittedBlock(decommittedSuitableBlock, size, alignment),
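
    // Illustrative walk-through of the search above (sizes are examples): asking
    // for 6 blocks with alignment 1 starts at the free-list bucket for size 6 and
    // scans upward.  A committed 8-block entry is taken immediately and trimmed
    // later by AllocCommittedBlock, whereas a decommitted 3-block entry is only
    // remembered (as decommittedSuitableBlock) if, together with its free
    // neighbours on both sides, it covers at least 6 blocks plus any alignment
    // slop.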
GCHeap::HeapBlock* GCHeap::AllocCommittedBlock(HeapBlock* block, size_t size, bool& zero, size_t alignment)
    GCAssert(block->committed);
    GCAssert(block->size >= size);
    GCAssert(block->inUse());

    size_t slop = alignmentSlop(block->baseAddr, alignment);

        HeapBlock *oldBlock = block;
        block = Split(block, slop);
        AddToFreeList(oldBlock);
        GCAssert(alignmentSlop(block->baseAddr, alignment) == 0);
        GCAssert(block->size >= size);

    if(block->size > size)
        HeapBlock *newBlock = Split(block, size);
        AddToFreeList(newBlock);

    zero = block->dirty && zero;

        const uint32_t* base_u;
        base_c = block->baseAddr;
        GCAssert(*base_u == 0);
// Turn a sequence of committed and uncommitted blocks into a single committed
// block that's at least large enough to satisfy the request.
GCHeap::HeapBlock* GCHeap::CreateCommittedBlock(HeapBlock* block, size_t size, size_t alignment)
    RemoveFromList(block);

    // We'll need to allocate extra space to account for the space that will
    // later be removed from the start of the block.
    size += alignmentSlop(block->baseAddr, alignment);

    // If the first block is too small then coalesce it with the following blocks
    // to create a block that's large enough.  Some parts of the total block may
    // already be committed.  If the platform allows it we commit the entire
    // range with one call even if parts were committed before, on the assumption
    // that that is faster than several commit() calls, one for each decommitted
    // block.  (We have no current data to support that; now == 2010-03-19.)

    if(block->size < size)

        size_t amountRecommitted = block->committed ? 0 : block->size;
        bool dirty = block->dirty;

        // The first block needs to be committed when sloppyCommit is disabled.
        if(!config.sloppyCommit && !block->committed)

        while(block->size < size)

            // Coalesce the next block into this one.
            HeapBlock *nextBlock = block + block->size;
            RemoveFromList(nextBlock);

            if (nextBlock->committed)
                dirty = dirty || nextBlock->dirty;

                if (block->size + nextBlock->size >= size)  // Last block?
                    PruneDecommittedBlock(nextBlock, block->size + nextBlock->size, size);

                amountRecommitted += nextBlock->size;

                if (!config.sloppyCommit)

            block->size += nextBlock->size;

            nextBlock->size = 0;
            nextBlock->baseAddr = 0;
            nextBlock->sizePrevious = 0;

        (block + block->size)->sizePrevious = block->size;

        GCAssert(amountRecommitted > 0);

        if (config.sloppyCommit)

        block->dirty = dirty;

        PruneDecommittedBlock(block, block->size, size);

    GCAssert(block->size >= size && block->committed);
// If the tail of a coalesced block is decommitted and committing it creates
// a block that's too large for the request then we may wish to split the tail
// before committing it in order to avoid committing memory we won't need.
//
// 'available' is the amount of memory available including the memory in 'block',
// and 'request' is the amount of memory required.

void GCHeap::PruneDecommittedBlock(HeapBlock* block, size_t available, size_t request)
    GCAssert(available >= request);
    GCAssert(!block->committed);

    size_t toCommit = request > kMinHeapIncrement ? request : kMinHeapIncrement;
    size_t leftOver = available - request;

    if (available > toCommit && leftOver > 0)
        HeapBlock *newBlock = Split(block, block->size - leftOver);
        AddToFreeList(newBlock);
GCHeap::HeapBlock *GCHeap::Split(HeapBlock *block, size_t size)
    GCAssert(block->size > size);
    HeapBlock *newBlock = block + size;
    newBlock->Init(block->baseAddr + kBlockSize * size, block->size - size, block->dirty);
    newBlock->sizePrevious = size;
    newBlock->committed = block->committed;

    // Update sizePrevious in block after that
    HeapBlock *nextBlock = newBlock + newBlock->size;
    nextBlock->sizePrevious = newBlock->size;
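
    // Illustrative arithmetic for Split() above (numbers are an example): splitting
    // a 10-block entry at size 4 produces newBlock == block + 4 describing the
    // remaining 6 blocks, with newBlock->baseAddr == block->baseAddr + 4*kBlockSize,
    // newBlock->sizePrevious == 4, and the dirty/committed state copied from the
    // original entry; the block after the pair has its sizePrevious updated to 6.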
void GCHeap::Commit(HeapBlock *block)
    GCAssert(config.sloppyCommit || !block->committed);

    if(!VMPI_commitMemory(block->baseAddr, block->size * kBlockSize))

    if(config.verbose) {
        GCLog("recommitted %d pages\n", block->size);

    numDecommitted -= block->size;
    block->committed = true;
    block->dirty = VMPI_areNewPagesDirty();
// Non-debug version in GCHeap.h
void GCHeap::CheckFreelist()
    HeapBlock *freelist = freelists;
    for (uint32_t i = 0; i < kNumFreeLists; i++)
        HeapBlock *block = freelist;
        while((block = block->next) != freelist)
            GCAssert(block != block->next);
            GCAssert(block != block->next->next || block->next == freelist);

            // Coalescing is eager so no block on the list should have adjacent blocks
            // that are also on the free list and in the same committed state
            if(block->sizePrevious)
                HeapBlock *prev = block - block->sizePrevious;
                GCAssert(block->sizePrevious == prev->size);
                GCAssert(prev->inUse() || prev->size == 0 || prev->committed != block->committed);

            HeapBlock *next = block + block->size;
            GCAssert(next->inUse() || next->size == 0 || next->committed != block->committed);

    // Debugging code to find problems with block/region layout
    // This code is slow, but can be useful for tracking down issues
    // It verifies that the memory for each block corresponds to one or more regions
    // and that each region points to a valid starting block
    Region* r = lastRegion;

    for(block_idx = 0; block_idx < blocksLen; ++block_idx){
        HeapBlock* b = blocks + block_idx;

        int contig_size = 0;

            if(b->baseAddr >= r->baseAddr && b->baseAddr < r->reserveTop) {
                // starts in this region
                char* end = b->baseAddr + b->size*kBlockSize;
                if(end > (r->reserveTop + contig_size) ){
                    GCLog("error, block %d %p %d did not find a matching region\n", block_idx, b->baseAddr, b->size);
                    GCLog("Started in region %p - %p, contig size: %d\n", r->baseAddr, r->reserveTop, contig_size);

            else if( r->prev && r->prev->reserveTop==r->baseAddr ){
                contig_size += r->reserveTop - r->baseAddr;

        if(!blocks[r->blockId].size){
            for( int i = r->blockId-1; i >= 0 ; --i )
                if( blocks[i].size ){
                    // Look for spanning blocks
                    if( ((blocks[i].baseAddr + blocks[i].size*kBlockSize) <= r->baseAddr) ) {
                        GCLog("Invalid block id for region %p-%p %d\n", r->baseAddr, r->reserveTop, i);

        GCLog("%p - %p\n", r->baseAddr, r->reserveTop);

    for(int b = 0; b < blocksLen; ++b){
        GCLog("%d %p %d\n", b, blocks[b].baseAddr, blocks[b].size);
bool GCHeap::BlocksAreContiguous(void *item1, void *item2)
    Region *r1 = AddrToRegion(item1);
    Region *r2 = AddrToRegion(item2);
    return r1 == r2 || r1->reserveTop == r2->baseAddr;
void GCHeap::AddToFreeList(HeapBlock *block, HeapBlock* pointToInsert)
    block->next = pointToInsert;
    block->prev = pointToInsert->prev;
    block->prev->next = block;
    pointToInsert->prev = block;
void GCHeap::AddToFreeList(HeapBlock* block, bool makeDirty)
    GCAssert(block->size != 0);

    // Try to coalesce a committed block with its committed non-sentinel predecessor
    if(block->committed && block->sizePrevious)
        HeapBlock *prevBlock = block - block->sizePrevious;
        GCAssert(prevBlock->size > 0 || !prevBlock->committed);

        if (!prevBlock->inUse() && prevBlock->committed)
            // Remove predecessor block from free list
            RemoveFromList(prevBlock);

            // Increase size of predecessor block
            prevBlock->size += block->size;

            block->sizePrevious = 0;
            block->baseAddr = 0;

            makeDirty = makeDirty || block->dirty;

    // Try to coalesce a committed block with its committed non-sentinel successor
    if (block->committed)
        HeapBlock *nextBlock = block + block->size;
        // This is not correct - sentinels are not necessarily committed.  We
        // may or may not want to fix that.
        //GCAssert(nextBlock->size > 0 || !nextBlock->committed);

        if (!nextBlock->inUse() && nextBlock->committed) {
            // Remove successor block from free list
            RemoveFromList(nextBlock);

            // Increase size of current block
            block->size += nextBlock->size;
            nextBlock->size = 0;
            nextBlock->baseAddr = 0;
            nextBlock->sizePrevious = 0;
            makeDirty = makeDirty || nextBlock->dirty;

    // Update sizePrevious in the next block
    HeapBlock *nextBlock = block + block->size;
    nextBlock->sizePrevious = block->size;

    block->dirty = block->dirty || makeDirty;

    // Add the coalesced block to the right free list, in the right
    // position.  Free lists are ordered by increasing block size.
    int index = GetFreeListIndex(block->size);
    HeapBlock *freelist = &freelists[index];
    HeapBlock *pointToInsert = freelist;

    // If the block size is below kUniqueThreshold then its free list
    // will have blocks of only one size and no search is needed.
    if (block->size >= kUniqueThreshold) {
        while ((pointToInsert = pointToInsert->next) != freelist) {
            if (pointToInsert->size >= block->size) {

    AddToFreeList(block, pointToInsert);
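
    // Illustrative note on the insertion above (sizes are examples): lists for
    // blocks below kUniqueThreshold hold a single size each, so no search is
    // needed; larger blocks share lists kept sorted by ascending size, so the
    // loop stops at the first entry at least as big as the new block and the
    // block is inserted in front of it.  Inserting a 12-block entry into a list
    // holding sizes 8, 10 and 16 therefore places it between 10 and 16.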
void GCHeap::FreeBlock(HeapBlock *block)
    GCAssert(block->inUse());

    // trash it.  fb == free block
    if (!RUNNING_ON_VALGRIND)
        VMPI_memset(block->baseAddr, uint8_t(MMFreedPoison), block->size * kBlockSize);

    AddToFreeList(block, true);
void GCHeap::CheckForNewMaxTotalHeapSize()
    // GetTotalHeapSize is probably fairly cheap; even so this strikes me
    // as a bit of a hack.
    size_t heapSizeNow = GetTotalHeapSize() * kBlockSize;
    if (heapSizeNow > maxTotalHeapSize) {
        maxTotalHeapSize = heapSizeNow;
#ifdef MMGC_POLICY_PROFILING
        // The guard on instance being non-NULL is a hack, to be fixed later (now=2009-07-20).
        // Some VMPI layers (WinMo is at least one of them) try to grab the GCHeap instance to get
        // at the map of private pages.  But the GCHeap instance is not available during the initial
        // call to ExpandHeap.  So sidestep that problem here.
        //
        // Note that if CheckForNewMaxTotalHeapSize is only called once then maxPrivateMemory
        // will be out of sync with maxTotalHeapSize, see also bugzilla 608684.
        if (instance != NULL)
            maxPrivateMemory = VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize();
bool GCHeap::ExpandHeap( size_t askSize )
    bool bRetVal = ExpandHeapInternal(askSize);
    CheckForNewMaxTotalHeapSize();
bool GCHeap::HardLimitExceeded(size_t additionalAllocationAmt)
    return GetTotalHeapSize() + externalPressure/kBlockSize + additionalAllocationAmt > config.heapLimit;

bool GCHeap::SoftLimitExceeded(size_t additionalAllocationAmt)
    if (config.heapSoftLimit == 0) return false;
    return GetTotalHeapSize() + externalPressure/kBlockSize + additionalAllocationAmt > config.heapSoftLimit;
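
// Worked example of the two checks above (numbers illustrative): the heap is over
// a limit when total heap blocks + external pressure expressed in blocks + the
// blocks about to be allocated exceed the configured limit.  With
// heapLimit = 1000, a total heap of 950 blocks, externalPressure equivalent to
// 40 blocks and a pending request of 20 blocks, 950 + 40 + 20 = 1010 > 1000, so
// HardLimitExceeded(20) is true while HardLimitExceeded(0) is not (990 <= 1000).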
#define roundUp(_s, _inc) (((_s + _inc - 1) / _inc) * _inc)
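
// Example expansions of the macro above: roundUp(5000, 4096) == 8192 and
// roundUp(8192, 4096) == 8192, i.e. sizes are rounded up to the next multiple of
// _inc and values already on a multiple are left unchanged.  The arguments are
// substituted textually, so callers pass simple expressions.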
bool GCHeap::ExpandHeapInternal(size_t askSize)
    size_t size = askSize;

    // Turn this switch on to test bridging of contiguous regions.
    bool test_bridging = false;
    size_t defaultReserve = test_bridging ? (size+kMinHeapIncrement) : kDefaultReserve;
    const size_t defaultReserve = kDefaultReserve;

    char *baseAddr = NULL;
    char *newRegionAddr = NULL;
    size_t newRegionSize = 0;
    bool contiguous = false;
    size_t commitAvail = 0;

    // Round up to the nearest kMinHeapIncrement
    size = roundUp(size, kMinHeapIncrement);

    // when we allocate a new region the space needed for the HeapBlocks, if it won't fit
    // in existing space it must fit in new space so we may need to increase the new space
    HeapBlock *newBlocks = blocks;

    if(blocksLen != 0 || // first time through just take what we need out of initialSize instead of adjusting
       config.initialSize == 0) // unless initialSize is zero of course

        int extraBlocks = 1; // for potential new sentinel
        if(nextRegion == NULL) {
            extraBlocks++; // may need a new page for regions

        size_t curHeapBlocksSize = blocks ? AddrToBlock(blocks)->size : 0;
        size_t newHeapBlocksSize = numHeapBlocksToNumBlocks(blocksLen + size + extraBlocks);

        // size is based on newSize and vice versa, loop to settle (typically one loop, sometimes two)
        while(newHeapBlocksSize > curHeapBlocksSize)

            // use askSize so HeapBlock's can fit in rounding slop
            size = roundUp(askSize + newHeapBlocksSize + extraBlocks, kMinHeapIncrement);

            // tells us use new memory for blocks below

            // since newSize is based on size we have to repeat in case it changes
            curHeapBlocksSize = newHeapBlocksSize;
            newHeapBlocksSize = numHeapBlocksToNumBlocks(blocksLen + size + extraBlocks);

    // At this point we have adjusted/calculated the final size that would need to be committed.
    // We need to check that against the hardlimit to see if we are going to go above it.
    if (HardLimitExceeded(size))

    if(config.useVirtualMemory)
        Region *region = lastRegion;
            commitAvail = (int)((region->reserveTop - region->commitTop) / kBlockSize);

            // Can this request be satisfied purely by committing more memory that
            // is already reserved?
            if (size <= commitAvail) {
                if (VMPI_commitMemory(region->commitTop, size * kBlockSize))
                    baseAddr = region->commitTop;

                    // check for continuity, we can only be contiguous with the end since
                    // we don't have a "block insert" facility
                    HeapBlock *last = &blocks[blocksLen-1] - blocks[blocksLen-1].sizePrevious;
                    contiguous = last->baseAddr + last->size * kBlockSize == baseAddr;

                    // Update the commit top.
                    region->commitTop += size*kBlockSize;

                    // Go set up the block list.

                // If we can't commit memory we've already reserved,
                // no other trick is going to work.  Fail.

            // Try to reserve a region contiguous to the last region.
            //
            // - Try for the "default reservation size" if it's larger than
            //   the requested block.
            if (defaultReserve > size) {
                newRegionAddr = (char*) VMPI_reserveMemoryRegion(region->reserveTop,
                                                                 defaultReserve * kBlockSize);
                newRegionSize = defaultReserve;

            // - If the default reservation size didn't work or isn't big
            //   enough, go for the exact amount requested, minus the
            //   committable space in the current region.
            if (newRegionAddr == NULL) {
                newRegionAddr = (char*) VMPI_reserveMemoryRegion(region->reserveTop,
                                                                 (size - commitAvail)*kBlockSize);
                newRegionSize = size - commitAvail;

                // check for contiguity
                if(newRegionAddr && newRegionAddr != region->reserveTop) {
                    // we can't use this space since we need commitAvail from prev region to meet
                    // the size requested, toss it
                    ReleaseMemory(newRegionAddr, newRegionSize*kBlockSize);
                    newRegionAddr = NULL;

            if (newRegionAddr == region->reserveTop) // we'll use the region below as a separate region if it's not contiguous

                // We were able to reserve some space.

                // Commit available space from the existing region.
                if (commitAvail != 0) {
                    if (!VMPI_commitMemory(region->commitTop, commitAvail * kBlockSize))
                        // We couldn't commit even this space.  We're doomed.
                        // Un-reserve the space we just reserved and fail.
                        ReleaseMemory(newRegionAddr, newRegionSize);

                // Commit needed space from the new region.
                if (!VMPI_commitMemory(newRegionAddr, (size - commitAvail) * kBlockSize))
                    // We couldn't commit this space.  We can't meet the
                    // request.  Un-commit any memory we just committed,
                    // un-reserve any memory we just reserved, and fail.
                    if (commitAvail != 0) {
                        VMPI_decommitMemory(region->commitTop,
                                            commitAvail * kBlockSize);
                    ReleaseMemory(newRegionAddr,
                                  (size-commitAvail)*kBlockSize);

                // We successfully reserved a new contiguous region
                // and committed the memory we need.  Finish up.
                baseAddr = region->commitTop;
                region->commitTop = lastRegion->reserveTop;

                // check for continuity, we can only be contiguous with the end since
                // we don't have a "block insert" facility
                HeapBlock *last = &blocks[blocksLen-1] - blocks[blocksLen-1].sizePrevious;
                contiguous = last->baseAddr + last->size * kBlockSize == baseAddr;

        // We were unable to allocate a contiguous region, or there
        // was no existing region to be contiguous to because this
        // is the first-ever expansion.  Allocate a non-contiguous region.

        // Don't use any of the available space in the current region.

        // - Go for the default reservation size unless the requested
        //   size is bigger.
        if (newRegionAddr == NULL && size < defaultReserve) {
            newRegionAddr = ReserveSomeRegion(defaultReserve*kBlockSize);
            newRegionSize = defaultReserve;

        // - If that failed or the requested size is bigger than default,
        //   go for the requested size exactly.
        if (newRegionAddr == NULL) {
            newRegionAddr = ReserveSomeRegion(size*kBlockSize);
            newRegionSize = size;

        // - If that didn't work, give up.
        if (newRegionAddr == NULL) {

        // - Try to commit the memory.
        if (VMPI_commitMemory(newRegionAddr,
                              size*kBlockSize) == 0)
            // Failed.  Un-reserve the memory and fail.
            ReleaseMemory(newRegionAddr, newRegionSize*kBlockSize);

        // If we got here, we've successfully allocated a
        // non-contiguous region.
        baseAddr = newRegionAddr;

        // Allocate the requested amount of space as a new region.
        newRegionAddr = (char*)VMPI_allocateAlignedMemory(size * kBlockSize);
        baseAddr = newRegionAddr;
        newRegionSize = size;

        // If that didn't work, give up.
        if (newRegionAddr == NULL) {

    // If we were able to allocate a contiguous block, remove
    // the old top sentinel.

    // Expand the block list.
    size_t newBlocksLen = blocksLen + size;

    // Add space for the "top" sentinel

    newBlocks = (HeapBlock*)(void *)baseAddr;

    // Copy all the existing blocks.
    if (blocks && blocks != newBlocks) {
        VMPI_memcpy(newBlocks, blocks, blocksLen * sizeof(HeapBlock));

        // Fix up the prev/next pointers of each freelist.
        HeapBlock *freelist = freelists;
        for (uint32_t i=0; i<kNumFreeLists; i++) {
            HeapBlock *temp = freelist;
                if (temp->prev != freelist) {
                    temp->prev = newBlocks + (temp->prev-blocks);
                if (temp->next != freelist) {
                    temp->next = newBlocks + (temp->next-blocks);
            } while ((temp = temp->next) != freelist);

    // Create a single free block for the new space,
    // and add it to the free list.
    HeapBlock *block = newBlocks+blocksLen;
    block->Init(baseAddr, size, newPagesDirty());

    // link up contiguous blocks
    if(blocksLen && contiguous)
        // search backwards for first real block
        HeapBlock *b = &blocks[blocksLen-1];
        GCAssert(b >= blocks);

        block->sizePrevious = b->size;
        GCAssert((block - block->sizePrevious)->size == b->size);

    // if baseAddr was used for HeapBlocks split
    if((char*)newBlocks == baseAddr)
        size_t numBlocksNeededForHeapBlocks = numHeapBlocksToNumBlocks(newBlocksLen);
        HeapBlock *next = Split(block, numBlocksNeededForHeapBlocks);
        // this space counts as used space
        numAlloc += numBlocksNeededForHeapBlocks;

    // get space for region allocations
    if(nextRegion == NULL) {
        nextRegion = (Region*)(void *)block->baseAddr;
        HeapBlock *next = Split(block, 1);
        // this space counts as used space

    // Save off and add after initializing all blocks.
    HeapBlock *newBlock = block;

    // Initialize the rest of the new blocks to empty.
    size_t freeBlockSize = block->size;

    for (uint32_t i=1; i < freeBlockSize; i++) {

    // Fill in the sentinel for the top of the heap.
    block->sizePrevious = freeBlockSize;

    AddToFreeList(newBlock);

    // save for freeing
    void *oldBlocks = blocks;

    blocksLen = newBlocksLen;

    // free old blocks space using new blocks (FreeBlock poisons blocks so can't use old blocks)
    if (oldBlocks && oldBlocks != newBlocks) {
        HeapBlock *oldBlocksHB = AddrToBlock(oldBlocks);
        numAlloc -= oldBlocksHB->size;
        FreeBlock(oldBlocksHB);

    // If we created a new region, save the base address so we can free later.
    if (newRegionAddr) {
        /*  The mergeContiguousRegions bit is broken, since we
            loop over all regions we may be contiguous with an
            existing older HeapBlock and we don't support inserting a
            new address range arbitrarily into the HeapBlock
            array (contiguous regions must be contiguous heap
            blocks vis-a-vis the region block id)
            config.mergeContiguousRegions) {
                lastRegion->reserveTop += newRegionSize*kBlockSize;
                lastRegion->commitTop +=
                    (size-commitAvail)*kBlockSize;
        */
        Region *newRegion = NewRegion(newRegionAddr,  // baseAddr
                                      newRegionAddr+newRegionSize*kBlockSize, // reserve top
                                      newRegionAddr+(size-commitAvail)*kBlockSize, // commit top
                                      newBlocksLen-(size-commitAvail)-1); // block id

        GCLog("reserved new region, %p - %p %s\n",
              newRegion->baseAddr,
              newRegion->reserveTop,
              contiguous ? "contiguous" : "non-contiguous");

    if(config.verbose) {
        GCLog("heap expanded by %d pages\n", size);

    ValidateHeapBlocks();
void GCHeap::RemoveRegion(Region *region, bool release)
    Region **next = &lastRegion;
    while(*next != region)
        next = &((*next)->prev);
    *next = region->prev;

    ReleaseMemory(region->baseAddr,
                  region->reserveTop-region->baseAddr);

    if(config.verbose) {
        GCLog("unreserved region 0x%p - 0x%p (commitTop: %p)\n", region->baseAddr, region->reserveTop, region->commitTop);
void GCHeap::FreeAll()
    // Release all of the heap regions
    while (lastRegion != NULL) {
        Region *region = lastRegion;
        lastRegion = lastRegion->prev;
        if(region->blockId == kLargeItemBlockId) {
            // leaks can happen during abort
            GCAssertMsg(status == kMemAbort, "Allocation of large object not freed");
            VMPI_releaseMemoryRegion(region->baseAddr, region->reserveTop - region->baseAddr);

            ReleaseMemory(region->baseAddr,
                          region->reserveTop-region->baseAddr);
void GCHeap::AllocHook(const void *item, size_t askSize, size_t gotSize)
#ifdef MMGC_MEMORY_PROFILER
        profiler->RecordAllocation(item, askSize, gotSize);
#ifdef MMGC_MEMORY_INFO
    DebugDecorate(item, gotSize);
#ifdef AVMPLUS_SAMPLER
    avmplus::recordAllocationSample(item, gotSize);

void GCHeap::FinalizeHook(const void *item, size_t size)
    (void)item,(void)size;
#ifdef MMGC_MEMORY_PROFILER
        profiler->RecordDeallocation(item, size);
#ifdef AVMPLUS_SAMPLER
    avmplus::recordDeallocationSample(item, size);

void GCHeap::FreeHook(const void *item, size_t size, int poison)
    (void)poison,(void)item,(void)size;
#ifdef MMGC_MEMORY_INFO
    DebugFree(item, poison, size, true);

void GCHeap::PseudoFreeHook(const void *item, size_t size, int poison)
    (void)poison,(void)item,(void)size;
#ifdef MMGC_MEMORY_INFO
    DebugFree(item, poison, size, false);

#endif // MMGC_HOOKS
    EnterFrame::EnterFrame() :
        m_abortUnwindList(NULL)
    {
        GCHeap *heap = GCHeap::GetGCHeap();
        EnterFrame *ef = m_previous = heap->GetEnterFrame();

        if(ef && ef->Suspended()) {
            // propagate the active gc from the suspended frame
            m_gc = ef->GetActiveGC();
        }

        if(ef == NULL || ef->Suspended()) {
            // ...
        }
    }

    // this is the first thing we run after the Abort longjmp
    EnterFrame::~EnterFrame()
    {
        GCHeap *heap = m_heap;
        // this prevents us from doing multiple jumps in case Leave results in more allocations
        m_heap = NULL;
        heap->Leave();
    }

    void EnterFrame::UnwindAllObjects()
    {
        while(m_abortUnwindList)
        {
            AbortUnwindObject *previous = m_abortUnwindList;
            m_abortUnwindList->Unwind();
            // The unwind call may remove the handler or may leave it on this list. If it leaves it, make sure to advance the list;
            // otherwise, the list advances automatically when the handler removes itself.
            if (m_abortUnwindList == previous)
            {
                m_abortUnwindList = m_abortUnwindList->next;
            }
        }
    }
    void EnterFrame::AddAbortUnwindObject(AbortUnwindObject *obj)
    {
        GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(obj), "obj can't be added to list twice!");
        // Push it on the front
        obj->next = m_abortUnwindList;
        if (m_abortUnwindList)
        {
            m_abortUnwindList->previous = obj;
        }
        m_abortUnwindList = obj;
    }

    void EnterFrame::RemoveAbortUnwindObject(AbortUnwindObject *obj)
    {
        GCAssertMsg(obj == m_abortUnwindList || obj->previous != NULL, "Object not in list");

        if (obj == m_abortUnwindList)
        {
            m_abortUnwindList = obj->next;
        }

        if (obj->previous != NULL)
        {
            (obj->previous)->next = obj->next;
        }
        if (obj->next != NULL)
        {
            (obj->next)->previous = obj->previous;
        }

        obj->previous = NULL;
    }
    AbortUnwindObject::~AbortUnwindObject()
    {
        GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(this), "RemoveAbortUnwindObject not called, dangling pointer in list.");
    }

    bool EnterFrame::IsAbortUnwindObjectInList(AbortUnwindObject *obj)
    {
        GCHeap *heap = GCHeap::GetGCHeap();
        EnterFrame *frame;
        if(heap && (frame = heap->GetEnterFrame()) != NULL)
        {
            AbortUnwindObject *list = frame->m_abortUnwindList;
            while(list != NULL)
            {
                if(list == obj)
                    return true;
                list = list->next;
            }
        }
        return false;
    }
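
    // Illustrative sketch (editorial addition, not part of the original source): a
    // host-side object that needs cleanup when GCHeap aborts can derive from
    // AbortUnwindObject, register itself on the current EnterFrame, and release its
    // resource in Unwind(). The HostTempBuffer class below is hypothetical; the
    // Add/Remove calls are the EnterFrame methods defined above.
    //
    //   class HostTempBuffer : public MMgc::AbortUnwindObject
    //   {
    //   public:
    //       HostTempBuffer(MMgc::EnterFrame* ef) : m_ef(ef), m_mem(VMPI_alloc(4096))
    //       {
    //           m_ef->AddAbortUnwindObject(this);        // register for abort unwinding
    //       }
    //       virtual void Unwind()                         // called by UnwindAllObjects on Abort
    //       {
    //           VMPI_free(m_mem);
    //           m_mem = NULL;
    //           m_ef->RemoveAbortUnwindObject(this);      // take ourselves off the list
    //       }
    //       ~HostTempBuffer()
    //       {
    //           if (m_mem) {                              // normal (non-abort) teardown
    //               VMPI_free(m_mem);
    //               m_ef->RemoveAbortUnwindObject(this);
    //           }
    //       }
    //   private:
    //       MMgc::EnterFrame* m_ef;
    //       void* m_mem;
    //   };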
    SuspendEnterFrame::SuspendEnterFrame() : m_ef(NULL)
    {
        GCHeap *heap = GCHeap::GetGCHeap();
        EnterFrame *ef = heap->GetEnterFrame();
        // ...
    }

    SuspendEnterFrame::~SuspendEnterFrame()
    {
        GCHeap *heap = GCHeap::GetGCHeap();
        GCAssertMsg(heap->GetEnterFrame() == m_ef, "EnterFrames not unwound properly");
        if(heap->GetStatus() == kMemAbort)
            heap->Abort();
    }
    void GCHeap::SystemOOMEvent(size_t size, int attempt)
    {
        if (attempt == 0 && !statusNotificationBeingSent())
            SendFreeMemorySignal(size/kBlockSize + 1);
    }

    void GCHeap::SignalObjectTooLarge()
    {
        GCLog("Implementation limit exceeded: attempting to allocate too-large object\n");
        GetGCHeap()->Abort();
    }

    void GCHeap::SignalInconsistentHeapState(const char* reason)
    {
        GCAssert(!"Inconsistent heap state; aborting");
        GCLog("Inconsistent heap state: %s\n", reason);
        GetGCHeap()->Abort();
    }

    void GCHeap::SignalExternalAllocation(size_t nbytes)
    {
        GCHeap* heap = GetGCHeap();

        MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);

        heap->externalPressure += nbytes;

        heap->CheckForMemoryLimitsExceeded();
    }

    void GCHeap::SignalExternalDeallocation(size_t nbytes)
    {
        GCHeap* heap = GetGCHeap();

        MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);

        heap->externalPressure -= nbytes;
        heap->CheckForStatusReturnToNormal();
    }

    void GCHeap::SignalExternalFreeMemory(size_t minimumBytesToFree /*= kMaxObjectSize */)
    {
        GCHeap* heap = GetGCHeap();
        GCAssertMsg(heap != NULL, "GCHeap not valid!");

        MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);

        // When SendFreeMemorySignal is called with kMaxObjectSize it will try to release
        // as much memory as possible; otherwise it interprets the parameter as the number
        // of blocks to be freed. This function takes bytes instead, so the value is converted
        // to blocks here, except when kMaxObjectSize has been passed in, so that it still
        // triggers freeing the maximum amount of memory. The division may lose some
        // precision, but SendFreeMemorySignal adds one more block to the requested amount.
        heap->SendFreeMemorySignal((minimumBytesToFree != kMaxObjectSize) ? minimumBytesToFree / GCHeap::kBlockSize : minimumBytesToFree);
    }
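
    // Illustrative sketch (editorial addition): how a host might report memory it
    // allocates outside MMgc so that externalPressure is included in the heap-limit
    // checks above. The buffer and its size are arbitrary examples.
    //
    //   size_t nbytes = 512*1024;
    //   void* buf = VMPI_alloc(nbytes);
    //   if (buf)
    //       MMgc::GCHeap::SignalExternalAllocation(nbytes);   // raises externalPressure
    //   // ... use buf ...
    //   VMPI_free(buf);
    //   MMgc::GCHeap::SignalExternalDeallocation(nbytes);     // lowers externalPressure
    //
    //   // Ask MMgc to try to give back roughly 1 MB to the OS:
    //   MMgc::GCHeap::SignalExternalFreeMemory(1024*1024);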
    // This can *always* be called. It will clean up the state on the current thread
    // if appropriate, otherwise do nothing. It *must* be called by host code if the
    // host code jumps past an MMGC_ENTER instance. (The Flash player does that, in
    // some circumstances.)

    void GCHeap::SignalImminentAbort()
    {
        if (instance == NULL)
            return;
        EnterFrame* ef = GetGCHeap()->GetEnterFrame();

        // We don't know if we're holding the lock, but we can release it anyhow,
        // on the assumption that this operation will not cause problems if the
        // lock is not held or is held by another thread.
        //
        // Release the lock so we don't deadlock if exit or longjmp end up coming
        // back to GCHeap (all callers must have this lock).

        VMPI_lockRelease(&instance->m_spinlock);

        // If the current thread is holding a lock for a GC that's not currently active on the thread,
        // then break the lock: the current thread is collecting in that GC, but the Abort has cancelled
        // the collection.
        ef->UnwindAllObjects();

        // Clear the enterFrame because we're jumping past MMGC_ENTER.
        GetGCHeap()->enterFrame = NULL;
    }
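
    // Illustrative sketch (editorial addition): host code that is about to jump past an
    // MMGC_ENTER instance (for example via longjmp) should call SignalImminentAbort first,
    // as the comment above requires. HostJumpBuffer and host_error_exit are hypothetical
    // host-side names.
    //
    //   #include <setjmp.h>
    //   extern jmp_buf HostJumpBuffer;
    //
    //   void host_error_exit()
    //   {
    //       MMgc::GCHeap::SignalImminentAbort();   // unwind abort handlers, clear enterFrame
    //       longjmp(HostJumpBuffer, 1);            // jumps past the MMGC_ENTER scope
    //   }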
    void GCHeap::Abort()
    {
        EnterFrame *ef = enterFrame;

        // If we hit abort, we need to turn m_oomHandling back on so that listeners are guaranteed to get this signal.
        // We also need to set m_notificationThread to NULL in case we hit abort while we were processing another memory status change.
        m_oomHandling = true;
        m_notificationThread = NULL;

        GCLog("error: out of memory\n");

        // release lock so we don't deadlock if exit or longjmp end up coming
        // back to GCHeap (all callers must have this lock)
        VMPI_lockRelease(&m_spinlock);

        // Lock must not be held when we call VMPI_exit; deadlocks ensue on Linux
        if(config.OOMExitCode != 0)
        {
            VMPI_exit(config.OOMExitCode);
        }

        // Guard against repeated jumps: ef->m_heap doubles as a flag. We go Abort->longjmp->~EnterFrame->Leave,
        // and Leave calls StatusChangeNotify, and the host code might do another allocation during shutdown,
        // in which case we want to go to VMPI_abort instead. At that point m_heap will be NULL and the right
        // thing to do is just abort.
        if (ef->m_heap != NULL)
        {
            ef->UnwindAllObjects();
            VMPI_longjmpNoUnwind(ef->jmpbuf, 1);
        }
        GCAssertMsg(false, "MMGC_ENTER missing or we allocated more memory trying to shutdown");
    }
    void GCHeap::Enter(EnterFrame *frame)
    {
        // ...
    }

    void GCHeap::Leave()
    {
        MMGC_LOCK(m_spinlock);

        if(status == kMemAbort && !abortStatusNotificationSent) {
            abortStatusNotificationSent = true;
            StatusChangeNotify(kMemAbort);
        }

        // do this after StatusChangeNotify: it affects ShouldNotEnter

        // need to check whether enterFrame is valid; it might have been nulled out by SignalImminentAbort
        EnterFrame* enter = enterFrame;
        if (enter)
            enterFrame = enter->Previous();

        // last one out of the pool pulls the plug
        if(status == kMemAbort && enterCount == 0 && abortStatusNotificationSent && preventDestruct == 0) {
            // ...
        }
    }
    void GCHeap::log_percentage(const char *name, size_t bytes, size_t bytes_compare)
    {
        bytes_compare = size_t((bytes*100.0)/bytes_compare);
        if(bytes > 1<<20) {
            GCLog("%s %u (%.1fM) %u%%\n", name, (unsigned int)(bytes / GCHeap::kBlockSize), bytes * 1.0 / (1024*1024), (unsigned int)(bytes_compare));
        } else {
            GCLog("%s %u (%uK) %u%%\n", name, (unsigned int)(bytes / GCHeap::kBlockSize), (unsigned int)(bytes / 1024), (unsigned int)(bytes_compare));
        }
    }
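
    // Worked example (editorial addition): with bytes = 40 MB of managed memory and
    // bytes_compare = 100 MB of private memory, (40*1024*1024*100.0)/(100*1024*1024) = 40,
    // so the line is logged with "40%". The first %u is the size in heap blocks and the
    // parenthesized value is the same size in megabytes (large values) or kilobytes.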
    void GCHeap::DumpMemoryInfo()
    {
        MMGC_LOCK(m_spinlock);
        size_t priv = VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize();
        size_t mmgc = GetTotalHeapSize() * GCHeap::kBlockSize;
        size_t unmanaged = GetFixedMalloc()->GetTotalSize() * GCHeap::kBlockSize;
        size_t fixed_alloced;
        size_t fixed_asksize;
        GetFixedMalloc()->GetUsageInfo(fixed_asksize, fixed_alloced);

        size_t gc_total = 0;
        size_t gc_allocated_total = 0;
        size_t gc_ask_total = 0;
        size_t gc_count = 0;
        BasicListIterator<GC*> iter(gcManager.gcs());
        GC* gc;
        while((gc = iter.next()) != NULL)
        {
#ifdef MMGC_MEMORY_PROFILER
            GCLog("[mem] GC 0x%p:%s\n", (void*)gc, GetAllocationName(gc));
#else
            GCLog("[mem] GC 0x%p\n", (void*)gc);
#endif
            gc->DumpMemoryInfo();

            size_t ask;
            size_t allocated;
            gc->GetUsageInfo(ask, allocated);
            gc_ask_total += ask;
            gc_allocated_total += allocated;
            gc_count++;

            gc_total += gc->GetNumBlocks() * kBlockSize;
        }

#ifdef MMGC_MEMORY_PROFILER
        fixedMalloc.DumpMemoryInfo();
#endif

        // Gross stats are not meaningful if the profiler is running; see bugzilla 490014.
        // Disabling their printing is just an expedient fix to avoid misleading data being
        // printed. There are other, more complicated, fixes we should adopt.

        GCLog("[mem] ------- gross stats -----\n");
#ifdef MMGC_MEMORY_PROFILER
        if (GCHeap::GetGCHeap()->GetProfiler() == NULL)
#endif
        {
            log_percentage("[mem] private", priv, priv);
            log_percentage("[mem]\t mmgc", mmgc, priv);
            log_percentage("[mem]\t\t unmanaged", unmanaged, priv);
            log_percentage("[mem]\t\t managed", gc_total, priv);
            log_percentage("[mem]\t\t free", (size_t)GetFreeHeapSize() * GCHeap::kBlockSize, priv);
            log_percentage("[mem]\t other", priv - mmgc, priv);
            log_percentage("[mem] \tunmanaged overhead ", unmanaged-fixed_alloced, unmanaged);
            log_percentage("[mem] \tmanaged overhead ", gc_total - gc_allocated_total, gc_total);
#ifdef MMGC_MEMORY_PROFILER
            log_percentage("[mem] \tunmanaged internal wastage", fixed_alloced - fixed_asksize, fixed_alloced);
            log_percentage("[mem] \tmanaged internal wastage", gc_allocated_total - gc_ask_total, gc_allocated_total);
#endif
            GCLog("[mem] number of collectors %u\n", unsigned(gc_count));
        }
#ifdef MMGC_MEMORY_PROFILER
        else
            GCLog("[mem] No gross stats available when profiler is enabled.\n");
#endif
        GCLog("[mem] -------- gross stats end -----\n");
#ifdef MMGC_MEMORY_PROFILER
    // ...
#endif // MMGC_MEMORY_PROFILER

    void GCHeap::LogChar(char c, size_t count)
    {
        char tmp[100];
        char* buf = count < 100 ? tmp : (char*)VMPI_alloc(count+1);
        if (buf == NULL)
            return;
        VMPI_memset(buf, c, count);
        buf[count] = '\0';

        GCLog(buf);
        if (buf != tmp)
            VMPI_free(buf);
    }
    void GCHeap::DumpHeapRep()
    {
        Region **regions = NULL;
        Region *r = lastRegion;
        int numRegions = 0;

        GCLog("Heap representation format: \n");
        GCLog("region base address - commitTop/reserveTop\n");
        GCLog("[0 == free, 1 == committed, - = uncommitted]*\n");

        // count and sort regions
        while(r) {
            numRegions++;
            r = r->prev;
        }
        regions = (Region**) VMPI_alloc(sizeof(Region*)*numRegions);
        if (regions == NULL)
            return;

        r = lastRegion;
        for(int i=0; i < numRegions; i++, r = r->prev) {
            int insert = i;
            for(int j=0; j < i; j++) {
                if(r->baseAddr < regions[j]->baseAddr) {
                    memmove(&regions[j+1], &regions[j], sizeof(Region*) * (i - j));
                    insert = j;
                    break;
                }
            }
            regions[insert] = r;
        }

        HeapBlock *spanningBlock = NULL;
        for(int i=0; i < numRegions; i++)
        {
            r = regions[i];
            GCLog("0x%p - 0x%p/0x%p\n", r->baseAddr, r->commitTop, r->reserveTop);
            char *addr = r->baseAddr;

            if(spanningBlock) {
                GCAssert(spanningBlock->baseAddr + (spanningBlock->size * kBlockSize) > r->baseAddr);
                GCAssert(spanningBlock->baseAddr < r->baseAddr);
                char *end = spanningBlock->baseAddr + (spanningBlock->size * kBlockSize);
                if(end > r->reserveTop)
                    end = r->reserveTop;

                LogChar(spanningBlock->inUse() ? '1' : '0', (end - addr)/kBlockSize);
                addr = end;

                if(addr == spanningBlock->baseAddr + (spanningBlock->size * kBlockSize))
                    spanningBlock = NULL;
            }

            HeapBlock *hb;
            while(addr != r->commitTop && (hb = AddrToBlock(addr)) != NULL) {
                GCAssert(hb->size != 0);

                char c;
                if(hb->free)
                    c = '0';
                else if(hb->committed)
                    c = '1';
                else
                    c = '-';

                size_t i, n;
                for(i=0, n=hb->size; i < n; i++, addr += GCHeap::kBlockSize) {
                    if(addr == r->reserveTop) {
                        // this block spans into the next region
                        spanningBlock = hb;
                        break;
                    }
                }
                LogChar(c, i);
            }

            LogChar('-', (r->reserveTop - addr) / kBlockSize);

            GCLog("\n");
        }
        VMPI_free(regions);
    }
#ifdef MMGC_MEMORY_PROFILER

    void GCHeap::InitProfiler()
    {
        GCAssert(IsProfilerInitialized() == false);

#ifdef MMGC_MEMORY_INFO
        bool profilingEnabled = true;
#else
        bool profilingEnabled = VMPI_isMemoryProfilingEnabled();
#endif
        if(profilingEnabled)
        {
            profiler = new MemoryProfiler();
        }
    }

#endif //MMGC_MEMORY_PROFILER

#ifdef MMGC_MEMORY_PROFILER
#ifdef MMGC_USE_SYSTEM_MALLOC

    void GCHeap::TrackSystemAlloc(void *addr, size_t askSize)
    {
        MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
        if(!IsProfilerInitialized())
            InitProfiler();
        if(profiler)
            profiler->RecordAllocation(addr, askSize, VMPI_size(addr));
    }

    void GCHeap::TrackSystemFree(void *addr)
    {
        MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
        if(addr && profiler)
            profiler->RecordDeallocation(addr, VMPI_size(addr));
    }

#endif //MMGC_USE_SYSTEM_MALLOC
#endif // MMGC_MEMORY_PROFILER
    void GCHeap::ReleaseMemory(char *address, size_t size)
    {
        if(config.useVirtualMemory) {
            bool success = VMPI_releaseMemoryRegion(address, size);
            GCAssert(success);
            (void)success;
        } else {
            VMPI_releaseAlignedMemory(address);
        }
    }
    void GCManager::destroy()
    {
        collectors.Destroy();
    }

    void GCManager::signalStartCollection(GC* gc)
    {
        BasicListIterator<GC*> iter(collectors);
        GC* otherGC;
        while((otherGC = iter.next()) != NULL)
            otherGC->policy.signalStartCollection(gc);
    }

    void GCManager::signalEndCollection(GC* gc)
    {
        BasicListIterator<GC*> iter(collectors);
        GC* otherGC;
        while((otherGC = iter.next()) != NULL)
            otherGC->policy.signalStartCollection(gc);
    }
    /* This method is the heart of the OOM system: it's here that we call out
       to the mutator, which may call back in to free memory or try to get more.

       Note! The caller needs to hold m_spinlock before calling this!
    */
    void GCHeap::SendFreeMemorySignal(size_t minimumBlocksToFree)
    {
        // If we're already in the process of sending out memory notifications, don't bother verifying now.
        // Also, we only want to send the "free memory" signal while our memory is in a normal state. Once
        // we've entered softLimit or abort state, we want to allow the softlimit or abort processing to return
        // the heap to normal before continuing.

        if (statusNotificationBeingSent() || status != kMemNormal || !m_oomHandling)
            return;

        m_notificationThread = VMPI_currentThread();

        size_t startingTotal = GetTotalHeapSize() + externalPressure / kBlockSize;
        size_t currentTotal = startingTotal;

        BasicListIterator<OOMCallback*> iter(callbacks);
        OOMCallback *cb = NULL;
        bool bContinue = true;
        do {
            cb = iter.next();
            if (cb != NULL)
            {
                VMPI_lockRelease(&m_spinlock);
                cb->memoryStatusChange(kFreeMemoryIfPossible, kFreeMemoryIfPossible);
                VMPI_lockAcquire(&m_spinlock);

                currentTotal = GetTotalHeapSize() + externalPressure / kBlockSize;

                // If we've freed MORE than the minimum amount, we can stop freeing
                if ((startingTotal - currentTotal) > minimumBlocksToFree)
                {
                    bContinue = false;
                }
            }
        } while(cb != NULL && bContinue);

        iter.MarkCursorInList();

        m_notificationThread = NULL;
    }
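
    // Illustrative sketch (editorial addition): a minimal OOMCallback that responds to
    // the kFreeMemoryIfPossible signal sent above by dropping a host-side cache.
    // HostCacheFlusher and HostCache are hypothetical; the registration calls are the
    // real GCHeap::AddOOMCallback / RemoveOOMCallback defined later in this file.
    //
    //   class HostCacheFlusher : public MMgc::OOMCallback
    //   {
    //   public:
    //       HostCacheFlusher()  { MMgc::GCHeap::GetGCHeap()->AddOOMCallback(this); }
    //       ~HostCacheFlusher() { MMgc::GCHeap::GetGCHeap()->RemoveOOMCallback(this); }
    //
    //       virtual void memoryStatusChange(MMgc::MemoryStatus oldStatus,
    //                                       MMgc::MemoryStatus newStatus)
    //       {
    //           // SendFreeMemorySignal passes kFreeMemoryIfPossible for both arguments.
    //           if (newStatus == MMgc::kFreeMemoryIfPossible)
    //               HostCache::DropEverything();   // hypothetical host-side cache
    //       }
    //   };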
    void GCHeap::StatusChangeNotify(MemoryStatus to)
    {
        // If we're already in the process of sending this notification, don't resend it.
        if ((statusNotificationBeingSent() && to == status) || !m_oomHandling)
            return;

        m_notificationThread = VMPI_currentThread();

        MemoryStatus oldStatus = status;

        BasicListIterator<OOMCallback*> iter(callbacks);
        OOMCallback *cb = NULL;
        do {
            cb = iter.next();
            if (cb != NULL)
            {
                VMPI_lockRelease(&m_spinlock);
                cb->memoryStatusChange(oldStatus, to);
                VMPI_lockAcquire(&m_spinlock);
            }
        } while(cb != NULL);

        m_notificationThread = NULL;

        CheckForStatusReturnToNormal();
    }
    bool GCHeap::ShouldNotEnter()
    {
        // don't enter if the heap is already gone, or if we're aborting but not on the aborting call stack in a nested enter call
        GCHeap *heap = GetGCHeap();
        if(heap == NULL ||
           (heap->GetStatus() == kMemAbort &&
            (heap->GetEnterFrame() == NULL || heap->GetEnterFrame()->Suspended())))
            return true;
        return false;
    }

    bool GCHeap::IsAddressInHeap(void *addr)
    {
        void *block = (void*)(uintptr_t(addr) & kBlockMask);
        return SafeSize(block) != (size_t)-1;
    }
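
    // Worked example (editorial addition): assuming a 4 KB block size, kBlockMask clears
    // the low 12 bits, so an interior pointer such as 0x12345678 maps to the block start
    // 0x12345000 before SafeSize is consulted; SafeSize returns (size_t)-1 for addresses
    // the heap does not own. The concrete block size is configuration-dependent.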
    // Every new GC must register itself with the GCHeap.
    void GCHeap::AddGC(GC *gc)
    {
        bool bAdded = false;
        {
            MMGC_LOCK(m_spinlock);
            // hack to let GCManager's list back in for list memory operations
            vmpi_thread_t notificationThreadSave = m_notificationThread;
            m_notificationThread = VMPI_currentThread();
            bAdded = gcManager.tryAddGC(gc);
            m_notificationThread = notificationThreadSave;
        }
        if (!bAdded)
        {
            Abort();
        }
    }

    // When the GC is destroyed it must remove itself from the GCHeap.
    void GCHeap::RemoveGC(GC *gc)
    {
        MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
        // hack to let GCManager's list back in for list memory operations
        vmpi_thread_t notificationThreadSave = m_notificationThread;
        m_notificationThread = VMPI_currentThread();
        gcManager.removeGC(gc);
        m_notificationThread = notificationThreadSave;
        EnterFrame* ef = GetEnterFrame();
        if (ef && ef->GetActiveGC() == gc)
            ef->SetActiveGC(NULL);
    }

    void GCHeap::AddOOMCallback(OOMCallback *p)
    {
        bool bAdded = false;
        {
            MMGC_LOCK(m_spinlock);
            // hack to let GCManager's list back in for list memory operations
            vmpi_thread_t notificationThreadSave = m_notificationThread;
            m_notificationThread = VMPI_currentThread();
            bAdded = callbacks.TryAdd(p);
            m_notificationThread = notificationThreadSave;
        }
        if (!bAdded)
        {
            Abort();
        }
    }

    void GCHeap::RemoveOOMCallback(OOMCallback *p)
    {
        MMGC_LOCK(m_spinlock);
        // hack to let GCManager's list back in for list memory operations
        vmpi_thread_t notificationThreadSave = m_notificationThread;
        m_notificationThread = VMPI_currentThread();
        callbacks.Remove(p);
        m_notificationThread = notificationThreadSave;
    }
    bool GCHeap::EnsureFreeRegion(bool allowExpansion)
    {
        if(!HaveFreeRegion()) {
            bool zero = false;
            HeapBlock *block = AllocBlock(1, zero, 1);
            if(block) {
                nextRegion = (Region*)(void *)block->baseAddr;
            } else if(allowExpansion) {
                ExpandHeap(1);
                // We must have hit the hard limit or the OS limit
                if(nextRegion == NULL)
                    return false;
            } else {
                return false;
            }
        }
        return true;
    }

    GCHeap::Region *GCHeap::NewRegion(char *baseAddr, char *rTop, char *cTop, size_t blockId)
    {
        Region *r = freeRegion;
        if(r) {
            freeRegion = *(Region**)freeRegion;
        } else {
            r = nextRegion++;
            if(roundUp((uintptr_t)nextRegion, kBlockSize) - (uintptr_t)nextRegion < sizeof(Region))
                nextRegion = NULL; // fresh page allocated in ExpandHeap
        }
        new (r) Region(this, baseAddr, rTop, cTop, blockId);
        return r;
    }
    void GCHeap::FreeRegion(Region *r)
    {
        if(r == lastRegion)
            lastRegion = r->prev;
        *(Region**)r = freeRegion;
        freeRegion = r;
    }
    void GCHeap::EnterLockInit()
    {
        if (!instanceEnterLockInitialized)
        {
            instanceEnterLockInitialized = true;
            VMPI_lockInit(&instanceEnterLock);
        }
    }

    void GCHeap::EnterLockDestroy()
    {
        GCAssert(instanceEnterLockInitialized);
        VMPI_lockDestroy(&instanceEnterLock);
        instanceEnterLockInitialized = false;
    }
    GCHeap::Region::Region(GCHeap *heap, char *baseAddr, char *rTop, char *cTop, size_t blockId)
        : prev(heap->lastRegion),
          baseAddr(baseAddr),
          reserveTop(rTop),
          commitTop(cTop),
          blockId(blockId)
    {
        heap->lastRegion = this;
    }

    void GCHeap::CheckForOOMAbortAllocation()
    {
        if(m_notificationThread == VMPI_currentThread() && status == kMemAbort)
            GCAssertMsg(false, "It's not legal to perform allocations during an OOM kMemAbort callback");
    }

    bool GCHeap::QueryCanReturnToNormal()
    {
        // must be below the soft limit _AND_ above the decommit threshold
        return GetUsedHeapSize() + externalPressure/kBlockSize < config.heapSoftLimit &&
               FreeMemoryExceedsDecommitThreshold();
    }

    bool GCHeap::FreeMemoryExceedsDecommitThreshold()
    {
        return GetFreeHeapSize() * 100 > GetTotalHeapSize() * kDecommitThresholdPercentage;
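
        // Worked example (editorial addition): if kDecommitThresholdPercentage were 25 and
        // the heap had 1000 total blocks with 300 free, then 300*100 = 30000 > 1000*25 = 25000,
        // so FreeMemoryExceedsDecommitThreshold() returns true; with only 200 free blocks it
        // would return false. Multiplying by 100 keeps the comparison in integer arithmetic.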