MMgc/GC.cpp
1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
23 * Contributor(s):
24 * Adobe AS3 Team
25 * leon.sha@sun.com
27 * Alternatively, the contents of this file may be used under the terms of
28 * either the GNU General Public License Version 2 or later (the "GPL"), or
29 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
41 #include "MMgc.h"
42 #include "StaticAssert.h"
44 #ifdef AVMPLUS_SAMPLER
45 // sampling support
46 #include "avmplus.h"
47 #else
48 #define SAMPLE_FRAME(_x, _s)
49 #define SAMPLE_CHECK()
50 #endif
52 namespace MMgc
54 #ifdef MMGC_MEMORY_PROFILER
55 // get detailed info on each size class allocator
56 const bool dumpSizeClassState = false;
57 #endif
59 /*virtual*/
60 void GCCallback::presweep() {}
62 /*virtual*/
63 void GCCallback::postsweep() {}
65 /*virtual*/
66 void GCCallback::prereap() {}
68 /*virtual*/
69 void GCCallback::postreap() {}
71 /*virtual*/
72 void GCCallback::prereap(void* /*rcobj*/) {}
74 ////////////// GC //////////////////////////////////////////////////////////
76 // Size classes for our GC. From 8 to 128, size classes are spaced
77 // evenly 8 bytes apart. The finest granularity we can achieve is
78 // 8 bytes, as pointers must be 8-byte aligned thanks to our use
79 // of the bottom 3 bits of 32-bit atoms for Special Purposes.
80 // Above that, the size classes have been chosen to maximize the
81 // number of items that fit in a 4096-byte block, given our block
82 // header information.
83 const int16_t GC::kSizeClasses[kNumSizeClasses] = {
84 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, //0-9
85 88, 96, 104, 112, 120, 128, 144, 160, 168, 176, //10-19
86 184, 192, 200, 216, 224, 240, 256, 280, 296, 328, //20-29
87 352, 392, 432, 488, 560, 656, 784, 984, 1312, 1968 //30-39
90 /* kSizeClassIndex[] generated with this code:
91 kSizeClassIndex[0] = 0;
92 for (var i:int = 1; i < kNumSizeClasses; i++)
93 for (var j:int = (kSizeClasses[i-1]>>3), n=(kSizeClasses[i]>>3); j < n; j++)
94 kSizeClassIndex[j] = i;
97 // indexed by (size>>3) - 1
98 const uint8_t GC::kSizeClassIndex[246] = {
99 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
100 10, 11, 12, 13, 14, 15, 16, 16, 17, 17,
101 18, 19, 20, 21, 22, 23, 23, 24, 25, 25,
102 26, 26, 27, 27, 27, 28, 28, 29, 29, 29,
103 29, 30, 30, 30, 31, 31, 31, 31, 31, 32,
104 32, 32, 32, 32, 33, 33, 33, 33, 33, 33,
105 33, 34, 34, 34, 34, 34, 34, 34, 34, 34,
106 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
107 35, 35, 36, 36, 36, 36, 36, 36, 36, 36,
108 36, 36, 36, 36, 36, 36, 36, 36, 37, 37,
109 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
110 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
111 37, 37, 37, 38, 38, 38, 38, 38, 38, 38,
112 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
113 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
114 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
115 38, 38, 38, 38, 39, 39, 39, 39, 39, 39,
116 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
117 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
118 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
119 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
120 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
121 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
122 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
123 39, 39, 39, 39, 39, 39
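// Illustrative self-check (not part of the original source): the two tables are
// intended to satisfy, for every small request size sz (assuming kLargestAlloc is
// 1968, the largest small size class):
//
//     for (int sz = 1; sz <= 1968; sz++) {
//         int idx = GC::kSizeClassIndex[(sz - 1) >> 3];        // Release-build lookup
//         GCAssert(GC::kSizeClasses[idx] >= sz);               // the object fits
//         GCAssert(idx == 0 || GC::kSizeClasses[idx-1] < sz);  // no smaller class fits
//     }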
126 #ifdef _MSC_VER
127 # pragma warning(push)
128 # pragma warning(disable:4355) // 'this': used in base member initializer list
129 #endif
131 GC::GC(GCHeap *gcheap, GCMode mode)
133 greedy(mode == kGreedyGC),
134 nogc(mode == kDisableGC),
135 incremental(mode == kIncrementalGC),
136 findUnmarkedPointers(false),
137 validateDefRef(false),
138 keepDRCHistory(false),
139 dontAddToZCTDuringCollection(false),
140 incrementalValidation(false),
141 #ifdef _DEBUG
142 // check for missing write barriers at every Alloc
143 incrementalValidationPedantic(false),
144 #endif
145 rcRootSegments(NULL),
146 policy(this, gcheap),
147 t0(VMPI_getPerformanceCounter()),
148 lastStartMarkIncrementCount(0),
149 sweeps(0),
150 sweepStart(0),
151 emptyWeakRef(0),
153 m_gcThread(NULL),
154 destroying(false),
155 marking(false),
156 collecting(false),
157 presweeping(false),
158 markerActive(0),
159 stackCleaned(true),
160 rememberedStackTop(0),
161 stackEnter(0),
162 enterCount(0),
163 emptyWeakRefRoot(0),
164 m_markStackOverflow(false),
165 mark_item_recursion_control(20), // About 3KB as measured with GCC 4.1 on MacOS X (144 bytes / frame), May 2009
166 sizeClassIndex(kSizeClassIndex), // see comment in GC.h
167 pageMap(),
168 heap(gcheap),
169 finalizedValue(true),
170 smallEmptyPageList(NULL),
171 largeEmptyPageList(NULL),
172 remainingQuickListBudget(0),
173 victimIterator(0),
174 m_roots(0),
175 m_callbacks(0),
176 zct()
177 #ifdef DEBUGGER
178 , m_sampler(NULL)
179 #endif
181 // sanity check for all our types
182 MMGC_STATIC_ASSERT(sizeof(int8_t) == 1);
183 MMGC_STATIC_ASSERT(sizeof(uint8_t) == 1);
184 MMGC_STATIC_ASSERT(sizeof(int16_t) == 2);
185 MMGC_STATIC_ASSERT(sizeof(uint16_t) == 2);
186 MMGC_STATIC_ASSERT(sizeof(int32_t) == 4);
187 MMGC_STATIC_ASSERT(sizeof(uint32_t) == 4);
188 MMGC_STATIC_ASSERT(sizeof(int64_t) == 8);
189 MMGC_STATIC_ASSERT(sizeof(uint64_t) == 8);
190 MMGC_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *));
191 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == sizeof(void *));
192 #ifdef MMGC_64BIT
193 MMGC_STATIC_ASSERT(sizeof(intptr_t) == 8);
194 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == 8);
195 #else
196 MMGC_STATIC_ASSERT(sizeof(intptr_t) == 4);
197 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == 4);
198 #endif
199 MMGC_STATIC_ASSERT(sizeof(GCLargeAlloc::LargeBlock) % 8 == 0);
200 MMGC_STATIC_ASSERT(sizeof(gcbits_t) == 1);
202 // The size tables above are derived based on a block size of 4096; this
203 // assert keeps us honest. Talk to Lars if you get into trouble here.
205 // Also it appears that some DEBUG-mode guards in SetPageMapValue,
206 // GetPageMapValue, ClearPageMapValue know that the block size is 4096.
207 GCAssert(GCHeap::kBlockSize == 4096);
209 GCAssert(!(greedy && incremental));
210 zct.SetGC(this);
212 VMPI_lockInit(&m_gcLock);
213 VMPI_lockInit(&m_rootListLock);
215 // Global budget for blocks on the quick lists of small-object allocators. This
216 // budget serves as a throttle on the free lists during times when the GC is not
217 // running; when the GC is running the periodic flushing of the quick lists
218 // makes the budget irrelevant. The budget is global so that very active allocators
219 // can have long free lists at the expense of inactive allocators, which must have
220 // short ones.
222 // This is somewhat arbitrary: 4 blocks per allocator. Must initialize this before we
223 // create allocators because they will request a budget when they're created. The
224 // total budget /must/ accommodate at least one block per alloc, and makes very little
225 // sense if it is not at least two blocks per alloc. (Note no memory is allocated, it's
226 // just a budget.)
228 // See comment in GCAlloc.cpp for more about the quick lists.
230 remainingQuickListBudget = (3*kNumSizeClasses) * 4 * GCHeap::kBlockSize;
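// Added note: with kNumSizeClasses == 40 and 4096-byte blocks this works out to
// (3*40) * 4 * 4096 = 1,966,080 bytes (about 1.9 MB) shared across all 120 allocators.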
232 // Create all the allocators up front (not lazy)
233 // so that we don't have to check the pointers for
234 // NULL on every allocation.
235 for (int i=0; i<kNumSizeClasses; i++) {
236 containsPointersAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], true, false, i));
237 containsPointersRCAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], true, true, i));
238 noPointersAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], false, false, i));
241 largeAlloc = mmfx_new(GCLargeAlloc(this));
243 // See comment in GC.h
244 allocsTable[kRCObject] = containsPointersRCAllocs;
245 allocsTable[kContainsPointers] = containsPointersAllocs;
246 allocsTable[kRCObject|kContainsPointers] = containsPointersRCAllocs;
247 allocsTable[0] = noPointersAllocs;
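// Illustrative routing (follows from the table above): allocation flags that include
// both kRCObject and kContainsPointers are served by containsPointersRCAllocs,
// kContainsPointers alone by containsPointersAllocs, and neither by noPointersAllocs;
// Alloc() below masks the flags with (kRCObject|kContainsPointers) to pick the row.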
249 VMPI_memset(m_bitsFreelists, 0, sizeof(uint32_t*) * kNumSizeClasses);
250 m_bitsNext = (uint32_t*)heapAlloc(1);
252 // precondition for emptyPageList
253 GCAssert(offsetof(GCLargeAlloc::LargeBlock, next) == offsetof(GCAlloc::GCBlock, next));
256 for(int i=0; i<GCV_COUNT; i++)
258 SetGCContextVariable(i, NULL);
261 allocaInit();
263 emptyWeakRefRoot = new GCRoot(this, &this->emptyWeakRef, sizeof(this->emptyWeakRef));
264 MMGC_GCENTER(this);
265 emptyWeakRef = new (this) GCWeakRef(NULL);
267 Alloc(2048);
269 gcheap->AddGC(this);
270 gcheap->AddOOMCallback(this);
274 #ifdef _MSC_VER
275 # pragma warning(pop)
276 #endif
278 GC::~GC()
280 policy.shutdown();
281 allocaShutdown();
283 // Do this before calling GCHeap::RemoveGC as GCAutoEnter::Destroy
284 // expects this GC to still be the active GC and GCHeap::RemoveGC clears
285 // the active GC.
286 if(stackEnter != NULL) {
287 stackEnter->Destroy(false);
290 heap->RemoveGC(this);
291 heap->RemoveOOMCallback(this);
293 // Force all objects to be destroyed
294 destroying = true;
297 MMGC_GCENTER(this);
298 ForceSweepAtShutdown();
301 for (int i=0; i < kNumSizeClasses; i++) {
302 mmfx_delete( containsPointersAllocs[i]);
303 mmfx_delete(containsPointersRCAllocs[i]);
304 mmfx_delete(noPointersAllocs[i]);
307 if (largeAlloc) {
308 mmfx_delete(largeAlloc);
311 // Go through m_bitsFreelist and collect list of all pointers
312 // that are on page boundaries into new list, pageList
313 void **pageList = NULL;
314 for(int i=0, n=kNumSizeClasses; i<n; i++) {
315 uint32_t* bitsFreelist = m_bitsFreelists[i];
316 while(bitsFreelist) {
317 uint32_t *next = *(uint32_t**)bitsFreelist;
318 if((uintptr_t(bitsFreelist) & GCHeap::kOffsetMask) == 0) {
319 *((void**)bitsFreelist) = pageList;
320 pageList = (void**)bitsFreelist;
322 bitsFreelist = next;
326 // Go through page list and free all pages on it
327 while (pageList) {
328 void **next = (void**) *pageList;
329 heapFree((void*)pageList);
330 pageList = next;
333 pageMap.DestroyPageMapVia(heap);
335 delete emptyWeakRefRoot;
336 GCAssert(!m_roots);
337 GCAssert(!m_callbacks);
339 // Apparently the player can't be made to clean up, so at least keep it from crashing.
340 while(m_roots) {
341 m_roots->Destroy();
344 while(m_callbacks) {
345 m_callbacks->Destroy();
348 zct.Destroy();
350 GCAssertMsg(GetNumBlocks() == 0, "GC accounting off");
352 GCAssertMsg(enterCount == 0, "GC enter/exit paths broken");
354 VMPI_lockDestroy(&m_gcLock);
355 VMPI_lockDestroy(&m_rootListLock);
358 void GC::Collect(bool scanStack)
360 GCAssertMsg(!scanStack || onThread(), "Full collection with stack scan requested however the GC isn't associated with a thread, missing MMGC_GCENTER macro.");
362 if (nogc || markerActive || collecting || Reaping()) {
363 return;
366 ReapZCT(scanStack);
368 if(!marking)
369 StartIncrementalMark();
370 if(marking)
371 FinishIncrementalMark(scanStack);
373 #ifdef _DEBUG
374 // Dumping the stack trace at GC time can be very helpful with stack walk bugs
375 // where something was created, stored away in untraced, unmanaged memory and not
376 // reachable by the conservative stack walk
377 //DumpStackTrace();
378 FindUnmarkedPointers();
379 #endif
381 policy.fullCollectionComplete();
384 #ifdef RECURSIVE_MARK
385 REALLY_INLINE void GC::PushWorkItem(GCWorkItem item)
387 GCAssert(item.ptr != NULL);
388 markerActive++;
389 MarkItem(item);
390 markerActive--;
392 #else
393 REALLY_INLINE void GC::PushWorkItem(GCWorkItem item)
395 GCAssert(item.ptr != NULL);
396 GCAssert(!item.IsGCItem() || IsPointerToGCObject(GetRealPointer(item.ptr)));
397 if (!m_incrementalWork.Push(item))
398 SignalMarkStackOverflow(item);
400 #endif
402 void GC::PushWorkItem_MayFail(GCWorkItem &item)
404 if (item.ptr)
405 PushWorkItem(item);
408 #ifdef _DEBUG
409 bool GC::Trace(const void *stackStart/*=NULL*/, uint32_t stackSize/*=0*/)
411 SAMPLE_FRAME("[mark]", core());
413 // Kill incremental mark since we're gonna wipe the marks.
414 marking = false;
415 ClearMarkStack();
416 m_barrierWork.Clear();
418 // Clear all mark bits.
419 ClearMarks();
421 SAMPLE_CHECK();
423 MarkAllRoots();
425 SAMPLE_CHECK();
427 if(stackStart == NULL) {
428 // grab the stack, push it, and
429 MarkQueueAndStack();
430 } else {
431 GCWorkItem item(stackStart, stackSize, GCWorkItem::kNonGCObject);
432 PushWorkItem(item);
433 Mark();
436 SAMPLE_CHECK();
438 bool failure = m_markStackOverflow;
440 ClearMarks();
442 return !failure;
444 #endif
446 GC::RCRootSegment::RCRootSegment(GC* gc, void* mem, size_t size)
447 : GCRoot(gc, mem, size)
448 , mem(mem)
449 , size(size)
450 , prev(NULL)
451 , next(NULL)
454 void* GC::AllocRCRoot(size_t size)
456 const int hdr_size = (sizeof(void*) + 7) & ~7;
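// Note (added for clarity): hdr_size rounds sizeof(void*) up to a multiple of 8,
// i.e. (4+7)&~7 == 8 on 32-bit and (8+7)&~7 == 8 on 64-bit targets, so the user
// memory that follows the header stays 8-byte aligned.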
458 GCHeap::CheckForAllocSizeOverflow(size, hdr_size);
460 union {
461 char* block;
462 uintptr_t* block_u;
464 block = mmfx_new_array_opt(char, size + hdr_size, MMgc::kZero);
465 void* mem = (void*)(block + hdr_size);
466 RCRootSegment *segment = new RCRootSegment(this, mem, size);
467 *block_u = (uintptr_t)segment;
468 AddRCRootSegment(segment);
469 return mem;
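// Memory layout produced above (illustrative):
//
//     block:  [ RCRootSegment* (padded to hdr_size bytes) ][ size bytes of user memory ]
//                                                            ^-- returned 'mem'
//
// FreeRCRoot() below relies on this layout to recover the segment pointer from the
// header that precedes the user memory.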
472 void GC::FreeRCRoot(void* mem)
474 const int hdr_size = (sizeof(void*) + 7) & ~7;
475 union {
476 char* block;
477 RCRootSegment** segmentp;
479 block = (char*)mem - hdr_size;
480 RCRootSegment* segment = *segmentp;
481 RemoveRCRootSegment(segment);
482 delete segment;
483 mmfx_delete_array(block);
486 void GC::CollectionWork()
488 if (nogc)
489 return;
491 if (incremental) {
492 // If we're reaping, don't do any work; this simplifies policy event timing and improves
493 // incrementality.
494 if (!collecting && !Reaping()) {
495 if (!marking) {
496 StartIncrementalMark();
498 else if (policy.queryEndOfCollectionCycle()) {
499 FinishIncrementalMark(true);
501 else {
502 IncrementalMark();
506 else {
507 // non-incremental collection
508 Collect();
512 // Note, the interaction with the policy manager in Alloc() should
513 // be the same as in AllocDouble(), which is defined in GC.h.
515 // In Alloc we have separate cases for large and small objects. We want
516 // small-object allocation to be as fast as possible. Hence the relative
517 // mess of #ifdefs below, and the following explanation.
519 // Normally we round up size to 8 bytes and add DebugSize and that sum is
520 // the size that determines whether we use the small-object allocator or
521 // the large-object one:
523 // requestSize = ((size + 7) & ~7) + DebugSize()
525 // Then we shift that three places and subtract 1 to obtain the allocator
526 // index:
528 // index = (requestSize >> 3) - 1
529 // = ((((size + 7) & ~7) + DebugSize()) >> 3) - 1
531 // We want to optimize this. Consider the case of a Release build where
532 // DebugSize() == 0:
534 // = (((size + 7) & ~7) >> 3) - 1
536 // Observe that the & ~7 is redundant because those bits will be lost,
537 // and that 1 = (8 >> 3)
539 // = ((size + 7) >> 3) - (8 >> 3)
540 // = (size + 7 - 8) >> 3
541 // index = (size - 1) >> 3
543 // In Release builds, the guard for small object allocation is
545 // guard = requestSize <= kLargestAlloc
546 // = ((size + 7) & ~7) <= kLargestAlloc
548 // Yet the /effective/ guard, given that not all the bits of size are used
549 // subsequently, is simply
551 // guard = size <= kLargestAlloc
553 // To see this, consider that if size < kLargestAlloc then requestSize <= kLargestAlloc
554 // and if size > kLargestAlloc then requestSize > kLargestAlloc, because requestSize
555 // is rounded up to an 8-byte boundary and kLargestAlloc is already rounded to an 8-byte
556 // boundary.
558 // So in the small-object case in Release builds we use the simpler guard, and defer
559 // the rounding and allocation overflow checking to the large-object case.
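// Worked example (illustrative, Release build): for size == 121 the table index is
// (121-1)>>3 == 15, kSizeClassIndex[15] == 15 and kSizeClasses[15] == 128, so the
// request is served by the 128-byte allocator; size == 120 yields index 14 and the
// 120-byte allocator, consistent with the fit/no-waste asserts in Alloc() below.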
561 #ifdef MMGC_MEMORY_INFO
562 #ifndef _DEBUG
563 #error "Really need MMGC_MEMORY_INFO to imply _DEBUG"
564 #endif
565 #endif
566 #ifdef MMGC_MEMORY_PROFILER
567 #ifndef MMGC_HOOKS
568 #error "Really need MMGC_MEMORY_PROFILER to imply MMGC_HOOKS"
569 #endif
570 #endif
572 void *GC::Alloc(size_t size, int flags/*0*/)
574 #ifdef _DEBUG
575 GCAssertMsg(size > 0, "cannot allocate a 0 sized block");
576 GCAssertMsg(onThread(), "GC called from different thread!");
578 if(!nogc && stackEnter == NULL) {
579 GCAssertMsg(false, "A MMGC_GCENTER macro must exist on the stack");
580 GCHeap::SignalInconsistentHeapState("MMGC_GCENTER missing");
581 /*NOTREACHED*/
582 return NULL;
585 // always be marking in pedantic mode
586 if(incrementalValidationPedantic) {
587 if(!marking) {
588 if (!Reaping()) {
589 StartIncrementalMark();
593 #endif
594 #ifdef AVMPLUS_SAMPLER
595 avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
596 if(core)
597 core->sampleCheck();
598 #endif
600 #if defined _DEBUG || defined MMGC_MEMORY_PROFILER
601 const size_t askSize = size; // preserve this for statistics gathering and profiling
602 #endif
603 #if defined _DEBUG
604 GCHeap::CheckForAllocSizeOverflow(size, 7+DebugSize());
605 size = (size+7)&~7; // round up to multiple of 8
606 size += DebugSize(); // add in some (possibly zero) multiple of 8 bytes for debug information
607 #endif
609 void *item; // the allocated object (a user pointer!), or NULL
611 // In Release builds the calls to the underlying Allocs should end up being compiled
612 // as tail calls with reasonable compilers. Try to keep it that way.
614 if (size <= kLargestAlloc)
616 // This is the fast lookup table implementation to find the right allocator.
617 // The table has been lifted into an instance variable because the Player is
618 // compiled with PIC and GCC generates somewhat gnarly code for that.
619 #ifdef _DEBUG
620 const unsigned index = sizeClassIndex[(size>>3)-1];
621 #else
622 const unsigned index = sizeClassIndex[(size-1)>>3];
623 #endif
625 // The table avoids a three-way decision tree.
626 GCAlloc **allocs = allocsTable[flags & (kRCObject|kContainsPointers)];
628 // assert that I fit
629 GCAssert(size <= allocs[index]->GetItemSize());
631 // assert that I don't fit (makes sure we don't waste space)
632 GCAssert( (index==0) || (size > allocs[index-1]->GetItemSize()));
634 #if defined _DEBUG || defined MMGC_MEMORY_PROFILER
635 item = allocs[index]->Alloc(askSize, flags);
636 #else
637 item = allocs[index]->Alloc(flags);
638 #endif
640 else
642 #ifndef _DEBUG
643 GCHeap::CheckForAllocSizeOverflow(size, 7+DebugSize());
644 size = (size+7)&~7; // round up to multiple of 8
645 size += DebugSize();
646 #endif
647 #if defined _DEBUG || defined MMGC_MEMORY_PROFILER
648 item = largeAlloc->Alloc(askSize, size, flags);
649 #else
650 item = largeAlloc->Alloc(size, flags);
651 #endif
654 // Hooks are run by the lower-level allocators, which also signal
655 // allocation work and trigger GC.
657 GCAssert(item != NULL || (flags & kCanFail) != 0);
659 #ifdef _DEBUG
660 // Note: GetUserPointer(item) is only required for DEBUG builds and for non-NULL pointers.
662 if(item != NULL) {
663 item = GetUserPointer(item);
664 bool shouldZero = (flags & kZero) || incrementalValidationPedantic;
666 // In debug mode memory is poisoned, so we have to clear it here.
667 // In release builds memory is zeroed at the start and on free/sweep.
668 // In pedantic mode uninitialized data can trip the write barrier
669 // detector; only zero in pedantic mode, because otherwise we want the
670 // mutator to get the poisoned data so it crashes if it relies on
671 // uninitialized values
672 if(shouldZero) {
673 VMPI_memset(item, 0, Size(item));
676 #endif
678 return item;
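// Illustrative call (hypothetical; most client code uses the operator-new style
// wrappers rather than calling Alloc directly):
//
//     void* p = gc->Alloc(64, GC::kContainsPointers | GC::kZero);   // zeroed, scanned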
681 // Mmmm.... gcc -O3 inlines Alloc into this in Release builds :-)
683 void *GC::OutOfLineAllocExtra(size_t size, size_t extra, int flags)
685 return AllocExtra(size, extra, flags);
688 void GC::AbortFree(const void* item)
690 // FIXME - https://bugzilla.mozilla.org/show_bug.cgi?id=589102.
691 // (a) This code presumes we got here via delete, but that may not always
692 // be true, and if we didn't then it isn't right to clear the finalized
693 // bit nor the weak ref bit.
694 // (b) If we can't free an object it may be that we should clear it, to
695 // avoid hanging onto pointers to other objects. AtomArray::checkCapacity
696 // clears an object explicitly before freeing it always but should not have
697 // to do that.
698 if(IsFinalized(item))
699 ClearFinalized(item);
700 if(HasWeakRef(item))
701 ClearWeakRef(item);
704 // Observe that nbytes is never more than kBlockSize, and the budget applies only to
705 // objects tied up in quick lists, not in on-block free lists. So given that the
706 // total budget for the GC is at least kBlockSize, the loop is guaranteed to terminate.
708 void GC::ObtainQuickListBudget(size_t nbytes)
710 // Flush quick lists until we have enough for the requested budget. We
711 // pick victims in round-robin fashion: there's a major iterator that
712 // selects the allocator pool, and a minor iterator that selects an
713 // allocator in that pool.
715 while (remainingQuickListBudget <= nbytes) {
716 GCAlloc** allocs = NULL;
717 switch (victimIterator % 3) {
718 case 0: allocs = containsPointersAllocs; break;
719 case 1: allocs = containsPointersRCAllocs; break;
720 case 2: allocs = noPointersAllocs; break;
722 allocs[victimIterator / 3]->CoalesceQuickList();
723 victimIterator = (victimIterator + 1) % (3*kNumSizeClasses);
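// Added note: victimIterator cycles through 3*kNumSizeClasses positions;
// (victimIterator % 3) selects the allocator pool and (victimIterator / 3) the
// size class within it, so every allocator is visited once per full cycle.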
726 remainingQuickListBudget -= nbytes;
729 void GC::RelinquishQuickListBudget(size_t nbytes)
731 remainingQuickListBudget += nbytes;
734 // I can think of two alternative policies for handling dependent allocations.
736 // One is to treat all memory the same: external code calls SignalDependentAllocation
737 // and SignalDependentDeallocation to account for the volume of data that are
738 // dependent on but not allocated by the GC, and the GC incorporates these data
739 // into policy decisions, heap target computation, and so on.
741 // (Benefits: few surprises policy-wise.
743 // Problems: If the volume (in bytes) of dependent data swamps the normal data
744 // then the collection policy may not work all that well -- collection cost may be
745 // too high, and memory usage may also be too high because the budget is L times
746 // the live memory, which may include a lot of bitmap data.)
748 // The other is to treat dependent memory differently. SignalDependentAllocation
749 // and SignalDependentDeallocation will account for the external memory, and if
750 // the volume of dependent memory is high relative to the budget (which is
751 // computed without regard for the volume of dependent memory) then those two
752 // methods will drive the collector forward one step.
754 // (Benefits: not overly sensitive to the volume of bitmap data, especially in
755 // computing the budget, yet a sufficient number of allocations will force the gc
756 // cycle to complete.
758 // Problems: It is a bit ad-hoc and it may fail to reclaim a large volume of
759 // bitmap data quickly enough.)
761 // I'm guessing the former is better, for now.
763 void GC::SignalDependentAllocation(size_t nbytes)
765 policy.signalDependentAllocation(nbytes);
766 SignalAllocWork(nbytes);
769 void GC::SignalDependentDeallocation(size_t nbytes)
771 policy.signalDependentDeallocation(nbytes);
772 SignalFreeWork(nbytes);
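// Illustrative usage of the dependent-memory API (hypothetical caller, not in this
// file): code that hangs externally allocated data off a GC object might do
//
//     void* bits = malloc(nbytes);              // memory the GC does not manage
//     gc->SignalDependentAllocation(nbytes);    // let policy account for it
//     ...
//     gc->SignalDependentDeallocation(nbytes);  // and report it when released
//     free(bits);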
775 void GC::ClearMarks()
777 // ClearMarks may sweep, although I'm far from sure that that's a good idea,
778 // as SignalImminentAbort calls ClearMarks.
780 EstablishSweepInvariants();
782 for (int i=0; i < kNumSizeClasses; i++) {
783 containsPointersRCAllocs[i]->ClearMarks();
784 containsPointersAllocs[i]->ClearMarks();
785 noPointersAllocs[i]->ClearMarks();
787 largeAlloc->ClearMarks();
788 m_markStackOverflow = false;
791 void GC::Finalize()
793 ClearUnmarkedWeakRefs();
795 for(int i=0; i < kNumSizeClasses; i++) {
796 containsPointersRCAllocs[i]->Finalize();
797 containsPointersAllocs[i]->Finalize();
798 noPointersAllocs[i]->Finalize();
800 largeAlloc->Finalize();
801 finalizedValue = !finalizedValue;
804 for(int i=0; i < kNumSizeClasses; i++) {
805 containsPointersRCAllocs[i]->m_finalized = false;
806 containsPointersAllocs[i]->m_finalized = false;
807 noPointersAllocs[i]->m_finalized = false;
811 void GC::SweepNeedsSweeping()
813 EstablishSweepInvariants();
815 // clean up any pages that need sweeping
816 for(int i=0; i < kNumSizeClasses; i++) {
817 containsPointersRCAllocs[i]->SweepNeedsSweeping();
818 containsPointersAllocs[i]->SweepNeedsSweeping();
819 noPointersAllocs[i]->SweepNeedsSweeping();
823 void GC::EstablishSweepInvariants()
825 for(int i=0; i < kNumSizeClasses; i++) {
826 containsPointersRCAllocs[i]->CoalesceQuickList();
827 containsPointersAllocs[i]->CoalesceQuickList();
828 noPointersAllocs[i]->CoalesceQuickList();
832 void GC::ClearMarkStack()
834 // GCRoot have pointers into the mark stack, clear them.
836 MMGC_LOCK(m_rootListLock);
837 GCRoot *r = m_roots;
838 while(r) {
839 r->ClearMarkStackSentinelPointer();
840 r = r->next;
843 m_incrementalWork.Clear();
846 /*static*/
847 void GC::SetHasWeakRef(const void *userptr, bool flag)
849 const void *realptr = GetRealPointer(userptr);
850 GCAssert(GetGC(realptr)->IsPointerToGCObject(realptr));
851 if (flag) {
852 GetGCBits(realptr) |= kHasWeakRef;
853 // Small-object allocators maintain an extra flag
854 // about weak objects in a block; we need to set that
855 // flag here.
856 if (!GCLargeAlloc::IsLargeBlock(realptr))
857 GCAlloc::SetBlockHasWeakRef(userptr);
859 else
860 GetGCBits(realptr) &= ~kHasWeakRef;
863 void GC::ClearUnmarkedWeakRefs()
865 GCHashtable::Iterator it(&weakRefs);
867 while (it.nextKey() != NULL) {
868 GCWeakRef* w = (GCWeakRef*)it.value();
869 GCObject* o = w->peek();
870 if (o != NULL && !GC::GetMark(o))
871 ClearWeakRef(o, false);
873 weakRefs.prune();
876 void GC::ForceSweepAtShutdown()
878 // There are two preconditions for Sweep: the mark stacks must be empty, and
879 // the queued bits must all be clear (except as part of kFreelist).
881 // We are doing this at shutdown, so don't bother with pushing the marking through
882 // to the end; just clear the stacks, clear the mark bits, and sweep. The objects
883 // will be finalized and deallocated and the blocks will be returned to the block
884 // manager.
886 ClearMarkStack();
887 m_barrierWork.Clear();
889 ClearMarks();
891 // System invariant: collecting == false || marking == true
892 marking = true;
893 Sweep();
894 marking = false;
897 void GC::Sweep()
899 // Applications using -memstats for peak heap measurements need info printed before the sweep
901 if(heap->Config().gcstats) {
902 gclog("[mem] sweep-start\n");
905 // 'collecting' must be true because it indicates allocations should
906 // start out marked and the write barrier should short-circuit (not
907 // record any hits). We can't rely on write barriers to mark objects
908 // during finalization below since presweep or finalization could write
909 // a new GC object to a root.
911 GCAssert(m_incrementalWork.Count() == 0);
912 GCAssert(m_barrierWork.Count() == 0);
914 // This clears the quick lists in the small-block allocators. It's crucial
915 // that we do that before we set 'collecting' and call any callbacks, because
916 // the cleared quick lists make sure that the slow allocation path that
917 // checks m_gc->collecting is taken, and that is required in order to
918 // allocate objects as marked under some circumstances.
920 EstablishSweepInvariants();
922 collecting = true;
923 zct.StartCollecting();
925 SAMPLE_FRAME("[sweep]", core());
926 sweeps++;
928 size_t heapSize = heap->GetUsedHeapSize();
930 #ifdef MMGC_MEMORY_PROFILER
931 if(heap->Config().autoGCStats) {
932 GCLog("Before sweep memory info:\n");
933 DumpMemoryInfo();
935 #endif
937 GCAssert(m_incrementalWork.Count() == 0);
938 GCAssert(m_barrierWork.Count() == 0);
940 presweeping = true;
941 // invoke presweep on all callbacks
942 for ( GCCallback *cb = m_callbacks; cb ; cb = cb->nextCB )
943 cb->presweep();
944 presweeping = false;
946 SAMPLE_CHECK();
948 // The presweep callbacks can't drive marking or trigger write barriers as the barrier is disabled,
949 // but they can cause elements to be pushed onto the mark stack explicitly and it's necessary to call mark.
950 // One example of callback that pushes work items is the Sampler::presweep(). Another is the read
951 // barrier in GCWeakRef::get() - see Bugzilla 572331.
952 do {
953 if (m_markStackOverflow) {
954 m_markStackOverflow = false;
955 HandleMarkStackOverflow();
957 Mark();
958 } while (m_markStackOverflow);
960 SAMPLE_CHECK();
962 GCAssert(m_incrementalWork.Count() == 0);
963 GCAssert(m_barrierWork.Count() == 0);
965 #ifdef MMGC_HEAP_GRAPH
966 pruneBlacklist();
967 #endif
969 Finalize();
971 SAMPLE_CHECK();
973 GCAssert(m_incrementalWork.Count() == 0);
974 GCAssert(m_barrierWork.Count() == 0);
976 int sweepResults = 0;
978 // ISSUE: this could be done lazily at the expense of other GCs potentially expanding
979 // unnecessarily; not sure it's worth it, as this should be pretty fast
980 GCAlloc::GCBlock *b = smallEmptyPageList;
981 while(b) {
982 GCAlloc::GCBlock *next = GCAlloc::Next(b);
983 GCAlloc* alloc = (GCAlloc*)b->alloc;
984 #ifdef _DEBUG
985 alloc->SweepGuts(b);
986 #endif
987 alloc->FreeChunk(b);
989 sweepResults++;
990 b = next;
992 smallEmptyPageList = NULL;
994 SAMPLE_CHECK();
996 GCLargeAlloc::LargeBlock *lb = largeEmptyPageList;
997 while(lb) {
998 GCLargeAlloc::LargeBlock *next = GCLargeAlloc::Next(lb);
999 #ifdef MMGC_HOOKS
1000 if(heap->HooksEnabled())
1001 heap->FreeHook(GetUserPointer(lb+1), lb->size - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
1002 #endif
1003 int numBlocks = lb->GetNumBlocks();
1004 sweepResults += numBlocks;
1005 VALGRIND_MEMPOOL_FREE(lb, lb);
1006 VALGRIND_MEMPOOL_FREE(lb, lb + 1);
1007 VALGRIND_DESTROY_MEMPOOL(lb);
1008 FreeBlock(lb, numBlocks);
1009 lb = next;
1011 largeEmptyPageList = NULL;
1013 if (heap->Config().eagerSweeping)
1014 SweepNeedsSweeping();
1016 // we potentially freed a lot of memory, tell heap to regulate
1017 heap->Decommit();
1019 SAMPLE_CHECK();
1021 #ifdef MMGC_MEMORY_PROFILER
1022 if(heap->Config().autoGCStats) {
1023 GCLog("After sweep memory info:\n");
1024 DumpMemoryInfo();
1026 #endif
1028 // don't want postsweep to fire WB's
1029 collecting = false;
1030 marking = false;
1031 zct.EndCollecting();
1033 // invoke postsweep callback
1034 for ( GCCallback *cb = m_callbacks; cb ; cb = cb->nextCB )
1035 cb->postsweep();
1037 SAMPLE_CHECK();
1039 if(heap->Config().gcstats) {
1040 // include large pages given back
1041 sweepResults += int(heapSize - heap->GetUsedHeapSize());
1042 double millis = duration(sweepStart);
1043 gclog("[mem] sweep(%d) reclaimed %d whole pages (%d kb) in %.2f millis (%.4f s)\n",
1044 sweeps, sweepResults, sweepResults*GCHeap::kBlockSize / 1024, millis,
1045 duration(t0)/1000);
1048 #ifdef MMGC_HEAP_GRAPH
1049 printBlacklist();
1050 #endif
1054 void* GC::AllocBlock(int size, PageMap::PageType pageType, bool zero, bool canFail)
1056 GCAssert(size > 0);
1058 void *item = heapAlloc(size, GCHeap::kExpand| (zero ? GCHeap::kZero : 0) | (canFail ? GCHeap::kCanFail : 0));
1060 // Mark GC pages in the page map: small pages get marked 1, the first page of a
1061 // large allocation is marked 3, and the remaining pages are marked 2.
1062 if(item) {
1063 MarkGCPages(item, 1, pageType);
1064 if(pageType == PageMap::kGCLargeAllocPageFirst) {
1065 MarkGCPages((char*)item+GCHeap::kBlockSize, size - 1, PageMap::kGCLargeAllocPageRest);
1069 return item;
1072 void GC::FreeBlock(void *ptr, uint32_t size)
1074 // Bugzilla 551833: Unmark first so that any OOM or other action triggered
1075 // by heapFree does not examine bits representing pages that are gone.
1076 UnmarkGCPages(ptr, size);
1077 heapFree(ptr, size, false);
1080 void *GC::FindBeginningGuarded(const void *gcItem, bool allowGarbage)
1082 (void)allowGarbage;
1083 void *realItem = NULL;
1084 int bits = GetPageMapValueGuarded((uintptr_t)gcItem);
1085 switch(bits)
1087 case PageMap::kGCAllocPage:
1088 realItem = GCAlloc::FindBeginning(gcItem);
1089 break;
1090 case PageMap::kGCLargeAllocPageFirst:
1091 realItem = GCLargeAlloc::FindBeginning(gcItem);
1092 break;
1093 case PageMap::kGCLargeAllocPageRest:
1094 while(bits == PageMap::kGCLargeAllocPageRest)
1096 gcItem = (void*) ((uintptr_t)gcItem - GCHeap::kBlockSize);
1097 bits = GetPageMapValue((uintptr_t)gcItem);
1099 realItem = GCLargeAlloc::FindBeginning(gcItem);
1100 break;
1101 default:
1102 GCAssertMsg(allowGarbage, "FindBeginningGuarded must not be called on non-managed addresses");
1103 // The NULL return is a documented effect of this function, and is desired, despite
1104 // the assert above.
1105 return NULL;
1107 return GetUserPointer(realItem);
1110 void GC::Mark()
1112 markerActive++;
1113 while(m_incrementalWork.Count()) {
1114 GCWorkItem item = m_incrementalWork.Pop();
1115 MarkItem(item);
1117 markerActive--;
1120 // This must not trigger OOM handling. It is /possible/ to restructure the
1121 // function to allow the use of heapAlloc() and heapFree() but the contortions
1122 // and resulting fragility really do not seem worth it. (Consider that
1123 // heapAlloc can trigger OOM which can invoke GC which can run finalizers
1124 // which can perform allocation that can cause MarkGCPages to be reentered.)
1126 // The use of non-OOM functions may of course lead to worse OOM behavior, but
1127 // if the allocation of large page maps cause OOM events as a result of
1128 // increased fragmentation then the proper fix would be to restructure the page
1129 // map to be less monolithic.
1131 // (See Bugzilla 551833.)
1133 void GC::MarkGCPages(void *item, uint32_t numPages, PageMap::PageType to)
1135 pageMap.ExpandSetAll(heap, item, numPages, to);
1138 void GC::UnmarkGCPages(void *item, uint32_t numpages)
1140 pageMap.ClearAddrs(item, numpages);
1143 void GC::CleanStack(bool force)
1145 if(!force && (stackCleaned || rememberedStackTop == 0))
1146 return;
1147 stackCleaned = true;
1148 VMPI_callWithRegistersSaved(GC::DoCleanStack, this);
1151 /*static*/
1152 void GC::DoCleanStack(void* stackPointer, void* arg)
1154 GC* gc = (GC*)arg;
1155 if( ((char*) stackPointer > (char*)gc->rememberedStackTop) && ((char *)gc->stackEnter > (char*)stackPointer)) {
1156 size_t amount = (char*)stackPointer - (char*)gc->rememberedStackTop;
1157 VMPI_cleanStack(amount);
1161 void GC::MarkQueueAndStack(bool scanStack)
1163 if(scanStack)
1164 VMPI_callWithRegistersSaved(GC::DoMarkFromStack, this);
1165 else
1166 Mark();
1169 /*static*/
1170 void GC::DoMarkFromStack(void* stackPointer, void* arg)
1172 GC* gc = (GC*)arg;
1173 char* stackBase = (char*)gc->GetStackTop();
1175 // this is where we will clear to when CleanStack is called
1176 if(gc->rememberedStackTop < stackPointer)
1177 gc->rememberedStackTop = stackPointer;
1179 // Push the stack onto the mark stack and then mark synchronously until
1180 // everything reachable from the stack has been marked.
1182 GCWorkItem item(stackPointer, uint32_t(stackBase - (char*)stackPointer), GCWorkItem::kStackMemory);
1183 gc->PushWorkItem(item);
1184 gc->Mark();
1187 struct CreateRootFromCurrentStackArgs
1189 GC* gc;
1190 void (*fn)(void* arg);
1191 void *arg;
1194 static void DoCreateRootFromCurrentStack(void* stackPointer, void* arg)
1196 CreateRootFromCurrentStackArgs* context = (CreateRootFromCurrentStackArgs*)arg;
1197 MMgc::GC::AutoRCRootSegment root(context->gc, stackPointer, uint32_t((char*)context->gc->GetStackTop() - (char*)stackPointer));
1198 MMgc::GCAutoEnterPause mmgc_enter_pause(context->gc);
1199 context->fn(context->arg); // Should not be a tail call as those stack variables have destructors
1202 void GC::CreateRootFromCurrentStack(void (*fn)(void* arg), void* arg)
1204 CreateRootFromCurrentStackArgs arg2;
1205 arg2.gc = this;
1206 arg2.fn = fn;
1207 arg2.arg = arg;
1208 VMPI_callWithRegistersSaved(DoCreateRootFromCurrentStack, &arg2);
1211 void GCRoot::init(GC* _gc, const void *_object, size_t _size)
1213 gc = _gc;
1214 object = _object;
1215 size = _size;
1216 markStackSentinel = NULL;
1217 GCAssert(size % 2 == 0);
1218 gc->AddRoot(this);
1221 GCRoot::GCRoot(GC * _gc)
1223 init(_gc, this, FixedMalloc::GetFixedMalloc()->Size(this));
1226 GCRoot::GCRoot(GC * _gc, const void * _object, size_t _size)
1228 init(_gc, _object, _size);
1231 GCRoot::~GCRoot()
1233 Destroy();
1236 void GCRoot::Set(const void * _object, size_t _size)
1238 SetMarkStackSentinelPointer(NULL);
1239 this->object = _object;
1240 this->size = _size;
1243 void GCRoot::SetMarkStackSentinelPointer(GCWorkItem *wi)
1245 if(markStackSentinel != NULL) {
1246 // There is already a sentinel for this item and above
1247 // will be a fragment of the root, clear them both since
1248 // we need to rescan the whole thing.
1249 GCAssert(markStackSentinel->GetSentinelPointer() == this);
1250 GCWorkItem *markStackItem = gc->m_incrementalWork.GetItemAbove(markStackSentinel);
1252 // markStackItem can be NULL if the sentinel was on the top of the stack.
1253 // It's also possible we popped the last fragment and haven't popped the sentinel yet,
1254 // so check that the markStackItem is for this root before clearing.
1255 if(markStackItem && markStackItem->iptr + markStackItem->GetSize() == uintptr_t(End())) {
1256 markStackItem->Clear();
1258 markStackSentinel->Clear();
1260 markStackSentinel = wi;
1263 void GCRoot::Destroy()
1265 Set(NULL, 0);
1266 if(gc) {
1267 gc->RemoveRoot(this);
1269 gc = NULL;
1272 GCCallback::GCCallback(GC * _gc) : gc(_gc)
1274 gc->AddCallback(this);
1277 GCCallback::~GCCallback()
1279 if(gc) {
1280 gc->RemoveCallback(this);
1284 void GCCallback::Destroy()
1286 if(gc) {
1287 gc->RemoveCallback(this);
1289 gc = NULL;
1292 #ifdef _DEBUG
1293 bool GC::IsPointerIntoGCObject(const void *item)
1295 int bits = GetPageMapValueGuarded((uintptr_t)item);
1296 switch(bits) {
1297 case PageMap::kGCAllocPage:
1298 return GCAlloc::IsPointerIntoGCObject(item);
1299 case PageMap::kGCLargeAllocPageFirst:
1300 return item >= GCLargeAlloc::FindBeginning(item);
1301 case PageMap::kGCLargeAllocPageRest:
1302 return true;
1303 default:
1304 return false;
1307 #endif
1309 #if 0
1310 // this is a release ready tool for hunting down freelist corruption
1311 void GC::CheckFreelists()
1313 GCAlloc **allocs = containsPointersAllocs;
1314 for (int l=0; l < GC::kNumSizeClasses; l++) {
1315 GCAlloc *a = allocs[l];
1316 GCAlloc::GCBlock *_b = a->m_firstBlock;
1317 while(_b) {
1318 void *fitem = _b->firstFree;
1319 void *prev = 0;
1320 while(fitem) {
1321 if((uintptr_t(fitem) & 7) != 0) {
1322 _asm int 3;
1323 break;
1325 if((uintptr_t(fitem) & GCHeap::kBlockMask) != uintptr_t(_b))
1326 _asm int 3;
1327 prev = fitem;
1328 fitem = *(void**)fitem;
1330 _b = _b->next;
1334 #endif
1336 #ifdef _DEBUG
1338 void GC::RCObjectZeroCheck(RCObject *item)
1340 size_t size = Size(item)/sizeof(uint32_t);
1341 uint32_t *p = (uint32_t*)item;
1342 uint32_t *limit = p+size;
1343 // skip vtable, first 4 bytes are cleared in Alloc
1344 p++;
1345 #ifdef MMGC_64BIT
1346 p++; // vtable is 8-bytes
1347 size--;
1348 #endif
1349 // in incrementalValidation mode manually deleted items
1350 // aren't put on the freelist so skip them
1351 if(incrementalValidation) {
1352 if(*p == uint32_t(GCHeap::FXFreedPoison))
1353 return;
1355 for ( ; p < limit ; p++ )
1357 if(*p)
1359 PrintAllocStackTrace(item);
1360 GCAssertMsg(false, "RCObject didn't clean up itself.");
1361 // If you hit this assert, use the debugger to cast 'item' to the type indicated by the call stack
1362 // in the output (you'll have to qualify the scope - (coreplayer::View*) rather than (View*))
1363 // and check that all fields are 0 (even boolean members). This is a
1364 // performance optimization that lets the GC avoid zeroing the whole thing itself.
1369 #endif
1371 void GC::gclog(const char *format, ...)
1373 char buf[4096];
1374 va_list argptr;
1376 va_start(argptr, format);
1377 VMPI_vsnprintf(buf, ARRAY_SIZE(buf), format, argptr);
1378 va_end(argptr);
1380 VMPI_log(buf);
1382 // log gross stats any time anything interesting happens
1383 static bool g_in_gclog = false;
1385 bool was_in_gclog;
1387 MMGC_LOCK(heap->gclog_spinlock);
1388 was_in_gclog = g_in_gclog;
1389 g_in_gclog = true;
1392 if(!was_in_gclog && !destroying)
1393 heap->DumpMemoryInfo();
1396 MMGC_LOCK(heap->gclog_spinlock);
1397 g_in_gclog = was_in_gclog;
1401 bool GC::IsRCObject(const void *item)
1403 if ( ! pageMap.AddrIsMappable(uintptr_t(item))
1404 || ((uintptr_t)item & GCHeap::kOffsetMask) == 0)
1405 return false;
1407 int bits = GetPageMapValue((uintptr_t)item);
1408 item = GetRealPointer(item);
1409 switch(bits)
1411 case PageMap::kGCAllocPage:
1412 return GCAlloc::IsRCObject(item);
1413 case PageMap::kGCLargeAllocPageFirst:
1414 return GCLargeAlloc::IsRCObject(item);
1415 default:
1416 return false;
1420 void GC::DumpAlloc(GCAlloc *a, size_t& internal_waste, size_t& overhead)
1422 int inUse = a->GetNumAlloc() * a->GetItemSize();
1423 int maxAlloc = a->GetMaxAlloc()* a->GetItemSize();
1425 overhead = maxAlloc-inUse;
1426 internal_waste = 0;
1428 int efficiency = maxAlloc > 0 ? inUse * 100 / maxAlloc : 100;
1429 if(inUse) {
1430 const char *name = a->ContainsPointers() ? a->ContainsRCObjects() ? "rc" : "gc" : "opaque";
1431 if(heap->config.verbose)
1432 GCLog("[mem] gc[%d] %s allocator: %d%% efficiency %d bytes (%d kb) in use out of %d bytes (%d kb)\n", a->GetItemSize(), name, efficiency, inUse, inUse>>10, maxAlloc, maxAlloc>>10);
1433 #ifdef MMGC_MEMORY_PROFILER
1434 if(heap->HooksEnabled())
1436 size_t askSize = a->GetTotalAskSize();
1437 internal_waste = inUse - askSize;
1438 size_t internal_efficiency = askSize * 100 / inUse;
1439 if(heap->config.verbose)
1440 GCLog("\t\t\t\t %u%% internal efficiency %u bytes (%u kb) actually requested out of %d bytes(%d kb)\n", internal_efficiency, (uint32_t) askSize, (uint32_t)(askSize>>10), inUse, inUse>>10);
1442 #endif
1446 void GC::DumpMemoryInfo()
1448 size_t total = GetNumBlocks() * GCHeap::kBlockSize;
1450 size_t ask;
1451 size_t allocated;
1452 GetUsageInfo(ask, allocated);
1454 heap->log_percentage("[mem] \tmanaged overhead ", total-allocated, total);
1455 #ifdef MMGC_MEMORY_PROFILER
1456 if(heap->HooksEnabled())
1457 heap->log_percentage("[mem] \tmanaged internal wastage", allocated-ask, allocated);
1458 #endif
1460 if(ticksToMillis(markTicks()) != 0 && bytesMarked() != 0) {
1461 uint32_t markRate = (uint32_t) (bytesMarked() / (1024 * ticksToMillis(markTicks()))); // kb/ms == mb/s
1462 GCLog("[mem] \tmark rate %u mb/s\n", markRate);
1464 GCLog("[mem] \tmark increments %d\n", markIncrements());
1465 GCLog("[mem] \tsweeps %d \n", sweeps);
1467 size_t total_overhead = 0;
1468 size_t total_internal_waste = 0;
1469 GCAlloc** allocators[] = {containsPointersRCAllocs, containsPointersAllocs, noPointersAllocs};
1470 for(int j = 0;j<3;j++)
1472 GCAlloc** gc_alloc = allocators[j];
1474 for(int i=0; i < kNumSizeClasses; i++)
1476 size_t internal_waste;
1477 size_t overhead;
1478 DumpAlloc(gc_alloc[i], internal_waste, overhead);
1479 total_internal_waste += internal_waste;
1480 total_overhead += overhead;
1483 GCLog("Overhead %u bytes (%u kb)\n", (uint32_t)total_overhead, (uint32_t)(total_overhead>>10));
1484 #ifdef MMGC_MEMORY_PROFILER
1485 if(heap->HooksEnabled())
1486 GCLog("Internal Wastage %u bytes (%u kb)\n", (uint32_t)total_internal_waste, (uint32_t)(total_internal_waste>>10));
1487 #endif
1490 #ifdef MMGC_MEMORY_PROFILER
1491 // It only makes sense to call this after an END_FinalizeAndSweep event and
1492 // before the next START_StartIncrementalMark event.
1493 void GC::DumpPauseInfo()
1495 if (!nogc && incremental) {
1496 GCLog("[mem] \tpauses in GC, most recent (ms): startmark=%.2f incrementalmark=%.2f finalscan=%.2f finishmark=%.2f reap=%.2f\n",
1497 double(ticksToMicros(policy.timeMaxStartIncrementalMarkLastCollection)) / 1000.0,
1498 double(ticksToMicros(policy.timeMaxIncrementalMarkLastCollection)) / 1000.0,
1499 double(ticksToMicros(policy.timeMaxFinalRootAndStackScanLastCollection)) / 1000.0,
1500 double(ticksToMicros(policy.timeMaxFinalizeAndSweepLastCollection)) / 1000.0,
1501 double(ticksToMicros(policy.timeMaxReapZCTLastCollection)) / 1000.0);
1502 GCLog("[mem] \tpauses in GC, entire run (ms): startmark=%.2f incrementalmark=%.2f finalscan=%.2f finishmark=%.2f reap=%.2f\n",
1503 double(ticksToMicros(policy.timeMaxStartIncrementalMark)) / 1000.0,
1504 double(ticksToMicros(policy.timeMaxIncrementalMark)) / 1000.0,
1505 double(ticksToMicros(policy.timeMaxFinalRootAndStackScan)) / 1000.0,
1506 double(ticksToMicros(policy.timeMaxFinalizeAndSweep)) / 1000.0,
1507 double(ticksToMicros(policy.timeMaxReapZCT)) / 1000.0);
1508 GCLog("[mem] \tpause clustering in GC, most recent: gctime=%.2fms end-to-end=%.2fms; mutator efficacy: %.2f%%\n",
1509 double(ticksToMicros(policy.timeInLastCollection)) / 1000.0,
1510 double(ticksToMicros(policy.timeEndToEndLastCollection)) / 1000.0,
1511 policy.timeInLastCollection == 0 ? // Sometimes there are no collections
1512 100.0 :
1513 double(policy.timeEndToEndLastCollection - policy.timeInLastCollection) * 100.0 / double(policy.timeEndToEndLastCollection));
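// Worked example (illustrative): timeInLastCollection == 10 ms and
// timeEndToEndLastCollection == 100 ms gives (100-10)*100/100 = 90% mutator efficacy.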
1516 #endif // MMGC_MEMORY_PROFILER
1518 #ifdef _DEBUG
1520 void GC::CheckFreelists()
1522 for(int i=0; i < kNumSizeClasses; i++)
1524 containsPointersAllocs[i]->CheckFreelist();
1525 containsPointersRCAllocs[i]->CheckFreelist();
1526 noPointersAllocs[i]->CheckFreelist();
1530 void GC::UnmarkedScan(const void *mem, size_t size)
1532 uintptr_t lowerBound = pageMap.MemStart();
1533 uintptr_t upperBound = pageMap.MemEnd();
1535 uintptr_t *p = (uintptr_t *) mem;
1536 uintptr_t *end = p + (size / sizeof(void*));
1538 while(p < end)
1540 uintptr_t val = *p++;
1542 if(val < lowerBound || val >= upperBound)
1543 continue;
1545 // normalize and divide by 4K to get index
1546 int bits = GetPageMapValue(val);
1547 switch(bits)
1549 case PageMap::kNonGC:
1550 continue;
1551 case PageMap::kGCAllocPage:
1552 GCAssert(GCAlloc::ConservativeGetMark((const void*) (val&~7), true));
1553 break;
1554 case PageMap::kGCLargeAllocPageFirst:
1555 GCAssert(GCLargeAlloc::ConservativeGetMark((const void*) (val&~7), true));
1556 break;
1557 default:
1558 GCAssertMsg(false, "Invalid pageMap value");
1559 break;
1564 // For every page in the address range known to this GC, scan the page conservatively
1565 // for pointers and assert that anything that looks like a pointer to an object
1566 // points to an object that's marked.
1568 // FIXME: This looks like it could be prone to signaling false positives and crashes:
1569 // it scans memory that's marked kNonGC, and some of that memory could even be
1570 // unmapped at the VM level?
1572 void GC::FindUnmarkedPointers()
1574 if(findUnmarkedPointers)
1576 uintptr_t m = pageMap.MemStart();
1578 while(m < pageMap.MemEnd())
1580 // divide by 4K to get index
1581 int bits = GetPageMapValue(m);
1582 if(bits == PageMap::kNonGC) {
1583 UnmarkedScan((const void*)m, GCHeap::kBlockSize);
1584 m += GCHeap::kBlockSize;
1585 } else if(bits == PageMap::kGCLargeAllocPageFirst) {
1586 GCLargeAlloc::LargeBlock *lb = (GCLargeAlloc::LargeBlock*)m;
1587 const void *item = GetUserPointer((const void*)(lb+1));
1588 if(GetMark(item) && GCLargeAlloc::ContainsPointers(GetRealPointer(item))) {
1589 UnmarkedScan(item, Size(item));
1591 m += lb->GetNumBlocks() * GCHeap::kBlockSize;
1592 } else if(bits == PageMap::kGCAllocPage) {
1593 // go through all marked objects
1594 GCAlloc::GCBlock *b = (GCAlloc::GCBlock *) m;
1595 GCAlloc* alloc = (GCAlloc*)b->alloc;
1596 for (int i=0; i < alloc->m_itemsPerBlock; i++) {
1597 // If the mark is 0, delete it.
1598 void* item = (char*)b->items + alloc->m_itemSize*i;
1599 if (!GetMark(item) && GCAlloc::ContainsPointers(item)) {
1600 UnmarkedScan(GetUserPointer(item), alloc->m_itemSize - DebugSize());
1604 m += GCHeap::kBlockSize;
1610 void GC::ProbeForMatch(const void *mem, size_t size, uintptr_t value, int recurseDepth, int currentDepth)
1612 uintptr_t lowerBound = pageMap.MemStart();
1613 uintptr_t upperBound = pageMap.MemEnd();
1615 uintptr_t *p = (uintptr_t *) mem;
1616 uintptr_t *end = p + (size / sizeof(void*));
1618 int bits = GetPageMapValue((uintptr_t)mem);
1620 while(p < end)
1622 uintptr_t val = *p++;
1624 if(val < lowerBound || val >= upperBound)
1625 continue;
1627 // did we hit ?
1628 if (val == value)
1630 // ok so let's see where we are
1631 uintptr_t* where = p-1;
1632 GCHeap::HeapBlock* block = heap->AddrToBlock(where);
1633 //GCAssertMsg(block->inUse(), "Not sure how we got here if the block is not in use?");
1634 GCAssertMsg(block->committed, "Means we are probing uncommitted memory. not good");
1635 int* ptr;
1637 switch(bits)
1639 case PageMap::kNonGC:
1641 if (block->size == 1)
1643 // fixed-size entries; find out the size of the block
1644 union {
1645 char* fixed_c;
1646 FixedAlloc::FixedBlock* fixed;
1648 fixed_c = block->baseAddr;
1649 int fixedsize = fixed->size;
1651 // now compute which element we are
1652 uintptr_t startAt = (uintptr_t) &(fixed->items[0]);
1653 uintptr_t item = ((uintptr_t)where-startAt) / fixedsize;
1655 ptr = (int*) ( startAt + (item*fixedsize) );
1657 else
1659 // fixed large allocs ; start is after the block
1660 union {
1661 char* ptr_c;
1662 int* ptr_i;
1664 ptr_c = block->baseAddr;
1665 ptr = ptr_i;
1668 break;
1670 default:
1671 ptr = ((int*)FindBeginningGuarded(where)) - 2;
1672 break;
1675 int taggedSize = *ptr;
1676 int* real = (ptr+2);
1678 GCDebugIndent(currentDepth*3);
1679 GCDebugMsg(false, "Location: 0x%08x Object: 0x%08x (size %d)\n", where, real, taggedSize);
1680 GCDebugIndent(currentDepth*3);
1681 PrintAllocStackTrace(real);
1683 if (recurseDepth > 0)
1684 WhosPointingAtMe(real, recurseDepth-1, currentDepth+1);
1690 * This routine searches through all of GC memory looking for references to 'me'.
1691 * It ignores already-claimed memory, thus locating active references only.
1692 * recurseDepth can be set to a positive value in order to follow the chain of
1693 * pointers arbitrarily deep. Watch out when setting it, since you may see
1694 * an exponential blow-up (usually 1 or 2 is enough). 'currentDepth' is for
1695 * indenting purposes and should be left alone.
1697 void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
1699 uintptr_t val = (uintptr_t)me;
1700 uintptr_t m = pageMap.MemStart();
1702 GCDebugIndent(currentDepth*3);
1703 GCDebugMsg(false, "[%d] Probing for pointers to : 0x%08x\n", currentDepth, me);
1704 while(m < pageMap.MemEnd())
1706 int bits = GetPageMapValue(m);
1707 if(bits == PageMap::kNonGC)
1709 ProbeForMatch((const void*)m, GCHeap::kBlockSize, val, recurseDepth, currentDepth);
1710 m += GCHeap::kBlockSize;
1712 else if(bits == PageMap::kGCLargeAllocPageFirst)
1714 GCLargeAlloc::LargeBlock *lb = (GCLargeAlloc::LargeBlock*)m;
1715 const void *item = GetUserPointer((const void*)(lb+1));
1716 if (GetMark(item) && GCLargeAlloc::ContainsPointers(GetRealPointer(item)))
1718 ProbeForMatch(item, Size(item), val, recurseDepth, currentDepth);
1720 m += lb->GetNumBlocks() * GCHeap::kBlockSize;
1722 else if(bits == PageMap::kGCAllocPage)
1724 // go through all marked objects
1725 GCAlloc::GCBlock *b = (GCAlloc::GCBlock *) m;
1726 GCAlloc* alloc = (GCAlloc*)b->alloc;
1727 for (int i=0; i < alloc->m_itemsPerBlock; i++)
1729 void *item = (char*)b->items + alloc->m_itemSize*i;
1730 if (GetMark(item) && GCAlloc::ContainsPointers(item))
1732 ProbeForMatch(GetUserPointer(item), alloc->m_itemSize - DebugSize(), val, recurseDepth, currentDepth);
1735 m += GCHeap::kBlockSize;
1737 else
1739 GCAssertMsg(false, "Oh seems we missed a case...Tom any ideas here?");
1744 #undef ALLOCA_AND_FILL_WITH_SPACES
1745 #endif
1748 void GC::StartIncrementalMark()
1750 policy.signal(GCPolicyManager::START_StartIncrementalMark); // garbage collection starts
1752 GCAssert(!marking);
1753 GCAssert(!collecting);
1754 GCAssert(!Reaping()); // bugzilla 564800
1756 lastStartMarkIncrementCount = markIncrements();
1758 // set the stack cleaning trigger
1759 stackCleaned = false;
1761 marking = true;
1763 GCAssert(m_incrementalWork.Count() == 0);
1764 GCAssert(m_barrierWork.Count() == 0);
1766 SweepNeedsSweeping();
1768 // at this point every object should have no marks or be marked kFreelist
1769 #ifdef _DEBUG
1770 for(int i=0; i < kNumSizeClasses; i++) {
1771 containsPointersRCAllocs[i]->CheckMarks();
1772 containsPointersAllocs[i]->CheckMarks();
1773 noPointersAllocs[i]->CheckMarks();
1775 #endif
1777 #ifdef MMGC_HEAP_GRAPH
1778 markerGraph.clear();
1779 #endif
1781 if (incremental)
1782 MarkAllRoots();
1784 policy.signal(GCPolicyManager::END_StartIncrementalMark);
1786 // FIXME (policy): arguably a bug to do this here if StartIncrementalMark has exhausted its quantum
1787 // doing eager sweeping.
1789 if (incremental)
1790 IncrementalMark();
1793 // The mark stack overflow logic depends on this calling MarkItem directly
1794 // for each of the roots.
1796 void GC::MarkAllRoots(bool deep)
1798 // Need to do this while holding the root lock so we don't end
1799 // up trying to scan a deleted item later, another reason to keep
1800 // the root set small.
1802 MMGC_LOCK(m_rootListLock);
1803 markerActive++;
1804 GCRoot *r = m_roots;
1805 while(r) {
1806 GCWorkItem item = r->GetWorkItem();
1807 if(item.ptr) {
1808 // If this root will be split push a sentinel item and store
1809 // a pointer to it in the root. This will allow us to clean
1810 // the stack if the root is deleted. See GCRoot::Destroy and
1811 // GC::HandleLargeMarkItem
1812 if(item.GetSize() > kMarkItemSplitThreshold) {
1813 PushWorkItem(GCWorkItem(r, GCWorkItem::kGCRoot));
1814 GCWorkItem *item = m_incrementalWork.Peek();
1815 // test for mark stack overflow
1816 if(item->GetSentinelPointer() == r)
1817 r->SetMarkStackSentinelPointer(item);
1819 MarkItem(item);
1820 if (deep)
1821 Mark();
1823 r = r->next;
1825 markerActive--;
1828 // Recover from a mark stack overflow.
1830 // Mark stack overflow occurs when an item cannot be pushed onto the mark stack because
1831 // the top mark stack segment is full and a new segment can't be allocated. In
1832 // practice, any call to GC::PushWorkItem (or its callers, such as GC::Mark, GC::MarkItem,
1833 // GC::MarkAllRoots, GC::MarkQueueAndStack, and not least the write barrier GC::TrapWrite)
1834 // can cause a mark stack overflow.
1836 // Since garbage collection cannot be allowed to abort the program as a result of
1837 // the allocation failure, but must run to completion, overflow is handled specially.
1838 // The mark stack Push method returns an error code, which is detected in GC::PushWorkItem,
1839 // which in turn calls GC::SignalMarkStackOverflow to handle the overflow. Overflow
1840 // is handled by discarding some elements and setting the global m_markStackOverflow flag.
1842 // Any code that needs to drive marking to completion - FinishIncrementalMark and Sweep
1843 // do, as they depend on the heap having been marked before running finalizers and
1844 // clearing out empty blocks - must test m_markStackOverflow: if it is set then it must
1845 // be cleared and the present method, GC::HandleMarkStackOverflow, is called to mop up.
1846 // The caller must perform this test repeatedly until the flag is no longer set.
1848 // The job of HandleMarkStackOverflow is to find marked heap objects that point to
1849 // unmarked objects, and to mark those objects. Effectively it uses the heap as a
1850 // queue of candidate objects, thus minimizing the use of the mark stack. Since marking
1851 // uses the mark stack the marking performed by HandleMarkStackOverflow may also
1852 // overflow the mark stack, but once a marked object has been visited by
1853 // HandleMarkStackOverflow it will not point to any unmarked objects. Object graphs
1854 // containing pointers from objects visited later to objects skipped earlier may
1855 // require the recovery pass to be run multiple times if marking overflows the mark
1856 // stack again, but each pass marks every object reachable through a reference from
1857 // an earlier object to a later one, as well as all non-overflowing subgraphs of
1858 // those objects in either direction, so every pass makes progress.
1860 // Performance might be an issue as the restart may happen several times and the
1861 // scanning performed by HandleMarkStackOverflow covers the entire heap every time. It could
1862 // be more efficient to interleave restarting and marking (eg, never fill the mark stack
1863 // during restarting, but mark after filling it partly, and remember the place in the heap
1864 // where scanning left off, and then iterate), but we should cross that bridge if and when
1865 // restarting turns out to be a problem in practice. (If it does, caching more mark stack
1866 // segments may be a better first fix, too.)
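//
// Illustrative sketch of the caller-side recovery loop described above (the
// actual drivers live in FinishIncrementalMark and Sweep; all names are as
// used elsewhere in this file):
//
//     while (m_markStackOverflow) {
//         m_markStackOverflow = false;
//         HandleMarkStackOverflow();    // may set m_markStackOverflow again
//         FlushBarrierWork();
//         Mark();                       // or MarkQueueAndStack(), as appropriate
//     }
//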
1868 void GC::HandleMarkStackOverflow()
1870 // Crucial for completion that we do not push marked items. MarkItem handles this
1871 // for us: it pushes referenced objects that are not marked. (MarkAllRoots calls
1872 // MarkItem on each root.)
1874 MarkAllRoots(true);
1876 // For all iterator types, GetNextMarkedObject returns true if 'ptr' and 'size' have
1877 // been updated to describe a marked, non-free object that should be scanned, and
1878 // false if the allocator instance has been exhausted.
1880 void* ptr;
1881 uint32_t size;
1883 markerActive++;
1885 for(int i=0; i < kNumSizeClasses; i++) {
1886 GCAllocIterator iter1(containsPointersRCAllocs[i]);
1887 while (iter1.GetNextMarkedObject(ptr, size)) {
1888 GCWorkItem item(ptr, size, GCWorkItem::kGCObject);
1889 MarkItem(item);
1890 Mark();
1892 GCAllocIterator iter2(containsPointersAllocs[i]);
1893 while (iter2.GetNextMarkedObject(ptr, size)) {
1894 GCWorkItem item(ptr, size, GCWorkItem::kGCObject);
1895 MarkItem(item);
1896 Mark();
1900 GCLargeAllocIterator iter3(largeAlloc);
1901 while (iter3.GetNextMarkedObject(ptr, size)) {
1902 GCWorkItem item(ptr, size, GCWorkItem::kGCObject);
1903 MarkItem(item);
1904 Mark();
1907 markerActive--;
1910 // Signal that attempting to push 'item' onto 'stack' overflowed 'stack'.
1912 // Either 'item' must be pushed onto the stack, replacing some other item there,
1913 // or any state information in the item that indicates that it is stacked must
1914 // be cleared, since it will not be pushed onto the stack. What to do?
1916 // The literature (Jones & Lins) does not provide a lot of guidance. Intuitively it
1917 // seems that we want to maximize the number of items that remain on the stack so
1918 // that the marker will finish processing the maximal number of objects before we
1919 // enter the stack overflow recovery code (in HandleMarkStackOverflow, above). Ergo
1920 // we drop 'item' on the floor.
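//
// A minimal sketch, assuming only what the comments above state, of how the
// push path reaches this method; the real GC::PushWorkItem may differ in
// details such as its exact signature:
//
//     void GC::PushWorkItem(GCWorkItem item) {
//         if (!m_incrementalWork.Push(item))    // Push reports failure...
//             SignalMarkStackOverflow(item);    // ...so drop the item and set the flag
//     }
//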
1922 void GC::SignalMarkStackOverflow(GCWorkItem& item)
1924 GCAssert(item.ptr != NULL);
1925 if (item.IsGCItem())
1926 ClearQueued(item.ptr);
1927 m_markStackOverflow = true;
1930 // HandleLargeMarkItem handles work items that are too big to
1931 // be marked atomically. We split the work item into two chunks: a
1932 // small head (which we mark now) and a large tail (which we push
1933 // onto the stack). The tail is a non-gcobject regardless of
1934 // whether the head is a gcobject. THE HEAD MUST BE MARKED FIRST
1935 // because this ensures that the mark on the object is set
1936 // immediately; that is necessary for write barrier correctness.
1938 // Why use kLargestAlloc as the split value?
1940 // Unfortunately for correctness THE HEAD MUST ALSO BE MARKED LAST,
1941 // because the queued bit is traditionally used to prevent the object
1942 // from being deleted explicitly by GC::Free while the object is
1943 // on the mark stack. We can't have it both ways, so split objects
1944 // are protected against being freed by carrying another bit that
1945 // prevents deletion when it is set. Because we do not want to expand
1946 // the number of header bits per object from 4 to 8, we only have
1947 // this extra bit on large objects (where there's plenty of space).
1948 // Thus the cutoff for splitting is exactly kLargestAlloc: only
1949 // large objects that can carry this bit are split.
1951 // In addition, the queued flag still prevents the object from
1952 // being deleted.
1954 // The free-protection flags are reset by a special GCWorkItem
1955 // that is pushed on the stack below the two parts of the object that
1956 // is being split; when that is later popped, it resets the flag
1957 // and returns.
1959 // The complexity with the free-protection flags may go away if we
1960 // move to a write barrier that does not depend on the queued bit,
1961 // for example, a card-marking barrier.
1963 // If a mark stack overflow occurs the large tail may be popped
1964 // and discarded. This is not a problem: the object as a whole
1965 // is marked, but points to unmarked storage, and the latter
1966 // objects will be picked up as per normal. Discarding the
1967 // tail is entirely equivalent to discarding the work items that
1968 // would result from scanning the tail.
1970 // Large GCRoots are also split here. MarkAllRoots pushes a
1971 // sentinel item on the stack that will be right below the GCRoot.
1972 // If the GCRoot is deleted it uses the sentinel pointer to clear
1973 // the tail work item from GCRoot::Destroy along with the sentinel
1974 // itself.
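//
// A sketch of the resulting mark stack layout, illustrating the scheme described
// above rather than adding any new machinery. For a large GC object 'obj' of
// size S > kMarkItemSplitThreshold, MarkItem effectively does:
//
//     GCLargeAlloc::ProtectAgainstFree(obj);
//     push sentinel(kGCLargeAlloc, obj);           // popped last: unprotects obj
//     push tail [obj + kLargestAlloc, obj + S);    // non-GC work item, may be split again
//     scan head [obj, obj + kLargestAlloc) now;    // sets the mark on obj immediately
//
// For a large GCRoot the kGCRoot sentinel pushed by MarkAllRoots sits below the
// tail instead, so GCRoot::Destroy can clear both the tail and the sentinel.
//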
1976 bool GC::HandleLargeMarkItem(GCWorkItem &wi, size_t& size)
1978 if (wi.IsSentinelItem()) {
1979 if(wi.GetSentinelType() == GCWorkItem::kGCLargeAlloc) {
1980 // Unprotect an item that was protected earlier, see comment block above.
1981 GCLargeAlloc::UnprotectAgainstFree(wi.GetSentinelPointer());
1984 if(wi.GetSentinelType() == GCWorkItem::kGCRoot) {
1985 // The GCRoot is no longer on the stack, clear the pointers into the stack.
1986 GCRoot *sentinelRoot = (GCRoot*)wi.GetSentinelPointer();
1987 sentinelRoot->ClearMarkStackSentinelPointer();
1989 return true;
1992 if (wi.IsGCItem()) {
1993 // Need to protect it against 'Free': push a magic item representing this object
1994 // that will prevent this split item from being freed, see comment block above.
1995 GCLargeAlloc::ProtectAgainstFree(wi.ptr);
1996 PushWorkItem(GCWorkItem(wi.ptr, GCWorkItem::kGCLargeAlloc));
1999 PushWorkItem(GCWorkItem((void*)(wi.iptr + kLargestAlloc),
2000 uint32_t(size - kLargestAlloc),
2001 wi.HasInteriorPtrs() ? GCWorkItem::kStackMemory : GCWorkItem::kNonGCObject));
2003 size = kMarkItemSplitThreshold;
2004 return false;
2008 // This will mark the item whether the item was previously marked or not.
2009 // The mark stack overflow logic depends on that.
2011 void GC::MarkItem(GCWorkItem &wi)
2013 GCAssert(markerActive);
2015 size_t size = wi.GetSize();
2016 uintptr_t *p = (uintptr_t*) wi.ptr;
2018 // Control mark stack growth and ensure incrementality:
2020 // Here we consider whether to split the object into multiple pieces.
2021 // A cutoff of kLargestAlloc is imposed on us by external factors, see
2022 // discussion below. Ideally we'd choose a limit that isn't "too small"
2023 // because then we'll split too many objects, and isn't "too large"
2024 // because then the mark stack growth won't be throttled properly.
2026 // See bugzilla #495049 for a discussion of this problem.
2028 // See comments above HandleLargeMarkItem for the mechanics of how this
2029 // is done and why kMarkItemSplitThreshold == kLargestAlloc
2030 if (size > kMarkItemSplitThreshold)
2032 if(HandleLargeMarkItem(wi, size))
2033 return;
2036 policy.signalMarkWork(size);
2038 uintptr_t *end = p + (size / sizeof(void*));
2039 uintptr_t thisPage = (uintptr_t)p & GCHeap::kBlockMask;
2040 #ifdef MMGC_POINTINESS_PROFILING
2041 uint32_t could_be_pointer = 0;
2042 uint32_t actually_is_pointer = 0;
2043 #endif
2045 // set the mark bits on this guy
2046 if(wi.IsGCItem())
2048 int b = SetMark(wi.ptr);
2049 (void)b;
2050 #ifdef _DEBUG
2051 // def ref validation does a Trace which can
2052 // cause things on the work queue to be already marked
2053 // in incremental GC
2054 if(!validateDefRef) {
2055 //GCAssert(!b);
2057 #endif
2060 uintptr_t _memStart = pageMap.MemStart();
2061 uintptr_t _memEnd = pageMap.MemEnd();
2063 while(p < end)
2065 uintptr_t val = *p++;
2066 #ifdef MMGC_VALGRIND
2067 if (wi.HasInteriorPtrs()) {
2068 VALGRIND_MAKE_MEM_DEFINED(&val, sizeof(val));
2070 #endif // MMGC_VALGRIND
2072 if(val < _memStart || val >= _memEnd)
2073 continue;
2075 #ifdef MMGC_POINTINESS_PROFILING
2076 could_be_pointer++;
2077 #endif
2079 // normalize and divide by 4K to get index
2080 int bits = GetPageMapValue(val);
2082 if (bits == PageMap::kGCAllocPage)
2084 const void *item;
2085 int itemNum;
2086 GCAlloc::GCBlock *block = (GCAlloc::GCBlock*) (val & GCHeap::kBlockMask);
2088 if (wi.HasInteriorPtrs())
2090 item = (void*) val;
2092 // guard against bogus pointers to the block header
2093 if(item < block->items)
2094 continue;
2096 itemNum = GCAlloc::GetObjectIndex(block, item);
2098 // adjust |item| to the beginning of the allocation
2099 item = block->items + itemNum * block->size;
2101 else
2103 // back up to real beginning
2104 item = GetRealPointer((const void*) (val & ~7));
2106 // guard against bogus pointers to the block header
2107 if(item < block->items)
2108 continue;
2110 itemNum = GCAlloc::GetObjectIndex(block, item);
2112 // if |item| doesn't point to the beginning of an allocation,
2113 // it's not considered a pointer.
2114 if (block->items + itemNum * block->size != item)
2116 #ifdef MMGC_64BIT
2117 // Doubly-inherited classes have two vtables so are offset 8 more bytes than normal.
2118 // Handle that here (shows up with PlayerScriptBufferImpl object in the Flash player)
2119 if ((block->items + itemNum * block->size + sizeof(void *)) == item)
2120 item = block->items + itemNum * block->size;
2121 else
2122 #endif // MMGC_64BIT
2123 continue;
2127 #ifdef MMGC_POINTINESS_PROFILING
2128 actually_is_pointer++;
2129 #endif
2130 gcbits_t& bits2 = block->bits[GCAlloc::GetBitsIndex(block, item)];
2131 if ((bits2 & (kMark|kQueued)) == 0)
2133 uint32_t itemSize = block->size - (uint32_t)DebugSize();
2134 if(((GCAlloc*)block->alloc)->ContainsPointers())
2136 const void *realItem = GetUserPointer(item);
2137 GCWorkItem newItem(realItem, itemSize, GCWorkItem::kGCObject);
2138 if(((uintptr_t)realItem & GCHeap::kBlockMask) != thisPage || mark_item_recursion_control == 0)
2140 bits2 |= kQueued;
2141 PushWorkItem(newItem);
2143 else
2145 mark_item_recursion_control--;
2146 MarkItem(newItem);
2147 mark_item_recursion_control++;
2150 else
2152 bits2 |= kMark;
2153 policy.signalMarkWork(itemSize);
2155 #ifdef MMGC_HEAP_GRAPH
2156 markerGraph.edge(p-1, GetUserPointer(item));
2157 #endif
2160 else if (bits == PageMap::kGCLargeAllocPageFirst || (wi.HasInteriorPtrs() && bits == PageMap::kGCLargeAllocPageRest))
2162 const void* item;
2164 if (wi.HasInteriorPtrs())
2166 if (bits == PageMap::kGCLargeAllocPageFirst)
2168 // guard against bogus pointers to the block header
2169 if ((val & GCHeap::kOffsetMask) < sizeof(GCLargeAlloc::LargeBlock))
2170 continue;
2172 item = (void *) ((val & GCHeap::kBlockMask) | sizeof(GCLargeAlloc::LargeBlock));
2174 else
2176 item = GetRealPointer(FindBeginning((void *) val));
2179 else
2181 // back up to real beginning
2182 item = GetRealPointer((const void*) (val & ~7));
2184 // If |item| doesn't point to the start of the page, it's not
2185 // really a pointer.
2186 if(((uintptr_t) item & GCHeap::kOffsetMask) != sizeof(GCLargeAlloc::LargeBlock))
2187 continue;
2190 #ifdef MMGC_POINTINESS_PROFILING
2191 actually_is_pointer++;
2192 #endif
2194 GCLargeAlloc::LargeBlock *b = GCLargeAlloc::GetLargeBlock(item);
2195 if((b->flags[0] & (kQueued|kMark)) == 0)
2197 uint32_t itemSize = b->size - (uint32_t)DebugSize();
2198 if((b->flags[1] & GCLargeAlloc::kContainsPointers) != 0)
2200 b->flags[0] |= kQueued;
2201 PushWorkItem(GCWorkItem(GetUserPointer(item), itemSize, GCWorkItem::kGCObject));
2203 else
2205 // doesn't need marking; go right to black
2206 b->flags[0] |= kMark;
2207 policy.signalMarkWork(itemSize);
2209 #ifdef MMGC_HEAP_GRAPH
2210 markerGraph.edge(p-1, GetUserPointer(item));
2211 #endif
2215 #ifdef MMGC_POINTINESS_PROFILING
2216 policy.signalDemographics(size/sizeof(void*), could_be_pointer, actually_is_pointer);
2217 #endif
2220 void GC::IncrementalMark()
2222 GCAssert(!markerActive);
2224 uint32_t time = incrementalValidation ? 1 : policy.incrementalMarkMilliseconds();
2225 #ifdef _DEBUG
2226 time = 1;
2227 #endif
2229 SAMPLE_FRAME("[mark]", core());
2231 // Don't force collections to finish if the mark stack is empty;
2232 // let the allocation budget be used up.
2234 if(m_incrementalWork.Count() == 0) {
2235 CheckBarrierWork();
2236 if (m_incrementalWork.Count() == 0) {
2237 // Policy triggers off these signals, so we still need to send them.
2238 policy.signal(GCPolicyManager::START_IncrementalMark);
2239 policy.signal(GCPolicyManager::END_IncrementalMark);
2240 return;
2244 markerActive++;
2246 policy.signal(GCPolicyManager::START_IncrementalMark);
2248 // FIXME: tune this so that getPerformanceCounter() overhead is noise
2250 // *** NOTE ON THREAD SAFETY ***
2252 // If anyone feels like writing code that updates checkTimeIncrements (it
2253 // used to be 'static' instead of 'const'), beware that using a static var
2254 // here requires a lock. It may also be wrong to have one shared global for
2255 // the value, and in any case it may belong in the policy manager.
2256 const unsigned int checkTimeIncrements = 100;
2257 uint64_t start = VMPI_getPerformanceCounter();
2259 uint64_t numObjects=policy.objectsMarked();
2260 uint64_t objSize=policy.bytesMarked();
2262 uint64_t ticks = start + time * VMPI_getPerformanceFrequency() / 1000;
2263 do {
2264 unsigned int count = m_incrementalWork.Count();
2265 if (count == 0) {
2266 CheckBarrierWork();
2267 count = m_incrementalWork.Count();
2268 if (count == 0)
2269 break;
2271 if (count > checkTimeIncrements) {
2272 count = checkTimeIncrements;
2274 for(unsigned int i=0; i<count; i++)
2276 GCWorkItem item = m_incrementalWork.Pop();
2277 MarkItem(item);
2279 SAMPLE_CHECK();
2280 } while(VMPI_getPerformanceCounter() < ticks);
2282 policy.signal(GCPolicyManager::END_IncrementalMark);
2284 markerActive--;
2286 if(heap->Config().gcstats) {
2287 numObjects = policy.objectsMarked() - numObjects;
2288 objSize = policy.bytesMarked() - objSize;
2289 double millis = duration(start);
2290 size_t kb = size_t(objSize)>>10;
2291 gclog("[mem] mark(%d) %d objects (%d kb %d mb/s) in %.2f millis (%.4f s)\n",
2292 markIncrements() - lastStartMarkIncrementCount, int(numObjects), kb,
2293 uint32_t(double(kb)/millis), millis, duration(t0)/1000);
2297 void GC::FinishIncrementalMark(bool scanStack)
2299 // Don't finish an incremental mark (i.e., sweep) if we
2300 // are in the midst of a ZCT reap.
2301 if (Reaping())
2303 return;
2306 // Force repeated restarts and marking until we're done. For discussion
2307 // of completion, see the comments above HandleMarkStackOverflow.
2309 while (m_markStackOverflow) {
2310 m_markStackOverflow = false;
2311 HandleMarkStackOverflow(); // may set
2312 FlushBarrierWork(); // m_markStackOverflow
2313 Mark(); // to true again
2316 // finished in Sweep
2317 sweepStart = VMPI_getPerformanceCounter();
2319 // mark roots again, could have changed (alternative is to put WB's on the roots
2320 // which we may need to do if we find FinishIncrementalMark taking too long)
2322 policy.signal(GCPolicyManager::START_FinalRootAndStackScan);
2324 GCAssert(!m_markStackOverflow);
2326 FlushBarrierWork();
2327 MarkAllRoots();
2328 MarkQueueAndStack(scanStack);
2330 // Force repeated restarts and marking until we're done. For discussion
2331 // of completion, see the comments above HandleMarkStackOverflow. Note
2332 // that we must use MarkQueueAndStack rather than plain Mark because there
2333 // is no guarantee that the stack was actually pushed onto the mark stack.
2334 // If we iterate several times there may be a performance penalty as a
2335 // consequence of that, but it's not likely that that will in fact happen,
2336 // and it's not obvious how to fix it.
2338 while (m_markStackOverflow) {
2339 m_markStackOverflow = false;
2340 HandleMarkStackOverflow(); // may set
2341 FlushBarrierWork(); // m_markStackOverflow
2342 MarkQueueAndStack(scanStack); // to true again
2344 ClearMarkStack(); // Frees any cached resources
2345 m_barrierWork.Clear();
2346 zct.Prune(); // Frees unused memory
2348 policy.signal(GCPolicyManager::END_FinalRootAndStackScan);
2350 #ifdef _DEBUG
2351 // need to traverse all marked objects and make sure they don't contain
2352 // pointers to unmarked objects
2353 FindMissingWriteBarriers();
2354 #endif
2356 policy.signal(GCPolicyManager::START_FinalizeAndSweep);
2357 GCAssert(!collecting);
2359 // Sweep is responsible for setting and clearing GC::collecting.
2360 // It also clears GC::marking.
2362 GCAssert(m_incrementalWork.Count() == 0);
2363 GCAssert(m_barrierWork.Count() == 0);
2364 Sweep();
2365 GCAssert(m_incrementalWork.Count() == 0);
2366 GCAssert(m_barrierWork.Count() == 0);
2368 policy.signal(GCPolicyManager::END_FinalizeAndSweep); // garbage collection is finished
2369 #ifdef MMGC_MEMORY_PROFILER
2370 if(heap->Config().autoGCStats)
2371 DumpPauseInfo();
2372 #endif
2375 #ifdef _DEBUG
2376 bool GC::IsWhite(const void *item)
2378 // back up to real beginning
2379 item = GetRealPointer((const void*) item);
2381 int bits = GetPageMapValueGuarded((uintptr_t)item);
2382 switch(bits) {
2383 case PageMap::kGCAllocPage:
2384 return GCAlloc::IsWhite(item);
2385 case PageMap::kGCLargeAllocPageFirst:
2386 return GCLargeAlloc::IsWhite(item);
2388 return false;
2390 #endif // _DEBUG
2392 // Here the purpose is to shift some work from the barrier mark stack to the regular
2393 // mark stack /provided/ the barrier mark stack seems very full. The regular mark stack
2394 // is normally empty.
2396 // To avoid copying data, we will only move entire (full) segments, because they can
2397 // be moved by changing some pointers and counters. We will never move a non-full
2398 // segment.
2400 void GC::CheckBarrierWork()
2402 if (m_barrierWork.EntirelyFullSegments() < 9) // 9 is pretty arbitrary
2403 return;
2404 m_incrementalWork.TransferOneFullSegmentFrom(m_barrierWork);
2407 // Here the purpose is to shift all the work from the barrier mark stack to the regular
2408 // mark stack, unconditionally. This may mean copying mark work from one stack to
2409 // the other (for work that's in a non-full segment), but full segments can just be
2410 // moved by changing some pointers and counters.
2412 // Note it's possible for this to cause a mark stack overflow, and the caller must
2413 // deal with that.
2415 void GC::FlushBarrierWork()
2417 for ( uint32_t numfull = m_barrierWork.EntirelyFullSegments() ; numfull > 0 ; --numfull )
2418 if (!m_incrementalWork.TransferOneFullSegmentFrom(m_barrierWork))
2419 break;
2420 while (m_barrierWork.Count() > 0) {
2421 GCWorkItem item = m_barrierWork.Pop();
2422 PushWorkItem(item);
2426 void GC::WriteBarrierTrap(const void *container)
2428 if (marking)
2429 InlineWriteBarrierTrap(container);
2432 void GC::privateWriteBarrier(const void *container, const void *address, const void *value)
2434 privateInlineWriteBarrier(container, address, value);
2437 void GC::privateWriteBarrierRC(const void *container, const void *address, const void *value)
2439 privateInlineWriteBarrierRC(container, address, value);
2442 /* static */ void GC::WriteBarrierRC(const void *address, const void *value)
2444 GC* gc = GC::GetGC(address);
2445 gc->privateInlineWriteBarrierRC(address, value);
2448 /* static */ void GC::WriteBarrierRC_ctor(const void *address, const void *value)
2450 GC* gc = GC::GetGC(address);
2451 if (gc->marking)
2452 gc->InlineWriteBarrierTrap(gc->FindBeginningFast(address));
2453 gc->WriteBarrierWriteRC_ctor(address, value);
2456 /* static */ void GC::WriteBarrierRC_dtor(const void *address)
2458 GC* gc = GC::GetGC(address);
2459 gc->WriteBarrierWriteRC_dtor(address);
2462 /* static */ void GC::WriteBarrier(const void *address, const void *value)
2464 GC* gc = GC::GetGC(address);
2465 gc->privateInlineWriteBarrier(address, value);
2468 // It might appear that this can be optimized easily, but not so - there's a
2469 // lot of logic hiding here, and little to gain from hand-inlining.
2471 void GC::privateConservativeWriteBarrierNoSubstitute(const void *address)
2473 GCAssert(marking);
2474 if(IsPointerToGCPage(address))
2475 InlineWriteBarrierTrap(FindBeginningFast(address));
2478 void GC::WriteBarrierNoSubstitute(const void *container, const void *value)
2480 (void)value; // Can't get rid of this parameter now; part of an existing API
2482 GCAssert(container != NULL);
2483 GCAssert(IsPointerToGCPage(container));
2484 GCAssert(FindBeginningGuarded(container) == container);
2485 if (marking)
2486 InlineWriteBarrierTrap(container);
2489 // Add 'container' to the remembered set.
2491 // IncrementalMark may move a segment off the remembered set;
2492 // FinishIncrementalMark will take care of what remains.
2494 // Observe that if adding the item to the remembered set (a secondary mark stack)
2495 // fails, then the item is just pushed onto the regular mark stack; if that too
2496 // fails then normal stack overflow handling takes over. That is what we want,
2497 // as it guarantees that the item will be traced again.
2499 void GC::WriteBarrierHit(const void* container)
2501 GCAssert(IsPointerToGCObject(GetRealPointer(container)));
2502 if (collecting)
2504 // It's the job of the allocators to make sure the rhs is marked if
2505 // it is allocated on a page that's not yet swept. In particular,
2506 // the barrier is not needed to make sure that that object is kept
2507 // alive.
2509 // Ergo all we need to do here is revert the lhs to marked and return.
2510 SetMark(container);
2511 return;
2513 GCWorkItem item(container, (uint32_t)Size(container), GCWorkItem::kGCObject);
2514 // Note, pushing directly here works right now because PushWorkItem never
2515 // performs any processing (breaking up a large object into shorter
2516 // segments, for example). If that changes, we will probably need to
2517 // introduce PushBarrierItem to do the same thing for m_barrierWork.
2518 if (!m_barrierWork.Push(item))
2519 PushWorkItem(item);
2522 void GC::movePointers(void **dstArray, uint32_t dstOffset, const void **srcArray, uint32_t srcOffset, size_t numPointers)
2524 if(marking && GetMark(dstArray) && ContainsPointers(dstArray) &&
2525 // don't push small items that are moving pointers inside the same array
2526 (dstArray != srcArray || Size(dstArray) > kMarkItemSplitThreshold)) {
2527 // this could be optimized to just re-scan the dirty region
2528 InlineWriteBarrierTrap(dstArray);
2530 VMPI_memmove(dstArray + dstOffset, srcArray + srcOffset, numPointers * sizeof(void*));
2533 void GC::movePointersWithinBlock(void** array, uint32_t dstOffsetInBytes, uint32_t srcOffsetInBytes, size_t numPointers, bool zeroEmptied)
2535 if (srcOffsetInBytes == dstOffsetInBytes || numPointers == 0)
2536 return;
2538 if (marking &&
2539 GetMark(array) &&
2540 ContainsPointers(array) &&
2541 // don't push small items that are moving pointers inside the same array
2542 Size(array) > kMarkItemSplitThreshold)
2544 // this could be optimized to just re-scan the dirty region
2545 InlineWriteBarrierTrap(array);
2548 size_t const bytesToMove = numPointers * sizeof(void*);
2549 VMPI_memmove((char*)array + dstOffsetInBytes, (char*)array + srcOffsetInBytes, bytesToMove);
2551 if (zeroEmptied)
2553 size_t zeroOffsetInBytes, bytesToZero;
2554 if (srcOffsetInBytes > dstOffsetInBytes)
2556 // moving down, zero the end
2557 bytesToZero = srcOffsetInBytes - dstOffsetInBytes;
2558 zeroOffsetInBytes = dstOffsetInBytes + bytesToMove;
2560 else
2562 // moving up, zero the start
2563 bytesToZero = dstOffsetInBytes - srcOffsetInBytes;
2564 zeroOffsetInBytes = srcOffsetInBytes;
2566 VMPI_memset((char*)array + zeroOffsetInBytes, 0, bytesToZero);
2570 bool GC::ContainsPointers(const void *item)
2572 item = GetRealPointer(item);
2573 if (GCLargeAlloc::IsLargeBlock(item)) {
2574 return GCLargeAlloc::ContainsPointers(item);
2575 } else {
2576 return GCAlloc::ContainsPointers(item);
2580 uint32_t *GC::AllocBits(int numBytes, int sizeClass)
2582 uint32_t *bits;
2584 GCAssert(numBytes % 4 == 0);
2586 #ifdef MMGC_64BIT // we use first 8-byte slot for the free list
2587 if (numBytes == 4)
2588 numBytes = 8;
2589 #endif
2591 // hit freelists first
2592 if(m_bitsFreelists[sizeClass]) {
2593 bits = m_bitsFreelists[sizeClass];
2594 m_bitsFreelists[sizeClass] = *(uint32_t**)bits;
2595 VMPI_memset(bits, 0, sizeof(uint32_t*));
2596 return bits;
2599 if(!m_bitsNext) {
2600 // Bugzilla 551833: It's OK to use heapAlloc here (as opposed to
2601 // heap->AllocNoOOM, say) because the caller knows AllocBits()
2602 // can trigger OOM.
2603 m_bitsNext = (uint32_t*)heapAlloc(1);
2606 int leftOver = GCHeap::kBlockSize - ((uintptr_t)m_bitsNext & GCHeap::kOffsetMask);
2607 if(leftOver >= numBytes) {
2608 bits = m_bitsNext;
2609 if(leftOver == numBytes)
2610 m_bitsNext = 0;
2611 else
2612 m_bitsNext += numBytes/sizeof(uint32_t);
2613 } else {
2614 if(leftOver>=int(sizeof(void*))) {
2615 // put waste in freelist
2616 for(int i=0, n=kNumSizeClasses; i<n; i++) {
2617 GCAlloc *a = noPointersAllocs[i];
2618 if(!a->m_bitsInPage && a->m_numBitmapBytes <= leftOver) {
2619 FreeBits(m_bitsNext, a->m_sizeClassIndex);
2620 break;
2624 m_bitsNext = 0;
2625 // recurse rather than duplicating code
2626 return AllocBits(numBytes, sizeClass);
2628 return bits;
2631 void GC::AddRoot(GCRoot *root)
2633 MMGC_LOCK(m_rootListLock);
2634 root->prev = NULL;
2635 root->next = m_roots;
2636 if(m_roots)
2637 m_roots->prev = root;
2638 m_roots = root;
2641 void GC::RemoveRoot(GCRoot *root)
2643 MMGC_LOCK(m_rootListLock);
2644 if( m_roots == root )
2645 m_roots = root->next;
2646 else
2647 root->prev->next = root->next;
2649 if(root->next)
2650 root->next->prev = root->prev;
2653 void GC::AddCallback(GCCallback *cb)
2655 cb->prevCB = NULL;
2656 cb->nextCB = m_callbacks;
2657 if(m_callbacks)
2658 m_callbacks->prevCB = cb;
2659 m_callbacks = cb;
2662 void GC::RemoveCallback(GCCallback *cb)
2664 if( m_callbacks == cb )
2665 m_callbacks = cb->nextCB;
2666 else
2667 cb->prevCB->nextCB = cb->nextCB;
2669 if(cb->nextCB)
2670 cb->nextCB->prevCB = cb->prevCB;
2673 GCObject *GCWeakRef::get()
2675 // Bugzilla 572331.
2676 // It is possible for presweep handlers to extract pointers to unmarked objects
2677 // from weakrefs and store them into marked objects. Since the write barrier is
2678 // disabled during collecting we must ensure that the unmarked object becomes
2679 // marked by some other means in that situation, and we do that here by introducing
2680 // a local read barrier that resurrects those objects.
2682 // The read barrier just pushes the to-be-resurrected object onto the mark
2683 // stack; the marker will be run in Sweep() after the presweep calls are done.
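//
// Illustrative hazard (a hypothetical presweep callback; the names are invented
// for this example only):
//
//     void MyCallback::presweep() {
//         GCObject* o = someWeakRef->get();    // o may be unmarked at this point
//         markedContainer->field = o;          // write barrier is disabled while collecting
//     }
//
// Without the read barrier below, 'o' could be swept even though it was just
// stored into a live object.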
2685 if (m_obj != 0) {
2686 GC *gc = GC::GetGC(m_obj);
2687 if (gc->Collecting() && !GC::GetMark(m_obj)) {
2688 // If this assertion fails then we have a finalizer (C++ destructor on a
2689 // GCFinalizableObject) resurrecting an object. That should not happen,
2690 // because we clear all GCWeakRefs pointing to unmarked objects before
2691 // running finalizers.
2693 // Bugzilla 575631 (explanation below)
2694 // GCAssert(gc->Presweeping());
2696 // Bugzilla 575631: GCAlloc::Finalize's interleaving
2697 // of mark-bit resets and finalizer invocations means
2698 // that the conditions above don't imply we're in
2699 // Presweep. Nonetheless, marking an object with
2700 // cleared mark bits isn't legal in GCAlloc::Finalize,
2701 // so instead of asserting that we are presweeping, we
2702 // use that condition as a guard.
2703 if (gc->Presweeping())
2704 gc->PushWorkItem(GCWorkItem(m_obj, uint32_t(GC::Size(m_obj)), GCWorkItem::kGCObject));
2708 return (GCObject*)m_obj;
2711 /*static*/
2712 GCWeakRef* GC::GetWeakRef(const void *userptr)
2714 GC *gc = GetGC(userptr);
2715 GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(userptr);
2717 GCAssert(gc->IsPointerToGCPage(userptr));
2718 GCAssert(gc->IsPointerToGCObject(GetRealPointer(userptr)));
2719 GCAssert((gc->GetGCBits(GetRealPointer(userptr)) & GCAlloc::kFreelist) != GCAlloc::kFreelist);
2721 if(ref == NULL) {
2722 ref = new (gc) GCWeakRef(userptr);
2723 gc->weakRefs.put(userptr, ref);
2724 SetHasWeakRef(userptr, true);
2725 } else {
2726 GCAssert(ref->get() == userptr);
2728 return ref;
2731 void GC::ClearWeakRef(const void *item, bool allowRehash)
2733 GCWeakRef *ref = (GCWeakRef*) weakRefs.remove(item, allowRehash);
2734 GCAssert(weakRefs.get(item) == NULL);
2735 GCAssert(ref != NULL || heap->GetStatus() == kMemAbort);
2736 if(ref) {
2737 GCAssert(ref->isNull() || ref->peek() == item);
2738 ref->m_obj = NULL;
2739 SetHasWeakRef(item, false);
2743 #ifdef _DEBUG
2744 void GC::WhitePointerScan(const void *mem, size_t size)
2746 uintptr_t *p = (uintptr_t *) mem;
2747 // callers pass the object size minus DebugSize(); the scan also stops at GCEndOfObjectPoison
2748 uintptr_t *end = p + ((size) / sizeof(void*));
2750 while(p < end)
2752 uintptr_t val = *p;
2753 if(val == GCHeap::GCEndOfObjectPoison)
2754 break;
2755 if(IsWhite((const void*) (val&~7)) &&
2756 *(((int32_t*)(val&~7))+1) != int32_t(GCHeap::GCFreedPoison) &&
2757 *(((int32_t*)(val&~7))+1) != int32_t(GCHeap::GCSweptPoison))
2759 GCDebugMsg(false, "Object %p allocated here:\n", mem);
2760 PrintAllocStackTrace(mem);
2761 GCDebugMsg(false, "Didn't mark pointer at %p, object %p allocated here:\n", p, (void*)val);
2762 PrintAllocStackTrace((const void*)(val&~7));
2763 GCAssert(false);
2765 p++;
2769 void GC::FindMissingWriteBarriers()
2771 if(!incrementalValidation)
2772 return;
2774 uintptr_t m = pageMap.MemStart();
2775 while(m < pageMap.MemEnd())
2777 // divide by 4K to get index
2778 int bits = GetPageMapValue(m);
2779 switch(bits)
2781 case PageMap::kNonGC:
2782 m += GCHeap::kBlockSize;
2783 break;
2784 case PageMap::kGCLargeAllocPageFirst:
2786 GCLargeAlloc::LargeBlock *lb = (GCLargeAlloc::LargeBlock*)m;
2787 const void *item = GetUserPointer((const void*)(lb+1));
2788 if(GetMark(item) && GCLargeAlloc::ContainsPointers(item)) {
2789 WhitePointerScan(item, lb->size - DebugSize());
2791 m += lb->GetNumBlocks() * GCHeap::kBlockSize;
2793 break;
2794 case PageMap::kGCAllocPage:
2796 // go through all marked objects in this page
2797 GCAlloc::GCBlock *b = (GCAlloc::GCBlock *) m;
2798 GCAlloc* alloc = (GCAlloc*)b->alloc;
2799 if(alloc->ContainsPointers()) {
2800 for (int i=0; i< alloc->m_itemsPerBlock; i++) {
2802 // find all marked (but not kFreelist) objects and search them
2803 void *item = (char*)b->items + alloc->m_itemSize*i;
2804 if(!GetMark(item) || GetQueued(item))
2805 continue;
2807 WhitePointerScan(GetUserPointer(item), alloc->m_itemSize - DebugSize());
2810 m += GCHeap::kBlockSize;
2812 break;
2813 default:
2814 GCAssert(false);
2815 break;
2819 #endif //_DEBUG
2821 void *GC::heapAlloc(size_t siz, int flags)
2823 void *ptr = heap->Alloc((int)siz, flags);
2824 if(ptr)
2825 policy.signalBlockAllocation(siz);
2826 return ptr;
2829 void GC::heapFree(void *ptr, size_t siz, bool profile)
2831 if(!siz)
2832 siz = heap->Size(ptr);
2833 policy.signalBlockDeallocation(siz);
2834 heap->FreeInternal(ptr, profile, true);
2837 size_t GC::GetBytesInUse()
2839 size_t ask;
2840 size_t allocated;
2841 GetUsageInfo(ask, allocated);
2842 (void)ask;
2843 return allocated;
2846 void GC::GetUsageInfo(size_t& totalAskSize, size_t& totalAllocated)
2848 totalAskSize = 0;
2849 totalAllocated = 0;
2851 size_t ask;
2852 size_t allocated;
2854 GCAlloc** allocators[] = {containsPointersRCAllocs, containsPointersAllocs, noPointersAllocs};
2855 for(int j = 0;j<3;j++)
2857 GCAlloc** gc_alloc = allocators[j];
2859 for(int i=0; i < kNumSizeClasses; i++)
2861 gc_alloc[i]->GetUsageInfo(ask, allocated);
2862 totalAskSize += ask;
2863 totalAllocated += allocated;
2867 largeAlloc->GetUsageInfo(ask, allocated);
2868 totalAskSize += ask;
2869 totalAllocated += allocated;
2872 void GC::allocaInit()
2874 top_segment = NULL;
2875 stacktop = NULL;
2876 #ifdef _DEBUG
2877 stackdepth = 0;
2878 #endif
2879 pushAllocaSegment(AVMPLUS_PARAM_ALLOCA_DEFSIZE);
2882 void GC::allocaShutdown()
2884 while (top_segment != NULL)
2885 popAllocaSegment();
2886 top_segment = NULL;
2887 stacktop = NULL;
2890 void GC::allocaUnwind()
2892 allocaShutdown();
2893 allocaInit();
2896 void* GC::allocaPush(size_t nbytes, AllocaAutoPtr& x)
2898 GCAssert(x.unwindPtr == NULL);
2899 x.gc = this;
2900 x.unwindPtr = stacktop;
2901 nbytes = GCHeap::CheckForAllocSizeOverflow(nbytes, 7) & ~7;
2902 if ((char*)stacktop + nbytes <= top_segment->limit) {
2903 stacktop = (char*)stacktop + nbytes;
2904 return x.unwindPtr;
2906 return allocaPushSlow(nbytes);
2909 void GC::allocaPopToSlow(void* top)
2911 GCAssert(top_segment != NULL);
2912 GCAssert(!(top >= top_segment->start && top <= top_segment->limit));
2913 while (!(top >= top_segment->start && top <= top_segment->limit))
2914 popAllocaSegment();
2915 GCAssert(top_segment != NULL);
2918 void* GC::allocaPushSlow(size_t nbytes)
2920 size_t alloc_nbytes = nbytes;
2921 if (alloc_nbytes < AVMPLUS_PARAM_ALLOCA_DEFSIZE)
2922 alloc_nbytes = AVMPLUS_PARAM_ALLOCA_DEFSIZE;
2923 pushAllocaSegment(alloc_nbytes);
2924 void *p = stacktop;
2925 stacktop = (char*)stacktop + nbytes;
2926 return p;
2929 void GC::pushAllocaSegment(size_t nbytes)
2931 GCAssert(nbytes % 8 == 0);
2932 #ifdef _DEBUG
2933 stackdepth += nbytes;
2934 #endif
2935 void* memory = AllocRCRoot(nbytes);
2936 AllocaStackSegment* seg = mmfx_new(AllocaStackSegment);
2937 seg->start = memory;
2938 seg->limit = (void*)((char*)memory + nbytes);
2939 seg->top = NULL;
2940 seg->prev = top_segment;
2941 if (top_segment != NULL)
2942 top_segment->top = stacktop;
2943 top_segment = seg;
2944 stacktop = memory;
2947 void GC::popAllocaSegment()
2949 #ifdef _DEBUG
2950 stackdepth -= (char*)top_segment->limit - (char*)top_segment->start;
2951 #endif
2952 FreeRCRoot(top_segment->start);
2953 AllocaStackSegment* seg = top_segment;
2954 top_segment = top_segment->prev;
2955 if (top_segment != NULL)
2956 stacktop = top_segment->top;
2957 mmfx_delete(seg);
2960 GCAutoEnter::GCAutoEnter(GC *gc, EnterType type) : m_gc(NULL), m_prevgc(NULL)
2962 if(gc) {
2963 GC *prevGC = gc->heap->GetEnterFrame()->GetActiveGC();
2964 if(prevGC != gc) {
2965 // must enter first as it acquires the gc lock
2966 if(gc->ThreadEnter(this, /*doCollectionWork=*/true, type == kTryEnter)) {
2967 m_gc = gc;
2968 m_prevgc = prevGC;
2974 GCAutoEnter::~GCAutoEnter()
2976 Destroy(true);
2979 /*virtual*/
2980 void GCAutoEnter::Unwind()
2982 if(m_gc) {
2983 GC *gc = m_gc;
2984 gc->SignalImminentAbort();
2988 void GCAutoEnter::Destroy(bool doCollectionWork)
2990 if(m_gc) {
2991 m_gc->ThreadLeave(doCollectionWork, m_prevgc);
2992 m_gc = m_prevgc = NULL;
2996 GCAutoEnterPause::GCAutoEnterPause(GC *gc) : gc(gc), enterSave(gc->GetAutoEnter())
2998 GCAssertMsg(gc->GetStackEnter() != 0, "Invalid MMGC_GC_ROOT_THREAD usage, GC not already entered, random crashes will ensue");
2999 gc->ThreadLeave(/*doCollectionWork=*/false, gc);
3002 GCAutoEnterPause::~GCAutoEnterPause()
3004 GCAssertMsg(gc->GetStackEnter() == 0, "Invalid MMGC_GC_ROOT_THREAD usage, GC not exited properly, random crashes will ensue");
3005 gc->ThreadEnter(enterSave, false, false);
3008 bool GC::ThreadEnter(GCAutoEnter *enter, bool doCollectionWork, bool tryEnter)
3010 if(!VMPI_lockTestAndAcquire(&m_gcLock)) {
3011 if(tryEnter)
3012 return false;
3013 if(m_gcThread != VMPI_currentThread())
3014 VMPI_lockAcquire(&m_gcLock);
3017 heap->SetActiveGC(this);
3019 if(enterCount++ == 0) {
3020 heap->GetEnterFrame()->AddAbortUnwindObject(enter);
3021 stackEnter = enter;
3022 m_gcThread = VMPI_currentThread();
3023 if(doCollectionWork) {
3024 ThreadEdgeWork();
3027 return true;
3030 void GC::ThreadLeave( bool doCollectionWork, GC *prevGC)
3033 if(enterCount-1 == 0) {
3034 if(doCollectionWork) {
3035 ThreadEdgeWork();
3037 heap->GetEnterFrame()->RemoveAbortUnwindObject(stackEnter);
3040 // We always pop the active GC but have to do so before releasing the lock
3041 #ifdef DEBUG
3042 GC* curgc =
3043 #endif
3044 heap->SetActiveGC(prevGC);
3045 GCAssert(curgc == this);
3047 if(--enterCount == 0) {
3048 stackEnter = NULL;
3049 // cleared so we remain thread ambivalent
3050 rememberedStackTop = NULL;
3051 m_gcThread = NULL;
3052 VMPI_lockRelease(&m_gcLock);
3056 void GC::ThreadEdgeWork()
3058 if(destroying)
3059 return;
3061 if(policy.queryFullCollectionQueued())
3062 Collect(false);
3063 else
3064 ReapZCT(false);
3066 if(!stackCleaned)
3067 CleanStack();
3070 void GC::memoryStatusChange(MemoryStatus, MemoryStatus to)
3072 // ZCT blockage: what if we get here from ZCT growth?
3074 // Mark/sweep blockage: what about mark stack,
3075 // presweep, post-sweep, and finalize allocations?
3077 // If the ZCT or the GC can't complete we can't free memory! Currently
3078 // we do nothing, which means we rely on reserve or other
3079 // listeners to free memory, or we head straight to abort.
3081 if(to == kFreeMemoryIfPossible) {
3082 if(onThread()) {
3083 Collect();
3084 } else {
3085 // If we're not already in the middle of collecting from another thread's GC, then try to...
3086 GCAutoEnter enter(this, GCAutoEnter::kTryEnter);
3087 if(enter.Entered()) {
3088 Collect(false);
3090 // else nothing can be done
3095 #ifdef DEBUG
3096 void GC::ShouldBeInactive()
3098 GCAssert(m_gcThread == NULL);
3099 GCAssert(stackEnter == NULL);
3100 GCAssert(top_segment != NULL && top_segment->prev == NULL && top_segment->start == stacktop);
3101 GCAssert(VMPI_lockTestAndAcquire(&m_gcLock) && VMPI_lockRelease(&m_gcLock));
3103 #endif
3105 void GC::SignalImminentAbort()
3107 policy.SignalImminentAbort();
3108 zct.SignalImminentAbort();
3110 if (collecting || marking)
3112 ClearMarkStack();
3113 m_barrierWork.Clear();
3114 ClearMarks();
3115 m_markStackOverflow = false;
3116 collecting = false;
3117 marking = false;
3118 markerActive = 0;
3121 if(GetAutoEnter() != NULL) {
3122 GetAutoEnter()->Destroy(false);
3126 GC::AutoRCRootSegment::AutoRCRootSegment(GC* gc, void* mem, size_t size)
3127 : RCRootSegment(gc, mem, size)
3129 gc->AddRCRootSegment(this);
3132 GC::AutoRCRootSegment::~AutoRCRootSegment()
3134 GetGC()->RemoveRCRootSegment(this);
3137 #ifdef MMGC_HEAP_GRAPH
3139 void GC::addToBlacklist(const void *gcptr)
3141 blacklist.add(gcptr, gcptr);
3144 void GC::removeFromBlacklist(const void *gcptr)
3146 blacklist.remove(gcptr);
3149 const void *GC::findGCGraphBeginning(const void *addr, bool &wasDeletedGCRoot)
3151 /* These are all the possibilities
3152 1) GC small object
3153 2) GC large object
3154 3) GCRoot
3155 4) OS stack */
3157 if(!IsPointerToGCPage(addr)) {
3158 GCRoot *r = m_roots;
3159 while(r) {
3160 if(addr >= r->Get() && addr < r->End())
3161 return r->Get();
3162 r = r->next;
3165 // could be a deleted GCRoot
3166 GCHeap::HeapBlock *b = heap->AddrToBlock(addr);
3167 if(b) {
3168 wasDeletedGCRoot = true;
3169 if(b->size == 1) {
3170 return FixedAlloc::FindBeginning(addr);
3171 } else {
3172 return GetUserPointer(b->baseAddr);
3176 return FindBeginningGuarded(addr, true); // will return NULL for OS stack
3179 void GC::dumpBackPointerChain(const void *p, HeapGraph& g)
3181 GCLog("Dumping back pointer chain for 0x%p\n", p);
3182 PrintAllocStackTrace(p);
3183 dumpBackPointerChainHelper(p, g);
3184 GCLog("End back pointer chain for 0x%p\n", p);
3187 void GC::dumpBackPointerChainHelper(const void *p, HeapGraph& g)
3189 GCHashtable *pointers = g.getPointers(p);
3190 if(pointers) {
3191 GCHashtable::Iterator iter(pointers);
3192 const void *addr = iter.nextKey();
3193 if(addr) {
3194 bool wasDeletedGCRoot=false;
3195 const void *container = findGCGraphBeginning(addr, wasDeletedGCRoot);
3196 const void *displayContainer = container ? container : addr;
3197 uint32_t offset = container ? (char*)addr - (char*)container : 0;
3198 const char *containerDescription = IsPointerToGCPage(container) ? "gc" : (container ? "gcroot" : "stack");
3199 if(wasDeletedGCRoot)
3200 containerDescription = "gcroot-deleted";
3201 GCLog("0x%p(%x)(%s) -> 0x%p\n", displayContainer, offset, containerDescription, p);
3202 if(container) {
3203 PrintAllocStackTrace(container);
3204 dumpBackPointerChainHelper(container, g);
3210 void HeapGraph::edge(const void *addr, const void *newValue)
3212 const void *oldValue = GC::Pointer(*(void**)addr);
3213 newValue = GC::Pointer(newValue);
3214 GCHashtable *addresses;
3215 if(oldValue) {
3216 addresses = (GCHashtable*)backEdges.get(oldValue);
3217 if(addresses) {
3218 addresses->remove(addr);
3221 if(newValue) {
3222 addresses = (GCHashtable*)backEdges.get(newValue);
3223 if(!addresses) {
3224 addresses = mmfx_new(GCHashtable());
3225 backEdges.put(newValue, addresses);
3227 addresses->add(addr, addr);
3231 void HeapGraph::clear()
3233 GCHashtable_VMPI::Iterator iter(&backEdges);
3234 const void *key;
3235 while((key = iter.nextKey()) != NULL) {
3236 GCHashtable *addresses = (GCHashtable*)iter.value();
3237 mmfx_delete(addresses);
3239 backEdges.clear();
3242 GCHashtable *HeapGraph::getPointers(const void *obj)
3244 return (GCHashtable*)backEdges.get(obj);
3247 void GC::pruneBlacklist()
3249 if(blacklist.count() > 0) {
3250 GCHashtable::Iterator iter(&blacklist);
3251 const void *p;
3252 while((p = iter.nextKey()) != NULL) {
3253 if(!GetMark(p)) {
3254 removeFromBlacklist(p);
3260 void GC::printBlacklist()
3262 if(blacklist.count() > 0) {
3263 GCHashtable::Iterator iter(&blacklist);
3264 const void *p;
3265 while((p = iter.nextKey()) != NULL) {
3266 GCLog("Blacklist item 0x%p %s found in marker graph\n", p, markerGraph.getPointers(p) ? "was" : "wasn't");
3267 GCLog("Blacklist item 0x%p %s found in mutator graph\n", p, mutatorGraph.getPointers(p) ? "was" : "wasn't");
3268 dumpBackPointerChain(p, markerGraph);
3272 #endif