1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
27 * Alternatively, the contents of this file may be used under the terms of
28 * either the GNU General Public License Version 2 or later (the "GPL"), or
29 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
39 * ***** END LICENSE BLOCK ***** */
41 #ifndef __GC_inlines__
42 #define __GC_inlines__
44 // Inline functions for GCRoot, GC, GC::AllocaAutoPtr, GCWorkItem, Cleaner
45 // Inline functions for the write barrier are in WriteBarrier.h for now.
51 REALLY_INLINE
void *GCRoot::operator new(size_t size
)
53 return FixedMalloc::GetFixedMalloc()->OutOfLineAlloc(size
, MMgc::kZero
);
56 REALLY_INLINE
void GCRoot::operator delete (void *object
)
58 FixedMalloc::GetFixedMalloc()->OutOfLineFree(object
);
61 REALLY_INLINE
void GCRoot::ClearMarkStackSentinelPointer()
63 markStackSentinel
= NULL
;
66 REALLY_INLINE GCWorkItem
*GCRoot::GetMarkStackSentinelPointer()
68 return markStackSentinel
;
71 REALLY_INLINE GCWorkItem
GCRoot::GetWorkItem() const
73 return GCWorkItem(object
, (uint32_t)size
, GCWorkItem::kNonGCObject
);
78 REALLY_INLINE
void *GC::GetGCContextVariable(int var
) const
80 return m_contextVars
[var
];
83 REALLY_INLINE
void GC::SetGCContextVariable(int var
, void *val
)
85 m_contextVars
[var
] = val
;
88 REALLY_INLINE
avmplus::AvmCore
*GC::core() const
90 return (avmplus::AvmCore
*)GetGCContextVariable(GCV_AVMCORE
);
93 REALLY_INLINE GC
* GC::GetActiveGC() {
94 return GCHeap::GetGCHeap()->GetEnterFrame()->GetActiveGC();
97 REALLY_INLINE
void GC::QueueCollection()
99 policy
.queueFullCollection();
102 REALLY_INLINE
void GC::SignalAllocWork(size_t size
)
104 if (policy
.signalAllocWork(size
))
108 REALLY_INLINE
void GC::SignalFreeWork(size_t size
)
110 policy
.signalFreeWork(size
);
113 REALLY_INLINE
void *GC::PleaseAlloc(size_t size
, int flags
)
115 return Alloc(size
, flags
| kCanFail
);
118 // Normally extra will not be zero (overloaded 'new' operators take care of that)
119 // so the overflow check is not actually redundant.
121 REALLY_INLINE
void *GC::AllocExtra(size_t size
, size_t extra
, int flags
)
123 return Alloc(GCHeap::CheckForAllocSizeOverflow(size
, extra
), flags
);
126 REALLY_INLINE
void *GC::Calloc(size_t count
, size_t elsize
, int flags
)
128 return Alloc(GCHeap::CheckForCallocSizeOverflow(count
, elsize
), flags
);
// In debug/profiler builds the small-object allocator's Alloc takes the
// request size explicitly; in release builds it is implied by the allocator
// instance, so SIZEARG expands to nothing.
#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
    #define SIZEARG size,
#else
    #define SIZEARG
#endif
137 // See comments around GC::Alloc that explain why the guard and table lookup for the
138 // small-allocator cases are correct.
140 REALLY_INLINE
void *GC::AllocPtrZero(size_t size
)
142 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
143 if (size
<= kLargestAlloc
)
144 return GetUserPointer(containsPointersAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
));
146 return Alloc(size
, GC::kContainsPointers
|GC::kZero
);
149 REALLY_INLINE
void *GC::AllocPtrZeroFinalized(size_t size
)
151 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
152 if (size
<= kLargestAlloc
)
153 return GetUserPointer(containsPointersAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
|GC::kFinalize
));
155 return Alloc(size
, GC::kContainsPointers
|GC::kZero
|GC::kFinalize
);
158 REALLY_INLINE
void *GC::AllocRCObject(size_t size
)
160 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
161 if (size
<= kLargestAlloc
)
162 return GetUserPointer(containsPointersRCAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
|GC::kRCObject
|GC::kFinalize
));
164 return Alloc(size
, GC::kContainsPointers
|GC::kZero
|GC::kRCObject
|GC::kFinalize
);
167 REALLY_INLINE
void* GC::AllocDouble()
169 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER && !defined MMGC_MEMORY_PROFILER
170 return GetUserPointer(noPointersAllocs
[0]->Alloc(0));
176 // For AllocExtra the trick is that we can compute (size|extra) quickly without risk of overflow
177 // and compare it to half the maximum small-alloc size (rounded down to 8 bytes), and if the guard
178 // passes then we can definitely take the quick path. Most allocations are small.
180 // As 'extra' won't usually be known at compile time the fallback case won't usually compile away,
181 // though, so we risk bloating the code slightly here.
183 REALLY_INLINE
void *GC::AllocExtraPtrZero(size_t size
, size_t extra
)
185 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
186 if ((size
|extra
) <= (kLargestAlloc
/2 & ~7)) {
188 return GetUserPointer(containsPointersAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
));
191 return OutOfLineAllocExtra(size
, extra
, GC::kContainsPointers
|GC::kZero
);
194 REALLY_INLINE
void *GC::AllocExtraPtrZeroFinalized(size_t size
, size_t extra
)
196 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
197 if ((size
|extra
) <= (kLargestAlloc
/2 & ~7)) {
199 return GetUserPointer(containsPointersAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
|GC::kFinalize
));
202 return OutOfLineAllocExtra(size
, extra
, GC::kContainsPointers
|GC::kZero
|GC::kFinalize
);
205 REALLY_INLINE
void *GC::AllocExtraRCObject(size_t size
, size_t extra
)
207 #if !defined _DEBUG && !defined AVMPLUS_SAMPLER
208 if ((size
|extra
) <= kLargestAlloc
/2) {
210 return GetUserPointer(containsPointersRCAllocs
[sizeClassIndex
[(size
-1)>>3]]->Alloc(SIZEARG
GC::kContainsPointers
|GC::kZero
|GC::kRCObject
|GC::kFinalize
));
213 return OutOfLineAllocExtra(size
, extra
, GC::kContainsPointers
|GC::kZero
|GC::kRCObject
|GC::kFinalize
);
218 // Implementations of operator delete call FreeNotNull directly.
219 REALLY_INLINE
void GC::Free(const void *item
)
226 REALLY_INLINE
void GC::FreeNotNull(const void *item
)
228 GCAssert(item
!= NULL
);
229 GCAssertMsg(onThread(), "GC called from a different thread or not associated with a thread, missing MMGC_GCENTER macro perhaps.");
230 GetBlockHeader(item
)->alloc
->Free(GetRealPointer(item
));
233 REALLY_INLINE
void GC::AddRCRootSegment(RCRootSegment
*segment
)
235 segment
->next
= rcRootSegments
;
237 rcRootSegments
->prev
= segment
;
238 rcRootSegments
= segment
;
241 REALLY_INLINE
void GC::RemoveRCRootSegment(RCRootSegment
*segment
)
243 if (segment
->next
!= NULL
)
244 segment
->next
->prev
= segment
->prev
;
245 if (segment
->prev
!= NULL
)
246 segment
->prev
->next
= segment
->next
;
248 rcRootSegments
= segment
->next
;
252 REALLY_INLINE
size_t GC::Size(const void *ptr
)
254 return GetBlockHeader(ptr
)->size
- DebugSize();
258 REALLY_INLINE GC
* GC::GetGC(const void *item
)
260 GC
*gc
= GetBlockHeader(item
)->gc
;
261 // we don't want to rely on the gcheap thread local but it makes a good
262 // sanity check against misuse of this function
263 // GCAssert(gc == GCHeap::GetGCHeap()->GetActiveGC());
269 REALLY_INLINE gcbits_t
& GC::GetGCBits(const void *realptr
)
271 GCBlockHeader
* block
= GetBlockHeader(realptr
);
272 return block
->bits
[(uintptr_t(realptr
)& 0xFFF) >> block
->bitsShift
];
276 REALLY_INLINE gcbits_t
& GC::GetGCBits(const void *realptr
)
278 if (GCLargeAlloc::IsLargeBlock(realptr
))
279 return GCLargeAlloc::GetGCBits(realptr
);
281 return GCAlloc::GetGCBits(realptr
);
286 REALLY_INLINE
bool GC::ContainsPointers(const void *userptr
)
288 const void *realptr
= GetRealPointer(userptr
);
289 GCAssert(GetGC(userptr
)->IsPointerToGCObject(realptr
));
290 return GetBlockHeader(realptr
)->containsPointers
!= 0;
294 REALLY_INLINE
bool GC::IsRCObject(const void *userptr
)
296 const void *realptr
= GetRealPointer(userptr
);
297 GCAssert(GetGC(userptr
)->IsPointerToGCObject(realptr
));
298 return GetBlockHeader(realptr
)->rcobject
!= 0;
302 REALLY_INLINE
int GC::GetMark(const void *userptr
)
304 const void *realptr
= GetRealPointer(userptr
);
305 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
306 return GetGCBits(realptr
) & kMark
;
310 REALLY_INLINE
int GC::SetMark(const void *userptr
)
312 const void *realptr
= GetRealPointer(userptr
);
313 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
314 gcbits_t
& bits
= GetGCBits(realptr
);
315 int set
= bits
& kMark
;
321 REALLY_INLINE
int GC::GetQueued(const void *userptr
)
323 const void *realptr
= GetRealPointer(userptr
);
324 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
325 return GetGCBits(realptr
) & kQueued
;
328 REALLY_INLINE
void GC::ClearQueued(const void *userptr
)
330 const void *realptr
= GetRealPointer(userptr
);
331 GCAssert(IsPointerToGCObject(realptr
));
332 GetGCBits(realptr
) &= ~kQueued
;
336 REALLY_INLINE
void GC::ClearFinalized(const void *userptr
)
338 const void *realptr
= GetRealPointer(userptr
);
339 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
340 GetGCBits(realptr
) &= ~kFinalizable
;
344 REALLY_INLINE
void GC::SetFinalize(const void *userptr
)
346 const void *realptr
= GetRealPointer(userptr
);
347 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
348 GetGCBits(realptr
) |= kFinalizable
;
352 REALLY_INLINE
int GC::IsFinalized(const void *userptr
)
354 const void *realptr
= GetRealPointer(userptr
);
355 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
356 return GetGCBits(realptr
) & kFinalizable
;
360 REALLY_INLINE
int GC::HasWeakRef(const void *userptr
)
362 const void *realptr
= GetRealPointer(userptr
);
363 GCAssert(GetGC(realptr
)->IsPointerToGCObject(realptr
));
364 return GetGCBits(realptr
) & kHasWeakRef
;
367 REALLY_INLINE GCHeap
*GC::GetGCHeap() const
372 REALLY_INLINE
void GC::ReapZCT(bool scanStack
)
377 REALLY_INLINE
bool GC::Reaping()
379 return zct
.IsReaping();
382 REALLY_INLINE
bool GC::IncrementalMarking()
387 REALLY_INLINE
bool GC::Collecting()
392 REALLY_INLINE
bool GC::Presweeping()
397 REALLY_INLINE
void *GC::FindBeginning(const void *gcItem
)
399 return FindBeginningGuarded(gcItem
);
402 REALLY_INLINE
void *GC::FindBeginningFast(const void *gcItem
)
404 PageMap::PageType bits
= GetPageMapValue((uintptr_t)gcItem
);
405 if (bits
== PageMap::kGCAllocPage
)
406 return GetUserPointer(GCAlloc::FindBeginning(gcItem
));
407 while (bits
== PageMap::kGCLargeAllocPageRest
)
409 gcItem
= (void*) ((uintptr_t)gcItem
- GCHeap::kBlockSize
);
410 bits
= GetPageMapValue((uintptr_t)gcItem
);
412 return GetUserPointer(GCLargeAlloc::FindBeginning(gcItem
));
415 REALLY_INLINE
bool GC::IsPointerToGCPage(const void *item
)
417 return GetPageMapValueGuarded((uintptr_t)item
) != 0;
420 REALLY_INLINE
bool GC::IsPointerToGCObject(const void *realPtr
)
422 return GetRealPointer(FindBeginningGuarded(realPtr
, true)) == realPtr
;
426 REALLY_INLINE
double GC::duration(uint64_t start
)
428 return (double(VMPI_getPerformanceCounter() - start
) * 1000) / VMPI_getPerformanceFrequency();
432 REALLY_INLINE
uint64_t GC::ticksToMicros(uint64_t ticks
)
434 return (ticks
*1000000)/VMPI_getPerformanceFrequency();
438 REALLY_INLINE
uint64_t GC::ticksToMillis(uint64_t ticks
)
440 return (ticks
*1000)/VMPI_getPerformanceFrequency();
443 REALLY_INLINE
uint64_t GC::bytesMarked()
445 return policy
.bytesMarked();
448 REALLY_INLINE
uint64_t GC::markTicks()
450 return policy
.timeStartIncrementalMark
+ policy
.timeIncrementalMark
;
453 REALLY_INLINE
uint32_t GC::markIncrements()
455 return (uint32_t)policy
.countIncrementalMark
;
458 REALLY_INLINE
bool GC::Destroying()
463 REALLY_INLINE
uintptr_t GC::GetStackTop() const
465 // temporary crutch until we're moved over to the MMGC_GCENTER system
466 if(stackEnter
== NULL
)
467 return VMPI_getThreadStackBase();
468 return GetStackEnter();
471 REALLY_INLINE
uintptr_t GC::GetStackEnter() const
473 return (uintptr_t)stackEnter
;
476 REALLY_INLINE GCAutoEnter
*GC::GetAutoEnter()
481 REALLY_INLINE
bool GC::onThread()
483 return VMPI_currentThread() == m_gcThread
;
486 REALLY_INLINE
void GC::FreeBits(uint32_t *bits
, int sizeClass
)
489 for(int i
=0, n
=noPointersAllocs
[sizeClass
]->m_numBitmapBytes
; i
<n
;i
++)
490 GCAssert(((uint8_t*)bits
)[i
] == 0);
492 *(uint32_t**)bits
= m_bitsFreelists
[sizeClass
];
493 m_bitsFreelists
[sizeClass
] = bits
;
496 REALLY_INLINE
bool GC::IsMarkedThenMakeQueued(const void* userptr
)
498 const void* realptr
= GetRealPointer(userptr
);
499 gcbits_t
& bits
= GetGCBits(realptr
);
501 bits
^= (kMark
|kQueued
);
507 REALLY_INLINE
bool GC::IsQueued(const void* userptr
)
509 const void* realptr
= GetRealPointer(userptr
);
510 return (GetGCBits(realptr
) & kQueued
) != 0;
513 REALLY_INLINE
PageMap::PageType
GC::GetPageMapValue(uintptr_t addr
) const
515 GCAssert(pageMap
.AddrIsMappable(addr
));
516 return pageMap
.AddrToVal(addr
);
519 REALLY_INLINE
PageMap::PageType
GC::GetPageMapValueGuarded(uintptr_t addr
)
521 if (pageMap
.AddrIsMappable(addr
))
522 return GetPageMapValue(addr
);
523 MMGC_STATIC_ASSERT(PageMap::kNonGC
== 0);
524 return PageMap::kNonGC
;
527 REALLY_INLINE
void GC::AddToSmallEmptyBlockList(GCAlloc::GCBlock
*b
)
529 b
->next
= smallEmptyPageList
;
530 smallEmptyPageList
= b
;
533 REALLY_INLINE
void GC::AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock
*lb
)
535 lb
->next
= largeEmptyPageList
;
536 largeEmptyPageList
= lb
;
539 #ifdef MMGC_REFCOUNT_PROFILING
540 REALLY_INLINE
void GC::AddToZCT(RCObject
*obj
, bool initial
=false)
542 zct
.Add(obj
, initial
);
545 REALLY_INLINE
void GC::RemoveFromZCT(RCObject
*obj
, bool final
=false)
547 zct
.Remove(obj
, final
);
550 REALLY_INLINE
void GC::AddToZCT(RCObject
*obj
)
555 REALLY_INLINE
void GC::RemoveFromZCT(RCObject
*obj
)
561 REALLY_INLINE
void GC::PreventImmediateReaping(RCObject
* obj
)
568 REALLY_INLINE
const void *GC::Pointer(const void *p
)
570 return (const void*)(((uintptr_t)p
)&~7);
573 REALLY_INLINE
size_t GC::GetNumBlocks()
575 return policy
.blocksOwnedByGC();
578 REALLY_INLINE
void* GC::allocaTop()
583 REALLY_INLINE
void GC::allocaPopTo(void* top
)
585 if (top
>= top_segment
->start
&& top
<= top_segment
->limit
)
588 allocaPopToSlow(top
);
592 REALLY_INLINE
void* GC::GetAttachedSampler()
597 REALLY_INLINE
void GC::SetAttachedSampler(void *sampler
)
603 REALLY_INLINE
GC::AllocaAutoPtr::AllocaAutoPtr()
609 REALLY_INLINE
GC::AllocaAutoPtr::~AllocaAutoPtr()
612 gc
->allocaPopTo(unwindPtr
);
615 REALLY_INLINE
Cleaner::Cleaner()
619 REALLY_INLINE
Cleaner::~Cleaner()
622 VMPI_memset(v
, 0, size
);
628 REALLY_INLINE Cleaner
& Cleaner::operator=(const Cleaner
& /*rhs*/)
633 REALLY_INLINE
void Cleaner::set(const void * _v
, size_t _size
)
639 REALLY_INLINE
GCWorkItem::GCWorkItem(const void *p
, uint32_t s
, GCWorkItemType workItemType
)
641 #ifdef MMGC_INTERIOR_PTRS
642 , _size(s
| uint32_t(kHasInteriorPtrs
) | uint32_t(workItemType
))
644 , _size(s
| uint32_t(workItemType
))
647 GCAssert((s
& 3) == 0);
650 GCAssert(GC::GetGC(p
)->FindBeginningGuarded(p
) == p
);
655 REALLY_INLINE
GCWorkItem::GCWorkItem(const void *p
, GCSentinelItemType type
)
656 : iptr(uintptr_t(p
) | type
),
660 REALLY_INLINE
void GCWorkItem::Clear()
663 // use sentinel so we're skipped off the fast path in MarkItem
664 _size
= kSentinelSize
;
667 REALLY_INLINE
bool GCAutoEnter::Entered()
673 #endif /* __GC_inlines__ */